Dataset columns (name: type, value range):
  repo: string, lengths 1-191
  file: string, lengths 23-351
  code: string, lengths 0-5.32M
  file_length: int64, 0-5.32M
  avg_line_length: float64, 0-2.9k
  max_line_length: int64, 0-288k
  extension_type: string, 1 distinct value
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Utility methods for getting the time and computing intervals. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public final class Time { /** * number of nano seconds in 1 millisecond */ private static final long NANOSECONDS_PER_MILLISECOND = 1000000; /** * Current system time. Do not use this to calculate a duration or interval * to sleep, because it will be broken by settimeofday. Instead, use * monotonicNow. * @return current time in msec. */ public static long now() { return System.currentTimeMillis(); } /** * Current time from some arbitrary time base in the past, counting in * milliseconds, and not affected by settimeofday or similar system clock * changes. This is appropriate to use when computing how much longer to * wait for an interval to expire. * This function can return a negative value and it must be handled correctly * by callers. See the documentation of System#nanoTime for caveats. * @return a monotonic clock that counts in milliseconds. */ public static long monotonicNow() { return System.nanoTime() / NANOSECONDS_PER_MILLISECOND; } }
2,152
36.12069
79
java
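A minimal usage sketch for the Time utility in the row above, showing why monotonicNow() rather than now() should be used when measuring an interval. The class name TimeExample and the Thread.sleep placeholder are illustrative only, not part of Hadoop.

```java
import org.apache.hadoop.util.Time;

public class TimeExample {
  public static void main(String[] args) throws InterruptedException {
    // Use monotonicNow() for durations; now() is wall-clock time and can
    // jump backwards or forwards if the system clock is adjusted.
    long start = Time.monotonicNow();
    Thread.sleep(100);                       // placeholder for real work
    long elapsedMs = Time.monotonicNow() - start;
    System.out.println("Elapsed: " + elapsedMs + " ms");
  }
}
```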
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/XMLUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import javax.xml.transform.*; import javax.xml.transform.stream.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import java.io.*; /** * General xml utilities. * */ @InterfaceAudience.Private @InterfaceStability.Unstable public class XMLUtils { /** * Transform input xml given a stylesheet. * * @param styleSheet the style-sheet * @param xml input xml data * @param out output * @throws TransformerConfigurationException * @throws TransformerException */ public static void transform( InputStream styleSheet, InputStream xml, Writer out ) throws TransformerConfigurationException, TransformerException { // Instantiate a TransformerFactory TransformerFactory tFactory = TransformerFactory.newInstance(); // Use the TransformerFactory to process the // stylesheet and generate a Transformer Transformer transformer = tFactory.newTransformer( new StreamSource(styleSheet) ); // Use the Transformer to transform an XML Source // and send the output to a Result object. transformer.transform(new StreamSource(xml), new StreamResult(out)); } }
2,205
34.015873
82
java
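A small sketch of calling XMLUtils.transform from the row above. The file names style.xsl and input.xml are hypothetical; any InputStream pair and Writer would do.

```java
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.StringWriter;
import org.apache.hadoop.util.XMLUtils;

public class XmlTransformExample {
  public static void main(String[] args) throws Exception {
    // "style.xsl" and "input.xml" are hypothetical local files.
    try (InputStream xsl = new FileInputStream("style.xsl");
         InputStream xml = new FileInputStream("input.xml")) {
      StringWriter out = new StringWriter();
      XMLUtils.transform(xsl, xml, out);   // apply the stylesheet to the XML
      System.out.println(out);
    }
  }
}
```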
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/QuickSort.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * An implementation of the core algorithm of QuickSort. */ @InterfaceAudience.Private @InterfaceStability.Unstable public final class QuickSort implements IndexedSorter { private static final IndexedSorter alt = new HeapSort(); public QuickSort() { } private static void fix(IndexedSortable s, int p, int r) { if (s.compare(p, r) > 0) { s.swap(p, r); } } /** * Deepest recursion before giving up and doing a heapsort. * Returns 2 * ceil(log(n)). */ protected static int getMaxDepth(int x) { if (x <= 0) throw new IllegalArgumentException("Undefined for " + x); return (32 - Integer.numberOfLeadingZeros(x - 1)) << 2; } /** * Sort the given range of items using quick sort. * {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth}, * then switch to {@link HeapSort}. */ @Override public void sort(IndexedSortable s, int p, int r) { sort(s, p, r, null); } @Override public void sort(final IndexedSortable s, int p, int r, final Progressable rep) { sortInternal(s, p, r, rep, getMaxDepth(r - p)); } private static void sortInternal(final IndexedSortable s, int p, int r, final Progressable rep, int depth) { if (null != rep) { rep.progress(); } while (true) { if (r-p < 13) { for (int i = p; i < r; ++i) { for (int j = i; j > p && s.compare(j-1, j) > 0; --j) { s.swap(j, j-1); } } return; } if (--depth < 0) { // give up alt.sort(s, p, r, rep); return; } // select, move pivot into first position fix(s, (p+r) >>> 1, p); fix(s, (p+r) >>> 1, r - 1); fix(s, p, r-1); // Divide int i = p; int j = r; int ll = p; int rr = r; int cr; while(true) { while (++i < j) { if ((cr = s.compare(i, p)) > 0) break; if (0 == cr && ++ll != i) { s.swap(ll, i); } } while (--j > i) { if ((cr = s.compare(p, j)) > 0) break; if (0 == cr && --rr != j) { s.swap(rr, j); } } if (i < j) s.swap(i, j); else break; } j = i; // swap pivot- and all eq values- into position while (ll >= p) { s.swap(ll--, --i); } while (rr < r) { s.swap(rr++, j++); } // Conquer // Recurse on smaller interval first to keep stack shallow assert i != j; if (i - p < r - j) { sortInternal(s, p, i, rep, depth); p = j; } else { sortInternal(s, j, r, rep, depth); r = i; } } } }
3,554
25.139706
75
java
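QuickSort above sorts through the callback-based IndexedSortable interface rather than a concrete array type. A minimal sketch, assuming an int array adapted in place; QuickSortExample and the sample data are illustrative.

```java
import org.apache.hadoop.util.IndexedSortable;
import org.apache.hadoop.util.QuickSort;

public class QuickSortExample {
  public static void main(String[] args) {
    final int[] data = {5, 3, 9, 1, 4};

    // Adapt the array to the callback-based IndexedSortable interface.
    IndexedSortable sortable = new IndexedSortable() {
      @Override
      public int compare(int i, int j) {
        return Integer.compare(data[i], data[j]);
      }
      @Override
      public void swap(int i, int j) {
        int tmp = data[i]; data[i] = data[j]; data[j] = tmp;
      }
    };

    // Sort the half-open index range [0, data.length).
    new QuickSort().sort(sortable, 0, data.length);
    System.out.println(java.util.Arrays.toString(data));
  }
}
```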
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Tool.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; /** * A tool interface that supports handling of generic command-line options. * * <p><code>Tool</code>, is the standard for any Map-Reduce tool/application. * The tool/application should delegate the handling of * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options"> * standard command-line options</a> to {@link ToolRunner#run(Tool, String[])} * and only handle its custom arguments.</p> * * <p>Here is how a typical <code>Tool</code> is implemented:</p> * <p><blockquote><pre> * public class MyApp extends Configured implements Tool { * * public int run(String[] args) throws Exception { * // <code>Configuration</code> processed by <code>ToolRunner</code> * Configuration conf = getConf(); * * // Create a JobConf using the processed <code>conf</code> * JobConf job = new JobConf(conf, MyApp.class); * * // Process custom command-line options * Path in = new Path(args[1]); * Path out = new Path(args[2]); * * // Specify various job-specific parameters * job.setJobName("my-app"); * job.setInputPath(in); * job.setOutputPath(out); * job.setMapperClass(MyMapper.class); * job.setReducerClass(MyReducer.class); * * // Submit the job, then poll for progress until the job is complete * RunningJob runningJob = JobClient.runJob(job); * if (runningJob.isSuccessful()) { * return 0; * } else { * return 1; * } * } * * public static void main(String[] args) throws Exception { * // Let <code>ToolRunner</code> handle generic command-line options * int res = ToolRunner.run(new Configuration(), new MyApp(), args); * * System.exit(res); * } * } * </pre></blockquote></p> * * @see GenericOptionsParser * @see ToolRunner */ @InterfaceAudience.Public @InterfaceStability.Stable public interface Tool extends Configurable { /** * Execute the command with the given arguments. * * @param args command specific arguments. * @return exit code. * @throws Exception */ int run(String [] args) throws Exception; }
3,301
36.101124
97
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Options.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.apache.hadoop.util; import java.io.IOException; import java.util.Arrays; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; /** * This class allows generic access to variable length type-safe parameter * lists. */ public class Options { public static abstract class StringOption { private final String value; protected StringOption(String value) { this.value = value; } public String getValue() { return value; } } public static abstract class ClassOption { private final Class<?> value; protected ClassOption(Class<?> value) { this.value = value; } public Class<?> getValue() { return value; } } public static abstract class BooleanOption { private final boolean value; protected BooleanOption(boolean value) { this.value = value; } public boolean getValue() { return value; } } public static abstract class IntegerOption { private final int value; protected IntegerOption(int value) { this.value = value; } public int getValue() { return value; } } public static abstract class LongOption { private final long value; protected LongOption(long value) { this.value = value; } public long getValue() { return value; } } public static abstract class PathOption { private final Path value; protected PathOption(Path value) { this.value = value; } public Path getValue() { return value; } } public static abstract class FSDataInputStreamOption { private final FSDataInputStream value; protected FSDataInputStreamOption(FSDataInputStream value) { this.value = value; } public FSDataInputStream getValue() { return value; } } public static abstract class FSDataOutputStreamOption { private final FSDataOutputStream value; protected FSDataOutputStreamOption(FSDataOutputStream value) { this.value = value; } public FSDataOutputStream getValue() { return value; } } public static abstract class ProgressableOption { private final Progressable value; protected ProgressableOption(Progressable value) { this.value = value; } public Progressable getValue() { return value; } } /** * Find the first option of the required class. 
* @param <T> the static class to find * @param <base> the parent class of the array * @param cls the dynamic class to find * @param opts the list of options to look through * @return the first option that matches * @throws IOException */ @SuppressWarnings("unchecked") public static <base, T extends base> T getOption(Class<T> cls, base [] opts ) throws IOException { for(base o: opts) { if (o.getClass() == cls) { return (T) o; } } return null; } /** * Prepend some new options to the old options * @param <T> the type of options * @param oldOpts the old options * @param newOpts the new options * @return a new array of options */ public static <T> T[] prependOptions(T[] oldOpts, T... newOpts) { // copy the new options to the front of the array T[] result = Arrays.copyOf(newOpts, newOpts.length+oldOpts.length); // now copy the old options System.arraycopy(oldOpts, 0, result, newOpts.length, oldOpts.length); return result; } }
4,340
26.649682
80
java
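The option classes in Options above are abstract and meant to be subclassed per parameter. A sketch of defining one concrete option and retrieving it with getOption; BufferSizeOption is a hypothetical name introduced only for this example.

```java
import java.io.IOException;
import org.apache.hadoop.util.Options;

public class OptionsExample {
  // Hypothetical concrete option: a buffer size carried as an IntegerOption.
  static class BufferSizeOption extends Options.IntegerOption {
    BufferSizeOption(int value) { super(value); }
  }

  public static void main(String[] args) throws IOException {
    Object[] opts = { new BufferSizeOption(4096) };
    // Find the first option of the requested concrete class, or null.
    BufferSizeOption buf = Options.getOption(BufferSizeOption.class, opts);
    System.out.println(buf == null ? "not set" : "buffer=" + buf.getValue());
  }
}
```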
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PriorityQueue.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** A PriorityQueue maintains a partial ordering of its elements such that the least element can always be found in constant time. Put()'s and pop()'s require log(size) time. */ @InterfaceAudience.Private @InterfaceStability.Unstable public abstract class PriorityQueue<T> { private T[] heap; private int size; private int maxSize; /** Determines the ordering of objects in this priority queue. Subclasses must define this one method. */ protected abstract boolean lessThan(Object a, Object b); /** Subclass constructors must call this. */ @SuppressWarnings("unchecked") protected final void initialize(int maxSize) { size = 0; int heapSize = maxSize + 1; heap = (T[]) new Object[heapSize]; this.maxSize = maxSize; } /** * Adds an Object to a PriorityQueue in log(size) time. * If one tries to add more objects than maxSize from initialize * a RuntimeException (ArrayIndexOutOfBound) is thrown. */ public final void put(T element) { size++; heap[size] = element; upHeap(); } /** * Adds element to the PriorityQueue in log(size) time if either * the PriorityQueue is not full, or not lessThan(element, top()). * @param element * @return true if element is added, false otherwise. */ public boolean insert(T element){ if (size < maxSize){ put(element); return true; } else if (size > 0 && !lessThan(element, top())){ heap[1] = element; adjustTop(); return true; } else return false; } /** Returns the least element of the PriorityQueue in constant time. */ public final T top() { if (size > 0) return heap[1]; else return null; } /** Removes and returns the least element of the PriorityQueue in log(size) time. */ public final T pop() { if (size > 0) { T result = heap[1]; // save first value heap[1] = heap[size]; // move last to first heap[size] = null; // permit GC of objects size--; downHeap(); // adjust heap return result; } else return null; } /** Should be called when the Object at top changes values. Still log(n) * worst case, but it's at least twice as fast to <pre> * { pq.top().change(); pq.adjustTop(); } * </pre> instead of <pre> * { o = pq.pop(); o.change(); pq.push(o); } * </pre> */ public final void adjustTop() { downHeap(); } /** Returns the number of elements currently stored in the PriorityQueue. */ public final int size() { return size; } /** Removes all entries from the PriorityQueue. 
*/ public final void clear() { for (int i = 0; i <= size; i++) heap[i] = null; size = 0; } private final void upHeap() { int i = size; T node = heap[i]; // save bottom node int j = i >>> 1; while (j > 0 && lessThan(node, heap[j])) { heap[i] = heap[j]; // shift parents down i = j; j = j >>> 1; } heap[i] = node; // install saved node } private final void downHeap() { int i = 1; T node = heap[i]; // save top node int j = i << 1; // find smaller child int k = j + 1; if (k <= size && lessThan(heap[k], heap[j])) { j = k; } while (j <= size && lessThan(heap[j], node)) { heap[i] = heap[j]; // shift up child i = j; j = i << 1; k = j + 1; if (k <= size && lessThan(heap[k], heap[j])) { j = k; } } heap[i] = node; // install saved node } }
4,500
27.852564
78
java
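PriorityQueue above is abstract: a subclass supplies the ordering via lessThan and calls initialize from its constructor. A minimal sketch over Integer values; IntQueue is an illustrative name.

```java
import org.apache.hadoop.util.PriorityQueue;

public class PriorityQueueExample {
  // A concrete queue over Integer: the smallest value is always at the top.
  static class IntQueue extends PriorityQueue<Integer> {
    IntQueue(int maxSize) { initialize(maxSize); }
    @Override
    protected boolean lessThan(Object a, Object b) {
      return (Integer) a < (Integer) b;
    }
  }

  public static void main(String[] args) {
    IntQueue q = new IntQueue(8);
    q.put(5);
    q.put(1);
    q.put(3);
    while (q.size() > 0) {
      System.out.println(q.pop());   // prints 1, 3, 5
    }
  }
}
```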
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.InputStreamReader; import java.io.IOException; import java.math.BigInteger; import java.nio.charset.Charset; import java.util.HashMap; import java.util.HashSet; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Shell.ShellCommandExecutor; /** * Plugin to calculate resource information on Linux systems. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class SysInfoLinux extends SysInfo { private static final Log LOG = LogFactory.getLog(SysInfoLinux.class); /** * proc's meminfo virtual file has keys-values in the format * "key:[ \t]*value[ \t]kB". */ private static final String PROCFS_MEMFILE = "/proc/meminfo"; private static final Pattern PROCFS_MEMFILE_FORMAT = Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB"); // We need the values for the following keys in meminfo private static final String MEMTOTAL_STRING = "MemTotal"; private static final String SWAPTOTAL_STRING = "SwapTotal"; private static final String MEMFREE_STRING = "MemFree"; private static final String SWAPFREE_STRING = "SwapFree"; private static final String INACTIVE_STRING = "Inactive"; /** * Patterns for parsing /proc/cpuinfo. */ private static final String PROCFS_CPUINFO = "/proc/cpuinfo"; private static final Pattern PROCESSOR_FORMAT = Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)"); private static final Pattern FREQUENCY_FORMAT = Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)"); private static final Pattern PHYSICAL_ID_FORMAT = Pattern.compile("^physical id[ \t]*:[ \t]*([0-9]*)"); private static final Pattern CORE_ID_FORMAT = Pattern.compile("^core id[ \t]*:[ \t]*([0-9]*)"); /** * Pattern for parsing /proc/stat. */ private static final String PROCFS_STAT = "/proc/stat"; private static final Pattern CPU_TIME_FORMAT = Pattern.compile("^cpu[ \t]*([0-9]*)" + "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*"); private CpuTimeTracker cpuTimeTracker; /** * Pattern for parsing /proc/net/dev. 
*/ private static final String PROCFS_NETFILE = "/proc/net/dev"; private static final Pattern PROCFS_NETFILE_FORMAT = Pattern .compile("^[ \t]*([a-zA-Z]+[0-9]*):" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+).*"); /** * Pattern for parsing /proc/diskstats. */ private static final String PROCFS_DISKSFILE = "/proc/diskstats"; private static final Pattern PROCFS_DISKSFILE_FORMAT = Pattern.compile("^[ \t]*([0-9]+)[ \t]*([0-9 ]+)" + "(?!([a-zA-Z]+[0-9]+))([a-zA-Z]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" + "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)"); /** * Pattern for parsing /sys/block/partition_name/queue/hw_sector_size. */ private static final Pattern PROCFS_DISKSECTORFILE_FORMAT = Pattern.compile("^([0-9]+)"); private String procfsMemFile; private String procfsCpuFile; private String procfsStatFile; private String procfsNetFile; private String procfsDisksFile; private long jiffyLengthInMillis; private long ramSize = 0; private long swapSize = 0; private long ramSizeFree = 0; // free ram space on the machine (kB) private long swapSizeFree = 0; // free swap space on the machine (kB) private long inactiveSize = 0; // inactive cache memory (kB) /* number of logical processors on the system. */ private int numProcessors = 0; /* number of physical cores on the system. */ private int numCores = 0; private long cpuFrequency = 0L; // CPU frequency on the system (kHz) private long numNetBytesRead = 0L; // aggregated bytes read from network private long numNetBytesWritten = 0L; // aggregated bytes written to network private long numDisksBytesRead = 0L; // aggregated bytes read from disks private long numDisksBytesWritten = 0L; // aggregated bytes written to disks private boolean readMemInfoFile = false; private boolean readCpuInfoFile = false; /* map for every disk its sector size */ private HashMap<String, Integer> perDiskSectorSize = null; public static final long PAGE_SIZE = getConf("PAGESIZE"); public static final long JIFFY_LENGTH_IN_MILLIS = Math.max(Math.round(1000D / getConf("CLK_TCK")), -1); private static long getConf(String attr) { if(Shell.LINUX) { try { ShellCommandExecutor shellExecutorClk = new ShellCommandExecutor( new String[] {"getconf", attr }); shellExecutorClk.execute(); return Long.parseLong(shellExecutorClk.getOutput().replace("\n", "")); } catch (IOException|NumberFormatException e) { return -1; } } return -1; } /** * Get current time. * @return Unix time stamp in millisecond */ long getCurrentTime() { return System.currentTimeMillis(); } public SysInfoLinux() { this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT, PROCFS_NETFILE, PROCFS_DISKSFILE, JIFFY_LENGTH_IN_MILLIS); } /** * Constructor which allows assigning the /proc/ directories. This will be * used only in unit tests. 
* @param procfsMemFile fake file for /proc/meminfo * @param procfsCpuFile fake file for /proc/cpuinfo * @param procfsStatFile fake file for /proc/stat * @param procfsNetFile fake file for /proc/net/dev * @param procfsDisksFile fake file for /proc/diskstats * @param jiffyLengthInMillis fake jiffy length value */ @VisibleForTesting public SysInfoLinux(String procfsMemFile, String procfsCpuFile, String procfsStatFile, String procfsNetFile, String procfsDisksFile, long jiffyLengthInMillis) { this.procfsMemFile = procfsMemFile; this.procfsCpuFile = procfsCpuFile; this.procfsStatFile = procfsStatFile; this.procfsNetFile = procfsNetFile; this.procfsDisksFile = procfsDisksFile; this.jiffyLengthInMillis = jiffyLengthInMillis; this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis); this.perDiskSectorSize = new HashMap<String, Integer>(); } /** * Read /proc/meminfo, parse and compute memory information only once. */ private void readProcMemInfoFile() { readProcMemInfoFile(false); } /** * Read /proc/meminfo, parse and compute memory information. * @param readAgain if false, read only on the first time */ private void readProcMemInfoFile(boolean readAgain) { if (readMemInfoFile && !readAgain) { return; } // Read "/proc/memInfo" file BufferedReader in; InputStreamReader fReader; try { fReader = new InputStreamReader( new FileInputStream(procfsMemFile), Charset.forName("UTF-8")); in = new BufferedReader(fReader); } catch (FileNotFoundException f) { // shouldn't happen.... LOG.warn("Couldn't read " + procfsMemFile + "; can't determine memory settings"); return; } Matcher mat; try { String str = in.readLine(); while (str != null) { mat = PROCFS_MEMFILE_FORMAT.matcher(str); if (mat.find()) { if (mat.group(1).equals(MEMTOTAL_STRING)) { ramSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(SWAPTOTAL_STRING)) { swapSize = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(MEMFREE_STRING)) { ramSizeFree = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(SWAPFREE_STRING)) { swapSizeFree = Long.parseLong(mat.group(2)); } else if (mat.group(1).equals(INACTIVE_STRING)) { inactiveSize = Long.parseLong(mat.group(2)); } } str = in.readLine(); } } catch (IOException io) { LOG.warn("Error reading the stream " + io); } finally { // Close the streams try { fReader.close(); try { in.close(); } catch (IOException i) { LOG.warn("Error closing the stream " + in); } } catch (IOException i) { LOG.warn("Error closing the stream " + fReader); } } readMemInfoFile = true; } /** * Read /proc/cpuinfo, parse and calculate CPU information. */ private void readProcCpuInfoFile() { // This directory needs to be read only once if (readCpuInfoFile) { return; } HashSet<String> coreIdSet = new HashSet<>(); // Read "/proc/cpuinfo" file BufferedReader in; InputStreamReader fReader; try { fReader = new InputStreamReader( new FileInputStream(procfsCpuFile), Charset.forName("UTF-8")); in = new BufferedReader(fReader); } catch (FileNotFoundException f) { // shouldn't happen.... 
LOG.warn("Couldn't read " + procfsCpuFile + "; can't determine cpu info"); return; } Matcher mat; try { numProcessors = 0; numCores = 1; String currentPhysicalId = ""; String str = in.readLine(); while (str != null) { mat = PROCESSOR_FORMAT.matcher(str); if (mat.find()) { numProcessors++; } mat = FREQUENCY_FORMAT.matcher(str); if (mat.find()) { cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz } mat = PHYSICAL_ID_FORMAT.matcher(str); if (mat.find()) { currentPhysicalId = str; } mat = CORE_ID_FORMAT.matcher(str); if (mat.find()) { coreIdSet.add(currentPhysicalId + " " + str); numCores = coreIdSet.size(); } str = in.readLine(); } } catch (IOException io) { LOG.warn("Error reading the stream " + io); } finally { // Close the streams try { fReader.close(); try { in.close(); } catch (IOException i) { LOG.warn("Error closing the stream " + in); } } catch (IOException i) { LOG.warn("Error closing the stream " + fReader); } } readCpuInfoFile = true; } /** * Read /proc/stat file, parse and calculate cumulative CPU. */ private void readProcStatFile() { // Read "/proc/stat" file BufferedReader in; InputStreamReader fReader; try { fReader = new InputStreamReader( new FileInputStream(procfsStatFile), Charset.forName("UTF-8")); in = new BufferedReader(fReader); } catch (FileNotFoundException f) { // shouldn't happen.... return; } Matcher mat; try { String str = in.readLine(); while (str != null) { mat = CPU_TIME_FORMAT.matcher(str); if (mat.find()) { long uTime = Long.parseLong(mat.group(1)); long nTime = Long.parseLong(mat.group(2)); long sTime = Long.parseLong(mat.group(3)); cpuTimeTracker.updateElapsedJiffies( BigInteger.valueOf(uTime + nTime + sTime), getCurrentTime()); break; } str = in.readLine(); } } catch (IOException io) { LOG.warn("Error reading the stream " + io); } finally { // Close the streams try { fReader.close(); try { in.close(); } catch (IOException i) { LOG.warn("Error closing the stream " + in); } } catch (IOException i) { LOG.warn("Error closing the stream " + fReader); } } } /** * Read /proc/net/dev file, parse and calculate amount * of bytes read and written through the network. */ private void readProcNetInfoFile() { numNetBytesRead = 0L; numNetBytesWritten = 0L; // Read "/proc/net/dev" file BufferedReader in; InputStreamReader fReader; try { fReader = new InputStreamReader( new FileInputStream(procfsNetFile), Charset.forName("UTF-8")); in = new BufferedReader(fReader); } catch (FileNotFoundException f) { return; } Matcher mat; try { String str = in.readLine(); while (str != null) { mat = PROCFS_NETFILE_FORMAT.matcher(str); if (mat.find()) { assert mat.groupCount() >= 16; // ignore loopback interfaces if (mat.group(1).equals("lo")) { str = in.readLine(); continue; } numNetBytesRead += Long.parseLong(mat.group(2)); numNetBytesWritten += Long.parseLong(mat.group(10)); } str = in.readLine(); } } catch (IOException io) { LOG.warn("Error reading the stream " + io); } finally { // Close the streams try { fReader.close(); try { in.close(); } catch (IOException i) { LOG.warn("Error closing the stream " + in); } } catch (IOException i) { LOG.warn("Error closing the stream " + fReader); } } } /** * Read /proc/diskstats file, parse and calculate amount * of bytes read and written from/to disks. 
*/ private void readProcDisksInfoFile() { numDisksBytesRead = 0L; numDisksBytesWritten = 0L; // Read "/proc/diskstats" file BufferedReader in; try { in = new BufferedReader(new InputStreamReader( new FileInputStream(procfsDisksFile), Charset.forName("UTF-8"))); } catch (FileNotFoundException f) { return; } Matcher mat; try { String str = in.readLine(); while (str != null) { mat = PROCFS_DISKSFILE_FORMAT.matcher(str); if (mat.find()) { String diskName = mat.group(4); assert diskName != null; // ignore loop or ram partitions if (diskName.contains("loop") || diskName.contains("ram")) { str = in.readLine(); continue; } Integer sectorSize; synchronized (perDiskSectorSize) { sectorSize = perDiskSectorSize.get(diskName); if (null == sectorSize) { // retrieve sectorSize // if unavailable or error, assume 512 sectorSize = readDiskBlockInformation(diskName, 512); perDiskSectorSize.put(diskName, sectorSize); } } String sectorsRead = mat.group(7); String sectorsWritten = mat.group(11); if (null == sectorsRead || null == sectorsWritten) { return; } numDisksBytesRead += Long.parseLong(sectorsRead) * sectorSize; numDisksBytesWritten += Long.parseLong(sectorsWritten) * sectorSize; } str = in.readLine(); } } catch (IOException e) { LOG.warn("Error reading the stream " + procfsDisksFile, e); } finally { // Close the streams try { in.close(); } catch (IOException e) { LOG.warn("Error closing the stream " + procfsDisksFile, e); } } } /** * Read /sys/block/diskName/queue/hw_sector_size file, parse and calculate * sector size for a specific disk. * @return sector size of specified disk, or defSector */ int readDiskBlockInformation(String diskName, int defSector) { assert perDiskSectorSize != null && diskName != null; String procfsDiskSectorFile = "/sys/block/" + diskName + "/queue/hw_sector_size"; BufferedReader in; try { in = new BufferedReader(new InputStreamReader( new FileInputStream(procfsDiskSectorFile), Charset.forName("UTF-8"))); } catch (FileNotFoundException f) { return defSector; } Matcher mat; try { String str = in.readLine(); while (str != null) { mat = PROCFS_DISKSECTORFILE_FORMAT.matcher(str); if (mat.find()) { String secSize = mat.group(1); if (secSize != null) { return Integer.parseInt(secSize); } } str = in.readLine(); } return defSector; } catch (IOException|NumberFormatException e) { LOG.warn("Error reading the stream " + procfsDiskSectorFile, e); return defSector; } finally { // Close the streams try { in.close(); } catch (IOException e) { LOG.warn("Error closing the stream " + procfsDiskSectorFile, e); } } } /** {@inheritDoc} */ @Override public long getPhysicalMemorySize() { readProcMemInfoFile(); return ramSize * 1024; } /** {@inheritDoc} */ @Override public long getVirtualMemorySize() { readProcMemInfoFile(); return (ramSize + swapSize) * 1024; } /** {@inheritDoc} */ @Override public long getAvailablePhysicalMemorySize() { readProcMemInfoFile(true); return (ramSizeFree + inactiveSize) * 1024; } /** {@inheritDoc} */ @Override public long getAvailableVirtualMemorySize() { readProcMemInfoFile(true); return (ramSizeFree + swapSizeFree + inactiveSize) * 1024; } /** {@inheritDoc} */ @Override public int getNumProcessors() { readProcCpuInfoFile(); return numProcessors; } /** {@inheritDoc} */ @Override public int getNumCores() { readProcCpuInfoFile(); return numCores; } /** {@inheritDoc} */ @Override public long getCpuFrequency() { readProcCpuInfoFile(); return cpuFrequency; } /** {@inheritDoc} */ @Override public long getCumulativeCpuTime() { readProcStatFile(); return 
cpuTimeTracker.getCumulativeCpuTime(); } /** {@inheritDoc} */ @Override public float getCpuUsage() { readProcStatFile(); float overallCpuUsage = cpuTimeTracker.getCpuTrackerUsagePercent(); if (overallCpuUsage != CpuTimeTracker.UNAVAILABLE) { overallCpuUsage = overallCpuUsage / getNumProcessors(); } return overallCpuUsage; } /** {@inheritDoc} */ @Override public long getNetworkBytesRead() { readProcNetInfoFile(); return numNetBytesRead; } /** {@inheritDoc} */ @Override public long getNetworkBytesWritten() { readProcNetInfoFile(); return numNetBytesWritten; } @Override public long getStorageBytesRead() { readProcDisksInfoFile(); return numDisksBytesRead; } @Override public long getStorageBytesWritten() { readProcDisksInfoFile(); return numDisksBytesWritten; } /** * Test the {@link SysInfoLinux}. * * @param args - arguments to this calculator test */ public static void main(String[] args) { SysInfoLinux plugin = new SysInfoLinux(); System.out.println("Physical memory Size (bytes) : " + plugin.getPhysicalMemorySize()); System.out.println("Total Virtual memory Size (bytes) : " + plugin.getVirtualMemorySize()); System.out.println("Available Physical memory Size (bytes) : " + plugin.getAvailablePhysicalMemorySize()); System.out.println("Total Available Virtual memory Size (bytes) : " + plugin.getAvailableVirtualMemorySize()); System.out.println("Number of Processors : " + plugin.getNumProcessors()); System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency()); System.out.println("Cumulative CPU time (ms) : " + plugin.getCumulativeCpuTime()); System.out.println("Total network read (bytes) : " + plugin.getNetworkBytesRead()); System.out.println("Total network written (bytes) : " + plugin.getNetworkBytesWritten()); System.out.println("Total storage read (bytes) : " + plugin.getStorageBytesRead()); System.out.println("Total storage written (bytes) : " + plugin.getStorageBytesWritten()); try { // Sleep so we can compute the CPU usage Thread.sleep(500L); } catch (InterruptedException e) { // do nothing } System.out.println("CPU usage % : " + plugin.getCpuUsage()); } @VisibleForTesting void setReadCpuInfoFile(boolean readCpuInfoFileValue) { this.readCpuInfoFile = readCpuInfoFileValue; } public long getJiffyLengthInMillis() { return this.jiffyLengthInMillis; } }
21,665
30.354559
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.SerializationFactory; import org.apache.hadoop.io.serializer.Serializer; /** * General reflection utils */ @InterfaceAudience.Public @InterfaceStability.Evolving public class ReflectionUtils { private static final Class<?>[] EMPTY_ARRAY = new Class[]{}; volatile private static SerializationFactory serialFactory = null; /** * Cache of constructors for each class. Pins the classes so they * can't be garbage collected until ReflectionUtils can be collected. */ private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = new ConcurrentHashMap<Class<?>, Constructor<?>>(); /** * Check and set 'configuration' if necessary. * * @param theObject object for which to set configuration * @param conf Configuration */ public static void setConf(Object theObject, Configuration conf) { if (conf != null) { if (theObject instanceof Configurable) { ((Configurable) theObject).setConf(conf); } setJobConf(theObject, conf); } } /** * This code is to support backward compatibility and break the compile * time dependency of core on mapred. * This should be made deprecated along with the mapred package HADOOP-1230. * Should be removed when mapred package is removed. 
*/ private static void setJobConf(Object theObject, Configuration conf) { //If JobConf and JobConfigurable are in classpath, AND //theObject is of type JobConfigurable AND //conf is of type JobConf then //invoke configure on theObject try { Class<?> jobConfClass = conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConf"); if (jobConfClass == null) { return; } Class<?> jobConfigurableClass = conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConfigurable"); if (jobConfigurableClass == null) { return; } if (jobConfClass.isAssignableFrom(conf.getClass()) && jobConfigurableClass.isAssignableFrom(theObject.getClass())) { Method configureMethod = jobConfigurableClass.getMethod("configure", jobConfClass); configureMethod.invoke(theObject, conf); } } catch (Exception e) { throw new RuntimeException("Error in configuring object", e); } } /** Create an object for the given class and initialize it from conf * * @param theClass class of which an object is created * @param conf Configuration * @return a new object */ @SuppressWarnings("unchecked") public static <T> T newInstance(Class<T> theClass, Configuration conf) { T result; try { Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass); if (meth == null) { meth = theClass.getDeclaredConstructor(EMPTY_ARRAY); meth.setAccessible(true); CONSTRUCTOR_CACHE.put(theClass, meth); } result = meth.newInstance(); } catch (Exception e) { throw new RuntimeException(e); } setConf(result, conf); return result; } static private ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); public static void setContentionTracing(boolean val) { threadBean.setThreadContentionMonitoringEnabled(val); } private static String getTaskName(long id, String name) { if (name == null) { return Long.toString(id); } return id + " (" + name + ")"; } /** * Print all of the thread's information and stack traces. * * @param stream the stream to * @param title a string title for the stack trace */ public synchronized static void printThreadInfo(PrintStream stream, String title) { final int STACK_DEPTH = 20; boolean contention = threadBean.isThreadContentionMonitoringEnabled(); long[] threadIds = threadBean.getAllThreadIds(); stream.println("Process Thread Dump: " + title); stream.println(threadIds.length + " active threads"); for (long tid: threadIds) { ThreadInfo info = threadBean.getThreadInfo(tid, STACK_DEPTH); if (info == null) { stream.println(" Inactive"); continue; } stream.println("Thread " + getTaskName(info.getThreadId(), info.getThreadName()) + ":"); Thread.State state = info.getThreadState(); stream.println(" State: " + state); stream.println(" Blocked count: " + info.getBlockedCount()); stream.println(" Waited count: " + info.getWaitedCount()); if (contention) { stream.println(" Blocked time: " + info.getBlockedTime()); stream.println(" Waited time: " + info.getWaitedTime()); } if (state == Thread.State.WAITING) { stream.println(" Waiting on " + info.getLockName()); } else if (state == Thread.State.BLOCKED) { stream.println(" Blocked on " + info.getLockName()); stream.println(" Blocked by " + getTaskName(info.getLockOwnerId(), info.getLockOwnerName())); } stream.println(" Stack:"); for (StackTraceElement frame: info.getStackTrace()) { stream.println(" " + frame.toString()); } } stream.flush(); } private static long previousLogTime = 0; /** * Log the current thread stacks at INFO level. 
* @param log the logger that logs the stack trace * @param title a descriptive title for the call stacks * @param minInterval the minimum time from the last */ public static void logThreadInfo(Log log, String title, long minInterval) { boolean dumpStack = false; if (log.isInfoEnabled()) { synchronized (ReflectionUtils.class) { long now = Time.now(); if (now - previousLogTime >= minInterval * 1000) { previousLogTime = now; dumpStack = true; } } if (dumpStack) { try { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title); log.info(buffer.toString(Charset.defaultCharset().name())); } catch (UnsupportedEncodingException ignored) { } } } } /** * Return the correctly-typed {@link Class} of the given object. * * @param o object whose correctly-typed <code>Class</code> is to be obtained * @return the correctly typed <code>Class</code> of the given object. */ @SuppressWarnings("unchecked") public static <T> Class<T> getClass(T o) { return (Class<T>)o.getClass(); } // methods to support testing static void clearCache() { CONSTRUCTOR_CACHE.clear(); } static int getCacheSize() { return CONSTRUCTOR_CACHE.size(); } /** * A pair of input/output buffers that we use to clone writables. */ private static class CopyInCopyOutBuffer { DataOutputBuffer outBuffer = new DataOutputBuffer(); DataInputBuffer inBuffer = new DataInputBuffer(); /** * Move the data from the output buffer to the input buffer. */ void moveData() { inBuffer.reset(outBuffer.getData(), outBuffer.getLength()); } } /** * Allocate a buffer for each thread that tries to clone objects. */ private static final ThreadLocal<CopyInCopyOutBuffer> CLONE_BUFFERS = new ThreadLocal<CopyInCopyOutBuffer>() { @Override protected synchronized CopyInCopyOutBuffer initialValue() { return new CopyInCopyOutBuffer(); } }; private static SerializationFactory getFactory(Configuration conf) { if (serialFactory == null) { serialFactory = new SerializationFactory(conf); } return serialFactory; } /** * Make a copy of the writable object using serialization to a buffer * @param src the object to copy from * @param dst the object to copy into, which is destroyed * @return dst param (the copy) * @throws IOException */ @SuppressWarnings("unchecked") public static <T> T copy(Configuration conf, T src, T dst) throws IOException { CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get(); buffer.outBuffer.reset(); SerializationFactory factory = getFactory(conf); Class<T> cls = (Class<T>) src.getClass(); Serializer<T> serializer = factory.getSerializer(cls); serializer.open(buffer.outBuffer); serializer.serialize(src); buffer.moveData(); Deserializer<T> deserializer = factory.getDeserializer(cls); deserializer.open(buffer.inBuffer); dst = deserializer.deserialize(dst); return dst; } @Deprecated public static void cloneWritableInto(Writable dst, Writable src) throws IOException { CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get(); buffer.outBuffer.reset(); src.write(buffer.outBuffer); buffer.moveData(); dst.readFields(buffer.inBuffer); } /** * Gets all the declared fields of a class including fields declared in * superclasses. */ public static List<Field> getDeclaredFieldsIncludingInherited(Class<?> clazz) { List<Field> fields = new ArrayList<Field>(); while (clazz != null) { for (Field field : clazz.getDeclaredFields()) { fields.add(field); } clazz = clazz.getSuperclass(); } return fields; } /** * Gets all the declared methods of a class including methods declared in * superclasses. 
*/ public static List<Method> getDeclaredMethodsIncludingInherited(Class<?> clazz) { List<Method> methods = new ArrayList<Method>(); while (clazz != null) { for (Method method : clazz.getDeclaredMethods()) { methods.add(method); } clazz = clazz.getSuperclass(); } return methods; } }
11,756
32.784483
83
java
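A sketch of the most common ReflectionUtils call above, newInstance, which constructs an object via its no-arg constructor and applies the Configuration if the object is Configurable. MyWidget is a hypothetical class introduced only for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class ReflectionUtilsExample {
  // Hypothetical class created reflectively; it only needs a no-arg constructor.
  public static class MyWidget {
    public MyWidget() { }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    MyWidget w = ReflectionUtils.newInstance(MyWidget.class, conf);
    System.out.println("created " + w.getClass().getName());
  }
}
```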
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Timer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Utility methods for getting the time and computing intervals. * * It has the same behavior as {{@link Time}}, with the exception that its * functions can be overridden for dependency injection purposes. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class Timer { /** * Current system time. Do not use this to calculate a duration or interval * to sleep, because it will be broken by settimeofday. Instead, use * monotonicNow. * @return current time in msec. */ public long now() { return Time.now(); } /** * Current time from some arbitrary time base in the past, counting in * milliseconds, and not affected by settimeofday or similar system clock * changes. This is appropriate to use when computing how much longer to * wait for an interval to expire. * @return a monotonic clock that counts in milliseconds. */ public long monotonicNow() { return Time.monotonicNow(); } }
1,919
35.923077
78
java
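The point of Timer above is dependency injection: tests can override now() and monotonicNow(). A sketch of a manually advanced timer; ManualTimer is a hypothetical helper written for this example (Hadoop's own tests use a similar idea).

```java
import org.apache.hadoop.util.Timer;

public class TimerExample {
  // A fake Timer for tests: time only advances when the test says so.
  static class ManualTimer extends Timer {
    private long nowMs = 0;
    void advance(long ms) { nowMs += ms; }
    @Override public long now() { return nowMs; }
    @Override public long monotonicNow() { return nowMs; }
  }

  public static void main(String[] args) {
    ManualTimer timer = new ManualTimer();
    long start = timer.monotonicNow();
    timer.advance(250);
    System.out.println("elapsed " + (timer.monotonicNow() - start) + " ms");
  }
}
```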
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.util.AbstractList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; /** * Simplified List implementation which stores elements as a list * of chunks, each chunk having a maximum size. This improves over * using an ArrayList in that creating a large list will never require * a large amount of contiguous heap space -- thus reducing the likelihood * of triggering a CMS compaction pause due to heap fragmentation. * * The first chunks allocated are small, but each additional chunk is * 50% larger than the previous, ramping up to a configurable maximum * chunk size. Reasonable defaults are provided which should be a good * balance between not making any large allocations while still retaining * decent performance. * * This currently only supports a small subset of List operations -- * namely addition and iteration. */ @InterfaceAudience.Private public class ChunkedArrayList<T> extends AbstractList<T> { /** * The chunks which make up the full list. */ private final List<List<T>> chunks = Lists.newArrayList(); /** * Cache of the last element in the 'chunks' array above. * This speeds up the add operation measurably. */ private List<T> lastChunk = null; /** * The capacity with which the last chunk was allocated. */ private int lastChunkCapacity; /** * The capacity of the first chunk to allocate in a cleared list. */ private final int initialChunkCapacity; /** * The maximum number of elements for any chunk. */ private final int maxChunkSize; /** * Total number of elements in the list. */ private int size; /** * Default initial size is 6 elements, since typical minimum object * size is 64 bytes, and this leaves enough space for the object * header. */ private static final int DEFAULT_INITIAL_CHUNK_CAPACITY = 6; /** * Default max size is 8K elements - which, at 8 bytes per element * should be about 64KB -- small enough to easily fit in contiguous * free heap space even with a fair amount of fragmentation. 
*/ private static final int DEFAULT_MAX_CHUNK_SIZE = 8*1024; public ChunkedArrayList() { this(DEFAULT_INITIAL_CHUNK_CAPACITY, DEFAULT_MAX_CHUNK_SIZE); } /** * @param initialChunkCapacity the capacity of the first chunk to be * allocated * @param maxChunkSize the maximum size of any chunk allocated */ public ChunkedArrayList(int initialChunkCapacity, int maxChunkSize) { Preconditions.checkArgument(maxChunkSize >= initialChunkCapacity); this.initialChunkCapacity = initialChunkCapacity; this.maxChunkSize = maxChunkSize; } @Override public Iterator<T> iterator() { final Iterator<T> it = Iterables.concat(chunks).iterator(); return new Iterator<T>() { @Override public boolean hasNext() { return it.hasNext(); } @Override public T next() { return it.next(); } @Override public void remove() { it.remove(); size--; } }; } @Override public boolean add(T e) { if (size == Integer.MAX_VALUE) { throw new RuntimeException("Can't add an additional element to the " + "list; list already has INT_MAX elements."); } if (lastChunk == null) { addChunk(initialChunkCapacity); } else if (lastChunk.size() >= lastChunkCapacity) { int newCapacity = lastChunkCapacity + (lastChunkCapacity >> 1); addChunk(Math.min(newCapacity, maxChunkSize)); } size++; return lastChunk.add(e); } @Override public void clear() { chunks.clear(); lastChunk = null; lastChunkCapacity = 0; size = 0; } private void addChunk(int capacity) { lastChunk = Lists.newArrayListWithCapacity(capacity); chunks.add(lastChunk); lastChunkCapacity = capacity; } @Override public boolean isEmpty() { return size == 0; } @Override public int size() { return size; } @VisibleForTesting int getNumChunks() { return chunks.size(); } @VisibleForTesting int getMaxChunkSize() { int size = 0; for (List<T> chunk : chunks) { size = Math.max(size, chunk.size()); } return size; } @Override public T get(int idx) { if (idx < 0) { throw new IndexOutOfBoundsException(); } int base = 0; Iterator<List<T>> it = chunks.iterator(); while (it.hasNext()) { List<T> list = it.next(); int size = list.size(); if (idx < base + size) { return list.get(idx - base); } base += size; } throw new IndexOutOfBoundsException(); } }
5,719
26.76699
76
java
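A short sketch of ChunkedArrayList above, which supports only a subset of List (add, iterate, get) but never allocates one large contiguous backing array. Class name and element count are illustrative.

```java
import org.apache.hadoop.util.ChunkedArrayList;

public class ChunkedArrayListExample {
  public static void main(String[] args) {
    // Elements are stored in growing chunks rather than one big array.
    ChunkedArrayList<Long> ids = new ChunkedArrayList<>();
    for (long i = 0; i < 100_000; i++) {
      ids.add(i);
    }
    long sum = 0;
    for (long id : ids) {
      sum += id;
    }
    System.out.println("size=" + ids.size() + " sum=" + sum);
  }
}
```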
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.Closeable; import java.util.concurrent.TimeUnit; /** * A simplified StopWatch implementation which can measure times in nanoseconds. */ public class StopWatch implements Closeable { private boolean isStarted; private long startNanos; private long currentElapsedNanos; public StopWatch() { } /** * The method is used to find out if the StopWatch is started. * @return boolean If the StopWatch is started. */ public boolean isRunning() { return isStarted; } /** * Start to measure times and make the state of stopwatch running. * @return this instance of StopWatch. */ public StopWatch start() { if (isStarted) { throw new IllegalStateException("StopWatch is already running"); } isStarted = true; startNanos = System.nanoTime(); return this; } /** * Stop elapsed time and make the state of stopwatch stop. * @return this instance of StopWatch. */ public StopWatch stop() { if (!isStarted) { throw new IllegalStateException("StopWatch is already stopped"); } long now = System.nanoTime(); isStarted = false; currentElapsedNanos += now - startNanos; return this; } /** * Reset elapsed time to zero and make the state of stopwatch stop. * @return this instance of StopWatch. */ public StopWatch reset() { currentElapsedNanos = 0; isStarted = false; return this; } /** * @return current elapsed time in specified timeunit. */ public long now(TimeUnit timeUnit) { return timeUnit.convert(now(), TimeUnit.NANOSECONDS); } /** * @return current elapsed time in nanosecond. */ public long now() { return isStarted ? System.nanoTime() - startNanos + currentElapsedNanos : currentElapsedNanos; } @Override public String toString() { return String.valueOf(now()); } @Override public void close() { if (isStarted) { stop(); } } }
2,805
24.743119
80
java
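A small usage sketch for the StopWatch above; the Thread.sleep calls stand in for real work, and the class comes from the org.apache.hadoop.util package declared in the file:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchExample {
  public static void main(String[] args) throws InterruptedException {
    // start() throws IllegalStateException if the watch is already running.
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);
    sw.stop();                                        // accumulates elapsed nanoseconds
    System.out.println("elapsed ms = " + sw.now(TimeUnit.MILLISECONDS));

    sw.start();                                       // restart; elapsed time keeps accumulating
    Thread.sleep(25);
    System.out.println("so far ms = " + sw.now(TimeUnit.MILLISECONDS));

    sw.reset();                                       // back to zero, stopped
  }
}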
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; /** * Helper class to shutdown {@link Thread}s and {@link ExecutorService}s. */ public class ShutdownThreadsHelper { private static Log LOG = LogFactory.getLog(ShutdownThreadsHelper.class); @VisibleForTesting static final int SHUTDOWN_WAIT_MS = 3000; /** * @param thread {@link Thread to be shutdown} * @return <tt>true</tt> if the thread is successfully interrupted, * <tt>false</tt> otherwise * @throws InterruptedException */ public static boolean shutdownThread(Thread thread) { return shutdownThread(thread, SHUTDOWN_WAIT_MS); } /** * @param thread {@link Thread to be shutdown} * @param timeoutInMilliSeconds time to wait for thread to join after being * interrupted * @return <tt>true</tt> if the thread is successfully interrupted, * <tt>false</tt> otherwise * @throws InterruptedException */ public static boolean shutdownThread(Thread thread, long timeoutInMilliSeconds) { if (thread == null) { return true; } try { thread.interrupt(); thread.join(timeoutInMilliSeconds); return true; } catch (InterruptedException ie) { LOG.warn("Interrupted while shutting down thread - " + thread.getName()); return false; } } /** * @param service {@link ExecutorService to be shutdown} * @return <tt>true</tt> if the service is terminated, * <tt>false</tt> otherwise * @throws InterruptedException */ public static boolean shutdownExecutorService(ExecutorService service) throws InterruptedException { return shutdownExecutorService(service, SHUTDOWN_WAIT_MS); } /** * @param service {@link ExecutorService to be shutdown} * @param timeoutInMs time to wait for {@link * ExecutorService#awaitTermination(long, java.util.concurrent.TimeUnit)} * calls in milli seconds. * @return <tt>true</tt> if the service is terminated, * <tt>false</tt> otherwise * @throws InterruptedException */ public static boolean shutdownExecutorService(ExecutorService service, long timeoutInMs) throws InterruptedException { if (service == null) { return true; } service.shutdown(); if (!service.awaitTermination(timeoutInMs, TimeUnit.MILLISECONDS)) { service.shutdownNow(); return service.awaitTermination(timeoutInMs, TimeUnit.MILLISECONDS); } else { return true; } } }
3,574
32.411215
79
java
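A sketch of how the two helpers above are typically invoked; the worker thread and the thread pool are placeholders made up for the example:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.util.ShutdownThreadsHelper;

public class ShutdownExample {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          Thread.sleep(60000);                // pretend to do long-running work
        } catch (InterruptedException e) {
          // interrupted by shutdownThread() below
        }
      }
    });
    worker.start();

    // Interrupts the thread and waits up to the default 3000 ms for it to join.
    boolean threadStopped = ShutdownThreadsHelper.shutdownThread(worker);

    ExecutorService pool = Executors.newFixedThreadPool(2);
    // Calls shutdown(), then shutdownNow() if tasks do not finish within the timeout.
    boolean poolStopped = ShutdownThreadsHelper.shutdownExecutorService(pool, 1000);

    System.out.println("thread stopped: " + threadStopped + ", pool stopped: " + poolStopped);
  }
}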
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.OpensslCipher; import org.apache.hadoop.io.compress.Lz4Codec; import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.bzip2.Bzip2Factory; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Unstable public class NativeLibraryChecker { /** * A tool to test native library availability, */ public static void main(String[] args) { String usage = "NativeLibraryChecker [-a|-h]\n" + " -a use -a to check all libraries are available\n" + " by default just check hadoop library (and\n" + " winutils.exe on Windows OS) is available\n" + " exit with error code 1 if check failed\n" + " -h print this message\n"; if (args.length > 1 || (args.length == 1 && !(args[0].equals("-a") || args[0].equals("-h")))) { System.err.println(usage); ExitUtil.terminate(1); } boolean checkAll = false; if (args.length == 1) { if (args[0].equals("-h")) { System.out.println(usage); return; } checkAll = true; } Configuration conf = new Configuration(); boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); boolean zlibLoaded = false; boolean snappyLoaded = false; // lz4 is linked within libhadoop boolean lz4Loaded = nativeHadoopLoaded; boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf); boolean openSslLoaded = false; boolean winutilsExists = false; String openSslDetail = ""; String hadoopLibraryName = ""; String zlibLibraryName = ""; String snappyLibraryName = ""; String lz4LibraryName = ""; String bzip2LibraryName = ""; String winutilsPath = null; if (nativeHadoopLoaded) { hadoopLibraryName = NativeCodeLoader.getLibraryName(); zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf); if (zlibLoaded) { zlibLibraryName = ZlibFactory.getLibraryName(); } snappyLoaded = NativeCodeLoader.buildSupportsSnappy() && SnappyCodec.isNativeCodeLoaded(); if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) { snappyLibraryName = SnappyCodec.getLibraryName(); } if (OpensslCipher.getLoadingFailureReason() != null) { openSslDetail = OpensslCipher.getLoadingFailureReason(); openSslLoaded = false; } else { openSslDetail = OpensslCipher.getLibraryName(); openSslLoaded = true; } if (lz4Loaded) { lz4LibraryName = Lz4Codec.getLibraryName(); } if (bzip2Loaded) { bzip2LibraryName = Bzip2Factory.getLibraryName(conf); } } // winutils.exe is required on Windows winutilsPath = Shell.getWinUtilsPath(); if (winutilsPath != null) { winutilsExists = true; } else { winutilsPath = ""; } System.out.println("Native library 
checking:"); System.out.printf("hadoop: %b %s%n", nativeHadoopLoaded, hadoopLibraryName); System.out.printf("zlib: %b %s%n", zlibLoaded, zlibLibraryName); System.out.printf("snappy: %b %s%n", snappyLoaded, snappyLibraryName); System.out.printf("lz4: %b %s%n", lz4Loaded, lz4LibraryName); System.out.printf("bzip2: %b %s%n", bzip2Loaded, bzip2LibraryName); System.out.printf("openssl: %b %s%n", openSslLoaded, openSslDetail); if (Shell.WINDOWS) { System.out.printf("winutils: %b %s%n", winutilsExists, winutilsPath); } if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) || (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) { // return 1 to indicated check failed ExitUtil.terminate(1); } } }
4,819
36.65625
82
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLClassLoader; import java.util.ArrayList; import java.util.List; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; /** * <code>GenericOptionsParser</code> is a utility to parse command line * arguments generic to the Hadoop framework. * * <code>GenericOptionsParser</code> recognizes several standard command * line arguments, enabling applications to easily specify a namenode, a * ResourceManager, additional configuration resources etc. * * <h4 id="GenericOptions">Generic Options</h4> * * <p>The supported generic options are:</p> * <p><blockquote><pre> * -conf &lt;configuration file&gt; specify a configuration file * -D &lt;property=value&gt; use value for given property * -fs &lt;local|namenode:port&gt; specify a namenode * -jt &lt;local|resourcemanager:port&gt; specify a ResourceManager * -files &lt;comma separated list of files&gt; specify comma separated * files to be copied to the map reduce cluster * -libjars &lt;comma separated list of jars&gt; specify comma separated * jar files to include in the classpath. * -archives &lt;comma separated list of archives&gt; specify comma * separated archives to be unarchived on the compute machines. 
* </pre></blockquote></p> * * <p>The general command line syntax is:</p> * <p><tt><pre> * bin/hadoop command [genericOptions] [commandOptions] * </pre></tt></p> * * <p>Generic command line arguments <strong>might</strong> modify * <code>Configuration </code> objects, given to constructors.</p> * * <p>The functionality is implemented using Commons CLI.</p> * * <p>Examples:</p> * <p><blockquote><pre> * $ bin/hadoop dfs -fs darwin:8020 -ls /data * list /data directory in dfs with namenode darwin:8020 * * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data * list /data directory in dfs with namenode darwin:8020 * * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data * list /data directory in dfs with multiple conf files specified. * * $ bin/hadoop job -D yarn.resourcemanager.address=darwin:8032 -submit job.xml * submit a job to ResourceManager darwin:8032 * * $ bin/hadoop job -jt darwin:8032 -submit job.xml * submit a job to ResourceManager darwin:8032 * * $ bin/hadoop job -jt local -submit job.xml * submit a job to local runner * * $ bin/hadoop jar -libjars testlib.jar * -archives test.tgz -files file.txt inputjar args * job submission with libjars, files and archives * </pre></blockquote></p> * * @see Tool * @see ToolRunner */ @InterfaceAudience.Private @InterfaceStability.Evolving public class GenericOptionsParser { private static final Log LOG = LogFactory.getLog(GenericOptionsParser.class); private Configuration conf; private CommandLine commandLine; /** * Create an options parser with the given options to parse the args. * @param opts the options * @param args the command line arguments * @throws IOException */ public GenericOptionsParser(Options opts, String[] args) throws IOException { this(new Configuration(), opts, args); } /** * Create an options parser to parse the args. * @param args the command line arguments * @throws IOException */ public GenericOptionsParser(String[] args) throws IOException { this(new Configuration(), new Options(), args); } /** * Create a <code>GenericOptionsParser<code> to parse only the generic Hadoop * arguments. * * The array of string arguments other than the generic arguments can be * obtained by {@link #getRemainingArgs()}. * * @param conf the <code>Configuration</code> to modify. * @param args command-line arguments. * @throws IOException */ public GenericOptionsParser(Configuration conf, String[] args) throws IOException { this(conf, new Options(), args); } /** * Create a <code>GenericOptionsParser</code> to parse given options as well * as generic Hadoop options. * * The resulting <code>CommandLine</code> object can be obtained by * {@link #getCommandLine()}. * * @param conf the configuration to modify * @param options options built by the caller * @param args User-specified arguments * @throws IOException */ public GenericOptionsParser(Configuration conf, Options options, String[] args) throws IOException { parseGeneralOptions(options, conf, args); this.conf = conf; } /** * Returns an array of Strings containing only application-specific arguments. * * @return array of <code>String</code>s containing the un-parsed arguments * or <strong>empty array</strong> if commandLine was not defined. */ public String[] getRemainingArgs() { return (commandLine == null) ? new String[]{} : commandLine.getArgs(); } /** * Get the modified configuration * @return the configuration that has the modified parameters. 
*/ public Configuration getConfiguration() { return conf; } /** * Returns the commons-cli <code>CommandLine</code> object * to process the parsed arguments. * * Note: If the object is created with * {@link #GenericOptionsParser(Configuration, String[])}, then returned * object will only contain parsed generic options. * * @return <code>CommandLine</code> representing list of arguments * parsed against Options descriptor. */ public CommandLine getCommandLine() { return commandLine; } /** * Specify properties of each generic option */ @SuppressWarnings("static-access") private static Options buildGeneralOptions(Options opts) { Option fs = OptionBuilder.withArgName("local|namenode:port") .hasArg() .withDescription("specify a namenode") .create("fs"); Option jt = OptionBuilder.withArgName("local|resourcemanager:port") .hasArg() .withDescription("specify a ResourceManager") .create("jt"); Option oconf = OptionBuilder.withArgName("configuration file") .hasArg() .withDescription("specify an application configuration file") .create("conf"); Option property = OptionBuilder.withArgName("property=value") .hasArg() .withDescription("use value for given property") .create('D'); Option libjars = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated jar files to include in the classpath.") .create("libjars"); Option files = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated files to be copied to the " + "map reduce cluster") .create("files"); Option archives = OptionBuilder.withArgName("paths") .hasArg() .withDescription("comma separated archives to be unarchived" + " on the compute machines.") .create("archives"); // file with security tokens Option tokensFile = OptionBuilder.withArgName("tokensFile") .hasArg() .withDescription("name of the file with the tokens") .create("tokenCacheFile"); opts.addOption(fs); opts.addOption(jt); opts.addOption(oconf); opts.addOption(property); opts.addOption(libjars); opts.addOption(files); opts.addOption(archives); opts.addOption(tokensFile); return opts; } /** * Modify configuration according user-specified generic options * @param conf Configuration to be modified * @param line User-specified generic options */ private void processGeneralOptions(Configuration conf, CommandLine line) throws IOException { if (line.hasOption("fs")) { FileSystem.setDefaultUri(conf, line.getOptionValue("fs")); } if (line.hasOption("jt")) { String optionValue = line.getOptionValue("jt"); if (optionValue.equalsIgnoreCase("local")) { conf.set("mapreduce.framework.name", optionValue); } conf.set("yarn.resourcemanager.address", optionValue, "from -jt command line option"); } if (line.hasOption("conf")) { String[] values = line.getOptionValues("conf"); for(String value : values) { conf.addResource(new Path(value)); } } if (line.hasOption('D')) { String[] property = line.getOptionValues('D'); for(String prop : property) { String[] keyval = prop.split("=", 2); if (keyval.length == 2) { conf.set(keyval[0], keyval[1], "from command line"); } } } if (line.hasOption("libjars")) { conf.set("tmpjars", validateFiles(line.getOptionValue("libjars"), conf), "from -libjars command line option"); //setting libjars in client classpath URL[] libjars = getLibJars(conf); if(libjars!=null && libjars.length>0) { conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader())); Thread.currentThread().setContextClassLoader( new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader())); } } if (line.hasOption("files")) { conf.set("tmpfiles", 
validateFiles(line.getOptionValue("files"), conf), "from -files command line option"); } if (line.hasOption("archives")) { conf.set("tmparchives", validateFiles(line.getOptionValue("archives"), conf), "from -archives command line option"); } conf.setBoolean("mapreduce.client.genericoptionsparser.used", true); // tokensFile if(line.hasOption("tokenCacheFile")) { String fileName = line.getOptionValue("tokenCacheFile"); // check if the local file exists FileSystem localFs = FileSystem.getLocal(conf); Path p = localFs.makeQualified(new Path(fileName)); if (!localFs.exists(p)) { throw new FileNotFoundException("File "+fileName+" does not exist."); } if(LOG.isDebugEnabled()) { LOG.debug("setting conf tokensFile: " + fileName); } UserGroupInformation.getCurrentUser().addCredentials( Credentials.readTokenStorageFile(p, conf)); conf.set("mapreduce.job.credentials.binary", p.toString(), "from -tokenCacheFile command line option"); } } /** * If libjars are set in the conf, parse the libjars. * @param conf * @return libjar urls * @throws IOException */ public static URL[] getLibJars(Configuration conf) throws IOException { String jars = conf.get("tmpjars"); if(jars==null) { return null; } String[] files = jars.split(","); List<URL> cp = new ArrayList<URL>(); for (String file : files) { Path tmp = new Path(file); if (tmp.getFileSystem(conf).equals(FileSystem.getLocal(conf))) { cp.add(FileSystem.getLocal(conf).pathToFile(tmp).toURI().toURL()); } else { LOG.warn("The libjars file " + tmp + " is not on the local " + "filesystem. Ignoring."); } } return cp.toArray(new URL[0]); } /** * takes input as a comma separated list of files * and verifies if they exist. It defaults for file:/// * if the files specified do not have a scheme. * it returns the paths uri converted defaulting to file:///. * So an input of /home/user/file1,/home/user/file2 would return * file:///home/user/file1,file:///home/user/file2 * @param files * @return */ private String validateFiles(String files, Configuration conf) throws IOException { if (files == null) return null; String[] fileArr = files.split(","); if (fileArr.length == 0) { throw new IllegalArgumentException("File name can't be empty string"); } String[] finalArr = new String[fileArr.length]; for (int i =0; i < fileArr.length; i++) { String tmp = fileArr[i]; if (tmp.isEmpty()) { throw new IllegalArgumentException("File name can't be empty string"); } String finalPath; URI pathURI; try { pathURI = new URI(tmp); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } Path path = new Path(pathURI); FileSystem localFs = FileSystem.getLocal(conf); if (pathURI.getScheme() == null) { //default to the local file system //check if the file exists or not first if (!localFs.exists(path)) { throw new FileNotFoundException("File " + tmp + " does not exist."); } finalPath = path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toString(); } else { // check if the file exists in this file system // we need to recreate this filesystem object to copy // these files to the file system ResourceManager is running // on. FileSystem fs = path.getFileSystem(conf); if (!fs.exists(path)) { throw new FileNotFoundException("File " + tmp + " does not exist."); } finalPath = path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(); } finalArr[i] = finalPath; } return StringUtils.arrayToString(finalArr); } /** * Windows powershell and cmd can parse key=value themselves, because * /pkey=value is same as /pkey value under windows. 
However this is not * compatible with how we get arbitrary key values in -Dkey=value format. * Under windows -D key=value or -Dkey=value might be passed as * [-Dkey, value] or [-D key, value]. This method does undo these and * return a modified args list by manually changing [-D, key, value] * into [-D, key=value] * * @param args command line arguments * @return fixed command line arguments that GnuParser can parse */ private String[] preProcessForWindows(String[] args) { if (!Shell.WINDOWS) { return args; } if (args == null) { return null; } List<String> newArgs = new ArrayList<String>(args.length); for (int i=0; i < args.length; i++) { String prop = null; if (args[i].equals("-D")) { newArgs.add(args[i]); if (i < args.length - 1) { prop = args[++i]; } } else if (args[i].startsWith("-D")) { prop = args[i]; } else { newArgs.add(args[i]); } if (prop != null) { if (prop.contains("=")) { // everything good } else { if (i < args.length - 1) { prop += "=" + args[++i]; } } newArgs.add(prop); } } return newArgs.toArray(new String[newArgs.size()]); } /** * Parse the user-specified options, get the generic options, and modify * configuration accordingly * @param opts Options to use for parsing args. * @param conf Configuration to be modified * @param args User-specified arguments */ private void parseGeneralOptions(Options opts, Configuration conf, String[] args) throws IOException { opts = buildGeneralOptions(opts); CommandLineParser parser = new GnuParser(); try { commandLine = parser.parse(opts, preProcessForWindows(args), true); processGeneralOptions(conf, commandLine); } catch(ParseException e) { LOG.warn("options parsing failed: "+e.getMessage()); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("general options are: ", opts); } } /** * Print the usage message for generic command-line options supported. * * @param out stream to print the usage message to. */ public static void printGenericCommandUsage(PrintStream out) { out.println("Generic options supported are"); out.println("-conf <configuration file> specify an application configuration file"); out.println("-D <property=value> use value for given property"); out.println("-fs <local|namenode:port> specify a namenode"); out.println("-jt <local|resourcemanager:port> specify a ResourceManager"); out.println("-files <comma separated list of files> " + "specify comma separated files to be copied to the map reduce cluster"); out.println("-libjars <comma separated list of jars> " + "specify comma separated jar files to include in the classpath."); out.println("-archives <comma separated list of archives> " + "specify comma separated archives to be unarchived" + " on the compute machines.\n"); out.println("The general command line syntax is"); out.println("command [genericOptions] [commandOptions]\n"); } }
18,412
34.409615
92
java
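A hedged sketch of the parser above; the argument vector, the my.app.flag property and the namenode address are invented for the example. Applications normally reach this class indirectly through ToolRunner, as the class javadoc notes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class ParserExample {
  public static void main(String[] args) throws Exception {
    // In a real tool this would simply be the args passed to main().
    String[] argv = { "-D", "my.app.flag=true", "-fs", "hdfs://namenode:8020", "input", "output" };

    Configuration conf = new Configuration();
    GenericOptionsParser parser = new GenericOptionsParser(conf, argv);

    // Generic options (-D, -fs, -conf, ...) have already been applied to conf.
    System.out.println("my.app.flag = " + conf.get("my.app.flag"));

    // Everything that was not a generic option is left for the application.
    for (String remaining : parser.getRemainingArgs()) {
      System.out.println("app arg: " + remaining);
    }
  }
}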
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.IOException; import java.io.InputStream; import java.util.Properties; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.IOUtils; /** * This class returns build information about Hadoop components. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class VersionInfo { private static final Log LOG = LogFactory.getLog(VersionInfo.class); private Properties info; protected VersionInfo(String component) { info = new Properties(); String versionInfoFile = component + "-version-info.properties"; InputStream is = null; try { is = Thread.currentThread().getContextClassLoader() .getResourceAsStream(versionInfoFile); if (is == null) { throw new IOException("Resource not found"); } info.load(is); } catch (IOException ex) { LogFactory.getLog(getClass()).warn("Could not read '" + versionInfoFile + "', " + ex.toString(), ex); } finally { IOUtils.closeStream(is); } } protected String _getVersion() { return info.getProperty("version", "Unknown"); } protected String _getRevision() { return info.getProperty("revision", "Unknown"); } protected String _getBranch() { return info.getProperty("branch", "Unknown"); } protected String _getDate() { return info.getProperty("date", "Unknown"); } protected String _getUser() { return info.getProperty("user", "Unknown"); } protected String _getUrl() { return info.getProperty("url", "Unknown"); } protected String _getSrcChecksum() { return info.getProperty("srcChecksum", "Unknown"); } protected String _getBuildVersion(){ return getVersion() + " from " + _getRevision() + " by " + _getUser() + " source checksum " + _getSrcChecksum(); } protected String _getProtocVersion() { return info.getProperty("protocVersion", "Unknown"); } private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common"); /** * Get the Hadoop version. * @return the Hadoop version string, eg. "0.6.3-dev" */ public static String getVersion() { return COMMON_VERSION_INFO._getVersion(); } /** * Get the subversion revision number for the root directory * @return the revision number, eg. "451451" */ public static String getRevision() { return COMMON_VERSION_INFO._getRevision(); } /** * Get the branch on which this originated. * @return The branch name, e.g. "trunk" or "branches/branch-0.20" */ public static String getBranch() { return COMMON_VERSION_INFO._getBranch(); } /** * The date that Hadoop was compiled. * @return the compilation date in unix date format */ public static String getDate() { return COMMON_VERSION_INFO._getDate(); } /** * The user that compiled Hadoop. 
* @return the username of the user */ public static String getUser() { return COMMON_VERSION_INFO._getUser(); } /** * Get the subversion URL for the root Hadoop directory. */ public static String getUrl() { return COMMON_VERSION_INFO._getUrl(); } /** * Get the checksum of the source files from which Hadoop was * built. **/ public static String getSrcChecksum() { return COMMON_VERSION_INFO._getSrcChecksum(); } /** * Returns the buildVersion which includes version, * revision, user and date. */ public static String getBuildVersion(){ return COMMON_VERSION_INFO._getBuildVersion(); } /** * Returns the protoc version used for the build. */ public static String getProtocVersion(){ return COMMON_VERSION_INFO._getProtocVersion(); } public static void main(String[] args) { LOG.debug("version: "+ getVersion()); System.out.println("Hadoop " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); System.out.println("Compiled with protoc " + getProtocVersion()); System.out.println("From source with checksum " + getSrcChecksum()); System.out.println("This command was run using " + ClassUtil.findContainingJar(VersionInfo.class)); } }
5,226
27.878453
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Timer; import java.util.TimerTask; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; /** * * The class which provides functionality of checking the health of the node * using the configured node health script and reporting back to the service * for which the health checker has been asked to report. */ public class NodeHealthScriptRunner extends AbstractService { private static Log LOG = LogFactory.getLog(NodeHealthScriptRunner.class); /** Absolute path to the health script. */ private String nodeHealthScript; /** Delay after which node health script to be executed */ private long intervalTime; /** Time after which the script should be timedout */ private long scriptTimeout; /** Timer used to schedule node health monitoring script execution */ private Timer nodeHealthScriptScheduler; /** ShellCommandExecutor used to execute monitoring script */ ShellCommandExecutor shexec = null; /** Pattern used for searching in the output of the node health script */ static private final String ERROR_PATTERN = "ERROR"; /** Time out error message */ public static final String NODE_HEALTH_SCRIPT_TIMED_OUT_MSG = "Node health script timed out"; private boolean isHealthy; private String healthReport; private long lastReportedTime; private TimerTask timer; private enum HealthCheckerExitStatus { SUCCESS, TIMED_OUT, FAILED_WITH_EXIT_CODE, FAILED_WITH_EXCEPTION, FAILED } /** * Class which is used by the {@link Timer} class to periodically execute the * node health script. * */ private class NodeHealthMonitorExecutor extends TimerTask { String exceptionStackTrace = ""; public NodeHealthMonitorExecutor(String[] args) { ArrayList<String> execScript = new ArrayList<String>(); execScript.add(nodeHealthScript); if (args != null) { execScript.addAll(Arrays.asList(args)); } shexec = new ShellCommandExecutor(execScript .toArray(new String[execScript.size()]), null, null, scriptTimeout); } @Override public void run() { HealthCheckerExitStatus status = HealthCheckerExitStatus.SUCCESS; try { shexec.execute(); } catch (ExitCodeException e) { // ignore the exit code of the script status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE; // On Windows, we will not hit the Stream closed IOException // thrown by stdout buffered reader for timeout event. 
if (Shell.WINDOWS && shexec.isTimedOut()) { status = HealthCheckerExitStatus.TIMED_OUT; } } catch (Exception e) { LOG.warn("Caught exception : " + e.getMessage()); if (!shexec.isTimedOut()) { status = HealthCheckerExitStatus.FAILED_WITH_EXCEPTION; } else { status = HealthCheckerExitStatus.TIMED_OUT; } exceptionStackTrace = StringUtils.stringifyException(e); } finally { if (status == HealthCheckerExitStatus.SUCCESS) { if (hasErrors(shexec.getOutput())) { status = HealthCheckerExitStatus.FAILED; } } reportHealthStatus(status); } } /** * Method which is used to parse output from the node health monitor and * send to the report address. * * The timed out script or script which causes IOException output is * ignored. * * The node is marked unhealthy if * <ol> * <li>The node health script times out</li> * <li>The node health scripts output has a line which begins with ERROR</li> * <li>An exception is thrown while executing the script</li> * </ol> * If the script throws {@link IOException} or {@link ExitCodeException} the * output is ignored and node is left remaining healthy, as script might * have syntax error. * * @param status */ void reportHealthStatus(HealthCheckerExitStatus status) { long now = System.currentTimeMillis(); switch (status) { case SUCCESS: setHealthStatus(true, "", now); break; case TIMED_OUT: setHealthStatus(false, NODE_HEALTH_SCRIPT_TIMED_OUT_MSG); break; case FAILED_WITH_EXCEPTION: setHealthStatus(false, exceptionStackTrace); break; case FAILED_WITH_EXIT_CODE: setHealthStatus(true, "", now); break; case FAILED: setHealthStatus(false, shexec.getOutput()); break; } } /** * Method to check if the output string has line which begins with ERROR. * * @param output * string * @return true if output string has error pattern in it. */ private boolean hasErrors(String output) { String[] splits = output.split("\n"); for (String split : splits) { if (split.startsWith(ERROR_PATTERN)) { return true; } } return false; } } public NodeHealthScriptRunner(String scriptName, long chkInterval, long timeout, String[] scriptArgs) { super(NodeHealthScriptRunner.class.getName()); this.lastReportedTime = System.currentTimeMillis(); this.isHealthy = true; this.healthReport = ""; this.nodeHealthScript = scriptName; this.intervalTime = chkInterval; this.scriptTimeout = timeout; this.timer = new NodeHealthMonitorExecutor(scriptArgs); } /* * Method which initializes the values for the script path and interval time. */ @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); } /** * Method used to start the Node health monitoring. * */ @Override protected void serviceStart() throws Exception { nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true); // Start the timer task immediately and // then periodically at interval time. nodeHealthScriptScheduler.scheduleAtFixedRate(timer, 0, intervalTime); super.serviceStart(); } /** * Method used to terminate the node health monitoring service. * */ @Override protected void serviceStop() { if (nodeHealthScriptScheduler != null) { nodeHealthScriptScheduler.cancel(); } if (shexec != null) { Process p = shexec.getProcess(); if (p != null) { p.destroy(); } } } /** * Gets the if the node is healthy or not * * @return true if node is healthy */ public boolean isHealthy() { return isHealthy; } /** * Sets if the node is healhty or not considering disks' health also. 
* * @param isHealthy * whether or not the node is healthy */ private synchronized void setHealthy(boolean isHealthy) { this.isHealthy = isHealthy; } /** * Returns output from the health script. If the node is healthy then an empty string * is returned. * * @return output from health script */ public String getHealthReport() { return healthReport; } /** * Sets the health report from the node health script. Also sets the disks' * health info obtained from DiskHealthCheckerService. * * @param healthReport */ private synchronized void setHealthReport(String healthReport) { this.healthReport = healthReport; } /** * Returns time stamp when node health script was last run. * * @return timestamp when node health script was last run */ public long getLastReportedTime() { return lastReportedTime; } /** * Sets the last run time of the node health script. * * @param lastReportedTime */ private synchronized void setLastReportedTime(long lastReportedTime) { this.lastReportedTime = lastReportedTime; } /** * Method used to determine whether the node health monitoring service should be * started. Returns true if the following conditions are met: * * <ol> * <li>Path to Node health check script is not empty</li> * <li>Node health check script file exists</li> * </ol> * * @return true if node health monitoring service can be started. */ public static boolean shouldRun(String healthScript) { if (healthScript == null || healthScript.trim().isEmpty()) { return false; } File f = new File(healthScript); return f.exists() && FileUtil.canExecute(f); } private synchronized void setHealthStatus(boolean isHealthy, String output) { LOG.info("health status being set as " + output); this.setHealthy(isHealthy); this.setHealthReport(output); } private synchronized void setHealthStatus(boolean isHealthy, String output, long time) { LOG.info("health status being set as " + output); this.setHealthStatus(isHealthy, output); this.setLastReportedTime(time); } /** * Used only by tests to access the timer task directly * @return the timer task */ public TimerTask getTimerTask() { return timer; } }
10,233
29.278107
95
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.lang.reflect.Array; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.List; import java.util.jar.JarEntry; import java.util.jar.JarFile; import java.util.jar.Manifest; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.io.IOUtils; /** Run a Hadoop job jar. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class RunJar { /** Pattern that matches any string */ public static final Pattern MATCH_ANY = Pattern.compile(".*"); /** * Priority of the RunJar shutdown hook. */ public static final int SHUTDOWN_HOOK_PRIORITY = 10; /** * Environment key for using the client classloader. */ public static final String HADOOP_USE_CLIENT_CLASSLOADER = "HADOOP_USE_CLIENT_CLASSLOADER"; /** * Environment key for the (user-provided) hadoop classpath. */ public static final String HADOOP_CLASSPATH = "HADOOP_CLASSPATH"; /** * Environment key for the system classes. */ public static final String HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES = "HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES"; /** * Unpack a jar file into a directory. * * This version unpacks all files inside the jar regardless of filename. */ public static void unJar(File jarFile, File toDir) throws IOException { unJar(jarFile, toDir, MATCH_ANY); } /** * Unpack matching files from a jar. Entries inside the jar that do * not match the given pattern will be skipped. 
* * @param jarFile the .jar file to unpack * @param toDir the destination directory into which to unpack the jar * @param unpackRegex the pattern to match jar entries against */ public static void unJar(File jarFile, File toDir, Pattern unpackRegex) throws IOException { JarFile jar = new JarFile(jarFile); try { Enumeration<JarEntry> entries = jar.entries(); while (entries.hasMoreElements()) { final JarEntry entry = entries.nextElement(); if (!entry.isDirectory() && unpackRegex.matcher(entry.getName()).matches()) { InputStream in = jar.getInputStream(entry); try { File file = new File(toDir, entry.getName()); ensureDirectory(file.getParentFile()); OutputStream out = new FileOutputStream(file); try { IOUtils.copyBytes(in, out, 8192); } finally { out.close(); } } finally { in.close(); } } } } finally { jar.close(); } } /** * Ensure the existence of a given directory. * * @throws IOException if it cannot be created and does not already exist */ private static void ensureDirectory(File dir) throws IOException { if (!dir.mkdirs() && !dir.isDirectory()) { throw new IOException("Mkdirs failed to create " + dir.toString()); } } /** Run a Hadoop job jar. If the main class is not in the jar's manifest, * then it must be provided on the command line. */ public static void main(String[] args) throws Throwable { new RunJar().run(args); } public void run(String[] args) throws Throwable { String usage = "RunJar jarFile [mainClass] args..."; if (args.length < 1) { System.err.println(usage); System.exit(-1); } int firstArg = 0; String fileName = args[firstArg++]; File file = new File(fileName); if (!file.exists() || !file.isFile()) { System.err.println("JAR does not exist or is not a normal file: " + file.getCanonicalPath()); System.exit(-1); } String mainClassName = null; JarFile jarFile; try { jarFile = new JarFile(fileName); } catch(IOException io) { throw new IOException("Error opening job jar: " + fileName) .initCause(io); } Manifest manifest = jarFile.getManifest(); if (manifest != null) { mainClassName = manifest.getMainAttributes().getValue("Main-Class"); } jarFile.close(); if (mainClassName == null) { if (args.length < 2) { System.err.println(usage); System.exit(-1); } mainClassName = args[firstArg++]; } mainClassName = mainClassName.replaceAll("/", "."); File tmpDir = new File(System.getProperty("java.io.tmpdir")); ensureDirectory(tmpDir); final File workDir; try { workDir = File.createTempFile("hadoop-unjar", "", tmpDir); } catch (IOException ioe) { // If user has insufficient perms to write to tmpDir, default // "Permission denied" message doesn't specify a filename. 
System.err.println("Error creating temp dir in java.io.tmpdir " + tmpDir + " due to " + ioe.getMessage()); System.exit(-1); return; } if (!workDir.delete()) { System.err.println("Delete failed for " + workDir); System.exit(-1); } ensureDirectory(workDir); ShutdownHookManager.get().addShutdownHook( new Runnable() { @Override public void run() { FileUtil.fullyDelete(workDir); } }, SHUTDOWN_HOOK_PRIORITY); unJar(file, workDir); ClassLoader loader = createClassLoader(file, workDir); Thread.currentThread().setContextClassLoader(loader); Class<?> mainClass = Class.forName(mainClassName, true, loader); Method main = mainClass.getMethod("main", new Class[] { Array.newInstance(String.class, 0).getClass() }); String[] newArgs = Arrays.asList(args) .subList(firstArg, args.length).toArray(new String[0]); try { main.invoke(null, new Object[] { newArgs }); } catch (InvocationTargetException e) { throw e.getTargetException(); } } /** * Creates a classloader based on the environment that was specified by the * user. If HADOOP_USE_CLIENT_CLASSLOADER is specified, it creates an * application classloader that provides the isolation of the user class space * from the hadoop classes and their dependencies. It forms a class space for * the user jar as well as the HADOOP_CLASSPATH. Otherwise, it creates a * classloader that simply adds the user jar to the classpath. */ private ClassLoader createClassLoader(File file, final File workDir) throws MalformedURLException { ClassLoader loader; // see if the client classloader is enabled if (useClientClassLoader()) { StringBuilder sb = new StringBuilder(); sb.append(workDir+"/"). append(File.pathSeparator).append(file). append(File.pathSeparator).append(workDir+"/classes/"). append(File.pathSeparator).append(workDir+"/lib/*"); // HADOOP_CLASSPATH is added to the client classpath String hadoopClasspath = getHadoopClasspath(); if (hadoopClasspath != null && !hadoopClasspath.isEmpty()) { sb.append(File.pathSeparator).append(hadoopClasspath); } String clientClasspath = sb.toString(); // get the system classes String systemClasses = getSystemClasses(); List<String> systemClassesList = systemClasses == null ? null : Arrays.asList(StringUtils.getTrimmedStrings(systemClasses)); // create an application classloader that isolates the user classes loader = new ApplicationClassLoader(clientClasspath, getClass().getClassLoader(), systemClassesList); } else { List<URL> classPath = new ArrayList<URL>(); classPath.add(new File(workDir+"/").toURI().toURL()); classPath.add(file.toURI().toURL()); classPath.add(new File(workDir, "classes/").toURI().toURL()); File[] libs = new File(workDir, "lib").listFiles(); if (libs != null) { for (int i = 0; i < libs.length; i++) { classPath.add(libs[i].toURI().toURL()); } } // create a normal parent-delegating classloader loader = new URLClassLoader(classPath.toArray(new URL[0])); } return loader; } boolean useClientClassLoader() { return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER)); } String getHadoopClasspath() { return System.getenv(HADOOP_CLASSPATH); } String getSystemClasses() { return System.getenv(HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES); } }
9,678
32.491349
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; public class CombinedIPWhiteList implements IPList { public static final Log LOG = LogFactory.getLog(CombinedIPWhiteList.class); private static final String LOCALHOST_IP = "127.0.0.1"; private final IPList[] networkLists; public CombinedIPWhiteList(String fixedWhiteListFile, String variableWhiteListFile, long cacheExpiryInSeconds) { IPList fixedNetworkList = new FileBasedIPList(fixedWhiteListFile); if (variableWhiteListFile != null){ IPList variableNetworkList = new CacheableIPList( new FileBasedIPList(variableWhiteListFile),cacheExpiryInSeconds); networkLists = new IPList[] {fixedNetworkList, variableNetworkList}; } else { networkLists = new IPList[] {fixedNetworkList}; } } @Override public boolean isIn(String ipAddress) { if (ipAddress == null) { throw new IllegalArgumentException("ipAddress is null"); } if (LOCALHOST_IP.equals(ipAddress)) { return true; } for (IPList networkList:networkLists) { if (networkList.isIn(ipAddress)) { return true; } } return false; } }
2,043
32.508197
77
java
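A sketch of the combined whitelist above. Both file paths and the expiry value are hypothetical; per the constructor, the variable list file may be null if only the fixed list is needed:

import org.apache.hadoop.util.CombinedIPWhiteList;

public class WhiteListExample {
  public static void main(String[] args) {
    // Fixed list read once at construction; variable list re-read after the cache expiry.
    CombinedIPWhiteList whiteList = new CombinedIPWhiteList(
        "/etc/hadoop/fixed-whitelist.txt",
        "/etc/hadoop/variable-whitelist.txt",
        300);

    // 127.0.0.1 is always accepted; other addresses are checked against both lists.
    System.out.println(whiteList.isIn("127.0.0.1"));
    System.out.println(whiteList.isIn("192.168.1.25"));
  }
}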
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MergeSort.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.util.Comparator; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.IntWritable; /** An implementation of the core algorithm of MergeSort. */ @InterfaceAudience.LimitedPrivate({"MapReduce"}) @InterfaceStability.Unstable public class MergeSort { //Reusable IntWritables IntWritable I = new IntWritable(0); IntWritable J = new IntWritable(0); //the comparator that the algo should use private Comparator<IntWritable> comparator; public MergeSort(Comparator<IntWritable> comparator) { this.comparator = comparator; } public void mergeSort(int src[], int dest[], int low, int high) { int length = high - low; // Insertion sort on smallest arrays if (length < 7) { for (int i=low; i<high; i++) { for (int j=i;j > low; j--) { I.set(dest[j-1]); J.set(dest[j]); if (comparator.compare(I, J)>0) swap(dest, j, j-1); } } return; } // Recursively sort halves of dest into src int mid = (low + high) >>> 1; mergeSort(dest, src, low, mid); mergeSort(dest, src, mid, high); I.set(src[mid-1]); J.set(src[mid]); // If list is already sorted, just copy from src to dest. This is an // optimization that results in faster sorts for nearly ordered lists. if (comparator.compare(I, J) <= 0) { System.arraycopy(src, low, dest, low, length); return; } // Merge sorted halves (now in src) into dest for (int i = low, p = low, q = mid; i < high; i++) { if (q < high && p < mid) { I.set(src[p]); J.set(src[q]); } if (q>=high || p<mid && comparator.compare(I, J) <= 0) dest[i] = src[p++]; else dest[i] = src[q++]; } } private void swap(int x[], int a, int b) { int t = x[a]; x[a] = x[b]; x[b] = t; } }
2,790
29.67033
75
java
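A usage sketch for the MergeSort above. Note the call convention implied by the implementation: both arrays must start with the same contents, the [low, high) range is sorted, and the sorted result ends up in the second (dest) argument:

import java.util.Arrays;
import java.util.Comparator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.util.MergeSort;

public class MergeSortExample {
  public static void main(String[] args) {
    // The comparator sees the int values wrapped in the sorter's reusable IntWritables.
    Comparator<IntWritable> ascending = new Comparator<IntWritable>() {
      @Override
      public int compare(IntWritable a, IntWritable b) {
        return a.compareTo(b);
      }
    };

    int[] data = { 42, 7, 19, 3, 88, 7, 0, 56 };
    int[] work = data.clone();         // src and dest must hold the same values initially

    new MergeSort(ascending).mergeSort(work, data, 0, data.length);
    System.out.println(Arrays.toString(data));   // [0, 3, 7, 7, 19, 42, 56, 88]
  }
}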
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException; /** * Command-line utility for getting the full classpath needed to launch a Hadoop * client application. If the hadoop script is called with "classpath" as the * command, then it simply prints the classpath and exits immediately without * launching a JVM. The output likely will include wildcards in the classpath. * If there are arguments passed to the classpath command, then this class gets * called. With the --glob argument, it prints the full classpath with wildcards * expanded. This is useful in situations where wildcard syntax isn't usable. * With the --jar argument, it writes the classpath as a manifest in a jar file. * This is useful in environments with short limitations on the maximum command * line length, where it may not be possible to specify the full classpath in a * command. For example, the maximum command line length on Windows is 8191 * characters. */ @InterfaceAudience.Private public final class Classpath { private static final String usage = "classpath [--glob|--jar <path>|-h|--help] :\n" + " Prints the classpath needed to get the Hadoop jar and the required\n" + " libraries.\n" + " Options:\n" + "\n" + " --glob expand wildcards\n" + " --jar <path> write classpath as manifest in jar named <path>\n" + " -h, --help print help\n"; /** * Main entry point. * * @param args command-line arguments */ public static void main(String[] args) { if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) { System.out.println(usage); return; } // Copy args, because CommandFormat mutates the list. List<String> argsList = new ArrayList<String>(Arrays.asList(args)); CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar"); try { cf.parse(argsList); } catch (UnknownOptionException e) { terminate(1, "unrecognized option"); return; } String classPath = System.getProperty("java.class.path"); if (cf.getOpt("-glob")) { // The classpath returned from the property has been globbed already. System.out.println(classPath); } else if (cf.getOpt("-jar")) { if (argsList.isEmpty() || argsList.get(0) == null || argsList.get(0).isEmpty()) { terminate(1, "-jar option requires path of jar file to write"); return; } // Write the classpath into the manifest of a temporary jar file. 
Path workingDir = new Path(System.getProperty("user.dir")); final String tmpJarPath; try { tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir, System.getenv())[0]; } catch (IOException e) { terminate(1, "I/O error creating jar: " + e.getMessage()); return; } // Rename the temporary file to its final location. String jarPath = argsList.get(0); try { FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath)); } catch (IOException e) { terminate(1, "I/O error renaming jar temporary file to path: " + e.getMessage()); return; } } } /** * Prints a message to stderr and exits with a status code. * * @param status exit code * @param msg message */ private static void terminate(int status, String msg) { System.err.println(msg); ExitUtil.terminate(status, msg); } }
4,621
35.68254
81
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.zip.CRC32; import java.util.zip.Checksum; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumException; /** * This class provides interface and utilities for processing checksums for * DFS data transfers. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public class DataChecksum implements Checksum { // checksum types public static final int CHECKSUM_NULL = 0; public static final int CHECKSUM_CRC32 = 1; public static final int CHECKSUM_CRC32C = 2; public static final int CHECKSUM_DEFAULT = 3; public static final int CHECKSUM_MIXED = 4; /** The checksum types */ public static enum Type { NULL (CHECKSUM_NULL, 0), CRC32 (CHECKSUM_CRC32, 4), CRC32C(CHECKSUM_CRC32C, 4), DEFAULT(CHECKSUM_DEFAULT, 0), // This cannot be used to create DataChecksum MIXED (CHECKSUM_MIXED, 0); // This cannot be used to create DataChecksum public final int id; public final int size; private Type(int id, int size) { this.id = id; this.size = size; } /** @return the type corresponding to the id. */ public static Type valueOf(int id) { if (id < 0 || id >= values().length) { throw new IllegalArgumentException("id=" + id + " out of range [0, " + values().length + ")"); } return values()[id]; } } /** * Create a Crc32 Checksum object. The implementation of the Crc32 algorithm * is chosen depending on the platform. */ public static Checksum newCrc32() { return Shell.isJava7OrAbove()? new CRC32(): new PureJavaCrc32(); } public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) { if ( bytesPerChecksum <= 0 ) { return null; } switch ( type ) { case NULL : return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum ); case CRC32 : return new DataChecksum(type, newCrc32(), bytesPerChecksum ); case CRC32C: return new DataChecksum(type, new PureJavaCrc32C(), bytesPerChecksum); default: return null; } } /** * Creates a DataChecksum from HEADER_LEN bytes from arr[offset]. * @return DataChecksum of the type in the array or null in case of an error. 
*/ public static DataChecksum newDataChecksum( byte bytes[], int offset ) { if (offset < 0 || bytes.length < offset + getChecksumHeaderSize()) { return null; } // like readInt(): int bytesPerChecksum = ( (bytes[offset+1] & 0xff) << 24 ) | ( (bytes[offset+2] & 0xff) << 16 ) | ( (bytes[offset+3] & 0xff) << 8 ) | ( (bytes[offset+4] & 0xff) ); return newDataChecksum( Type.valueOf(bytes[offset]), bytesPerChecksum ); } /** * This constructs a DataChecksum by reading HEADER_LEN bytes from input * stream <i>in</i> */ public static DataChecksum newDataChecksum( DataInputStream in ) throws IOException { int type = in.readByte(); int bpc = in.readInt(); DataChecksum summer = newDataChecksum(Type.valueOf(type), bpc ); if ( summer == null ) { throw new IOException( "Could not create DataChecksum of type " + type + " with bytesPerChecksum " + bpc ); } return summer; } /** * Writes the checksum header to the output stream <i>out</i>. */ public void writeHeader( DataOutputStream out ) throws IOException { out.writeByte( type.id ); out.writeInt( bytesPerChecksum ); } public byte[] getHeader() { byte[] header = new byte[getChecksumHeaderSize()]; header[0] = (byte) (type.id & 0xff); // Writing in buffer just like DataOutput.WriteInt() header[1+0] = (byte) ((bytesPerChecksum >>> 24) & 0xff); header[1+1] = (byte) ((bytesPerChecksum >>> 16) & 0xff); header[1+2] = (byte) ((bytesPerChecksum >>> 8) & 0xff); header[1+3] = (byte) (bytesPerChecksum & 0xff); return header; } /** * Writes the current checksum to the stream. * If <i>reset</i> is true, then resets the checksum. * @return number of bytes written. Will be equal to getChecksumSize(); */ public int writeValue( DataOutputStream out, boolean reset ) throws IOException { if ( type.size <= 0 ) { return 0; } if ( type.size == 4 ) { out.writeInt( (int) summer.getValue() ); } else { throw new IOException( "Unknown Checksum " + type ); } if ( reset ) { reset(); } return type.size; } /** * Writes the current checksum to a buffer. * If <i>reset</i> is true, then resets the checksum. * @return number of bytes written. Will be equal to getChecksumSize(); */ public int writeValue( byte[] buf, int offset, boolean reset ) throws IOException { if ( type.size <= 0 ) { return 0; } if ( type.size == 4 ) { int checksum = (int) summer.getValue(); buf[offset+0] = (byte) ((checksum >>> 24) & 0xff); buf[offset+1] = (byte) ((checksum >>> 16) & 0xff); buf[offset+2] = (byte) ((checksum >>> 8) & 0xff); buf[offset+3] = (byte) (checksum & 0xff); } else { throw new IOException( "Unknown Checksum " + type ); } if ( reset ) { reset(); } return type.size; } /** * Compares the checksum located at buf[offset] with the current checksum. * @return true if the checksum matches and false otherwise. */ public boolean compare( byte buf[], int offset ) { if ( type.size == 4 ) { int checksum = ( (buf[offset+0] & 0xff) << 24 ) | ( (buf[offset+1] & 0xff) << 16 ) | ( (buf[offset+2] & 0xff) << 8 ) | ( (buf[offset+3] & 0xff) ); return checksum == (int) summer.getValue(); } return type.size == 0; } private final Type type; private final Checksum summer; private final int bytesPerChecksum; private int inSum = 0; private DataChecksum( Type type, Checksum checksum, int chunkSize ) { this.type = type; summer = checksum; bytesPerChecksum = chunkSize; } /** @return the checksum algorithm type. */ public Type getChecksumType() { return type; } /** @return the size for a checksum. */ public int getChecksumSize() { return type.size; } /** @return the required checksum size given the data length. 
*/ public int getChecksumSize(int dataSize) { return ((dataSize - 1)/getBytesPerChecksum() + 1) * getChecksumSize(); } public int getBytesPerChecksum() { return bytesPerChecksum; } public int getNumBytesInSum() { return inSum; } public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE; static public int getChecksumHeaderSize() { return 1 + SIZE_OF_INTEGER; // type byte, bytesPerChecksum int } //Checksum Interface. Just a wrapper around member summer. @Override public long getValue() { return summer.getValue(); } @Override public void reset() { summer.reset(); inSum = 0; } @Override public void update( byte[] b, int off, int len ) { if ( len > 0 ) { summer.update( b, off, len ); inSum += len; } } @Override public void update( int b ) { summer.update( b ); inSum += 1; } /** * Verify that the given checksums match the given data. * * The 'mark' of the ByteBuffer parameters may be modified by this function,. * but the position is maintained. * * @param data the DirectByteBuffer pointing to the data to verify. * @param checksums the DirectByteBuffer pointing to a series of stored * checksums * @param fileName the name of the file being read, for error-reporting * @param basePos the file position to which the start of 'data' corresponds * @throws ChecksumException if the checksums do not match */ public void verifyChunkedSums(ByteBuffer data, ByteBuffer checksums, String fileName, long basePos) throws ChecksumException { if (type.size == 0) return; if (data.hasArray() && checksums.hasArray()) { verifyChunkedSums( data.array(), data.arrayOffset() + data.position(), data.remaining(), checksums.array(), checksums.arrayOffset() + checksums.position(), fileName, basePos); return; } if (NativeCrc32.isAvailable()) { NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data, fileName, basePos); return; } int startDataPos = data.position(); data.mark(); checksums.mark(); try { byte[] buf = new byte[bytesPerChecksum]; byte[] sum = new byte[type.size]; while (data.remaining() > 0) { int n = Math.min(data.remaining(), bytesPerChecksum); checksums.get(sum); data.get(buf, 0, n); summer.reset(); summer.update(buf, 0, n); int calculated = (int)summer.getValue(); int stored = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000) | (sum[2] << 8 & 0xff00) | sum[3] & 0xff; if (calculated != stored) { long errPos = basePos + data.position() - startDataPos - n; throw new ChecksumException( "Checksum error: "+ fileName + " at "+ errPos + " exp: " + stored + " got: " + calculated, errPos); } } } finally { data.reset(); checksums.reset(); } } /** * Implementation of chunked verification specifically on byte arrays. This * is to avoid the copy when dealing with ByteBuffers that have array backing. 
*/ private void verifyChunkedSums( byte[] data, int dataOff, int dataLen, byte[] checksums, int checksumsOff, String fileName, long basePos) throws ChecksumException { if (type.size == 0) return; if (NativeCrc32.isAvailable()) { NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id, checksums, checksumsOff, data, dataOff, dataLen, fileName, basePos); return; } int remaining = dataLen; int dataPos = 0; while (remaining > 0) { int n = Math.min(remaining, bytesPerChecksum); summer.reset(); summer.update(data, dataOff + dataPos, n); dataPos += n; remaining -= n; int calculated = (int)summer.getValue(); int stored = (checksums[checksumsOff] << 24 & 0xff000000) | (checksums[checksumsOff + 1] << 16 & 0xff0000) | (checksums[checksumsOff + 2] << 8 & 0xff00) | checksums[checksumsOff + 3] & 0xff; checksumsOff += 4; if (calculated != stored) { long errPos = basePos + dataPos - n; throw new ChecksumException( "Checksum error: "+ fileName + " at "+ errPos + " exp: " + stored + " got: " + calculated, errPos); } } } /** * Calculate checksums for the given data. * * The 'mark' of the ByteBuffer parameters may be modified by this function, * but the position is maintained. * * @param data the DirectByteBuffer pointing to the data to checksum. * @param checksums the DirectByteBuffer into which checksums will be * stored. Enough space must be available in this * buffer to put the checksums. */ public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) { if (type.size == 0) return; if (data.hasArray() && checksums.hasArray()) { calculateChunkedSums(data.array(), data.arrayOffset() + data.position(), data.remaining(), checksums.array(), checksums.arrayOffset() + checksums.position()); return; } if (NativeCrc32.isAvailable()) { NativeCrc32.calculateChunkedSums(bytesPerChecksum, type.id, checksums, data); return; } data.mark(); checksums.mark(); try { byte[] buf = new byte[bytesPerChecksum]; while (data.remaining() > 0) { int n = Math.min(data.remaining(), bytesPerChecksum); data.get(buf, 0, n); summer.reset(); summer.update(buf, 0, n); checksums.putInt((int)summer.getValue()); } } finally { data.reset(); checksums.reset(); } } /** * Implementation of chunked calculation specifically on byte arrays. This * is to avoid the copy when dealing with ByteBuffers that have array backing. 
*/ public void calculateChunkedSums( byte[] data, int dataOffset, int dataLength, byte[] sums, int sumsOffset) { if (type.size == 0) return; if (NativeCrc32.isAvailable()) { NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id, sums, sumsOffset, data, dataOffset, dataLength); return; } int remaining = dataLength; while (remaining > 0) { int n = Math.min(remaining, bytesPerChecksum); summer.reset(); summer.update(data, dataOffset, n); dataOffset += n; remaining -= n; long calculated = summer.getValue(); sums[sumsOffset++] = (byte) (calculated >> 24); sums[sumsOffset++] = (byte) (calculated >> 16); sums[sumsOffset++] = (byte) (calculated >> 8); sums[sumsOffset++] = (byte) (calculated); } } @Override public boolean equals(Object other) { if (!(other instanceof DataChecksum)) { return false; } DataChecksum o = (DataChecksum)other; return o.bytesPerChecksum == this.bytesPerChecksum && o.type == this.type; } @Override public int hashCode() { return (this.type.id + 31) * this.bytesPerChecksum; } @Override public String toString() { return "DataChecksum(type=" + type + ", chunkSize=" + bytesPerChecksum + ")"; } /** * This just provides a dummy implimentation for Checksum class * This is used when there is no checksum available or required for * data */ static class ChecksumNull implements Checksum { public ChecksumNull() {} //Dummy interface @Override public long getValue() { return 0; } @Override public void reset() {} @Override public void update(byte[] b, int off, int len) {} @Override public void update(int b) {} }; }
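A minimal usage sketch of the DataChecksum API above, showing the 5-byte header round-trip and chunked checksum calculation/verification; the example class name, chunk size, and buffer sizes are illustrative choices, not part of the Hadoop source.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumExample {
  public static void main(String[] args) throws IOException {
    // CRC32C over 512-byte chunks (both values are arbitrary choices for this example).
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    // The header is 1 type byte followed by a 4-byte bytesPerChecksum int;
    // it can be parsed back with newDataChecksum(byte[], int).
    byte[] header = sum.getHeader();
    DataChecksum parsed = DataChecksum.newDataChecksum(header, 0);
    System.out.println(parsed); // DataChecksum(type=CRC32C, chunkSize=512)

    // Compute one checksum per 512-byte chunk, then verify the data against them.
    byte[] data = new byte[1300]; // 3 chunks -> 12 checksum bytes
    ByteBuffer dataBuf = ByteBuffer.wrap(data);
    ByteBuffer sums = ByteBuffer.allocate(sum.getChecksumSize(data.length));
    sum.calculateChunkedSums(dataBuf, sums);
    sum.verifyChunkedSums(dataBuf, sums, "example-data", 0); // throws ChecksumException on mismatch
  }
}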
15,523
30.617108
96
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util; import java.util.zip.Checksum; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A pure-java implementation of the CRC32 checksum that uses * the same polynomial as the built-in native CRC32. * * This is to avoid the JNI overhead for certain uses of Checksumming * where many small pieces of data are checksummed in succession. * * The current version is ~10x to 1.8x as fast as Sun's native * java.util.zip.CRC32 in Java 1.6 * * @see java.util.zip.CRC32 */ @InterfaceAudience.Public @InterfaceStability.Stable public class PureJavaCrc32 implements Checksum { /** the current CRC value, bit-flipped */ private int crc; /** Create a new PureJavaCrc32 object. */ public PureJavaCrc32() { reset(); } @Override public long getValue() { return (~crc) & 0xffffffffL; } @Override public void reset() { crc = 0xffffffff; } @Override public void update(final byte[] b, final int offset, final int len) { int localCrc = crc; final int remainder = len & 0x7; int i = offset; for(final int end = offset + len - remainder; i < end; i += 8) { final int x = localCrc ^ ((((b[i ] << 24) >>> 24) + ((b[i+1] << 24) >>> 16)) + (((b[i+2] << 24) >>> 8 ) + (b[i+3] << 24))); localCrc = ((T[((x << 24) >>> 24) + 0x700] ^ T[((x << 16) >>> 24) + 0x600]) ^ (T[((x << 8) >>> 24) + 0x500] ^ T[ (x >>> 24) + 0x400])) ^ ((T[((b[i+4] << 24) >>> 24) + 0x300] ^ T[((b[i+5] << 24) >>> 24) + 0x200]) ^ (T[((b[i+6] << 24) >>> 24) + 0x100] ^ T[((b[i+7] << 24) >>> 24)])); } /* loop unroll - duff's device style */ switch(remainder) { case 7: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 6: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 5: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 4: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 3: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 2: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; case 1: localCrc = (localCrc >>> 8) ^ T[((localCrc ^ b[i++]) << 24) >>> 24]; default: /* nothing */ } // Publish crc out to object crc = localCrc; } @Override final public void update(int b) { crc = (crc >>> 8) ^ T[(((crc ^ b) << 24) >>> 24)]; } /* * CRC-32 lookup tables generated by the polynomial 0xEDB88320. * See also TestPureJavaCrc32.Table. 
*/ private static final int[] T = new int[] { /* T8_0 */ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, /* T8_1 */ 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496, 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, 0x958424A2, 0x8C9F15E3, 
0xA7B24620, 0xBEA97761, 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, /* T8_2 */ 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, 0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9, 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, 0x7E89DC78, 
0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, /* T8_3 */ 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, 0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B, 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, 
0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, /* T8_4 */ 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, 0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, 0x248681D0, 0x19E6A860, 0x5E46D2B0, 
0x6326FB00, 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, /* T8_5 */ 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676, 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, 0x6D08D30E, 0xA65400AB, 
0x20C07205, 0xEB9CA1A0, 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC, /* T8_6 */ 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, 0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5, 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, 0xF7EF8204, 
0x519889B0, 0x6070932D, 0xC6079899, 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, /* T8_7 */ 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 }; }
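A small sketch (separate from the source above) checking that PureJavaCrc32 matches java.util.zip.CRC32, since both use the same CRC-32 polynomial; the class name and sample string are made up for illustration.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

import org.apache.hadoop.util.PureJavaCrc32;

public class PureJavaCrc32Example {
  public static void main(String[] args) {
    byte[] data = "hello, checksum".getBytes(StandardCharsets.UTF_8);

    PureJavaCrc32 pure = new PureJavaCrc32();
    pure.update(data, 0, data.length);

    CRC32 builtin = new CRC32();
    builtin.update(data, 0, data.length);

    // Both checksums agree; the pure-Java version simply avoids JNI overhead
    // when many small pieces of data are checksummed in succession.
    System.out.println(pure.getValue() == builtin.getValue()); // true
  }
}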
30,799
48.28
91
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.hash; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * This is a very fast, non-cryptographic hash suitable for general hash-based * lookup. See http://murmurhash.googlepages.com/ for more details. * * <p>The C version of MurmurHash 2.0 found at that site was ported * to Java by Andrzej Bialecki (ab at getopt org).</p> */ @InterfaceAudience.Private @InterfaceStability.Unstable public class MurmurHash extends Hash { private static MurmurHash _instance = new MurmurHash(); public static Hash getInstance() { return _instance; } @Override public int hash(byte[] data, int length, int seed) { int m = 0x5bd1e995; int r = 24; int h = seed ^ length; int len_4 = length >> 2; for (int i = 0; i < len_4; i++) { int i_4 = i << 2; int k = data[i_4 + 3]; k = k << 8; k = k | (data[i_4 + 2] & 0xff); k = k << 8; k = k | (data[i_4 + 1] & 0xff); k = k << 8; k = k | (data[i_4 + 0] & 0xff); k *= m; k ^= k >>> r; k *= m; h *= m; h ^= k; } // avoid calculating modulo int len_m = len_4 << 2; int left = length - len_m; if (left != 0) { if (left >= 3) { h ^= (int) data[length - 3] << 16; } if (left >= 2) { h ^= (int) data[length - 2] << 8; } if (left >= 1) { h ^= (int) data[length - 1]; } h *= m; } h ^= h >>> 13; h *= m; h ^= h >>> 15; return h; } }
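A brief usage sketch for the MurmurHash singleton above; the example class name and key are illustrative.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.util.hash.MurmurHash;

public class MurmurHashExample {
  public static void main(String[] args) {
    byte[] key = "example-key".getBytes(StandardCharsets.UTF_8);
    // hash(data, length, seed); Hash#hash(byte[]) uses a seed of -1 by default.
    int h = MurmurHash.getInstance().hash(key, key.length, -1);
    System.out.println(Integer.toHexString(h));
  }
}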
2,400
25.677778
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/JenkinsHash.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.hash; import java.io.FileInputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Produces 32-bit hash for hash table lookup. * * <pre>lookup3.c, by Bob Jenkins, May 2006, Public Domain. * * You can use this free for any purpose. It's in the public domain. * It has no warranty. * </pre> * * @see <a href="http://burtleburtle.net/bob/c/lookup3.c">lookup3.c</a> * @see <a href="http://www.ddj.com/184410284">Hash Functions (and how this * function compares to others such as CRC, MD?, etc</a> * @see <a href="http://burtleburtle.net/bob/hash/doobs.html">Has update on the * Dr. Dobbs Article</a> */ @InterfaceAudience.Private @InterfaceStability.Unstable public class JenkinsHash extends Hash { private static long INT_MASK = 0x00000000ffffffffL; private static long BYTE_MASK = 0x00000000000000ffL; private static JenkinsHash _instance = new JenkinsHash(); public static Hash getInstance() { return _instance; } private static long rot(long val, int pos) { return ((Integer.rotateLeft( (int)(val & INT_MASK), pos)) & INT_MASK); } /** * taken from hashlittle() -- hash a variable-length key into a 32-bit value * * @param key the key (the unaligned variable-length array of bytes) * @param nbytes number of bytes to include in hash * @param initval can be any integer value * @return a 32-bit value. Every bit of the key affects every bit of the * return value. Two keys differing by one or two bits will have totally * different hash values. * * <p>The best hash table sizes are powers of 2. There is no need to do mod * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. * For example, if you need only 10 bits, do * <code>h = (h & hashmask(10));</code> * In which case, the hash table should have hashsize(10) elements. * * <p>If you are hashing n strings byte[][] k, do it like this: * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); * * <p>By Bob Jenkins, 2006. [email protected]. You may use this * code any way you wish, private, educational, or commercial. It's free. * * <p>Use for hash table lookup, or anything where one collision in 2^^32 is * acceptable. Do NOT use for cryptographic purposes. 
*/ @Override @SuppressWarnings("fallthrough") public int hash(byte[] key, int nbytes, int initval) { int length = nbytes; long a, b, c; // We use longs because we don't have unsigned ints a = b = c = (0x00000000deadbeefL + length + initval) & INT_MASK; int offset = 0; for (; length > 12; offset += 12, length -= 12) { a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK; b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK; c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; /* * mix -- mix 3 32-bit values reversibly. * This is reversible, so any information in (a,b,c) before mix() is * still in (a,b,c) after mix(). * * If four pairs of (a,b,c) inputs are run through mix(), or through * mix() in reverse, there are at least 32 bits of the output that * are sometimes the same for one pair and different for another pair. * * This was tested for: * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. * * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that * satisfy this are * 4 6 8 16 19 4 * 9 15 3 18 27 15 * 14 9 3 7 17 3 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for * "differ" defined as + with a one-bit base and a two-bit delta. I * used http://burtleburtle.net/bob/hash/avalanche.html to choose * the operations, constants, and arrangements of the variables. * * This does not achieve avalanche. There are input bits of (a,b,c) * that fail to affect some output bits of (a,b,c), especially of a. * The most thoroughly mixed value is c, but it doesn't really even * achieve avalanche in c. * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the * opposite direction as the goal of parallelism. I did what I could. * Rotates seem to cost as much as shifts on every machine I could lay * my hands on, and rotates are much kinder to the top and bottom bits, * so I used rotates. 
* * #define mix(a,b,c) \ * { \ * a -= c; a ^= rot(c, 4); c += b; \ * b -= a; b ^= rot(a, 6); a += c; \ * c -= b; c ^= rot(b, 8); b += a; \ * a -= c; a ^= rot(c,16); c += b; \ * b -= a; b ^= rot(a,19); a += c; \ * c -= b; c ^= rot(b, 4); b += a; \ * } * * mix(a,b,c); */ a = (a - c) & INT_MASK; a ^= rot(c, 4); c = (c + b) & INT_MASK; b = (b - a) & INT_MASK; b ^= rot(a, 6); a = (a + c) & INT_MASK; c = (c - b) & INT_MASK; c ^= rot(b, 8); b = (b + a) & INT_MASK; a = (a - c) & INT_MASK; a ^= rot(c,16); c = (c + b) & INT_MASK; b = (b - a) & INT_MASK; b ^= rot(a,19); a = (a + c) & INT_MASK; c = (c - b) & INT_MASK; c ^= rot(b, 4); b = (b + a) & INT_MASK; } //-------------------------------- last block: affect all 32 bits of (c) switch (length) { // all the case statements fall through case 12: c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 11: c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 10: c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 9: c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK; case 8: b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 7: b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 6: b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 5: b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK; case 4: a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 3: a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 2: a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 1: a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; break; case 0: return (int)(c & INT_MASK); } /* * final -- final mixing of 3 32-bit values (a,b,c) into c * * Pairs of (a,b,c) values differing in only a few bits will usually * produce values of c that look totally different. This was tested for * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). * * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. * * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. * * These constants passed: * 14 11 25 16 4 14 24 * 12 14 25 16 4 14 24 * and these came close: * 4 8 15 26 3 22 24 * 10 8 15 26 3 22 24 * 11 8 15 26 3 22 24 * * #define final(a,b,c) \ * { * c ^= b; c -= rot(b,14); \ * a ^= c; a -= rot(c,11); \ * b ^= a; b -= rot(a,25); \ * c ^= b; c -= rot(b,16); \ * a ^= c; a -= rot(c,4); \ * b ^= a; b -= rot(a,14); \ * c ^= b; c -= rot(b,24); \ * } * */ c ^= b; c = (c - rot(b,14)) & INT_MASK; a ^= c; a = (a - rot(c,11)) & INT_MASK; b ^= a; b = (b - rot(a,25)) & INT_MASK; c ^= b; c = (c - rot(b,16)) & INT_MASK; a ^= c; a = (a - rot(c,4)) & INT_MASK; b ^= a; b = (b - rot(a,14)) & INT_MASK; c ^= b; c = (c - rot(b,24)) & INT_MASK; return (int)(c & INT_MASK); } /** * Compute the hash of the specified file * @param args name of file to compute hash of. 
* @throws IOException */ public static void main(String[] args) throws IOException { if (args.length != 1) { System.err.println("Usage: JenkinsHash filename"); System.exit(-1); } try (FileInputStream in = new FileInputStream(args[0])) { byte[] bytes = new byte[512]; int value = 0; JenkinsHash hash = new JenkinsHash(); for (int length = in.read(bytes); length > 0; length = in.read(bytes)) { value = hash.hash(bytes, length, value); } System.out.println(Math.abs(value)); } } }
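A sketch of the chained-hash pattern suggested in the lookup3.c comments above, feeding each result back in as initval; the class name and keys are made up for illustration.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.util.hash.Hash;
import org.apache.hadoop.util.hash.JenkinsHash;

public class JenkinsHashExample {
  public static void main(String[] args) {
    Hash jenkins = JenkinsHash.getInstance();
    byte[][] keys = {
        "alpha".getBytes(StandardCharsets.UTF_8),
        "beta".getBytes(StandardCharsets.UTF_8)
    };
    // h = hash(k[i], h): chaining makes the combined hash depend on every key.
    int h = 0;
    for (byte[] k : keys) {
      h = jenkins.hash(k, k.length, h);
    }
    System.out.println(h);
  }
}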
11,106
40.755639
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.hash; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; /** * This class represents a common API for hashing functions. */ @InterfaceAudience.Private @InterfaceStability.Unstable public abstract class Hash { /** Constant to denote invalid hash type. */ public static final int INVALID_HASH = -1; /** Constant to denote {@link JenkinsHash}. */ public static final int JENKINS_HASH = 0; /** Constant to denote {@link MurmurHash}. */ public static final int MURMUR_HASH = 1; /** * This utility method converts String representation of hash function name * to a symbolic constant. Currently two function types are supported, * "jenkins" and "murmur". * @param name hash function name * @return one of the predefined constants */ public static int parseHashType(String name) { if ("jenkins".equalsIgnoreCase(name)) { return JENKINS_HASH; } else if ("murmur".equalsIgnoreCase(name)) { return MURMUR_HASH; } else { return INVALID_HASH; } } /** * This utility method converts the name of the configured * hash type to a symbolic constant. * @param conf configuration * @return one of the predefined constants */ public static int getHashType(Configuration conf) { String name = conf.get("hadoop.util.hash.type", "murmur"); return parseHashType(name); } /** * Get a singleton instance of hash function of a given type. * @param type predefined hash type * @return hash function instance, or null if type is invalid */ public static Hash getInstance(int type) { switch(type) { case JENKINS_HASH: return JenkinsHash.getInstance(); case MURMUR_HASH: return MurmurHash.getInstance(); default: return null; } } /** * Get a singleton instance of hash function of a type * defined in the configuration. * @param conf current configuration * @return defined hash type, or null if type is invalid */ public static Hash getInstance(Configuration conf) { int type = getHashType(conf); return getInstance(type); } /** * Calculate a hash using all bytes from the input argument, and * a seed of -1. * @param bytes input bytes * @return hash value */ public int hash(byte[] bytes) { return hash(bytes, bytes.length, -1); } /** * Calculate a hash using all bytes from the input argument, * and a provided seed value. 
* @param bytes input bytes * @param initval seed value * @return hash value */ public int hash(byte[] bytes, int initval) { return hash(bytes, bytes.length, initval); } /** * Calculate a hash using bytes from 0 to <code>length</code>, and * the provided seed value * @param bytes input bytes * @param length length of the valid bytes to consider * @param initval seed value * @return hash value */ public abstract int hash(byte[] bytes, int length, int initval); }
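A sketch of selecting the hash implementation through Configuration, using the "hadoop.util.hash.type" key read by getHashType above; the example class name is illustrative, and the Configuration usage is assumed from the standard org.apache.hadoop.conf API.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.hash.Hash;

public class HashSelectionExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The default is "murmur"; "jenkins" selects JenkinsHash instead.
    conf.set("hadoop.util.hash.type", "jenkins");

    Hash hash = Hash.getInstance(conf);
    byte[] bytes = "payload".getBytes(StandardCharsets.UTF_8);
    System.out.println(hash.hash(bytes)); // hashes all bytes with a seed of -1
  }
}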
3,899
30.451613
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/BloomFilter.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.BitSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Implements a <i>Bloom filter</i>, as defined by Bloom in 1970. * <p> * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by * the networking research community in the past decade thanks to the bandwidth efficiencies that it * offers for the transmission of set membership information between networked hosts. A sender encodes * the information into a bit vector, the Bloom filter, that is more compact than a conventional * representation. Computation and space costs for construction are linear in the number of elements. * The receiver uses the filter to test whether various elements are members of the set. Though the * filter will occasionally return a false positive, it will never return a false negative. 
When creating * the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. * * <p> * Originally created by * <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>. * * @see Filter The general behavior of a filter * * @see <a href="http://portal.acm.org/citation.cfm?id=362692&dl=ACM&coll=portal">Space/Time Trade-Offs in Hash Coding with Allowable Errors</a> */ @InterfaceAudience.Public @InterfaceStability.Stable public class BloomFilter extends Filter { private static final byte[] bitvalues = new byte[] { (byte)0x01, (byte)0x02, (byte)0x04, (byte)0x08, (byte)0x10, (byte)0x20, (byte)0x40, (byte)0x80 }; /** The bit vector. */ BitSet bits; /** Default constructor - use with readFields */ public BloomFilter() { super(); } /** * Constructor * @param vectorSize The vector size of <i>this</i> filter. * @param nbHash The number of hash function to consider. * @param hashType type of the hashing function (see * {@link org.apache.hadoop.util.hash.Hash}). */ public BloomFilter(int vectorSize, int nbHash, int hashType) { super(vectorSize, nbHash, hashType); bits = new BitSet(this.vectorSize); } @Override public void add(Key key) { if(key == null) { throw new NullPointerException("key cannot be null"); } int[] h = hash.hash(key); hash.clear(); for(int i = 0; i < nbHash; i++) { bits.set(h[i]); } } @Override public void and(Filter filter) { if(filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be and-ed"); } this.bits.and(((BloomFilter) filter).bits); } @Override public boolean membershipTest(Key key) { if(key == null) { throw new NullPointerException("key cannot be null"); } int[] h = hash.hash(key); hash.clear(); for(int i = 0; i < nbHash; i++) { if(!bits.get(h[i])) { return false; } } return true; } @Override public void not() { bits.flip(0, vectorSize); } @Override public void or(Filter filter) { if(filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be or-ed"); } bits.or(((BloomFilter) filter).bits); } @Override public void xor(Filter filter) { if(filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be xor-ed"); } bits.xor(((BloomFilter) filter).bits); } @Override public String toString() { return bits.toString(); } /** * @return size of the the bloomfilter */ public int getVectorSize() { return this.vectorSize; } // Writable @Override public void write(DataOutput out) throws IOException { super.write(out); byte[] bytes = new byte[getNBytes()]; for(int i = 0, byteIndex = 0, bitIndex = 0; i < vectorSize; i++, bitIndex++) { if (bitIndex == 8) { bitIndex = 0; byteIndex++; } if (bitIndex == 0) { bytes[byteIndex] = 0; } if (bits.get(i)) { bytes[byteIndex] |= bitvalues[bitIndex]; } } out.write(bytes); } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); bits = new BitSet(this.vectorSize); byte[] bytes = new byte[getNBytes()]; in.readFully(bytes); for(int i = 0, byteIndex = 0, bitIndex = 0; i < vectorSize; i++, bitIndex++) { if (bitIndex == 8) { bitIndex = 0; byteIndex++; } if ((bytes[byteIndex] & bitvalues[bitIndex]) != 0) { bits.set(i); } } } /* @return number of bytes needed to hold bit vector */ private int getNBytes() 
{ return (vectorSize + 7) / 8; } }//end class
7,742
31.2625
144
java
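A minimal usage sketch for the BloomFilter entry above, assuming hadoop-common is on the classpath; the vector size of 1000 bits, the 5 hash functions, and the BloomFilterExample class name are illustrative assumptions rather than values taken from the source.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class BloomFilterExample {
  public static void main(String[] args) {
    // 1000-bit vector, 5 hash functions, Jenkins hashing (illustrative sizing).
    BloomFilter filter = new BloomFilter(1000, 5, Hash.JENKINS_HASH);
    filter.add(new Key("alpha".getBytes(StandardCharsets.UTF_8)));
    filter.add(new Key("beta".getBytes(StandardCharsets.UTF_8)));

    // Added keys always test true; absent keys may rarely yield a false positive.
    System.out.println(filter.membershipTest(new Key("alpha".getBytes(StandardCharsets.UTF_8)))); // true
    System.out.println(filter.membershipTest(new Key("gamma".getBytes(StandardCharsets.UTF_8)))); // usually false
  }
}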
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/RemoveScheme.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) * * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Defines the different remove scheme for retouched Bloom filters. * <p> * Originally created by * <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>. */ @InterfaceAudience.Public @InterfaceStability.Stable public interface RemoveScheme { /** * Random selection. * <p> * The idea is to randomly select a bit to reset. */ public final static short RANDOM = 0; /** * MinimumFN Selection. * <p> * The idea is to select the bit to reset that will generate the minimum * number of false negative. */ public final static short MINIMUM_FN = 1; /** * MaximumFP Selection. * <p> * The idea is to select the bit to reset that will remove the maximum number * of false positive. */ public final static short MAXIMUM_FP = 2; /** * Ratio Selection. * <p> * The idea is to select the bit to reset that will, at the same time, remove * the maximum number of false positve while minimizing the amount of false * negative generated. 
*/ public final static short RATIO = 3; }
3,749
37.659794
83
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/RetouchedBloomFilter.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Random; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Implements a <i>retouched Bloom filter</i>, as defined in the CoNEXT 2006 paper. * <p> * It allows the removal of selected false positives at the cost of introducing * random false negatives, and with the benefit of eliminating some random false * positives at the same time. * * <p> * Originally created by * <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>. 
* * @see Filter The general behavior of a filter * @see BloomFilter A Bloom filter * @see RemoveScheme The different selective clearing algorithms * * @see <a href="http://www-rp.lip6.fr/site_npa/site_rp/_publications/740-rbf_cameraready.pdf">Retouched Bloom Filters: Allowing Networked Applications to Trade Off Selected False Positives Against False Negatives</a> */ @InterfaceAudience.Public @InterfaceStability.Stable public final class RetouchedBloomFilter extends BloomFilter implements RemoveScheme { /** * KeyList vector (or ElementList Vector, as defined in the paper) of false positives. */ List<Key>[] fpVector; /** * KeyList vector of keys recorded in the filter. */ List<Key>[] keyVector; /** * Ratio vector. */ double[] ratio; private Random rand; /** Default constructor - use with readFields */ public RetouchedBloomFilter() {} /** * Constructor * @param vectorSize The vector size of <i>this</i> filter. * @param nbHash The number of hash function to consider. * @param hashType type of the hashing function (see * {@link org.apache.hadoop.util.hash.Hash}). */ public RetouchedBloomFilter(int vectorSize, int nbHash, int hashType) { super(vectorSize, nbHash, hashType); this.rand = null; createVector(); } @Override public void add(Key key) { if (key == null) { throw new NullPointerException("key can not be null"); } int[] h = hash.hash(key); hash.clear(); for (int i = 0; i < nbHash; i++) { bits.set(h[i]); keyVector[h[i]].add(key); } } /** * Adds a false positive information to <i>this</i> retouched Bloom filter. * <p> * <b>Invariant</b>: if the false positive is <code>null</code>, nothing happens. * @param key The false positive key to add. */ public void addFalsePositive(Key key) { if (key == null) { throw new NullPointerException("key can not be null"); } int[] h = hash.hash(key); hash.clear(); for (int i = 0; i < nbHash; i++) { fpVector[h[i]].add(key); } } /** * Adds a collection of false positive information to <i>this</i> retouched Bloom filter. * @param coll The collection of false positive. */ public void addFalsePositive(Collection<Key> coll) { if (coll == null) { throw new NullPointerException("Collection<Key> can not be null"); } for (Key k : coll) { addFalsePositive(k); } } /** * Adds a list of false positive information to <i>this</i> retouched Bloom filter. * @param keys The list of false positive. */ public void addFalsePositive(List<Key> keys) { if (keys == null) { throw new NullPointerException("ArrayList<Key> can not be null"); } for (Key k : keys) { addFalsePositive(k); } } /** * Adds an array of false positive information to <i>this</i> retouched Bloom filter. * @param keys The array of false positive. */ public void addFalsePositive(Key[] keys) { if (keys == null) { throw new NullPointerException("Key[] can not be null"); } for (int i = 0; i < keys.length; i++) { addFalsePositive(keys[i]); } } /** * Performs the selective clearing for a given key. * @param k The false positive key to remove from <i>this</i> retouched Bloom filter. * @param scheme The selective clearing scheme to apply. 
*/ public void selectiveClearing(Key k, short scheme) { if (k == null) { throw new NullPointerException("Key can not be null"); } if (!membershipTest(k)) { throw new IllegalArgumentException("Key is not a member"); } int index = 0; int[] h = hash.hash(k); switch(scheme) { case RANDOM: index = randomRemove(); break; case MINIMUM_FN: index = minimumFnRemove(h); break; case MAXIMUM_FP: index = maximumFpRemove(h); break; case RATIO: index = ratioRemove(h); break; default: throw new AssertionError("Undefined selective clearing scheme"); } clearBit(index); } private int randomRemove() { if (rand == null) { rand = new Random(); } return rand.nextInt(nbHash); } /** * Chooses the bit position that minimizes the number of false negative generated. * @param h The different bit positions. * @return The position that minimizes the number of false negative generated. */ private int minimumFnRemove(int[] h) { int minIndex = Integer.MAX_VALUE; double minValue = Double.MAX_VALUE; for (int i = 0; i < nbHash; i++) { double keyWeight = getWeight(keyVector[h[i]]); if (keyWeight < minValue) { minIndex = h[i]; minValue = keyWeight; } } return minIndex; } /** * Chooses the bit position that maximizes the number of false positive removed. * @param h The different bit positions. * @return The position that maximizes the number of false positive removed. */ private int maximumFpRemove(int[] h) { int maxIndex = Integer.MIN_VALUE; double maxValue = Double.MIN_VALUE; for (int i = 0; i < nbHash; i++) { double fpWeight = getWeight(fpVector[h[i]]); if (fpWeight > maxValue) { maxValue = fpWeight; maxIndex = h[i]; } } return maxIndex; } /** * Chooses the bit position that minimizes the number of false negative generated while maximizing. * the number of false positive removed. * @param h The different bit positions. * @return The position that minimizes the number of false negative generated while maximizing. */ private int ratioRemove(int[] h) { computeRatio(); int minIndex = Integer.MAX_VALUE; double minValue = Double.MAX_VALUE; for (int i = 0; i < nbHash; i++) { if (ratio[h[i]] < minValue) { minValue = ratio[h[i]]; minIndex = h[i]; } } return minIndex; } /** * Clears a specified bit in the bit vector and keeps up-to-date the KeyList vectors. * @param index The position of the bit to clear. */ private void clearBit(int index) { if (index < 0 || index >= vectorSize) { throw new ArrayIndexOutOfBoundsException(index); } List<Key> kl = keyVector[index]; List<Key> fpl = fpVector[index]; // update key list int listSize = kl.size(); for (int i = 0; i < listSize && !kl.isEmpty(); i++) { removeKey(kl.get(0), keyVector); } kl.clear(); keyVector[index].clear(); //update false positive list listSize = fpl.size(); for (int i = 0; i < listSize && !fpl.isEmpty(); i++) { removeKey(fpl.get(0), fpVector); } fpl.clear(); fpVector[index].clear(); //update ratio ratio[index] = 0.0; //update bit vector bits.clear(index); } /** * Removes a given key from <i>this</i> filer. * @param k The key to remove. * @param vector The counting vector associated to the key. */ private void removeKey(Key k, List<Key>[] vector) { if (k == null) { throw new NullPointerException("Key can not be null"); } if (vector == null) { throw new NullPointerException("ArrayList<Key>[] can not be null"); } int[] h = hash.hash(k); hash.clear(); for (int i = 0; i < nbHash; i++) { vector[h[i]].remove(k); } } /** * Computes the ratio A/FP. 
*/ private void computeRatio() { for (int i = 0; i < vectorSize; i++) { double keyWeight = getWeight(keyVector[i]); double fpWeight = getWeight(fpVector[i]); if (keyWeight > 0 && fpWeight > 0) { ratio[i] = keyWeight / fpWeight; } } } private double getWeight(List<Key> keyList) { double weight = 0.0; for (Key k : keyList) { weight += k.getWeight(); } return weight; } /** * Creates and initialises the various vectors. */ @SuppressWarnings("unchecked") private void createVector() { fpVector = new List[vectorSize]; keyVector = new List[vectorSize]; ratio = new double[vectorSize]; for (int i = 0; i < vectorSize; i++) { fpVector[i] = Collections.synchronizedList(new ArrayList<Key>()); keyVector[i] = Collections.synchronizedList(new ArrayList<Key>()); ratio[i] = 0.0; } } // Writable @Override public void write(DataOutput out) throws IOException { super.write(out); for (int i = 0; i < fpVector.length; i++) { List<Key> list = fpVector[i]; out.writeInt(list.size()); for (Key k : list) { k.write(out); } } for (int i = 0; i < keyVector.length; i++) { List<Key> list = keyVector[i]; out.writeInt(list.size()); for (Key k : list) { k.write(out); } } for (int i = 0; i < ratio.length; i++) { out.writeDouble(ratio[i]); } } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); createVector(); for (int i = 0; i < fpVector.length; i++) { List<Key> list = fpVector[i]; int size = in.readInt(); for (int j = 0; j < size; j++) { Key k = new Key(); k.readFields(in); list.add(k); } } for (int i = 0; i < keyVector.length; i++) { List<Key> list = keyVector[i]; int size = in.readInt(); for (int j = 0; j < size; j++) { Key k = new Key(); k.readFields(in); list.add(k); } } for (int i = 0; i < ratio.length; i++) { ratio[i] = in.readDouble(); } } }
12,884
27.256579
217
java
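A sketch of the selective-clearing workflow described in the RetouchedBloomFilter javadoc; the sizing, the key contents, and the choice of the RATIO scheme are assumptions made for illustration.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.bloom.RemoveScheme;
import org.apache.hadoop.util.bloom.RetouchedBloomFilter;
import org.apache.hadoop.util.hash.Hash;

public class RetouchedBloomFilterExample {
  public static void main(String[] args) {
    RetouchedBloomFilter rbf = new RetouchedBloomFilter(1000, 5, Hash.JENKINS_HASH);
    rbf.add(new Key("stored".getBytes(StandardCharsets.UTF_8)));

    // Suppose this key was observed to be a false positive of the filter.
    Key falsePositive = new Key("noise".getBytes(StandardCharsets.UTF_8));
    if (rbf.membershipTest(falsePositive)) {
      rbf.addFalsePositive(falsePositive);
      // Clear one bit, trading this false positive against possible new false negatives.
      rbf.selectiveClearing(falsePositive, RemoveScheme.RATIO);
    }
  }
}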
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/CountingBloomFilter.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Implements a <i>counting Bloom filter</i>, as defined by Fan et al. in a ToN * 2000 paper. * <p> * A counting Bloom filter is an improvement to standard a Bloom filter as it * allows dynamic additions and deletions of set membership information. This * is achieved through the use of a counting vector instead of a bit vector. * <p> * Originally created by * <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>. 
* * @see Filter The general behavior of a filter * * @see <a href="http://portal.acm.org/citation.cfm?id=343571.343572">Summary cache: a scalable wide-area web cache sharing protocol</a> */ @InterfaceAudience.Public @InterfaceStability.Stable public final class CountingBloomFilter extends Filter { /** Storage for the counting buckets */ private long[] buckets; /** We are using 4bit buckets, so each bucket can count to 15 */ private final static long BUCKET_MAX_VALUE = 15; /** Default constructor - use with readFields */ public CountingBloomFilter() {} /** * Constructor * @param vectorSize The vector size of <i>this</i> filter. * @param nbHash The number of hash function to consider. * @param hashType type of the hashing function (see * {@link org.apache.hadoop.util.hash.Hash}). */ public CountingBloomFilter(int vectorSize, int nbHash, int hashType) { super(vectorSize, nbHash, hashType); buckets = new long[buckets2words(vectorSize)]; } /** returns the number of 64 bit words it would take to hold vectorSize buckets */ private static int buckets2words(int vectorSize) { return ((vectorSize - 1) >>> 4) + 1; } @Override public void add(Key key) { if(key == null) { throw new NullPointerException("key can not be null"); } int[] h = hash.hash(key); hash.clear(); for(int i = 0; i < nbHash; i++) { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; // only increment if the count in the bucket is less than BUCKET_MAX_VALUE if(bucketValue < BUCKET_MAX_VALUE) { // increment by 1 buckets[wordNum] = (buckets[wordNum] & ~bucketMask) | ((bucketValue + 1) << bucketShift); } } } /** * Removes a specified key from <i>this</i> counting Bloom filter. * <p> * <b>Invariant</b>: nothing happens if the specified key does not belong to <i>this</i> counter Bloom filter. * @param key The key to remove. 
*/ public void delete(Key key) { if(key == null) { throw new NullPointerException("Key may not be null"); } if(!membershipTest(key)) { throw new IllegalArgumentException("Key is not a member"); } int[] h = hash.hash(key); hash.clear(); for(int i = 0; i < nbHash; i++) { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; // only decrement if the count in the bucket is between 0 and BUCKET_MAX_VALUE if(bucketValue >= 1 && bucketValue < BUCKET_MAX_VALUE) { // decrement by 1 buckets[wordNum] = (buckets[wordNum] & ~bucketMask) | ((bucketValue - 1) << bucketShift); } } } @Override public void and(Filter filter) { if(filter == null || !(filter instanceof CountingBloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be and-ed"); } CountingBloomFilter cbf = (CountingBloomFilter)filter; int sizeInWords = buckets2words(vectorSize); for(int i = 0; i < sizeInWords; i++) { this.buckets[i] &= cbf.buckets[i]; } } @Override public boolean membershipTest(Key key) { if(key == null) { throw new NullPointerException("Key may not be null"); } int[] h = hash.hash(key); hash.clear(); for(int i = 0; i < nbHash; i++) { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 long bucketMask = 15L << bucketShift; if((buckets[wordNum] & bucketMask) == 0) { return false; } } return true; } /** * This method calculates an approximate count of the key, i.e. how many * times the key was added to the filter. This allows the filter to be * used as an approximate <code>key -&gt; count</code> map. * <p>NOTE: due to the bucket size of this filter, inserting the same * key more than 15 times will cause an overflow at all filter positions * associated with this key, and it will significantly increase the error * rate for this and other keys. For this reason the filter can only be * used to store small count values <code>0 &lt;= N &lt;&lt; 15</code>. * @param key key to be tested * @return 0 if the key is not present. Otherwise, a positive value v will * be returned such that <code>v == count</code> with probability equal to the * error rate of this filter, and <code>v &gt; count</code> otherwise. * Additionally, if the filter experienced an underflow as a result of * {@link #delete(Key)} operation, the return value may be lower than the * <code>count</code> with the probability of the false negative rate of such * filter. 
*/ public int approximateCount(Key key) { int res = Integer.MAX_VALUE; int[] h = hash.hash(key); hash.clear(); for (int i = 0; i < nbHash; i++) { // find the bucket int wordNum = h[i] >> 4; // div 16 int bucketShift = (h[i] & 0x0f) << 2; // (mod 16) * 4 long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; if (bucketValue < res) res = (int)bucketValue; } if (res != Integer.MAX_VALUE) { return res; } else { return 0; } } @Override public void not() { throw new UnsupportedOperationException("not() is undefined for " + this.getClass().getName()); } @Override public void or(Filter filter) { if(filter == null || !(filter instanceof CountingBloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be or-ed"); } CountingBloomFilter cbf = (CountingBloomFilter)filter; int sizeInWords = buckets2words(vectorSize); for(int i = 0; i < sizeInWords; i++) { this.buckets[i] |= cbf.buckets[i]; } } @Override public void xor(Filter filter) { throw new UnsupportedOperationException("xor() is undefined for " + this.getClass().getName()); } @Override public String toString() { StringBuilder res = new StringBuilder(); for(int i = 0; i < vectorSize; i++) { if(i > 0) { res.append(" "); } int wordNum = i >> 4; // div 16 int bucketShift = (i & 0x0f) << 2; // (mod 16) * 4 long bucketMask = 15L << bucketShift; long bucketValue = (buckets[wordNum] & bucketMask) >>> bucketShift; res.append(bucketValue); } return res.toString(); } // Writable @Override public void write(DataOutput out) throws IOException { super.write(out); int sizeInWords = buckets2words(vectorSize); for(int i = 0; i < sizeInWords; i++) { out.writeLong(buckets[i]); } } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); int sizeInWords = buckets2words(vectorSize); buckets = new long[sizeInWords]; for(int i = 0; i < sizeInWords; i++) { buckets[i] = in.readLong(); } } }
10,755
33.696774
136
java
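A short sketch of the counting-filter semantics (deletion and approximate counts); the parameters and key bytes are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.CountingBloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class CountingBloomFilterExample {
  public static void main(String[] args) {
    CountingBloomFilter cbf = new CountingBloomFilter(1000, 5, Hash.JENKINS_HASH);
    Key k = new Key("item".getBytes(StandardCharsets.UTF_8));

    cbf.add(k);
    cbf.add(k);
    System.out.println(cbf.approximateCount(k)); // 2 here; in general an estimate that does not undercount

    cbf.delete(k); // dynamic deletion, which a plain Bloom filter cannot do
    System.out.println(cbf.membershipTest(k)); // still true: one insertion remains
  }
}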
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/DynamicBloomFilter.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Implements a <i>dynamic Bloom filter</i>, as defined in the INFOCOM 2006 paper. * <p> * A dynamic Bloom filter (DBF) makes use of a <code>s * m</code> bit matrix but * each of the <code>s</code> rows is a standard Bloom filter. The creation * process of a DBF is iterative. At the start, the DBF is a <code>1 * m</code> * bit matrix, i.e., it is composed of a single standard Bloom filter. * It assumes that <code>n<sub>r</sub></code> elements are recorded in the * initial bit vector, where <code>n<sub>r</sub> <= n</code> (<code>n</code> is * the cardinality of the set <code>A</code> to record in the filter). * <p> * As the size of <code>A</code> grows during the execution of the application, * several keys must be inserted in the DBF. When inserting a key into the DBF, * one must first get an active Bloom filter in the matrix. 
A Bloom filter is * active when the number of recorded keys, <code>n<sub>r</sub></code>, is * strictly less than the current cardinality of <code>A</code>, <code>n</code>. * If an active Bloom filter is found, the key is inserted and * <code>n<sub>r</sub></code> is incremented by one. On the other hand, if there * is no active Bloom filter, a new one is created (i.e., a new row is added to * the matrix) according to the current size of <code>A</code> and the element * is added in this new Bloom filter and the <code>n<sub>r</sub></code> value of * this new Bloom filter is set to one. A given key is said to belong to the * DBF if the <code>k</code> positions are set to one in one of the matrix rows. * <p> * Originally created by * <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>. * * @see Filter The general behavior of a filter * @see BloomFilter A Bloom filter * * @see <a href="http://www.cse.fau.edu/~jie/research/publications/Publication_files/infocom2006.pdf">Theory and Network Applications of Dynamic Bloom Filters</a> */ @InterfaceAudience.Public @InterfaceStability.Stable public class DynamicBloomFilter extends Filter { /** * Threshold for the maximum number of key to record in a dynamic Bloom filter row. */ private int nr; /** * The number of keys recorded in the current standard active Bloom filter. */ private int currentNbRecord; /** * The matrix of Bloom filter. */ private BloomFilter[] matrix; /** * Zero-args constructor for the serialization. */ public DynamicBloomFilter() { } /** * Constructor. * <p> * Builds an empty Dynamic Bloom filter. * @param vectorSize The number of bits in the vector. * @param nbHash The number of hash function to consider. * @param hashType type of the hashing function (see * {@link org.apache.hadoop.util.hash.Hash}). * @param nr The threshold for the maximum number of keys to record in a * dynamic Bloom filter row. 
*/ public DynamicBloomFilter(int vectorSize, int nbHash, int hashType, int nr) { super(vectorSize, nbHash, hashType); this.nr = nr; this.currentNbRecord = 0; matrix = new BloomFilter[1]; matrix[0] = new BloomFilter(this.vectorSize, this.nbHash, this.hashType); } @Override public void add(Key key) { if (key == null) { throw new NullPointerException("Key can not be null"); } BloomFilter bf = getActiveStandardBF(); if (bf == null) { addRow(); bf = matrix[matrix.length - 1]; currentNbRecord = 0; } bf.add(key); currentNbRecord++; } @Override public void and(Filter filter) { if (filter == null || !(filter instanceof DynamicBloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be and-ed"); } DynamicBloomFilter dbf = (DynamicBloomFilter)filter; if (dbf.matrix.length != this.matrix.length || dbf.nr != this.nr) { throw new IllegalArgumentException("filters cannot be and-ed"); } for (int i = 0; i < matrix.length; i++) { matrix[i].and(dbf.matrix[i]); } } @Override public boolean membershipTest(Key key) { if (key == null) { return true; } for (int i = 0; i < matrix.length; i++) { if (matrix[i].membershipTest(key)) { return true; } } return false; } @Override public void not() { for (int i = 0; i < matrix.length; i++) { matrix[i].not(); } } @Override public void or(Filter filter) { if (filter == null || !(filter instanceof DynamicBloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be or-ed"); } DynamicBloomFilter dbf = (DynamicBloomFilter)filter; if (dbf.matrix.length != this.matrix.length || dbf.nr != this.nr) { throw new IllegalArgumentException("filters cannot be or-ed"); } for (int i = 0; i < matrix.length; i++) { matrix[i].or(dbf.matrix[i]); } } @Override public void xor(Filter filter) { if (filter == null || !(filter instanceof DynamicBloomFilter) || filter.vectorSize != this.vectorSize || filter.nbHash != this.nbHash) { throw new IllegalArgumentException("filters cannot be xor-ed"); } DynamicBloomFilter dbf = (DynamicBloomFilter)filter; if (dbf.matrix.length != this.matrix.length || dbf.nr != this.nr) { throw new IllegalArgumentException("filters cannot be xor-ed"); } for(int i = 0; i<matrix.length; i++) { matrix[i].xor(dbf.matrix[i]); } } @Override public String toString() { StringBuilder res = new StringBuilder(); for (int i = 0; i < matrix.length; i++) { res.append(matrix[i]); res.append(Character.LINE_SEPARATOR); } return res.toString(); } // Writable @Override public void write(DataOutput out) throws IOException { super.write(out); out.writeInt(nr); out.writeInt(currentNbRecord); out.writeInt(matrix.length); for (int i = 0; i < matrix.length; i++) { matrix[i].write(out); } } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); nr = in.readInt(); currentNbRecord = in.readInt(); int len = in.readInt(); matrix = new BloomFilter[len]; for (int i = 0; i < matrix.length; i++) { matrix[i] = new BloomFilter(); matrix[i].readFields(in); } } /** * Adds a new row to <i>this</i> dynamic Bloom filter. */ private void addRow() { BloomFilter[] tmp = new BloomFilter[matrix.length + 1]; for (int i = 0; i < matrix.length; i++) { tmp[i] = matrix[i]; } tmp[tmp.length-1] = new BloomFilter(vectorSize, nbHash, hashType); matrix = tmp; } /** * Returns the active standard Bloom filter in <i>this</i> dynamic Bloom filter. * @return BloomFilter The active standard Bloom filter. * <code>Null</code> otherwise. 
*/ private BloomFilter getActiveStandardBF() { if (currentNbRecord >= nr) { return null; } return matrix[matrix.length - 1]; } }
9,916
32.167224
162
java
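A sketch of how the dynamic filter grows row by row; the nr threshold of 100 and the 1,000 inserted keys are arbitrary assumptions chosen so that roughly ten internal rows get created.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.DynamicBloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class DynamicBloomFilterExample {
  public static void main(String[] args) {
    // Each internal BloomFilter row accepts at most nr = 100 keys before a new row is appended.
    DynamicBloomFilter dbf = new DynamicBloomFilter(1000, 5, Hash.JENKINS_HASH, 100);
    for (int i = 0; i < 1000; i++) {
      dbf.add(new Key(("key-" + i).getBytes(StandardCharsets.UTF_8)));
    }
    // Membership is checked against every row, so inserted keys always test true.
    System.out.println(dbf.membershipTest(new Key("key-42".getBytes(StandardCharsets.UTF_8))));
  }
}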
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Filter.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) * * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Collection; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Writable; import org.apache.hadoop.util.hash.Hash; /** * Defines the general behavior of a filter. * <p> * A filter is a data structure which aims at offering a lossy summary of a set <code>A</code>. The * key idea is to map entries of <code>A</code> (also called <i>keys</i>) into several positions * in a vector through the use of several hash functions. * <p> * Typically, a filter will be implemented as a Bloom filter (or a Bloom filter extension). * <p> * It must be extended in order to define the real behavior. 
 * * @see Key The general behavior of a key * @see HashFunction A hash function */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public abstract class Filter implements Writable { private static final int VERSION = -1; // negative to accommodate for old format /** The vector size of <i>this</i> filter. */ protected int vectorSize; /** The hash function used to map a key to several positions in the vector. */ protected HashFunction hash; /** The number of hash functions to consider. */ protected int nbHash; /** Type of hashing function to use. */ protected int hashType; protected Filter() {} /** * Constructor. * @param vectorSize The vector size of <i>this</i> filter. * @param nbHash The number of hash functions to consider. * @param hashType type of the hashing function (see {@link Hash}). */ protected Filter(int vectorSize, int nbHash, int hashType) { this.vectorSize = vectorSize; this.nbHash = nbHash; this.hashType = hashType; this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType); } /** * Adds a key to <i>this</i> filter. * @param key The key to add. */ public abstract void add(Key key); /** * Determines whether a specified key belongs to <i>this</i> filter. * @param key The key to test. * @return boolean True if the specified key belongs to <i>this</i> filter. * False otherwise. */ public abstract boolean membershipTest(Key key); /** * Performs a logical AND between <i>this</i> filter and a specified filter. * <p> * <b>Invariant</b>: The result is assigned to <i>this</i> filter. * @param filter The filter to AND with. */ public abstract void and(Filter filter); /** * Performs a logical OR between <i>this</i> filter and a specified filter. * <p> * <b>Invariant</b>: The result is assigned to <i>this</i> filter. * @param filter The filter to OR with. */ public abstract void or(Filter filter); /** * Performs a logical XOR between <i>this</i> filter and a specified filter. * <p> * <b>Invariant</b>: The result is assigned to <i>this</i> filter. * @param filter The filter to XOR with. */ public abstract void xor(Filter filter); /** * Performs a logical NOT on <i>this</i> filter. * <p> * The result is assigned to <i>this</i> filter. */ public abstract void not(); /** * Adds a list of keys to <i>this</i> filter. * @param keys The list of keys. */ public void add(List<Key> keys){ if(keys == null) { throw new IllegalArgumentException("ArrayList<Key> may not be null"); } for(Key key: keys) { add(key); } }//end add() /** * Adds a collection of keys to <i>this</i> filter. * @param keys The collection of keys. */ public void add(Collection<Key> keys){ if(keys == null) { throw new IllegalArgumentException("Collection<Key> may not be null"); } for(Key key: keys) { add(key); } }//end add() /** * Adds an array of keys to <i>this</i> filter. * @param keys The array of keys. 
*/ public void add(Key[] keys){ if(keys == null) { throw new IllegalArgumentException("Key[] may not be null"); } for(int i = 0; i < keys.length; i++) { add(keys[i]); } }//end add() // Writable interface @Override public void write(DataOutput out) throws IOException { out.writeInt(VERSION); out.writeInt(this.nbHash); out.writeByte(this.hashType); out.writeInt(this.vectorSize); } @Override public void readFields(DataInput in) throws IOException { int ver = in.readInt(); if (ver > 0) { // old unversioned format this.nbHash = ver; this.hashType = Hash.JENKINS_HASH; } else if (ver == VERSION) { this.nbHash = in.readInt(); this.hashType = in.readByte(); } else { throw new IOException("Unsupported version: " + ver); } this.vectorSize = in.readInt(); this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType); } }//end class
7,492
33.059091
100
java
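Because Filter implements Writable, any concrete subclass can be persisted with write() and restored with readFields(); below is a minimal round-trip sketch using BloomFilter and in-memory streams, with illustrative names and sizes.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class FilterSerializationExample {
  public static void main(String[] args) throws IOException {
    BloomFilter original = new BloomFilter(1000, 5, Hash.JENKINS_HASH);
    original.add(new Key("persisted".getBytes(StandardCharsets.UTF_8)));

    // write() emits the versioned header (version, nbHash, hashType, vectorSize) plus the bit vector.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bos));

    // readFields() restores the same state into a filter built with the no-arg constructor.
    BloomFilter restored = new BloomFilter();
    restored.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(restored.membershipTest(new Key("persisted".getBytes(StandardCharsets.UTF_8)))); // true
  }
}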
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/HashFunction.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 * (http://www.one-lab.org) * * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.hash.Hash; /** * Implements a hash object that returns a certain number of hashed values. * * @see Key The general behavior of a key being stored in a filter * @see Filter The general behavior of a filter */ @InterfaceAudience.Public @InterfaceStability.Stable public final class HashFunction { /** The number of hashed values. */ private int nbHash; /** The maximum highest returned value. */ private int maxValue; /** Hashing algorithm to use. */ private Hash hashFunction; /** * Constructor. * <p> * Builds a hash function that must obey to a given maximum number of returned values and a highest value. * @param maxValue The maximum highest returned value. * @param nbHash The number of resulting hashed values. * @param hashType type of the hashing function (see {@link Hash}). 
*/ public HashFunction(int maxValue, int nbHash, int hashType) { if (maxValue <= 0) { throw new IllegalArgumentException("maxValue must be > 0"); } if (nbHash <= 0) { throw new IllegalArgumentException("nbHash must be > 0"); } this.maxValue = maxValue; this.nbHash = nbHash; this.hashFunction = Hash.getInstance(hashType); if (this.hashFunction == null) throw new IllegalArgumentException("hashType must be known"); } /** Clears <i>this</i> hash function. A NOOP */ public void clear() { } /** * Hashes a specified key into several integers. * @param k The specified key. * @return The array of hashed values. */ public int[] hash(Key k){ byte[] b = k.getBytes(); if (b == null) { throw new NullPointerException("buffer reference is null"); } if (b.length == 0) { throw new IllegalArgumentException("key length must be > 0"); } int[] result = new int[nbHash]; for (int i = 0, initval = 0; i < nbHash; i++) { initval = hashFunction.hash(b, initval); result[i] = Math.abs(initval % maxValue); } return result; } }
4,707
37.276423
108
java
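HashFunction can also be exercised on its own to see which vector positions a key maps to; maxValue, nbHash, and the key bytes below are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.util.bloom.HashFunction;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class HashFunctionExample {
  public static void main(String[] args) {
    // Maps a key to 5 positions, each in the range [0, 1000).
    HashFunction fn = new HashFunction(1000, 5, Hash.JENKINS_HASH);
    int[] positions = fn.hash(new Key("sample".getBytes(StandardCharsets.UTF_8)));
    System.out.println(Arrays.toString(positions));
  }
}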
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/bloom/Key.java
/** * * Copyright (c) 2005, European Commission project OneLab under contract 034819 (http://www.one-lab.org) * All rights reserved. * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * - Neither the name of the University Catholique de Louvain - UCL * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.util.bloom; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.WritableComparable; /** * The general behavior of a key that must be stored in a filter. * * @see Filter The general behavior of a filter */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public class Key implements WritableComparable<Key> { /** Byte value of key */ byte[] bytes; /** * The weight associated to <i>this</i> key. * <p> * <b>Invariant</b>: if it is not specified, each instance of * <code>Key</code> will have a default weight of 1.0 */ double weight; /** default constructor - use with readFields */ public Key() {} /** * Constructor. * <p> * Builds a key with a default weight. * @param value The byte value of <i>this</i> key. */ public Key(byte[] value) { this(value, 1.0); } /** * Constructor. * <p> * Builds a key with a specified weight. * @param value The value of <i>this</i> key. 
 * @param weight The weight associated to <i>this</i> key. */ public Key(byte[] value, double weight) { set(value, weight); } /** * Sets the value and weight of <i>this</i> key. * @param value The byte value of <i>this</i> key. * @param weight The weight associated to <i>this</i> key. */ public void set(byte[] value, double weight) { if (value == null) { throw new IllegalArgumentException("value can not be null"); } this.bytes = value; this.weight = weight; } /** @return byte[] The value of <i>this</i> key. */ public byte[] getBytes() { return this.bytes; } /** @return Returns the weight associated to <i>this</i> key. */ public double getWeight() { return weight; } /** * Increments the weight of <i>this</i> key with a specified value. * @param weight The increment. */ public void incrementWeight(double weight) { this.weight += weight; } /** Increments the weight of <i>this</i> key by one. */ public void incrementWeight() { this.weight++; } @Override public boolean equals(Object o) { if (!(o instanceof Key)) { return false; } return this.compareTo((Key)o) == 0; } @Override public int hashCode() { int result = 0; for (int i = 0; i < bytes.length; i++) { result ^= Byte.valueOf(bytes[i]).hashCode(); } result ^= Double.valueOf(weight).hashCode(); return result; } // Writable @Override public void write(DataOutput out) throws IOException { out.writeInt(bytes.length); out.write(bytes); out.writeDouble(weight); } @Override public void readFields(DataInput in) throws IOException { this.bytes = new byte[in.readInt()]; in.readFully(this.bytes); weight = in.readDouble(); } // Comparable @Override public int compareTo(Key other) { int result = this.bytes.length - other.getBytes().length; for (int i = 0; result == 0 && i < bytes.length; i++) { result = this.bytes[i] - other.bytes[i]; } if (result == 0) { result = Double.valueOf(this.weight - other.weight).intValue(); } return result; } }
5,742
30.211957
104
java
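A brief sketch of Key's weight handling and ordering; the byte contents and weights are arbitrary assumptions.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.bloom.Key;

public class KeyExample {
  public static void main(String[] args) {
    byte[] bytes = "user-17".getBytes(StandardCharsets.UTF_8);
    Key a = new Key(bytes);        // default weight 1.0
    Key b = new Key(bytes, 3.0);   // explicit weight, used by the ratio-based clearing schemes

    a.incrementWeight();           // weight is now 2.0
    System.out.println(a.getWeight() + " " + b.getWeight()); // 2.0 3.0
    System.out.println(a.equals(b)); // false: compareTo folds the weight difference into the ordering
  }
}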
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.hadoop.util.curator; import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import org.apache.curator.framework.recipes.locks.Reaper; import org.apache.curator.utils.CloseableUtils; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.utils.CloseableScheduledExecutorService; import org.apache.curator.utils.ThreadUtils; import org.apache.curator.utils.ZKPaths; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.apache.curator.utils.PathUtils; /** * This is a copy of Curator 2.7.1's ChildReaper class, modified to work with * Guava 11.0.2. The problem is the 'paths' Collection, which calls Guava's * Sets.newConcurrentHashSet(), which was added in Guava 15.0. * <p> * Utility to reap empty child nodes of a parent node. Periodically calls getChildren on * the node and adds empty nodes to an internally managed {@link Reaper} */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ChildReaper implements Closeable { private final Logger log = LoggerFactory.getLogger(getClass()); private final Reaper reaper; private final AtomicReference<State> state = new AtomicReference<State>(State.LATENT); private final CuratorFramework client; private final Collection<String> paths = newConcurrentHashSet(); private final Reaper.Mode mode; private final CloseableScheduledExecutorService executor; private final int reapingThresholdMs; private volatile Future<?> task; // This is copied from Curator's Reaper class static final int DEFAULT_REAPING_THRESHOLD_MS = (int)TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES); // This is copied from Guava /** * Creates a thread-safe set backed by a hash map. The set is backed by a * {@link ConcurrentHashMap} instance, and thus carries the same concurrency * guarantees. * * <p>Unlike {@code HashSet}, this class does NOT allow {@code null} to be * used as an element. The set is serializable. 
* * @return a new, empty thread-safe {@code Set} * @since 15.0 */ public static <E> Set<E> newConcurrentHashSet() { return Sets.newSetFromMap(new ConcurrentHashMap<E, Boolean>()); } private enum State { LATENT, STARTED, CLOSED } /** * @param client the client * @param path path to reap children from * @param mode reaping mode */ public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode) { this(client, path, mode, newExecutorService(), DEFAULT_REAPING_THRESHOLD_MS, null); } /** * @param client the client * @param path path to reap children from * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted * @param mode reaping mode */ public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, int reapingThresholdMs) { this(client, path, mode, newExecutorService(), reapingThresholdMs, null); } /** * @param client the client * @param path path to reap children from * @param executor executor to use for background tasks * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted * @param mode reaping mode */ public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, ScheduledExecutorService executor, int reapingThresholdMs) { this(client, path, mode, executor, reapingThresholdMs, null); } /** * @param client the client * @param path path to reap children from * @param executor executor to use for background tasks * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted * @param mode reaping mode * @param leaderPath if not null, uses a leader selection so that only 1 reaper is active in the cluster */ public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, ScheduledExecutorService executor, int reapingThresholdMs, String leaderPath) { this.client = client; this.mode = mode; this.executor = new CloseableScheduledExecutorService(executor); this.reapingThresholdMs = reapingThresholdMs; this.reaper = new Reaper(client, executor, reapingThresholdMs, leaderPath); addPath(path); } /** * The reaper must be started * * @throws Exception errors */ public void start() throws Exception { Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED), "Cannot be started more than once"); task = executor.scheduleWithFixedDelay ( new Runnable() { @Override public void run() { doWork(); } }, reapingThresholdMs, reapingThresholdMs, TimeUnit.MILLISECONDS ); reaper.start(); } @Override public void close() throws IOException { if ( state.compareAndSet(State.STARTED, State.CLOSED) ) { CloseableUtils.closeQuietly(reaper); task.cancel(true); } } /** * Add a path to reap children from * * @param path the path * @return this for chaining */ public ChildReaper addPath(String path) { paths.add(PathUtils.validatePath(path)); return this; } /** * Remove a path from reaping * * @param path the path * @return true if the path existed and was removed */ public boolean removePath(String path) { return paths.remove(PathUtils.validatePath(path)); } private static ScheduledExecutorService newExecutorService() { return ThreadUtils.newFixedThreadScheduledPool(2, "ChildReaper"); } private void doWork() { for ( String path : paths ) { try { List<String> children = client.getChildren().forPath(path); for ( String name : children ) { String thisPath = ZKPaths.makePath(path, name); Stat stat = client.checkExists().forPath(thisPath); if ( (stat != null) && (stat.getNumChildren() == 0) ) { reaper.addPath(thisPath, mode); } } } catch ( Exception e ) { 
log.error("Could not get children for path: " + path, e); } } } }
7,574
31.234043
154
java
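A hypothetical usage sketch for the ChildReaper above, not part of the original file: the ZooKeeper connect string, retry policy, reaped path, and sleep interval are placeholders, and the mode is one of Curator's standard Reaper modes.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.Reaper;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.util.curator.ChildReaper;

public class ChildReaperExample {
  public static void main(String[] args) throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(
        "localhost:2181", new ExponentialBackoffRetry(1000, 3));
    client.start();

    // Reap empty children of /locks, deleting a node once it has stayed empty
    // past the default threshold (5 minutes).
    ChildReaper reaper =
        new ChildReaper(client, "/locks", Reaper.Mode.REAP_UNTIL_GONE);
    try {
      reaper.start();
      Thread.sleep(60_000);   // let it run for a while
    } finally {
      reaper.close();
      client.close();
    }
  }
}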
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.jmx;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.HttpServer2;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;

import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.RuntimeErrorException;
import javax.management.RuntimeMBeanException;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.TabularData;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Array;
import java.util.Iterator;
import java.util.Set;

/*
 * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has
 * been rewritten to be read-only and to output in a JSON format, so it is not
 * really that close to the original.
 */
/**
 * Provides read-only web access to JMX.
 * <p>
 * This servlet generally will be placed under the /jmx URL for each
 * HttpServer.  It provides read-only
 * access to JMX metrics.  The optional <code>qry</code> parameter
 * may be used to query only a subset of the JMX beans. This query
 * functionality is provided through the
 * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
 * method.
 * <p>
 * For example <code>http://.../jmx?qry=Hadoop:*</code> will return
 * all Hadoop metrics exposed through JMX.
 * <p>
 * The optional <code>get</code> parameter is used to query a specific
 * attribute of a JMX bean.  The format of the URL is
 * <code>http://.../jmx?get=MXBeanName::AttributeName</code>
 * <p>
 * For example
 * <code>
 * http://.../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
 * </code> will return the cluster id of the NameNode MXBean.
 * <p>
 * If the <code>qry</code> or the <code>get</code> parameter is not formatted
 * correctly then a 400 BAD REQUEST HTTP response code will be returned.
 * <p>
 * If a resource such as an MBean or attribute cannot be found,
 * a 404 SC_NOT_FOUND HTTP response code will be returned.
 * <p>
 * The return format is JSON and in the form
 * <p>
 *  <code><pre>
 *  {
 *    "beans" : [
 *      {
 *        "name":"bean-name"
 *        ...
* } * ] * } * </pre></code> * <p> * The servlet attempts to convert the the JMXBeans into JSON. Each * bean's attributes will be converted to a JSON object member. * * If the attribute is a boolean, a number, a string, or an array * it will be converted to the JSON equivalent. * * If the value is a {@link CompositeData} then it will be converted * to a JSON object with the keys as the name of the JSON member and * the value is converted following these same rules. * * If the value is a {@link TabularData} then it will be converted * to an array of the {@link CompositeData} elements that it contains. * * All other objects will be converted to a string and output as such. * * The bean's name and modelerType will be returned for all beans. * */ public class JMXJsonServlet extends HttpServlet { private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class); static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"; static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"; private static final long serialVersionUID = 1L; /** * MBean server. */ protected transient MBeanServer mBeanServer = null; // --------------------------------------------------------- Public Methods /** * Initialize this servlet. */ @Override public void init() throws ServletException { // Retrieve the MBean server mBeanServer = ManagementFactory.getPlatformMBeanServer(); } protected boolean isInstrumentationAccessAllowed(HttpServletRequest request, HttpServletResponse response) throws IOException { return HttpServer2.isInstrumentationAccessAllowed(getServletContext(), request, response); } /** * Process a GET request for the specified resource. * * @param request * The servlet request we are processing * @param response * The servlet response we are creating */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response) { String jsonpcb = null; PrintWriter writer = null; try { if (!isInstrumentationAccessAllowed(request, response)) { return; } JsonGenerator jg = null; try { writer = response.getWriter(); response.setContentType("application/json; charset=utf8"); response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, "GET"); response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*"); JsonFactory jsonFactory = new JsonFactory(); jg = jsonFactory.createJsonGenerator(writer); jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); jg.useDefaultPrettyPrinter(); jg.writeStartObject(); if (mBeanServer == null) { jg.writeStringField("result", "ERROR"); jg.writeStringField("message", "No MBeanServer could be found"); jg.close(); LOG.error("No MBeanServer could be found."); response.setStatus(HttpServletResponse.SC_NOT_FOUND); return; } // query per mbean attribute String getmethod = request.getParameter("get"); if (getmethod != null) { String[] splitStrings = getmethod.split("\\:\\:"); if (splitStrings.length != 2) { jg.writeStringField("result", "ERROR"); jg.writeStringField("message", "query format is not as expected."); jg.close(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } listBeans(jg, new ObjectName(splitStrings[0]), splitStrings[1], response); jg.close(); return; } // query per mbean String qry = request.getParameter("qry"); if (qry == null) { qry = "*:*"; } listBeans(jg, new ObjectName(qry), null, response); } finally { if (jg != null) { jg.close(); } if (writer != null) { writer.close(); } } } catch ( IOException e ) { LOG.error("Caught an exception while processing JMX request", e); 
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
    } catch ( MalformedObjectNameException e ) {
      LOG.error("Caught an exception while processing JMX request", e);
      response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
    } finally {
      if (writer != null) {
        writer.close();
      }
    }
  }

  // --------------------------------------------------------- Private Methods
  private void listBeans(JsonGenerator jg, ObjectName qry, String attribute,
      HttpServletResponse response)
  throws IOException {
    LOG.debug("Listing beans for "+qry);
    Set<ObjectName> names = null;
    names = mBeanServer.queryNames(qry, null);

    jg.writeArrayFieldStart("beans");
    Iterator<ObjectName> it = names.iterator();
    while (it.hasNext()) {
      ObjectName oname = it.next();
      MBeanInfo minfo;
      String code = "";
      Object attributeinfo = null;
      try {
        minfo = mBeanServer.getMBeanInfo(oname);
        code = minfo.getClassName();
        String prs = "";
        try {
          if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
            prs = "modelerType";
            code = (String) mBeanServer.getAttribute(oname, prs);
          }
          if (attribute!=null) {
            prs = attribute;
            attributeinfo = mBeanServer.getAttribute(oname, prs);
          }
        } catch (AttributeNotFoundException e) {
          // If the modelerType attribute was not found, the class name is used
          // instead.
          LOG.error("getting attribute " + prs + " of " + oname
              + " threw an exception", e);
        } catch (MBeanException e) {
          // The code inside the attribute getter threw an exception so log it,
          // and fall back on the class name
          LOG.error("getting attribute " + prs + " of " + oname
              + " threw an exception", e);
        } catch (RuntimeException e) {
          // For some reason even with an MBeanException available to them
          // Runtime exceptions can still find their way through, so treat them
          // the same as MBeanException
          LOG.error("getting attribute " + prs + " of " + oname
              + " threw an exception", e);
        } catch ( ReflectionException e ) {
          // This happens when the code inside the JMX bean (setter?? from the
          // java docs) threw an exception, so log it and fall back on the
          // class name
          LOG.error("getting attribute " + prs + " of " + oname
              + " threw an exception", e);
        }
      } catch (InstanceNotFoundException e) {
        // Ignored: for some reason the bean was not found, so don't output it
        continue;
      } catch ( IntrospectionException e ) {
        // This is an internal error, something odd happened with reflection so
        // log it and don't output the bean.
        LOG.error("Problem while trying to process JMX query: " + qry
            + " with MBean " + oname, e);
        continue;
      } catch ( ReflectionException e ) {
        // This happens when the code inside the JMX bean threw an exception, so
        // log it and don't output the bean.
LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, e); continue; } jg.writeStartObject(); jg.writeStringField("name", oname.toString()); jg.writeStringField("modelerType", code); if ((attribute != null) && (attributeinfo == null)) { jg.writeStringField("result", "ERROR"); jg.writeStringField("message", "No attribute with name " + attribute + " was found."); jg.writeEndObject(); jg.writeEndArray(); jg.close(); response.setStatus(HttpServletResponse.SC_NOT_FOUND); return; } if (attribute != null) { writeAttribute(jg, attribute, attributeinfo); } else { MBeanAttributeInfo attrs[] = minfo.getAttributes(); for (int i = 0; i < attrs.length; i++) { writeAttribute(jg, oname, attrs[i]); } } jg.writeEndObject(); } jg.writeEndArray(); } private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException { if (!attr.isReadable()) { return; } String attName = attr.getName(); if ("modelerType".equals(attName)) { return; } if (attName.indexOf("=") >= 0 || attName.indexOf(":") >= 0 || attName.indexOf(" ") >= 0) { return; } Object value = null; try { value = mBeanServer.getAttribute(oname, attName); } catch (RuntimeMBeanException e) { // UnsupportedOperationExceptions happen in the normal course of business, // so no need to log them as errors all the time. if (e.getCause() instanceof UnsupportedOperationException) { LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e); } else { LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e); } return; } catch (RuntimeErrorException e) { // RuntimeErrorException happens when an unexpected failure occurs in getAttribute // for example https://issues.apache.org/jira/browse/DAEMON-120 LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e); return; } catch (AttributeNotFoundException e) { //Ignored the attribute was not found, which should never happen because the bean //just told us that it has this attribute, but if this happens just don't output //the attribute. return; } catch (MBeanException e) { //The code inside the attribute getter threw an exception so log it, and // skip outputting the attribute LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e); return; } catch (RuntimeException e) { //For some reason even with an MBeanException available to them Runtime exceptions //can still find their way through, so treat them the same as MBeanException LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e); return; } catch (ReflectionException e) { //This happens when the code inside the JMX bean (setter?? from the java docs) //threw an exception, so log it and skip outputting the attribute LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e); return; } catch (InstanceNotFoundException e) { //Ignored the mbean itself was not found, which should never happen because we //just accessed it (perhaps something unregistered in-between) but if this //happens just don't output the attribute. 
return; } writeAttribute(jg, attName, value); } private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException { jg.writeFieldName(attName); writeObject(jg, value); } private void writeObject(JsonGenerator jg, Object value) throws IOException { if(value == null) { jg.writeNull(); } else { Class<?> c = value.getClass(); if (c.isArray()) { jg.writeStartArray(); int len = Array.getLength(value); for (int j = 0; j < len; j++) { Object item = Array.get(value, j); writeObject(jg, item); } jg.writeEndArray(); } else if(value instanceof Number) { Number n = (Number)value; jg.writeNumber(n.toString()); } else if(value instanceof Boolean) { Boolean b = (Boolean)value; jg.writeBoolean(b); } else if(value instanceof CompositeData) { CompositeData cds = (CompositeData)value; CompositeType comp = cds.getCompositeType(); Set<String> keys = comp.keySet(); jg.writeStartObject(); for(String key: keys) { writeAttribute(jg, key, cds.get(key)); } jg.writeEndObject(); } else if(value instanceof TabularData) { TabularData tds = (TabularData)value; jg.writeStartArray(); for(Object entry : tds.values()) { writeObject(jg, entry); } jg.writeEndArray(); } else { jg.writeString(value.toString()); } } } }
16,097
36.177829
111
java
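The query formats described in the servlet's javadoc can be exercised from any HTTP client. The snippet below is an illustrative client-side sketch, not part of the servlet source; the host, the port, and the chosen bean are assumptions.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class JmxQueryExample {
  public static void main(String[] args) throws Exception {
    // Query every Hadoop bean (the host/port is a placeholder)...
    URL all = new URL("http://localhost:50070/jmx?qry=Hadoop:*");
    // ...or a single attribute of a single bean, using the name::attribute form.
    URL one = new URL(
        "http://localhost:50070/jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId");

    for (URL url : new URL[] { all, one }) {
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);
        }
      }
    }
  }
}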
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This package provides access to JMX primarily through the * {@link org.apache.hadoop.jmx.JMXJsonServlet} class. */ @InterfaceAudience.Private package org.apache.hadoop.jmx; import org.apache.hadoop.classification.InterfaceAudience;
1,047
40.92
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
/* * MetricsException.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * General-purpose, unchecked metrics exception. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public class MetricsException extends RuntimeException { private static final long serialVersionUID = -1643257498540498497L; /** Creates a new instance of MetricsException */ public MetricsException() { } /** Creates a new instance of MetricsException * * @param message an error message */ public MetricsException(String message) { super(message); } }
1,531
30.916667
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/Updater.java
/* * Updater.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Call-back interface. See <code>MetricsContext.registerUpdater()</code>. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public interface Updater { /** * Timer-based call-back from the metric library. */ public abstract void doUpdates(MetricsContext context); }
1,310
32.615385
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
/*
 * MetricsContext.java
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics.spi.OutputRecord;

/**
 * The main interface to the metrics package.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface MetricsContext {

  /**
   * Default period in seconds at which data is sent to the metrics system.
   */
  public static final int DEFAULT_PERIOD = 5;

  /**
   * Initialize this context.
   * @param contextName The given name for this context
   * @param factory The creator of this context
   */
  public void init(String contextName, ContextFactory factory);

  /**
   * Returns the context name.
   *
   * @return the context name
   */
  public abstract String getContextName();

  /**
   * Starts or restarts monitoring, the emitting of metrics records as they are
   * updated.
   */
  public abstract void startMonitoring() throws IOException;

  /**
   * Stops monitoring.  This does not free any data that the implementation
   * may have buffered for sending at the next timer event.  It
   * is OK to call <code>startMonitoring()</code> again after calling
   * this.
   * @see #close()
   */
  public abstract void stopMonitoring();

  /**
   * Returns true if monitoring is currently in progress.
   */
  public abstract boolean isMonitoring();

  /**
   * Stops monitoring and also frees any buffered data, returning this
   * object to its initial state.
   */
  public abstract void close();

  /**
   * Creates a new MetricsRecord instance with the given <code>recordName</code>.
   * Throws an exception if the metrics implementation is configured with a fixed
   * set of record names and <code>recordName</code> is not in that set.
   *
   * @param recordName the name of the record
   * @throws MetricsException if recordName conflicts with configuration data
   */
  public abstract MetricsRecord createRecord(String recordName);

  /**
   * Registers a callback to be called at regular time intervals, as
   * determined by the implementation-class specific configuration.
   *
   * @param updater object to be run periodically; it should update
   * some metrics records and then return
   */
  public abstract void registerUpdater(Updater updater);

  /**
   * Removes a callback, if it exists.
   *
   * @param updater object to be removed from the callback list
   */
  public abstract void unregisterUpdater(Updater updater);

  /**
   * Returns the timer period.
   */
  public abstract int getPeriod();

  /**
   * Retrieves all the records managed by this MetricsContext.
   * Useful for monitoring systems that are polling-based.
   *
   * @return A non-null map from all record names to the records managed.
   */
  Map<String, Collection<OutputRecord>> getAllRecords();
}
3,798
29.886179
81
java
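A minimal sketch, added for illustration, of the registerUpdater/startMonitoring flow the interface above describes. The context name "mycontext" and the record name "heartbeat" are assumptions.

import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.Updater;

public class UpdaterExample {
  public static void main(String[] args) throws Exception {
    MetricsContext context = ContextFactory.getFactory().getContext("mycontext");

    // The updater is called once per timer period (DEFAULT_PERIOD seconds unless
    // configured otherwise) and should refresh its records before they are emitted.
    context.registerUpdater(new Updater() {
      @Override
      public void doUpdates(MetricsContext ctx) {
        MetricsRecord record = ctx.createRecord("heartbeat");
        record.incrMetric("heartbeats", 1);
        record.update();
      }
    });

    context.startMonitoring();
  }
}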
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import java.net.InetAddress; import java.net.UnknownHostException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Utility class to simplify creation and reporting of hadoop metrics. * * For examples of usage, see NameNodeMetrics. * @see org.apache.hadoop.metrics.MetricsRecord * @see org.apache.hadoop.metrics.MetricsContext * @see org.apache.hadoop.metrics.ContextFactory */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public class MetricsUtil { public static final Log LOG = LogFactory.getLog(MetricsUtil.class); /** * Don't allow creation of a new instance of Metrics */ private MetricsUtil() {} public static MetricsContext getContext(String contextName) { return getContext(contextName, contextName); } /** * Utility method to return the named context. * If the desired context cannot be created for any reason, the exception * is logged, and a null context is returned. */ public static MetricsContext getContext(String refName, String contextName) { MetricsContext metricsContext; try { metricsContext = ContextFactory.getFactory().getContext(refName, contextName); if (!metricsContext.isMonitoring()) { metricsContext.startMonitoring(); } } catch (Exception ex) { LOG.error("Unable to create metrics context " + contextName, ex); metricsContext = ContextFactory.getNullContext(contextName); } return metricsContext; } /** * Utility method to create and return new metrics record instance within the * given context. This record is tagged with the host name. * * @param context the context * @param recordName name of the record * @return newly created metrics record */ public static MetricsRecord createRecord(MetricsContext context, String recordName) { MetricsRecord metricsRecord = context.createRecord(recordName); metricsRecord.setTag("hostName", getHostName()); return metricsRecord; } /** * Returns the host name. If the host name is unobtainable, logs the * exception and returns "unknown". */ private static String getHostName() { String hostName = null; try { hostName = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException ex) { LOG.info("Unable to obtain hostName", ex); hostName = "unknown"; } return hostName; } }
3,493
32.27619
79
java
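A short sketch, not in the original file, of the convenience path MetricsUtil provides: obtain a context, create a host-tagged record, and update it. The context, record, tag, and metric names are made up.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;

public class MetricsUtilExample {
  public static void main(String[] args) {
    // Falls back to a null (no-op) context if the named context cannot be built.
    MetricsContext context = MetricsUtil.getContext("dfs");

    // createRecord tags the record with the local host name automatically.
    MetricsRecord record = MetricsUtil.createRecord(context, "diskStats");
    record.setTag("diskName", "sda1");
    record.setMetric("diskPercentFull", 73);
    record.update();
  }
}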
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.TreeMap; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.metrics.spi.OutputRecord; import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap; import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap; import org.mortbay.util.ajax.JSON; import org.mortbay.util.ajax.JSON.Output; /** * A servlet to print out metrics data. By default, the servlet returns a * textual representation (no promises are made for parseability), and * users can use "?format=json" for parseable output. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class MetricsServlet extends HttpServlet { /** * A helper class to hold a TagMap and MetricMap. */ static class TagsMetricsPair implements JSON.Convertible { final TagMap tagMap; final MetricMap metricMap; public TagsMetricsPair(TagMap tagMap, MetricMap metricMap) { this.tagMap = tagMap; this.metricMap = metricMap; } @SuppressWarnings("unchecked") public void fromJSON(Map map) { throw new UnsupportedOperationException(); } /** Converts to JSON by providing an array. */ public void toJSON(Output out) { out.add(new Object[] { tagMap, metricMap }); } } /** * Collects all metric data, and returns a map: * contextName -> recordName -> [ (tag->tagValue), (metric->metricValue) ]. * The values are either String or Number. The final value is implemented * as a list of TagsMetricsPair. 
*/ Map<String, Map<String, List<TagsMetricsPair>>> makeMap( Collection<MetricsContext> contexts) throws IOException { Map<String, Map<String, List<TagsMetricsPair>>> map = new TreeMap<String, Map<String, List<TagsMetricsPair>>>(); for (MetricsContext context : contexts) { Map<String, List<TagsMetricsPair>> records = new TreeMap<String, List<TagsMetricsPair>>(); map.put(context.getContextName(), records); for (Map.Entry<String, Collection<OutputRecord>> r : context.getAllRecords().entrySet()) { List<TagsMetricsPair> metricsAndTags = new ArrayList<TagsMetricsPair>(); records.put(r.getKey(), metricsAndTags); for (OutputRecord outputRecord : r.getValue()) { TagMap tagMap = outputRecord.getTagsCopy(); MetricMap metricMap = outputRecord.getMetricsCopy(); metricsAndTags.add(new TagsMetricsPair(tagMap, metricMap)); } } } return map; } @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } String format = request.getParameter("format"); Collection<MetricsContext> allContexts = ContextFactory.getFactory().getAllContexts(); if ("json".equals(format)) { response.setContentType("application/json; charset=utf-8"); PrintWriter out = response.getWriter(); try { // Uses Jetty's built-in JSON support to convert the map into JSON. out.print(new JSON().toJSON(makeMap(allContexts))); } finally { out.close(); } } else { PrintWriter out = response.getWriter(); try { printMap(out, makeMap(allContexts)); } finally { out.close(); } } } /** * Prints metrics data in a multi-line text form. */ void printMap(PrintWriter out, Map<String, Map<String, List<TagsMetricsPair>>> map) { for (Map.Entry<String, Map<String, List<TagsMetricsPair>>> context : map.entrySet()) { out.print(context.getKey()); out.print("\n"); for (Map.Entry<String, List<TagsMetricsPair>> record : context.getValue().entrySet()) { indent(out, 1); out.print(record.getKey()); out.print("\n"); for (TagsMetricsPair pair : record.getValue()) { indent(out, 2); // Prints tag values in the form "{key=value,key=value}:" out.print("{"); boolean first = true; for (Map.Entry<String, Object> tagValue : pair.tagMap.entrySet()) { if (first) { first = false; } else { out.print(","); } out.print(tagValue.getKey()); out.print("="); out.print(tagValue.getValue().toString()); } out.print("}:\n"); // Now print metric values, one per line for (Map.Entry<String, Number> metricValue : pair.metricMap.entrySet()) { indent(out, 3); out.print(metricValue.getKey()); out.print("="); out.print(metricValue.getValue().toString()); out.print("\n"); } } } } } private void indent(PrintWriter out, int indent) { for (int i = 0; i < indent; ++i) { out.append(" "); } } }
6,343
33.478261
93
java
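Client-side sketch, added for illustration and not part of the servlet source: fetching the servlet's output in both formats. The host and port are assumptions; the servlet is conventionally registered under the /metrics path.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class MetricsServletExample {
  public static void main(String[] args) throws Exception {
    // Default output: indented, human-readable text.
    dump(new URL("http://localhost:50070/metrics"));
    // "?format=json" switches to a parseable JSON map:
    // contextName -> recordName -> [ [tags, metrics], ... ]
    dump(new URL("http://localhost:50070/metrics?format=json"));
  }

  private static void dump(URL url) throws Exception {
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}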
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
/* * MetricsRecord.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A named and optionally tagged set of records to be sent to the metrics * system. <p/> * * A record name identifies the kind of data to be reported. For example, a * program reporting statistics relating to the disks on a computer might use * a record name "diskStats".<p/> * * A record has zero or more <i>tags</i>. A tag has a name and a value. To * continue the example, the "diskStats" record might use a tag named * "diskName" to identify a particular disk. Sometimes it is useful to have * more than one tag, so there might also be a "diskType" with value "ide" or * "scsi" or whatever.<p/> * * A record also has zero or more <i>metrics</i>. These are the named * values that are to be reported to the metrics system. In the "diskStats" * example, possible metric names would be "diskPercentFull", "diskPercentBusy", * "kbReadPerSecond", etc.<p/> * * The general procedure for using a MetricsRecord is to fill in its tag and * metric values, and then call <code>update()</code> to pass the record to the * client library. * Metric data is not immediately sent to the metrics system * each time that <code>update()</code> is called. * An internal table is maintained, identified by the record name. This * table has columns * corresponding to the tag and the metric names, and rows * corresponding to each unique set of tag values. An update * either modifies an existing row in the table, or adds a new row with a set of * tag values that are different from all the other rows. Note that if there * are no tags, then there can be at most one row in the table. <p/> * * Once a row is added to the table, its data will be sent to the metrics system * on every timer period, whether or not it has been updated since the previous * timer period. If this is inappropriate, for example if metrics were being * reported by some transient object in an application, the <code>remove()</code> * method can be used to remove the row and thus stop the data from being * sent.<p/> * * Note that the <code>update()</code> method is atomic. This means that it is * safe for different threads to be updating the same metric. More precisely, * it is OK for different threads to call <code>update()</code> on MetricsRecord instances * with the same set of tag names and tag values. Different threads should * <b>not</b> use the same MetricsRecord instance at the same time. */ @InterfaceAudience.Private @InterfaceStability.Evolving public interface MetricsRecord { /** * Returns the record name. 
* * @return the record name */ public abstract String getRecordName(); /** * Sets the named tag to the specified value. The tagValue may be null, * which is treated the same as an empty String. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public abstract void setTag(String tagName, String tagValue); /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public abstract void setTag(String tagName, int tagValue); /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public abstract void setTag(String tagName, long tagValue); /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public abstract void setTag(String tagName, short tagValue); /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public abstract void setTag(String tagName, byte tagValue); /** * Removes any tag of the specified name. * * @param tagName name of a tag */ public abstract void removeTag(String tagName); /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void setMetric(String metricName, int metricValue); /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void setMetric(String metricName, long metricValue); /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void setMetric(String metricName, short metricValue); /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void setMetric(String metricName, byte metricValue); /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void setMetric(String metricName, float metricValue); /** * Increments the named metric by the specified value. 
* * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void incrMetric(String metricName, int metricValue); /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void incrMetric(String metricName, long metricValue); /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void incrMetric(String metricName, short metricValue); /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void incrMetric(String metricName, byte metricValue); /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public abstract void incrMetric(String metricName, float metricValue); /** * Updates the table of buffered data which is to be sent periodically. * If the tag values match an existing row, that row is updated; * otherwise, a new row is added. */ public abstract void update(); /** * Removes, from the buffered data table, all rows having tags * that equal the tags that have been set on this record. For example, * if there are no tags on this record, all rows for this record name * would be removed. Or, if there is a single tag on this record, then * just rows containing a tag with the same name and value would be removed. */ public abstract void remove(); }
9,477
36.611111
91
java
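A hedged example of the usage procedure the interface javadoc above describes: fill in tags and metrics, then call update(); a different tag value yields a different row, and remove() drops the rows matching the current tags. The context, record, and disk names are made up.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;

public class MetricsRecordExample {
  public static void main(String[] args) {
    MetricsContext context = MetricsUtil.getContext("dfs");
    MetricsRecord diskStats = MetricsUtil.createRecord(context, "diskStats");

    // Row 1: disk "sda"
    diskStats.setTag("diskName", "sda");
    diskStats.setMetric("diskPercentBusy", 12);
    diskStats.incrMetric("kbReadPerSecond", 256);
    diskStats.update();

    // Row 2: disk "sdb" - same record instance, different tag value
    diskStats.setTag("diskName", "sdb");
    diskStats.setMetric("diskPercentBusy", 48);
    diskStats.update();

    // Stop reporting the "sdb" row (the currently set tags) once the disk goes away.
    diskStats.remove();
  }
}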
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
/* * ContextFactory.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.spi.NullContext; /** * Factory class for creating MetricsContext objects. To obtain an instance * of this class, use the static <code>getFactory()</code> method. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public class ContextFactory { private static final String PROPERTIES_FILE = "/hadoop-metrics.properties"; private static final String CONTEXT_CLASS_SUFFIX = ".class"; private static final String DEFAULT_CONTEXT_CLASSNAME = "org.apache.hadoop.metrics.spi.NullContext"; private static ContextFactory theFactory = null; private Map<String,Object> attributeMap = new HashMap<String,Object>(); private Map<String,MetricsContext> contextMap = new HashMap<String,MetricsContext>(); // Used only when contexts, or the ContextFactory itself, cannot be // created. private static Map<String,MetricsContext> nullContextMap = new HashMap<String,MetricsContext>(); /** Creates a new instance of ContextFactory */ protected ContextFactory() { } /** * Returns the value of the named attribute, or null if there is no * attribute of that name. * * @param attributeName the attribute name * @return the attribute value */ public Object getAttribute(String attributeName) { return attributeMap.get(attributeName); } /** * Returns the names of all the factory's attributes. * * @return the attribute names */ public String[] getAttributeNames() { String[] result = new String[attributeMap.size()]; int i = 0; // for (String attributeName : attributeMap.keySet()) { Iterator it = attributeMap.keySet().iterator(); while (it.hasNext()) { result[i++] = (String) it.next(); } return result; } /** * Sets the named factory attribute to the specified value, creating it * if it did not already exist. If the value is null, this is the same as * calling removeAttribute. * * @param attributeName the attribute name * @param value the new attribute value */ public void setAttribute(String attributeName, Object value) { attributeMap.put(attributeName, value); } /** * Removes the named attribute if it exists. * * @param attributeName the attribute name */ public void removeAttribute(String attributeName) { attributeMap.remove(attributeName); } /** * Returns the named MetricsContext instance, constructing it if necessary * using the factory's current configuration attributes. 
<p/> * * When constructing the instance, if the factory property * <i>contextName</i>.class</code> exists, * its value is taken to be the name of the class to instantiate. Otherwise, * the default is to create an instance of * <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a * dummy "no-op" context which will cause all metric data to be discarded. * * @param contextName the name of the context * @return the named MetricsContext */ public synchronized MetricsContext getContext(String refName, String contextName) throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { MetricsContext metricsContext = contextMap.get(refName); if (metricsContext == null) { String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX; String className = (String) getAttribute(classNameAttribute); if (className == null) { className = DEFAULT_CONTEXT_CLASSNAME; } Class contextClass = Class.forName(className); metricsContext = (MetricsContext) contextClass.newInstance(); metricsContext.init(contextName, this); contextMap.put(contextName, metricsContext); } return metricsContext; } public synchronized MetricsContext getContext(String contextName) throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { return getContext(contextName, contextName); } /** * Returns all MetricsContexts built by this factory. */ public synchronized Collection<MetricsContext> getAllContexts() { // Make a copy to avoid race conditions with creating new contexts. return new ArrayList<MetricsContext>(contextMap.values()); } /** * Returns a "null" context - one which does nothing. */ public static synchronized MetricsContext getNullContext(String contextName) { MetricsContext nullContext = nullContextMap.get(contextName); if (nullContext == null) { nullContext = new NullContext(); nullContextMap.put(contextName, nullContext); } return nullContext; } /** * Returns the singleton ContextFactory instance, constructing it if * necessary. <p/> * * When the instance is constructed, this method checks if the file * <code>hadoop-metrics.properties</code> exists on the class path. If it * exists, it must be in the format defined by java.util.Properties, and all * the properties in the file are set as attributes on the newly created * ContextFactory instance. * * @return the singleton ContextFactory instance */ public static synchronized ContextFactory getFactory() throws IOException { if (theFactory == null) { theFactory = new ContextFactory(); theFactory.setAttributes(); } return theFactory; } private void setAttributes() throws IOException { InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE); if (is != null) { try { Properties properties = new Properties(); properties.load(is); //for (Object propertyNameObj : properties.keySet()) { Iterator it = properties.keySet().iterator(); while (it.hasNext()) { String propertyName = (String) it.next(); String propertyValue = properties.getProperty(propertyName); setAttribute(propertyName, propertyValue); } } finally { is.close(); } } } }
7,291
33.396226
83
java
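An illustrative sketch, not in the original file, of the ".class" attribute lookup described above. Setting attributes programmatically here stands in for entries in hadoop-metrics.properties; the FileContext class name is only an example of a pluggable implementation, and the "dfs" context name is an assumption.

import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;

public class ContextFactoryExample {
  public static void main(String[] args) throws Exception {
    ContextFactory factory = ContextFactory.getFactory();

    // Equivalent to the hadoop-metrics.properties lines:
    //   dfs.class=org.apache.hadoop.metrics.file.FileContext
    //   dfs.period=10
    factory.setAttribute("dfs.class",
        "org.apache.hadoop.metrics.file.FileContext");
    factory.setAttribute("dfs.period", "10");

    // With no ".class" attribute the factory would fall back to NullContext.
    MetricsContext dfs = factory.getContext("dfs");
    System.out.println("dfs context: " + dfs.getClass().getName());
  }
}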
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/OutputRecord.java
/*
 * OutputRecord.java
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics.spi;

import java.util.Collections;
import java.util.Set;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;

/**
 * Represents a record of metric data to be sent to a metrics system.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OutputRecord {

  private TagMap tagMap;
  private MetricMap metricMap;

  /** Creates a new instance of OutputRecord */
  OutputRecord(TagMap tagMap, MetricMap metricMap) {
    this.tagMap = tagMap;
    this.metricMap = metricMap;
  }

  /**
   * Returns the set of tag names.
   */
  public Set<String> getTagNames() {
    return Collections.unmodifiableSet(tagMap.keySet());
  }

  /**
   * Returns a tag object, which can be a String, Integer, Short or Byte.
   *
   * @return the tag value, or null if there is no such tag
   */
  public Object getTag(String name) {
    return tagMap.get(name);
  }

  /**
   * Returns the set of metric names.
   */
  public Set<String> getMetricNames() {
    return Collections.unmodifiableSet(metricMap.keySet());
  }

  /**
   * Returns the metric object, which can be a Float, Integer, Short or Byte.
   */
  public Number getMetric(String name) {
    return metricMap.get(name);
  }

  /**
   * Returns a copy of this record's tags.
   */
  public TagMap getTagsCopy() {
    return new TagMap(tagMap);
  }

  /**
   * Returns a copy of this record's metrics.
   */
  public MetricMap getMetricsCopy() {
    return new MetricMap(metricMap);
  }
}
2,548
27.010989
76
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
/* * AbstractMetricsContext.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Timer; import java.util.TimerTask; import java.util.TreeMap; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.Updater; /** * The main class of the Service Provider Interface. This class should be * extended in order to integrate the Metrics API with a specific metrics * client library. <p/> * * This class implements the internal table of metric data, and the timer * on which data is to be sent to the metrics system. Subclasses must * override the abstract <code>emitRecord</code> method in order to transmit * the data. <p/> */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class AbstractMetricsContext implements MetricsContext { private int period = MetricsContext.DEFAULT_PERIOD; private Timer timer = null; private Set<Updater> updaters = new HashSet<Updater>(1); private volatile boolean isMonitoring = false; private ContextFactory factory = null; private String contextName = null; @InterfaceAudience.Private public static class TagMap extends TreeMap<String,Object> { private static final long serialVersionUID = 3546309335061952993L; TagMap() { super(); } TagMap(TagMap orig) { super(orig); } /** * Returns true if this tagmap contains every tag in other. */ public boolean containsAll(TagMap other) { for (Map.Entry<String,Object> entry : other.entrySet()) { Object value = get(entry.getKey()); if (value == null || !value.equals(entry.getValue())) { // either key does not exist here, or the value is different return false; } } return true; } } @InterfaceAudience.Private public static class MetricMap extends TreeMap<String,Number> { private static final long serialVersionUID = -7495051861141631609L; MetricMap() { super(); } MetricMap(MetricMap orig) { super(orig); } } static class RecordMap extends HashMap<TagMap,MetricMap> { private static final long serialVersionUID = 259835619700264611L; } private Map<String,RecordMap> bufferedData = new HashMap<String,RecordMap>(); /** * Creates a new instance of AbstractMetricsContext */ protected AbstractMetricsContext() { } /** * Initializes the context. 
*/ public void init(String contextName, ContextFactory factory) { this.contextName = contextName; this.factory = factory; } /** * Convenience method for subclasses to access factory attributes. */ protected String getAttribute(String attributeName) { String factoryAttribute = contextName + "." + attributeName; return (String) factory.getAttribute(factoryAttribute); } /** * Returns an attribute-value map derived from the factory attributes * by finding all factory attributes that begin with * <i>contextName</i>.<i>tableName</i>. The returned map consists of * those attributes with the contextName and tableName stripped off. */ protected Map<String,String> getAttributeTable(String tableName) { String prefix = contextName + "." + tableName + "."; Map<String,String> result = new HashMap<String,String>(); for (String attributeName : factory.getAttributeNames()) { if (attributeName.startsWith(prefix)) { String name = attributeName.substring(prefix.length()); String value = (String) factory.getAttribute(attributeName); result.put(name, value); } } return result; } /** * Returns the context name. */ public String getContextName() { return contextName; } /** * Returns the factory by which this context was created. */ public ContextFactory getContextFactory() { return factory; } /** * Starts or restarts monitoring, the emitting of metrics records. */ public synchronized void startMonitoring() throws IOException { if (!isMonitoring) { startTimer(); isMonitoring = true; } } /** * Stops monitoring. This does not free buffered data. * @see #close() */ public synchronized void stopMonitoring() { if (isMonitoring) { stopTimer(); isMonitoring = false; } } /** * Returns true if monitoring is currently in progress. */ public boolean isMonitoring() { return isMonitoring; } /** * Stops monitoring and frees buffered data, returning this * object to its initial state. */ public synchronized void close() { stopMonitoring(); clearUpdaters(); } /** * Creates a new AbstractMetricsRecord instance with the given <code>recordName</code>. * Throws an exception if the metrics implementation is configured with a fixed * set of record names and <code>recordName</code> is not in that set. * * @param recordName the name of the record * @throws MetricsException if recordName conflicts with configuration data */ public final synchronized MetricsRecord createRecord(String recordName) { if (bufferedData.get(recordName) == null) { bufferedData.put(recordName, new RecordMap()); } return newRecord(recordName); } /** * Subclasses should override this if they subclass MetricsRecordImpl. * @param recordName the name of the record * @return newly created instance of MetricsRecordImpl or subclass */ protected MetricsRecord newRecord(String recordName) { return new MetricsRecordImpl(recordName, this); } /** * Registers a callback to be called at time intervals determined by * the configuration. * * @param updater object to be run periodically; it should update * some metrics records */ public synchronized void registerUpdater(final Updater updater) { if (!updaters.contains(updater)) { updaters.add(updater); } } /** * Removes a callback, if it exists. 
* * @param updater object to be removed from the callback list */ public synchronized void unregisterUpdater(Updater updater) { updaters.remove(updater); } private synchronized void clearUpdaters() { updaters.clear(); } /** * Starts timer if it is not already started */ private synchronized void startTimer() { if (timer == null) { timer = new Timer("Timer thread for monitoring " + getContextName(), true); TimerTask task = new TimerTask() { public void run() { try { timerEvent(); } catch (IOException ioe) { ioe.printStackTrace(); } } }; long millis = period * 1000; timer.scheduleAtFixedRate(task, millis, millis); } } /** * Stops timer if it is running */ private synchronized void stopTimer() { if (timer != null) { timer.cancel(); timer = null; } } /** * Timer callback. */ private void timerEvent() throws IOException { if (isMonitoring) { Collection<Updater> myUpdaters; synchronized (this) { myUpdaters = new ArrayList<Updater>(updaters); } // Run all the registered updates without holding a lock // on this context for (Updater updater : myUpdaters) { try { updater.doUpdates(this); } catch (Throwable throwable) { throwable.printStackTrace(); } } emitRecords(); } } /** * Emits the records. */ private synchronized void emitRecords() throws IOException { for (Map.Entry<String,RecordMap> recordEntry : bufferedData.entrySet()) { RecordMap recordMap = recordEntry.getValue(); synchronized (recordMap) { Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet (); for (Entry<TagMap, MetricMap> entry : entrySet) { OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue()); emitRecord(contextName, recordEntry.getKey(), outRec); } } } flush(); } /** * Retrieves all the records managed by this MetricsContext. * Useful for monitoring systems that are polling-based. * @return A non-null collection of all monitoring records. */ public synchronized Map<String, Collection<OutputRecord>> getAllRecords() { Map<String, Collection<OutputRecord>> out = new TreeMap<String, Collection<OutputRecord>>(); for (Map.Entry<String,RecordMap> recordEntry : bufferedData.entrySet()) { RecordMap recordMap = recordEntry.getValue(); synchronized (recordMap) { List<OutputRecord> records = new ArrayList<OutputRecord>(); Set<Entry<TagMap, MetricMap>> entrySet = recordMap.entrySet(); for (Entry<TagMap, MetricMap> entry : entrySet) { OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue()); records.add(outRec); } out.put(recordEntry.getKey(), records); } } return out; } /** * Sends a record to the metrics system. */ protected abstract void emitRecord(String contextName, String recordName, OutputRecord outRec) throws IOException; /** * Called each period after all records have been emitted, this method does nothing. * Subclasses may override it in order to perform some kind of flush. */ protected void flush() throws IOException { } /** * Called by MetricsRecordImpl.update(). Creates or updates a row in * the internal table of metric data. 
*/ protected void update(MetricsRecordImpl record) { String recordName = record.getRecordName(); TagMap tagTable = record.getTagTable(); Map<String,MetricValue> metricUpdates = record.getMetricTable(); RecordMap recordMap = getRecordMap(recordName); synchronized (recordMap) { MetricMap metricMap = recordMap.get(tagTable); if (metricMap == null) { metricMap = new MetricMap(); TagMap tagMap = new TagMap(tagTable); // clone tags recordMap.put(tagMap, metricMap); } Set<Entry<String, MetricValue>> entrySet = metricUpdates.entrySet(); for (Entry<String, MetricValue> entry : entrySet) { String metricName = entry.getKey (); MetricValue updateValue = entry.getValue (); Number updateNumber = updateValue.getNumber(); Number currentNumber = metricMap.get(metricName); if (currentNumber == null || updateValue.isAbsolute()) { metricMap.put(metricName, updateNumber); } else { Number newNumber = sum(updateNumber, currentNumber); metricMap.put(metricName, newNumber); } } } } private synchronized RecordMap getRecordMap(String recordName) { return bufferedData.get(recordName); } /** * Adds two numbers, coercing the second to the type of the first. * */ private Number sum(Number a, Number b) { if (a instanceof Integer) { return Integer.valueOf(a.intValue() + b.intValue()); } else if (a instanceof Float) { return new Float(a.floatValue() + b.floatValue()); } else if (a instanceof Short) { return Short.valueOf((short)(a.shortValue() + b.shortValue())); } else if (a instanceof Byte) { return Byte.valueOf((byte)(a.byteValue() + b.byteValue())); } else if (a instanceof Long) { return Long.valueOf((a.longValue() + b.longValue())); } else { // should never happen throw new MetricsException("Invalid number type"); } } /** * Called by MetricsRecordImpl.remove(). Removes all matching rows in * the internal table of metric data. A row matches if it has the same * tag names and values as record, but it may also have additional * tags. */ protected void remove(MetricsRecordImpl record) { String recordName = record.getRecordName(); TagMap tagTable = record.getTagTable(); RecordMap recordMap = getRecordMap(recordName); synchronized (recordMap) { Iterator<TagMap> it = recordMap.keySet().iterator(); while (it.hasNext()) { TagMap rowTags = it.next(); if (rowTags.containsAll(tagTable)) { it.remove(); } } } } /** * Returns the timer period. */ public int getPeriod() { return period; } /** * Sets the timer period */ protected void setPeriod(int period) { this.period = period; } /** * If a period is set in the attribute passed in, override * the default with it. */ protected void parseAndSetPeriod(String attributeName) { String periodStr = getAttribute(attributeName); if (periodStr != null) { int period = 0; try { period = Integer.parseInt(periodStr); } catch (NumberFormatException nfe) { } if (period <= 0) { throw new MetricsException("Invalid period: " + periodStr); } setPeriod(period); } } }
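/*
 * Illustrative sketch, not part of the Hadoop source above: a minimal
 * AbstractMetricsContext subclass that prints each emitted record to standard
 * output. Only emitRecord() must be overridden; honouring a "period" attribute
 * via parseAndSetPeriod() is optional. The class name "StdoutContext" and the
 * use of OutputRecord.getMetricNames()/getMetric() accessors are assumptions
 * made for this example.
 */
import java.io.IOException;

import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
import org.apache.hadoop.metrics.spi.OutputRecord;

public class StdoutContext extends AbstractMetricsContext {

  private static final String PERIOD_PROPERTY = "period";

  public StdoutContext() {
  }

  @Override
  public void init(String contextName, ContextFactory factory) {
    super.init(contextName, factory);
    parseAndSetPeriod(PERIOD_PROPERTY);   // optional "<context>.period" attribute
  }

  @Override
  protected void emitRecord(String contextName, String recordName,
                            OutputRecord outRec) throws IOException {
    // A real context would hand the record to its client library here.
    StringBuilder line = new StringBuilder(contextName + "." + recordName);
    for (String metricName : outRec.getMetricNames()) {
      line.append(' ').append(metricName).append('=')
          .append(outRec.getMetric(metricName));
    }
    System.out.println(line);
  }
}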
14,497
29.078838
96
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContextWithUpdateThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; /** * A null context which starts a thread that periodically calls the registered * updaters while monitoring is started, so that the data stays correctly sampled. * In all other respects, this is like the NULL context: no data is emitted. * This is suitable for monitoring systems such as JMX, which read the metrics * only when a client requests the data. * * The default implementations of startMonitoring and stopMonitoring inherited * from AbstractMetricsContext are sufficient. * */ @InterfaceAudience.Public @InterfaceStability.Evolving public class NullContextWithUpdateThread extends AbstractMetricsContext { private static final String PERIOD_PROPERTY = "period"; /** Creates a new instance of NullContextWithUpdateThread */ @InterfaceAudience.Private public NullContextWithUpdateThread() { } @InterfaceAudience.Private public void init(String contextName, ContextFactory factory) { super.init(contextName, factory); parseAndSetPeriod(PERIOD_PROPERTY); } /** * Do-nothing version of emitRecord */ @InterfaceAudience.Private protected void emitRecord(String contextName, String recordName, OutputRecord outRec) {} /** * Do-nothing version of update */ @InterfaceAudience.Private protected void update(MetricsRecordImpl record) { } /** * Do-nothing version of remove */ @InterfaceAudience.Private protected void remove(MetricsRecordImpl record) { } }
2,455
30.896104
76
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NullContext.java
/* * NullContext.java * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Null metrics context: a metrics context which does nothing. Used as the * default context, so that no performance data is emitted if no configuration * data is found. * */ @InterfaceAudience.Public @InterfaceStability.Evolving public class NullContext extends AbstractMetricsContext { /** Creates a new instance of NullContext */ @InterfaceAudience.Private public NullContext() { } /** * Do-nothing version of startMonitoring */ @InterfaceAudience.Private public void startMonitoring() { } /** * Do-nothing version of emitRecord */ @InterfaceAudience.Private protected void emitRecord(String contextName, String recordName, OutputRecord outRec) {} /** * Do-nothing version of update */ @InterfaceAudience.Private protected void update(MetricsRecordImpl record) { } /** * Do-nothing version of remove */ @InterfaceAudience.Private protected void remove(MetricsRecordImpl record) { } }
2,002
28.028986
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricValue.java
/* * MetricValue.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A Number that is either an absolute or an incremental amount. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MetricValue { public static final boolean ABSOLUTE = false; public static final boolean INCREMENT = true; private boolean isIncrement; private Number number; /** Creates a new instance of MetricValue */ public MetricValue(Number number, boolean isIncrement) { this.number = number; this.isIncrement = isIncrement; } public boolean isIncrement() { return isIncrement; } public boolean isAbsolute() { return !isIncrement; } public Number getNumber() { return number; } }
1,673
27.862069
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import java.io.IOException; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.ArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; @InterfaceAudience.Public @InterfaceStability.Evolving public class CompositeContext extends AbstractMetricsContext { private static final Log LOG = LogFactory.getLog(CompositeContext.class); private static final String ARITY_LABEL = "arity"; private static final String SUB_FMT = "%s.sub%d"; private final ArrayList<MetricsContext> subctxt = new ArrayList<MetricsContext>(); @InterfaceAudience.Private public CompositeContext() { } @InterfaceAudience.Private public void init(String contextName, ContextFactory factory) { super.init(contextName, factory); int nKids; try { String sKids = getAttribute(ARITY_LABEL); nKids = Integer.parseInt(sKids); } catch (Exception e) { LOG.error("Unable to initialize composite metric " + contextName + ": could not init arity", e); return; } for (int i = 0; i < nKids; ++i) { MetricsContext ctxt = MetricsUtil.getContext( String.format(SUB_FMT, contextName, i), contextName); if (null != ctxt) { subctxt.add(ctxt); } } } @InterfaceAudience.Private @Override public MetricsRecord newRecord(String recordName) { return (MetricsRecord) Proxy.newProxyInstance( MetricsRecord.class.getClassLoader(), new Class[] { MetricsRecord.class }, new MetricsRecordDelegator(recordName, subctxt)); } @InterfaceAudience.Private @Override protected void emitRecord(String contextName, String recordName, OutputRecord outRec) throws IOException { for (MetricsContext ctxt : subctxt) { try { ((AbstractMetricsContext)ctxt).emitRecord( contextName, recordName, outRec); if (contextName == null || recordName == null || outRec == null) { throw new IOException(contextName + ":" + recordName + ":" + outRec); } } catch (IOException e) { LOG.warn("emitRecord failed: " + ctxt.getContextName(), e); } } } @InterfaceAudience.Private @Override protected void flush() throws IOException { for (MetricsContext ctxt : subctxt) { try { ((AbstractMetricsContext)ctxt).flush(); } catch (IOException e) { LOG.warn("flush failed: " + ctxt.getContextName(), e); } } } @InterfaceAudience.Private @Override public void startMonitoring() throws IOException { for (MetricsContext ctxt : subctxt) { try { ctxt.startMonitoring(); } 
catch (IOException e) { LOG.warn("startMonitoring failed: " + ctxt.getContextName(), e); } } } @InterfaceAudience.Private @Override public void stopMonitoring() { for (MetricsContext ctxt : subctxt) { ctxt.stopMonitoring(); } } /** * Return true if all subcontexts are monitoring. */ @InterfaceAudience.Private @Override public boolean isMonitoring() { boolean ret = true; for (MetricsContext ctxt : subctxt) { ret &= ctxt.isMonitoring(); } return ret; } @InterfaceAudience.Private @Override public void close() { for (MetricsContext ctxt : subctxt) { ctxt.close(); } } @InterfaceAudience.Private @Override public void registerUpdater(Updater updater) { for (MetricsContext ctxt : subctxt) { ctxt.registerUpdater(updater); } } @InterfaceAudience.Private @Override public void unregisterUpdater(Updater updater) { for (MetricsContext ctxt : subctxt) { ctxt.unregisterUpdater(updater); } } private static class MetricsRecordDelegator implements InvocationHandler { private static final Method m_getRecordName = initMethod(); private static Method initMethod() { try { return MetricsRecord.class.getMethod("getRecordName", new Class[0]); } catch (Exception e) { throw new RuntimeException("Internal error", e); } } private final String recordName; private final ArrayList<MetricsRecord> subrecs; MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) { this.recordName = recordName; this.subrecs = new ArrayList<MetricsRecord>(ctxts.size()); for (MetricsContext ctxt : ctxts) { subrecs.add(ctxt.createRecord(recordName)); } } public Object invoke(Object p, Method m, Object[] args) throws Throwable { if (m_getRecordName.equals(m)) { return recordName; } assert Void.TYPE.equals(m.getReturnType()); for (MetricsRecord rec : subrecs) { m.invoke(rec, args); } return null; } } }
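/*
 * Illustrative sketch, not part of the source above: CompositeContext is driven
 * purely by factory attributes. Assuming the usual hadoop-metrics.properties
 * syntax (<context>.<attribute>=<value>), a "dfs" context fanning out to two
 * sub-contexts could be configured roughly as below; the "arity" and "sub<N>"
 * names come from ARITY_LABEL and SUB_FMT in init() above, while the
 * FileContext/GangliaContext class names are assumptions for the example.
 *
 *   dfs.class=org.apache.hadoop.metrics.spi.CompositeContext
 *   dfs.arity=2
 *   dfs.sub0.class=org.apache.hadoop.metrics.file.FileContext
 *   dfs.sub1.class=org.apache.hadoop.metrics.ganglia.GangliaContext
 */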
6,032
29.014925
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
/* * MetricsRecordImpl.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import java.util.LinkedHashMap; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.MetricsException; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap; /** * An implementation of MetricsRecord. Keeps a back-pointer to the context * from which it was created, and delegates back to it on <code>update</code> * and <code>remove()</code>. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MetricsRecordImpl implements MetricsRecord { private TagMap tagTable = new TagMap(); private Map<String,MetricValue> metricTable = new LinkedHashMap<String,MetricValue>(); private String recordName; private AbstractMetricsContext context; /** Creates a new instance of FileRecord */ protected MetricsRecordImpl(String recordName, AbstractMetricsContext context) { this.recordName = recordName; this.context = context; } /** * Returns the record name. * * @return the record name */ public String getRecordName() { return recordName; } /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public void setTag(String tagName, String tagValue) { if (tagValue == null) { tagValue = ""; } tagTable.put(tagName, tagValue); } /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public void setTag(String tagName, int tagValue) { tagTable.put(tagName, Integer.valueOf(tagValue)); } /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public void setTag(String tagName, long tagValue) { tagTable.put(tagName, Long.valueOf(tagValue)); } /** * Sets the named tag to the specified value. * * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public void setTag(String tagName, short tagValue) { tagTable.put(tagName, Short.valueOf(tagValue)); } /** * Sets the named tag to the specified value. 
* * @param tagName name of the tag * @param tagValue new value of the tag * @throws MetricsException if the tagName conflicts with the configuration */ public void setTag(String tagName, byte tagValue) { tagTable.put(tagName, Byte.valueOf(tagValue)); } /** * Removes any tag of the specified name. */ public void removeTag(String tagName) { tagTable.remove(tagName); } /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void setMetric(String metricName, int metricValue) { setAbsolute(metricName, Integer.valueOf(metricValue)); } /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void setMetric(String metricName, long metricValue) { setAbsolute(metricName, Long.valueOf(metricValue)); } /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void setMetric(String metricName, short metricValue) { setAbsolute(metricName, Short.valueOf(metricValue)); } /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void setMetric(String metricName, byte metricValue) { setAbsolute(metricName, Byte.valueOf(metricValue)); } /** * Sets the named metric to the specified value. * * @param metricName name of the metric * @param metricValue new value of the metric * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void setMetric(String metricName, float metricValue) { setAbsolute(metricName, new Float(metricValue)); } /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void incrMetric(String metricName, int metricValue) { setIncrement(metricName, Integer.valueOf(metricValue)); } /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void incrMetric(String metricName, long metricValue) { setIncrement(metricName, Long.valueOf(metricValue)); } /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void incrMetric(String metricName, short metricValue) { setIncrement(metricName, Short.valueOf(metricValue)); } /** * Increments the named metric by the specified value. 
* * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void incrMetric(String metricName, byte metricValue) { setIncrement(metricName, Byte.valueOf(metricValue)); } /** * Increments the named metric by the specified value. * * @param metricName name of the metric * @param metricValue incremental value * @throws MetricsException if the metricName or the type of the metricValue * conflicts with the configuration */ public void incrMetric(String metricName, float metricValue) { setIncrement(metricName, new Float(metricValue)); } private void setAbsolute(String metricName, Number metricValue) { metricTable.put(metricName, new MetricValue(metricValue, MetricValue.ABSOLUTE)); } private void setIncrement(String metricName, Number metricValue) { metricTable.put(metricName, new MetricValue(metricValue, MetricValue.INCREMENT)); } /** * Updates the table of buffered data which is to be sent periodically. * If the tag values match an existing row, that row is updated; * otherwise, a new row is added. */ public void update() { context.update(this); } /** * Removes the row, if it exists, in the buffered data table having tags * that equal the tags that have been set on this record. */ public void remove() { context.remove(this); } TagMap getTagTable() { return tagTable; } Map<String, MetricValue> getMetricTable() { return metricTable; } }
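/*
 * Illustrative sketch, not part of the source above: typical client use of a
 * MetricsRecord obtained from a context. The context name "example", the record
 * name "requests" and the tag/metric names are assumptions for this example.
 */
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;

public class MetricsRecordUsage {
  public static void main(String[] args) throws Exception {
    MetricsContext context = MetricsUtil.getContext("example");
    MetricsRecord record = context.createRecord("requests");

    record.setTag("hostName", "node-1");   // rows are keyed by their tag values
    record.incrMetric("numRequests", 1);   // incremental value, summed per interval
    record.setMetric("queueLength", 42);   // absolute value, overwrites the buffered one
    record.update();                       // copies the changes into the context's buffer

    context.startMonitoring();             // starts the timer that calls emitRecord()
  }
}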
8,909
30.595745
88
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/NoEmitMetricsContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.MetricsServlet; /** * A MetricsContext that does not emit data, but, unlike NullContextWithUpdate, * does save it for retrieval with getAllRecords(). * * This is useful if you want to support {@link MetricsServlet}, but * not emit metrics in any other way. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class NoEmitMetricsContext extends AbstractMetricsContext { private static final String PERIOD_PROPERTY = "period"; /** Creates a new instance of NullContextWithUpdateThread */ @InterfaceAudience.Private public NoEmitMetricsContext() { } @InterfaceAudience.Private public void init(String contextName, ContextFactory factory) { super.init(contextName, factory); parseAndSetPeriod(PERIOD_PROPERTY); } /** * Do-nothing version of emitRecord */ @InterfaceAudience.Private protected void emitRecord(String contextName, String recordName, OutputRecord outRec) { } }
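/*
 * Illustrative sketch, not part of the source above: to serve metrics only
 * through MetricsServlet, a context can be bound to NoEmitMetricsContext in the
 * metrics configuration. Assuming the usual hadoop-metrics.properties syntax,
 * the "dfs" context name and the period value are examples only; "period" is
 * the attribute parsed in init() above.
 *
 *   dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
 *   dfs.period=10
 */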
2,063
35.210526
79
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java
/* * Util.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.spi; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.net.NetUtils; /** * Static utility methods */ @InterfaceAudience.Public @InterfaceStability.Evolving public class Util { /** * This class is not intended to be instantiated */ private Util() {} /** * Parses a space and/or comma separated sequence of server specifications * of the form <i>hostname</i> or <i>hostname:port</i>. If * the specs string is null, defaults to localhost:defaultPort. * * @return a list of InetSocketAddress objects. */ public static List<InetSocketAddress> parse(String specs, int defaultPort) { List<InetSocketAddress> result = new ArrayList<InetSocketAddress>(1); if (specs == null) { result.add(new InetSocketAddress("localhost", defaultPort)); } else { String[] specStrings = specs.split("[ ,]+"); for (String specString : specStrings) { result.add(NetUtils.createSocketAddr(specString, defaultPort)); } } return result; } }
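/*
 * Illustrative sketch, not part of the source above: parsing a comma or space
 * separated server list with Util.parse(). The host names and the default port
 * 8649 are assumptions for this example.
 */
import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.metrics.spi.Util;

public class UtilParseExample {
  public static void main(String[] args) {
    // "gmond2" has no explicit port, so it falls back to the default 8649.
    List<InetSocketAddress> servers =
        Util.parse("gmond1.example.com:8650, gmond2.example.com", 8649);
    for (InetSocketAddress addr : servers) {
      System.out.println(addr);
    }

    // A null spec yields a single localhost entry on the default port.
    System.out.println(Util.parse(null, 8649));
  }
}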
2,061
30.242424
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * The MetricsTimeVaryingInt class is for a metric that naturally * varies over time (e.g. number of files created). The metric is accumulated * over an interval (set in the metrics config file); the metric is * published at the end of each interval and then * reset to zero. Hence the counter has the value in the current interval. * * Note: if one wants a time associated with the metric, then use * @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsTimeVaryingInt extends MetricsBase { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.metrics.util"); private int currentValue; private int previousIntervalValue; /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * @param description - the description */ public MetricsTimeVaryingInt(final String nam, final MetricsRegistry registry, final String description) { super(nam, description); currentValue = 0; previousIntervalValue = 0; registry.add(nam, this); } /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * A description of {@link #NO_DESCRIPTION} is used */ public MetricsTimeVaryingInt(final String nam, final MetricsRegistry registry) { this(nam, registry, NO_DESCRIPTION); } /** * Increment the metric by incr * @param incr - number of operations */ public synchronized void inc(final int incr) { currentValue += incr; } /** * Increment the metric by one */ public synchronized void inc() { currentValue++; } private synchronized void intervalHeartBeat() { previousIntervalValue = currentValue; currentValue = 0; } /** * Push the delta metrics to the mr. * The delta is since the last push/interval. * * Note this does NOT push to JMX * (JMX gets the info via {@link #previousIntervalValue}) * * @param mr */ public synchronized void pushMetric(final MetricsRecord mr) { intervalHeartBeat(); try { mr.incrMetric(getName(), getPreviousIntervalValue()); } catch (Exception e) { LOG.info("pushMetric failed for " + getName() + "\n" , e); } } /** * The value at the previous interval * @return prev interval value */ public synchronized int getPreviousIntervalValue() { return previousIntervalValue; } /** * The value at the current interval * @return current interval value */ public synchronized int getCurrentIntervalValue() { return currentValue; } }
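/*
 * Illustrative sketch, not part of the source above: declaring and updating a
 * MetricsTimeVaryingInt. The metric and class names are assumptions for this
 * example; pushMetric() is normally called from an Updater at the end of each
 * interval with the record the metric should be written to.
 */
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;

public class TimeVaryingIntExample {
  private final MetricsRegistry registry = new MetricsRegistry();
  private final MetricsTimeVaryingInt filesCreated =
      new MetricsTimeVaryingInt("filesCreated", registry,
                                "Files created in the last interval");

  public void onFileCreated() {
    filesCreated.inc();        // one more operation in the current interval
  }

  public void onBatchCreated(int n) {
    filesCreated.inc(n);       // n operations at once
  }
}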
3,932
29.726563
82
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"}) @InterfaceStability.Evolving package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,060
45.130435
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsLongValue.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; /** * The MetricsLongValue class is for a metric that is not time varied * but changes only when it is set. * Each time its value is set, it is published only *once* at the next update * call. * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsLongValue extends MetricsBase{ private long value; private boolean changed; /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered */ public MetricsLongValue(final String nam, final MetricsRegistry registry, final String description) { super(nam, description); value = 0; changed = false; registry.add(nam, this); } /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * A description of {@link #NO_DESCRIPTION} is used */ public MetricsLongValue(final String nam, MetricsRegistry registry) { this(nam, registry, NO_DESCRIPTION); } /** * Set the value * @param newValue */ public synchronized void set(final long newValue) { value = newValue; changed = true; } /** * Get value * @return the value last set */ public synchronized long get() { return value; } /** * Push the metric to the mr. * The metric is pushed only if it was updated since last push * * Note this does NOT push to JMX * (JMX gets the info via {@link #get()} * * @param mr */ public synchronized void pushMetric(final MetricsRecord mr) { if (changed) mr.setMetric(getName(), value); changed = false; } }
2,722
28.923077
103
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java
package org.apache.hadoop.metrics.util; /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * The MetricsTimeVaryingLong class is for a metric that naturally * varies over time (e.g. number of files created). The metric is accumulated * over an interval (set in the metrics config file); the metric is * published at the end of each interval and then * reset to zero. Hence the counter has the value in the current interval. * * Note: if one wants a time associated with the metric, then use * @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsTimeVaryingLong extends MetricsBase{ private static final Log LOG = LogFactory.getLog("org.apache.hadoop.metrics.util"); private long currentValue; private long previousIntervalValue; /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered */ public MetricsTimeVaryingLong(final String nam, MetricsRegistry registry, final String description) { super(nam, description); currentValue = 0; previousIntervalValue = 0; registry.add(nam, this); } /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * A description of {@link #NO_DESCRIPTION} is used */ public MetricsTimeVaryingLong(final String nam, MetricsRegistry registry) { this(nam, registry, NO_DESCRIPTION); } /** * Increment the metric by incr * @param incr - number of operations */ public synchronized void inc(final long incr) { currentValue += incr; } /** * Increment the metric by one */ public synchronized void inc() { currentValue++; } private synchronized void intervalHeartBeat() { previousIntervalValue = currentValue; currentValue = 0; } /** * Push the delta metrics to the mr. * The delta is since the last push/interval. * * Note this does NOT push to JMX * (JMX gets the info via {@link #previousIntervalValue}) * * @param mr */ public synchronized void pushMetric(final MetricsRecord mr) { intervalHeartBeat(); try { mr.incrMetric(getName(), getPreviousIntervalValue()); } catch (Exception e) { LOG.info("pushMetric failed for " + getName() + "\n" , e); } } /** * The value at the previous interval * @return prev interval value */ public synchronized long getPreviousIntervalValue() { return previousIntervalValue; } /** * The value at the current interval * @return current interval value */ public synchronized long getCurrentIntervalValue() { return currentValue; } }
3,821
29.822581
103
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsRegistry.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import java.util.Collection; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; /** * * This is the registry for metrics. * A related set of metrics should be declared in a holding class and registered * in a registry for those metrics, which is also stored in the holding class. * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsRegistry { private ConcurrentHashMap<String, MetricsBase> metricsList = new ConcurrentHashMap<String, MetricsBase>(); public MetricsRegistry() { } /** * * @return number of metrics in the registry */ public int size() { return metricsList.size(); } /** * Add a new metric to the registry * @param metricsName - the name * @param theMetricsObj - the metrics * @throws IllegalArgumentException if a name is already registered */ public void add(final String metricsName, final MetricsBase theMetricsObj) { if (metricsList.putIfAbsent(metricsName, theMetricsObj) != null) { throw new IllegalArgumentException("Duplicate metricsName:" + metricsName); } } /** * * @param metricsName * @return the metric if one is registered under the supplied name. * Returns null if none is registered */ public MetricsBase get(final String metricsName) { return metricsList.get(metricsName); } /** * * @return the list of metrics names */ public Collection<String> getKeyList() { return metricsList.keySet(); } /** * * @return the list of metrics */ public Collection<MetricsBase> getMetricsList() { return metricsList.values(); } }
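/*
 * Illustrative sketch, not part of the source above: the "holding class"
 * pattern described in the javadoc, with related metrics declared next to the
 * registry that holds them. The class and metric names are assumptions for
 * this example; each metric constructor registers itself via registry.add().
 */
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

public class ExampleServiceMetrics {
  public final MetricsRegistry registry = new MetricsRegistry();

  public final MetricsTimeVaryingInt requests =
      new MetricsTimeVaryingInt("requests", registry, "Requests per interval");
  public final MetricsIntValue queueLength =
      new MetricsIntValue("queueLength", registry, "Current queue length");
  public final MetricsTimeVaryingRate requestTime =
      new MetricsTimeVaryingRate("requestTime", registry, "Time per request");
}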
2,565
27.831461
81
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; /** * * This is base class for all metrics * */ @InterfaceAudience.Private public abstract class MetricsBase { public static final String NO_DESCRIPTION = "NoDescription"; final private String name; final private String description; protected MetricsBase(final String nam) { name = nam; description = NO_DESCRIPTION; } protected MetricsBase(final String nam, final String desc) { name = nam; description = desc; } public abstract void pushMetric(final MetricsRecord mr); public String getName() { return name; } public String getDescription() { return description; }; }
1,580
30.62
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsDynamicMBeanBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import javax.management.Attribute; import javax.management.AttributeList; import javax.management.AttributeNotFoundException; import javax.management.DynamicMBean; import javax.management.InvalidAttributeValueException; import javax.management.MBeanAttributeInfo; import javax.management.MBeanException; import javax.management.MBeanInfo; import javax.management.MBeanOperationInfo; import javax.management.ReflectionException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsUtil; /** * This abstract base class facilitates creating dynamic MBeans automatically from * metrics. * The metrics constructors register metrics in a registry. * Different categories of metrics should be in different classes with their own * registry (as in NameNodeMetrics and DataNodeMetrics). * Then the MBean can be created passing the registry to the constructor.
* The MBean should be then registered using a mbean name (example): * MetricsHolder myMetrics = new MetricsHolder(); // has metrics and registry * MetricsTestMBean theMBean = new MetricsTestMBean(myMetrics.mregistry); * ObjectName mbeanName = MBeanUtil.registerMBean("ServiceFoo", * "TestStatistics", theMBean); * * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public abstract class MetricsDynamicMBeanBase implements DynamicMBean { private final static String AVG_TIME = "AvgTime"; private final static String MIN_TIME = "MinTime"; private final static String MAX_TIME = "MaxTime"; private final static String NUM_OPS = "NumOps"; private final static String RESET_ALL_MIN_MAX_OP = "resetAllMinMax"; private MetricsRegistry metricsRegistry; private MBeanInfo mbeanInfo; private Map<String, MetricsBase> metricsRateAttributeMod; private int numEntriesInRegistry = 0; private String mbeanDescription; protected MetricsDynamicMBeanBase(final MetricsRegistry mr, final String aMBeanDescription) { metricsRegistry = mr; mbeanDescription = aMBeanDescription; metricsRateAttributeMod = new ConcurrentHashMap<String, MetricsBase>(); createMBeanInfo(); } private void updateMbeanInfoIfMetricsListChanged() { if (numEntriesInRegistry != metricsRegistry.size()) createMBeanInfo(); } private void createMBeanInfo() { boolean needsMinMaxResetOperation = false; List<MBeanAttributeInfo> attributesInfo = new ArrayList<MBeanAttributeInfo>(); MBeanOperationInfo[] operationsInfo = null; numEntriesInRegistry = metricsRegistry.size(); for (MetricsBase o : metricsRegistry.getMetricsList()) { if (MetricsTimeVaryingRate.class.isInstance(o)) { // For each of the metrics there are 3 different attributes attributesInfo.add(new MBeanAttributeInfo(o.getName() + NUM_OPS, "java.lang.Integer", o.getDescription(), true, false, false)); attributesInfo.add(new MBeanAttributeInfo(o.getName() + AVG_TIME, "java.lang.Long", o.getDescription(), true, false, false)); attributesInfo.add(new MBeanAttributeInfo(o.getName() + MIN_TIME, "java.lang.Long", o.getDescription(), true, false, false)); attributesInfo.add(new MBeanAttributeInfo(o.getName() + MAX_TIME, "java.lang.Long", o.getDescription(), true, false, false)); needsMinMaxResetOperation = true; // the min and max can be reset. // Note the special attributes (AVG_TIME, MIN_TIME, ..) are derived from metrics // Rather than check for the suffix we store them in a map. 
metricsRateAttributeMod.put(o.getName() + NUM_OPS, o); metricsRateAttributeMod.put(o.getName() + AVG_TIME, o); metricsRateAttributeMod.put(o.getName() + MIN_TIME, o); metricsRateAttributeMod.put(o.getName() + MAX_TIME, o); } else if ( MetricsIntValue.class.isInstance(o) || MetricsTimeVaryingInt.class.isInstance(o) ) { attributesInfo.add(new MBeanAttributeInfo(o.getName(), "java.lang.Integer", o.getDescription(), true, false, false)); } else if ( MetricsLongValue.class.isInstance(o) || MetricsTimeVaryingLong.class.isInstance(o) ) { attributesInfo.add(new MBeanAttributeInfo(o.getName(), "java.lang.Long", o.getDescription(), true, false, false)); } else { MetricsUtil.LOG.error("unknown metrics type: " + o.getClass().getName()); } if (needsMinMaxResetOperation) { operationsInfo = new MBeanOperationInfo[] { new MBeanOperationInfo(RESET_ALL_MIN_MAX_OP, "Reset (zero) All Min Max", null, "void", MBeanOperationInfo.ACTION) }; } } MBeanAttributeInfo[] attrArray = new MBeanAttributeInfo[attributesInfo.size()]; mbeanInfo = new MBeanInfo(this.getClass().getName(), mbeanDescription, attributesInfo.toArray(attrArray), null, operationsInfo, null); } @Override public Object getAttribute(String attributeName) throws AttributeNotFoundException, MBeanException, ReflectionException { if (attributeName == null || attributeName.isEmpty()) throw new IllegalArgumentException(); updateMbeanInfoIfMetricsListChanged(); Object o = metricsRateAttributeMod.get(attributeName); if (o == null) { o = metricsRegistry.get(attributeName); } if (o == null) throw new AttributeNotFoundException(); if (o instanceof MetricsIntValue) return ((MetricsIntValue) o).get(); else if (o instanceof MetricsLongValue) return ((MetricsLongValue) o).get(); else if (o instanceof MetricsTimeVaryingInt) return ((MetricsTimeVaryingInt) o).getPreviousIntervalValue(); else if (o instanceof MetricsTimeVaryingLong) return ((MetricsTimeVaryingLong) o).getPreviousIntervalValue(); else if (o instanceof MetricsTimeVaryingRate) { MetricsTimeVaryingRate or = (MetricsTimeVaryingRate) o; if (attributeName.endsWith(NUM_OPS)) return or.getPreviousIntervalNumOps(); else if (attributeName.endsWith(AVG_TIME)) return or.getPreviousIntervalAverageTime(); else if (attributeName.endsWith(MIN_TIME)) return or.getMinTime(); else if (attributeName.endsWith(MAX_TIME)) return or.getMaxTime(); else { MetricsUtil.LOG.error("Unexpected attribute suffix"); throw new AttributeNotFoundException(); } } else { MetricsUtil.LOG.error("unknown metrics type: " + o.getClass().getName()); throw new AttributeNotFoundException(); } } @Override public AttributeList getAttributes(String[] attributeNames) { if (attributeNames == null || attributeNames.length == 0) throw new IllegalArgumentException(); updateMbeanInfoIfMetricsListChanged(); AttributeList result = new AttributeList(attributeNames.length); for (String iAttributeName : attributeNames) { try { Object value = getAttribute(iAttributeName); result.add(new Attribute(iAttributeName, value)); } catch (Exception e) { continue; } } return result; } @Override public MBeanInfo getMBeanInfo() { return mbeanInfo; } @Override public Object invoke(String actionName, Object[] parms, String[] signature) throws MBeanException, ReflectionException { if (actionName == null || actionName.isEmpty()) throw new IllegalArgumentException(); // Right now we support only one fixed operation (if it applies) if (!(actionName.equals(RESET_ALL_MIN_MAX_OP)) || mbeanInfo.getOperations().length != 1) { throw new ReflectionException(new 
NoSuchMethodException(actionName)); } for (MetricsBase m : metricsRegistry.getMetricsList()) { if ( MetricsTimeVaryingRate.class.isInstance(m) ) { MetricsTimeVaryingRate.class.cast(m).resetMinMax(); } } return null; } @Override public void setAttribute(Attribute attribute) throws AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException { throw new ReflectionException(new NoSuchMethodException("set" + attribute)); } @Override public AttributeList setAttributes(AttributeList attributes) { return null; } }
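/*
 * Illustrative sketch, not part of the source above: a concrete MBean built on
 * MetricsDynamicMBeanBase and registered with the naming convention used by
 * MBeanUtil, mirroring the pattern in the class javadoc. "ExampleActivityMBean",
 * "ServiceFoo" and "ExampleActivity" are assumptions for this example; the
 * registry would typically come from a metrics holding class.
 */
import javax.management.ObjectName;

import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;

public class ExampleActivityMBean extends MetricsDynamicMBeanBase {
  private final ObjectName mbeanName;

  public ExampleActivityMBean(MetricsRegistry registry) {
    super(registry, "Activity statistics of the example service");
    mbeanName = MBeanUtil.registerMBean("ServiceFoo", "ExampleActivity", this);
  }

  public void shutdown() {
    if (mbeanName != null) {
      MBeanUtil.unregisterMBean(mbeanName);
    }
  }
}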
9,278
39.519651
104
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * The MetricsTimeVaryingRate class is for a rate based metric that * naturally varies over time (e.g. time taken to create a file). * The rate is averaged at each interval heart beat (the interval * is set in the metrics config file). * This class also keeps track of the min and max rates along with * a method to reset the min-max. * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsTimeVaryingRate extends MetricsBase { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.metrics.util"); static class Metrics { int numOperations = 0; long time = 0; // total time or average time void set(final Metrics resetTo) { numOperations = resetTo.numOperations; time = resetTo.time; } void reset() { numOperations = 0; time = 0; } } static class MinMax { long minTime = -1; long maxTime = 0; void set(final MinMax newVal) { minTime = newVal.minTime; maxTime = newVal.maxTime; } void reset() { minTime = -1; maxTime = 0; } void update(final long time) { // update min max minTime = (minTime == -1) ? 
time : Math.min(minTime, time); minTime = Math.min(minTime, time); maxTime = Math.max(maxTime, time); } } private Metrics currentData; private Metrics previousIntervalData; private MinMax minMax; /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered */ public MetricsTimeVaryingRate(final String nam, final MetricsRegistry registry, final String description) { super(nam, description); currentData = new Metrics(); previousIntervalData = new Metrics(); minMax = new MinMax(); registry.add(nam, this); } /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * A description of {@link #NO_DESCRIPTION} is used */ public MetricsTimeVaryingRate(final String nam, MetricsRegistry registry) { this(nam, registry, NO_DESCRIPTION); } /** * Increment the metrics for numOps operations * @param numOps - number of operations * @param time - time for numOps operations */ public synchronized void inc(final int numOps, final long time) { currentData.numOperations += numOps; currentData.time += time; long timePerOps = time/numOps; minMax.update(timePerOps); } /** * Increment the metrics for one operation * @param time for one operation */ public synchronized void inc(final long time) { currentData.numOperations++; currentData.time += time; minMax.update(time); } private synchronized void intervalHeartBeat() { previousIntervalData.numOperations = currentData.numOperations; previousIntervalData.time = (currentData.numOperations == 0) ? 0 : currentData.time / currentData.numOperations; currentData.reset(); } /** * Push the delta metrics to the mr. * The delta is since the last push/interval. * * Note this does NOT push to JMX * (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and * {@link #getPreviousIntervalNumOps()} * * @param mr */ public synchronized void pushMetric(final MetricsRecord mr) { intervalHeartBeat(); try { mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps()); mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime()); } catch (Exception e) { LOG.info("pushMetric failed for " + getName() + "\n" , e); } } /** * The number of operations in the previous interval * @return - ops in prev interval */ public synchronized int getPreviousIntervalNumOps() { return previousIntervalData.numOperations; } /** * The average rate of an operation in the previous interval * @return - the average rate. */ public synchronized long getPreviousIntervalAverageTime() { return previousIntervalData.time; } /** * The min time for a single operation since the last reset * {@link #resetMinMax()} * @return min time for an operation */ public synchronized long getMinTime() { return minMax.minTime; } /** * The max time for a single operation since the last reset * {@link #resetMinMax()} * @return max time for an operation */ public synchronized long getMaxTime() { return minMax.maxTime; } /** * Reset the min max values */ public synchronized void resetMinMax() { minMax.reset(); } }
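/*
 * Illustrative sketch, not part of the source above: timing an operation with
 * MetricsTimeVaryingRate. The metric name and the use of
 * System.currentTimeMillis() are assumptions for this example; the averaged
 * time per operation is published when pushMetric() runs at the interval
 * heartbeat, while getMinTime()/getMaxTime() track extremes until
 * resetMinMax() is called.
 */
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

public class TimeVaryingRateExample {
  private final MetricsRegistry registry = new MetricsRegistry();
  private final MetricsTimeVaryingRate syncTime =
      new MetricsTimeVaryingRate("syncTime", registry, "Time per sync");

  public void doTimedOperation() {
    long start = System.currentTimeMillis();
    // ... the operation being measured ...
    syncTime.inc(System.currentTimeMillis() - start);
  }
}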
5,817
28.683673
109
java
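A minimal sketch of how the MetricsTimeVaryingRate class above is typically wired into a daemon of the older metrics framework: the metric is created against a MetricsRegistry, inc() is called once per operation, and pushMetric() runs from a doUpdates() callback. The class, context and record names below are illustrative assumptions, not taken from Hadoop code.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

public class FileCreateInstrumentation implements Updater {
  private final MetricsRegistry registry = new MetricsRegistry();
  // per the pushMetric code above, this publishes fileCreate_num_ops and fileCreate_avg_time
  private final MetricsTimeVaryingRate fileCreateRate =
      new MetricsTimeVaryingRate("fileCreate", registry, "time to create a file");
  private final MetricsRecord record;

  public FileCreateInstrumentation() {
    MetricsContext context = MetricsUtil.getContext("example"); // hypothetical context name
    record = MetricsUtil.createRecord(context, "exampleRecord");
    context.registerUpdater(this);
  }

  public void fileCreated(long elapsedMillis) {
    fileCreateRate.inc(elapsedMillis);   // one operation and its elapsed time
  }

  @Override
  public void doUpdates(MetricsContext unused) {
    fileCreateRate.pushMetric(record);   // average over the operations since the last push
    record.update();
  }
}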
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * The MetricsIntValue class is for a metric that is not time varied * but changes only when it is set. * Each time its value is set, it is published only *once* at the next update * call. * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) public class MetricsIntValue extends MetricsBase { private static final Log LOG = LogFactory.getLog("org.apache.hadoop.metrics.util"); private int value; private boolean changed; /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered */ public MetricsIntValue(final String nam, final MetricsRegistry registry, final String description) { super(nam, description); value = 0; changed = false; registry.add(nam, this); } /** * Constructor - create a new metric * @param nam the name of the metrics to be used to publish the metric * @param registry - where the metrics object will be registered * A description of {@link #NO_DESCRIPTION} is used */ public MetricsIntValue(final String nam, MetricsRegistry registry) { this(nam, registry, NO_DESCRIPTION); } /** * Set the value * @param newValue */ public synchronized void set(final int newValue) { value = newValue; changed = true; } /** * Get value * @return the value last set */ public synchronized int get() { return value; } /** * Push the metric to the mr. * The metric is pushed only if it was updated since last push * * Note this does NOT push to JMX * (JMX gets the info via {@link #get()} * * @param mr */ public synchronized void pushMetric(final MetricsRecord mr) { if (changed) { try { mr.setMetric(getName(), value); } catch (Exception e) { LOG.info("pushMetric failed for " + getName() + "\n", e); } } changed = false; } }
3,027
28.115385
102
java
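MetricsIntValue above suits slowly changing values such as a configured capacity; a hedged sketch, with all names invented for illustration.

import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;

public class CapacityInstrumentation {
  private final MetricsRegistry registry = new MetricsRegistry();
  private final MetricsIntValue capacity =
      new MetricsIntValue("capacityGB", registry, "configured capacity in GB");

  void capacityChanged(int newCapacityGB) {
    capacity.set(newCapacityGB);   // published once on the next push
  }

  void push(MetricsRecord record) {
    capacity.pushMetric(record);   // no-op unless set() was called since the last push
    record.update();
  }
}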
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MBeanUtil.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.metrics.util;

import java.lang.management.ManagementFactory;

import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.InstanceAlreadyExistsException;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * This util class provides a method to register an MBean using
 * our standard naming convention as described in the doc
 * for {@link #registerMBean(String, String, Object)}.
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public class MBeanUtil {

  /**
   * Register the MBean using our standard MBeanName format
   * "hadoop:service=<serviceName>,name=<nameName>"
   * Where the <serviceName> and <nameName> are the supplied parameters
   *
   * @param serviceName
   * @param nameName
   * @param theMbean - the MBean to register
   * @return the name used to register the MBean
   */
  static public ObjectName registerMBean(final String serviceName,
                                         final String nameName,
                                         final Object theMbean) {
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = getMBeanName(serviceName, nameName);
    try {
      mbs.registerMBean(theMbean, name);
      return name;
    } catch (InstanceAlreadyExistsException ie) {
      // Ignore if instance already exists
    } catch (Exception e) {
      e.printStackTrace();
    }
    return null;
  }

  static public void unregisterMBean(ObjectName mbeanName) {
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    if (mbeanName == null)
      return;
    try {
      mbs.unregisterMBean(mbeanName);
    } catch (InstanceNotFoundException e) {
      // ignore
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  static private ObjectName getMBeanName(final String serviceName,
                                         final String nameName) {
    ObjectName name = null;
    try {
      name = new ObjectName("hadoop:" +
                  "service=" + serviceName + ",name=" + nameName);
    } catch (MalformedObjectNameException e) {
      e.printStackTrace();
    }
    return name;
  }
}
3,042
32.43956
75
java
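A sketch of registering a standard MBean through MBeanUtil above; the service and bean names are hypothetical. Per the JMX standard-MBean convention, the implementation class name must match the interface name minus the MBean suffix.

import javax.management.ObjectName;
import org.apache.hadoop.metrics.util.MBeanUtil;

interface ExampleStatusMBean {
  int getQueueLength();
}

public class ExampleStatus implements ExampleStatusMBean {
  private volatile int queueLength;

  @Override
  public int getQueueLength() { return queueLength; }

  public static void main(String[] args) {
    ExampleStatus status = new ExampleStatus();
    // Registers under "hadoop:service=ExampleService,name=ExampleStatus"
    ObjectName name = MBeanUtil.registerMBean("ExampleService", "ExampleStatus", status);
    // ... later, on shutdown:
    MBeanUtil.unregisterMBean(name);
  }
}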
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
/* * GangliaContext.java * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.ganglia; import java.io.IOException; import java.net.*; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.io.Charsets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.ContextFactory; import org.apache.hadoop.metrics.spi.AbstractMetricsContext; import org.apache.hadoop.metrics.spi.OutputRecord; import org.apache.hadoop.metrics.spi.Util; /** * Context for sending metrics to Ganglia. * */ @InterfaceAudience.Public @InterfaceStability.Evolving public class GangliaContext extends AbstractMetricsContext { private static final String PERIOD_PROPERTY = "period"; private static final String SERVERS_PROPERTY = "servers"; private static final String UNITS_PROPERTY = "units"; private static final String SLOPE_PROPERTY = "slope"; private static final String TMAX_PROPERTY = "tmax"; private static final String DMAX_PROPERTY = "dmax"; private static final String MULTICAST_PROPERTY = "multicast"; private static final String MULTICAST_TTL_PROPERTY = "multicast.ttl"; private static final String DEFAULT_UNITS = ""; private static final String DEFAULT_SLOPE = "both"; private static final int DEFAULT_TMAX = 60; private static final int DEFAULT_DMAX = 0; private static final int DEFAULT_PORT = 8649; private static final int BUFFER_SIZE = 1500; // as per libgmond.c private static final int DEFAULT_MULTICAST_TTL = 1; private final Log LOG = LogFactory.getLog(this.getClass()); private static final Map<Class,String> typeTable = new HashMap<Class,String>(5); static { typeTable.put(String.class, "string"); typeTable.put(Byte.class, "int8"); typeTable.put(Short.class, "int16"); typeTable.put(Integer.class, "int32"); typeTable.put(Long.class, "float"); typeTable.put(Float.class, "float"); } protected byte[] buffer = new byte[BUFFER_SIZE]; protected int offset; protected List<? 
extends SocketAddress> metricsServers; private Map<String,String> unitsTable; private Map<String,String> slopeTable; private Map<String,String> tmaxTable; private Map<String,String> dmaxTable; private boolean multicastEnabled; private int multicastTtl; protected DatagramSocket datagramSocket; /** Creates a new instance of GangliaContext */ @InterfaceAudience.Private public GangliaContext() { } @Override @InterfaceAudience.Private public void init(String contextName, ContextFactory factory) { super.init(contextName, factory); parseAndSetPeriod(PERIOD_PROPERTY); metricsServers = Util.parse(getAttribute(SERVERS_PROPERTY), DEFAULT_PORT); unitsTable = getAttributeTable(UNITS_PROPERTY); slopeTable = getAttributeTable(SLOPE_PROPERTY); tmaxTable = getAttributeTable(TMAX_PROPERTY); dmaxTable = getAttributeTable(DMAX_PROPERTY); multicastEnabled = Boolean.parseBoolean(getAttribute(MULTICAST_PROPERTY)); String multicastTtlValue = getAttribute(MULTICAST_TTL_PROPERTY); if (multicastEnabled) { if (multicastTtlValue == null) { multicastTtl = DEFAULT_MULTICAST_TTL; } else { multicastTtl = Integer.parseInt(multicastTtlValue); } } try { if (multicastEnabled) { LOG.info("Enabling multicast for Ganglia with TTL " + multicastTtl); datagramSocket = new MulticastSocket(); ((MulticastSocket) datagramSocket).setTimeToLive(multicastTtl); } else { datagramSocket = new DatagramSocket(); } } catch (IOException e) { LOG.error(e); } } /** * method to close the datagram socket */ @Override public void close() { super.close(); if (datagramSocket != null) { datagramSocket.close(); } } @Override @InterfaceAudience.Private public void emitRecord(String contextName, String recordName, OutputRecord outRec) throws IOException { // Setup so that the records have the proper leader names so they are // unambiguous at the ganglia level, and this prevents a lot of rework StringBuilder sb = new StringBuilder(); sb.append(contextName); sb.append('.'); if (contextName.equals("jvm") && outRec.getTag("processName") != null) { sb.append(outRec.getTag("processName")); sb.append('.'); } sb.append(recordName); sb.append('.'); int sbBaseLen = sb.length(); // emit each metric in turn for (String metricName : outRec.getMetricNames()) { Object metric = outRec.getMetric(metricName); String type = typeTable.get(metric.getClass()); if (type != null) { sb.append(metricName); emitMetric(sb.toString(), type, metric.toString()); sb.setLength(sbBaseLen); } else { LOG.warn("Unknown metrics type: " + metric.getClass()); } } } protected void emitMetric(String name, String type, String value) throws IOException { String units = getUnits(name); int slope = getSlope(name); int tmax = getTmax(name); int dmax = getDmax(name); offset = 0; xdr_int(0); // metric_user_defined xdr_string(type); xdr_string(name); xdr_string(value); xdr_string(units); xdr_int(slope); xdr_int(tmax); xdr_int(dmax); for (SocketAddress socketAddress : metricsServers) { DatagramPacket packet = new DatagramPacket(buffer, offset, socketAddress); datagramSocket.send(packet); } } protected String getUnits(String metricName) { String result = unitsTable.get(metricName); if (result == null) { result = DEFAULT_UNITS; } return result; } protected int getSlope(String metricName) { String slopeString = slopeTable.get(metricName); if (slopeString == null) { slopeString = DEFAULT_SLOPE; } return ("zero".equals(slopeString) ? 
0 : 3); // see gmetric.c } protected int getTmax(String metricName) { if (tmaxTable == null) { return DEFAULT_TMAX; } String tmaxString = tmaxTable.get(metricName); if (tmaxString == null) { return DEFAULT_TMAX; } else { return Integer.parseInt(tmaxString); } } protected int getDmax(String metricName) { String dmaxString = dmaxTable.get(metricName); if (dmaxString == null) { return DEFAULT_DMAX; } else { return Integer.parseInt(dmaxString); } } /** * Puts a string into the buffer by first writing the size of the string * as an int, followed by the bytes of the string, padded if necessary to * a multiple of 4. */ protected void xdr_string(String s) { byte[] bytes = s.getBytes(Charsets.UTF_8); int len = bytes.length; xdr_int(len); System.arraycopy(bytes, 0, buffer, offset, len); offset += len; pad(); } /** * Pads the buffer with zero bytes up to the nearest multiple of 4. */ private void pad() { int newOffset = ((offset + 3) / 4) * 4; while (offset < newOffset) { buffer[offset++] = 0; } } /** * Puts an integer into the buffer as 4 bytes, big-endian. */ protected void xdr_int(int i) { buffer[offset++] = (byte)((i >> 24) & 0xff); buffer[offset++] = (byte)((i >> 16) & 0xff); buffer[offset++] = (byte)((i >> 8) & 0xff); buffer[offset++] = (byte)(i & 0xff); } }
8,445
29.824818
82
java
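GangliaContext above is enabled through the hadoop-metrics.properties file of the older framework, using the "period", "servers", "multicast" and "multicast.ttl" attributes read in init(). A hedged example for a "dfs" context; the host, port and TTL are placeholders.

dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
dfs.period=10
dfs.servers=gmond.example.com:8649
# optional multicast emission
dfs.multicast=true
dfs.multicast.ttl=1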
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
/*
 * GangliaContext31.java
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics.ganglia;

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.SocketAddress;
import java.net.UnknownHostException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.net.DNS;

/**
 * Context for sending metrics to Ganglia version 3.1.x.
 *
 * 3.1.1 has a slightly different wire protocol compared to 3.0.x.
 */
public class GangliaContext31 extends GangliaContext {

  String hostName = "UNKNOWN.example.com";

  private static final Log LOG =
      LogFactory.getLog("org.apache.hadoop.util.GangliaContext31");

  public void init(String contextName, ContextFactory factory) {
    super.init(contextName, factory);

    LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");

    // Take the hostname from the DNS class.
    Configuration conf = new Configuration();

    if (conf.get("slave.host.name") != null) {
      hostName = conf.get("slave.host.name");
    } else {
      try {
        hostName = DNS.getDefaultHost(
            conf.get("dfs.datanode.dns.interface", "default"),
            conf.get("dfs.datanode.dns.nameserver", "default"));
      } catch (UnknownHostException uhe) {
        LOG.error(uhe);
        hostName = "UNKNOWN.example.com";
      }
    }
  }

  protected void emitMetric(String name, String type, String value)
      throws IOException {
    if (name == null) {
      LOG.warn("Metric was emitted with no name.");
      return;
    } else if (value == null) {
      LOG.warn("Metric name " + name + " was emitted with a null value.");
      return;
    } else if (type == null) {
      LOG.warn("Metric name " + name + ", value " + value + " has no type.");
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Emitting metric " + name + ", type " + type +
          ", value " + value + " from hostname " + hostName);
    }

    String units = getUnits(name);
    int slope = getSlope(name);
    int tmax = getTmax(name);
    int dmax = getDmax(name);
    offset = 0;
    String groupName = name.substring(0, name.lastIndexOf("."));

    // The following XDR recipe was done through a careful reading of
    // gm_protocol.x in Ganglia 3.1 and carefully examining the output of
    // the gmetric utility with strace.

    // First we send out a metadata message
    xdr_int(128);          // metric_id = metadata_msg
    xdr_string(hostName);  // hostname
    xdr_string(name);      // metric name
    xdr_int(0);            // spoof = False
    xdr_string(type);      // metric type
    xdr_string(name);      // metric name
    xdr_string(units);     // units
    xdr_int(slope);        // slope
    xdr_int(tmax);         // tmax, the maximum time between metrics
    xdr_int(dmax);         // dmax, the lifetime in seconds of this metric
    xdr_int(1);            /*Num of the entries in extra_value field for Ganglia 3.1.x*/
    xdr_string("GROUP");   /*Group attribute*/
    xdr_string(groupName); /*Group value*/

    for (SocketAddress socketAddress : metricsServers) {
      DatagramPacket packet =
          new DatagramPacket(buffer, offset, socketAddress);
      datagramSocket.send(packet);
    }

    // Now we send out a message with the actual value.
    // Technically, we only need to send out the metadata message once for
    // each metric, but I don't want to have to record which metrics we did and
    // did not send.
    offset = 0;
    xdr_int(133);          // we are sending a string value
    xdr_string(hostName);  // hostName
    xdr_string(name);      // metric name
    xdr_int(0);            // spoof = False
    xdr_string("%s");      // format field
    xdr_string(value);     // metric value

    for (SocketAddress socketAddress : metricsServers) {
      DatagramPacket packet =
          new DatagramPacket(buffer, offset, socketAddress);
      datagramSocket.send(packet);
    }
  }
}
4,846
33.133803
79
java
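Switching the same illustrative configuration to the Ganglia 3.1 wire format only requires pointing the context class at GangliaContext31:

dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
dfs.period=10
dfs.servers=gmond.example.com:8649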
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/jvm/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private @InterfaceStability.Evolving package org.apache.hadoop.metrics.jvm; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,020
43.391304
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/jvm/JvmMetrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics.jvm; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.lang.management.MemoryUsage; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics.MetricsContext; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.MetricsUtil; import org.apache.hadoop.metrics.Updater; import static java.lang.Thread.State.*; import java.lang.management.GarbageCollectorMXBean; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * Singleton class which reports Java Virtual Machine metrics to the metrics API. * Any application can create an instance of this class in order to emit * Java VM metrics. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class JvmMetrics implements Updater { private static final float M = 1024*1024; private static JvmMetrics theInstance = null; private static Log log = LogFactory.getLog(JvmMetrics.class); private MetricsRecord metrics; // garbage collection counters private long gcCount = 0; private long gcTimeMillis = 0; // logging event counters private long fatalCount = 0; private long errorCount = 0; private long warnCount = 0; private long infoCount = 0; public synchronized static JvmMetrics init(String processName, String sessionId) { return init(processName, sessionId, "metrics"); } public synchronized static JvmMetrics init(String processName, String sessionId, String recordName) { if (theInstance != null) { log.info("Cannot initialize JVM Metrics with processName=" + processName + ", sessionId=" + sessionId + " - already initialized"); } else { log.info("Initializing JVM Metrics with processName=" + processName + ", sessionId=" + sessionId); theInstance = new JvmMetrics(processName, sessionId, recordName); } return theInstance; } /** Creates a new instance of JvmMetrics */ private JvmMetrics(String processName, String sessionId, String recordName) { MetricsContext context = MetricsUtil.getContext("jvm"); metrics = MetricsUtil.createRecord(context, recordName); metrics.setTag("processName", processName); metrics.setTag("sessionId", sessionId); context.registerUpdater(this); } /** * This will be called periodically (with the period being configuration * dependent). 
*/ public void doUpdates(MetricsContext context) { doMemoryUpdates(); doGarbageCollectionUpdates(); doThreadUpdates(); doEventCountUpdates(); metrics.update(); } private void doMemoryUpdates() { MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); MemoryUsage memNonHeap = memoryMXBean.getNonHeapMemoryUsage(); MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage(); Runtime runtime = Runtime.getRuntime(); metrics.setMetric("memNonHeapUsedM", memNonHeap.getUsed()/M); metrics.setMetric("memNonHeapCommittedM", memNonHeap.getCommitted()/M); metrics.setMetric("memHeapUsedM", memHeap.getUsed()/M); metrics.setMetric("memHeapCommittedM", memHeap.getCommitted()/M); metrics.setMetric("maxMemoryM", runtime.maxMemory()/M); } private void doGarbageCollectionUpdates() { List<GarbageCollectorMXBean> gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); long count = 0; long timeMillis = 0; for (GarbageCollectorMXBean gcBean : gcBeans) { count += gcBean.getCollectionCount(); timeMillis += gcBean.getCollectionTime(); } metrics.incrMetric("gcCount", (int)(count - gcCount)); metrics.incrMetric("gcTimeMillis", (int)(timeMillis - gcTimeMillis)); gcCount = count; gcTimeMillis = timeMillis; } private void doThreadUpdates() { ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); long threadIds[] = threadMXBean.getAllThreadIds(); ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadIds, 0); int threadsNew = 0; int threadsRunnable = 0; int threadsBlocked = 0; int threadsWaiting = 0; int threadsTimedWaiting = 0; int threadsTerminated = 0; for (ThreadInfo threadInfo : threadInfos) { // threadInfo is null if the thread is not alive or doesn't exist if (threadInfo == null) continue; Thread.State state = threadInfo.getThreadState(); if (state == NEW) { threadsNew++; } else if (state == RUNNABLE) { threadsRunnable++; } else if (state == BLOCKED) { threadsBlocked++; } else if (state == WAITING) { threadsWaiting++; } else if (state == TIMED_WAITING) { threadsTimedWaiting++; } else if (state == TERMINATED) { threadsTerminated++; } } metrics.setMetric("threadsNew", threadsNew); metrics.setMetric("threadsRunnable", threadsRunnable); metrics.setMetric("threadsBlocked", threadsBlocked); metrics.setMetric("threadsWaiting", threadsWaiting); metrics.setMetric("threadsTimedWaiting", threadsTimedWaiting); metrics.setMetric("threadsTerminated", threadsTerminated); } private void doEventCountUpdates() { long newFatal = EventCounter.getFatal(); long newError = EventCounter.getError(); long newWarn = EventCounter.getWarn(); long newInfo = EventCounter.getInfo(); metrics.incrMetric("logFatal", (int)(newFatal - fatalCount)); metrics.incrMetric("logError", (int)(newError - errorCount)); metrics.incrMetric("logWarn", (int)(newWarn - warnCount)); metrics.incrMetric("logInfo", (int)(newInfo - infoCount)); fatalCount = newFatal; errorCount = newError; warnCount = newWarn; infoCount = newInfo; } }
7,501
36.51
86
java
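The JvmMetrics singleton above only needs to be initialized once at process start-up; the registered updater then does the rest. A small sketch, with the process and session names invented.

import org.apache.hadoop.metrics.jvm.JvmMetrics;

public class ExampleDaemon {
  public static void main(String[] args) throws InterruptedException {
    // Registers an updater on the "jvm" metrics context; memory, GC, thread
    // and log-event counts are then emitted every configured period.
    JvmMetrics.init("ExampleDaemon", "session-0");
    Thread.sleep(Long.MAX_VALUE);   // keep the process alive for the demo
  }
}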
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/jvm/EventCounter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.metrics.jvm;

/**
 * A log4j Appender that simply counts logging events in four levels:
 * fatal, error, warn and info.
 */
public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {

  static {
    // The logging system is not started yet.
    System.err.println("WARNING: " + EventCounter.class.getName() +
        " is deprecated. Please use " +
        org.apache.hadoop.log.metrics.EventCounter.class.getName() +
        " in all the log4j.properties files.");
  }
}
1,318
37.794118
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** <h1>Metrics 2.0</h1> <ul id="toc"> <li><a href="#overview">Overview</a></li> <li><a href="#gettingstarted">Getting Started</a></li> <li><a href="#config">Configuration</a></li> <li><a href="#filtering">Metrics Filtering</a></li> <li><a href="#instrumentation">Metrics Instrumentation Strategy</a></li> <li><a href="#migration">Migration from previous system</a></li> </ul> <h2><a name="overview">Overview</a></h2> <p>This package provides a framework for metrics instrumentation and publication. </p> <p>The framework provides a variety of ways to implement metrics instrumentation easily via the simple {@link org.apache.hadoop.metrics2.MetricsSource} interface or the even simpler and more concise and declarative metrics annotations. The consumers of metrics just need to implement the simple {@link org.apache.hadoop.metrics2.MetricsSink} interface. Producers register the metrics sources with a metrics system, while consumers register the sinks. A default metrics system is provided to marshal metrics from sources to sinks based on (per source/sink) configuration options. All the metrics are also published and queryable via the standard JMX MBean interface. This document targets the framework users. Framework developers could also consult the <a href="http://wiki.apache.org/hadoop/HADOOP-6728-MetricsV2">design document</a> for architecture and implementation notes. </p> <h3>Sub-packages</h3> <dl> <dt><code>org.apache.hadoop.metrics2.annotation</code></dt> <dd>Public annotation interfaces for simpler metrics instrumentation. </dd> <dt><code>org.apache.hadoop.metrics2.impl</code></dt> <dd>Implementation classes of the framework for interface and/or abstract classes defined in the top-level package. Sink plugin code usually does not need to reference any class here. </dd> <dt> <code>org.apache.hadoop.metrics2.lib</code></dt> <dd>Convenience classes for implementing metrics sources, including the Mutable[{@link org.apache.hadoop.metrics2.lib.MutableGauge Gauge}*| {@link org.apache.hadoop.metrics2.lib.MutableCounter Counter}*| {@link org.apache.hadoop.metrics2.lib.MutableStat Stat}] and {@link org.apache.hadoop.metrics2.lib.MetricsRegistry}. </dd> <dt> <code>org.apache.hadoop.metrics2.filter</code></dt> <dd>Builtin metrics filter implementations include the {@link org.apache.hadoop.metrics2.filter.GlobFilter} and {@link org.apache.hadoop.metrics2.filter.RegexFilter}. </dd> <dt><code>org.apache.hadoop.metrics2.source</code></dt> <dd>Builtin metrics source implementations including the {@link org.apache.hadoop.metrics2.source.JvmMetrics}. </dd> <dt> <code>org.apache.hadoop.metrics2.sink</code></dt> <dd>Builtin metrics sink implementations including the {@link org.apache.hadoop.metrics2.sink.FileSink}. 
</dd> <dt> <code>org.apache.hadoop.metrics2.util</code></dt> <dd>General utilities for implementing metrics sinks etc., including the {@link org.apache.hadoop.metrics2.util.MetricsCache}. </dd> </dl> <h2><a name="gettingstarted">Getting started</a></h2> <h3>Implementing metrics sources</h3> <table width="99%" border="1" cellspacing="0" cellpadding="4"> <tbody> <tr> <th>Using annotations</th><th>Using MetricsSource interface</th> </tr> <tr><td> <pre> &#064;Metrics(context="MyContext") class MyStat { &#064;Metric("My metric description") public int getMyMetric() { return 42; } }</pre></td><td> <pre> class MyStat implements MetricsSource { &#064;Override public void getMetrics(MetricsCollector collector, boolean all) { collector.addRecord("MyStat") .setContext("MyContext") .addGauge(info("MyMetric", "My metric description"), 42); } } </pre> </td> </tr> </tbody> </table> <p>In this example we introduced the following:</p> <dl> <dt><em>&#064;Metrics</em></dt> <dd>The {@link org.apache.hadoop.metrics2.annotation.Metrics} annotation is used to indicate that the class is a metrics source. </dd> <dt><em>MyContext</em></dt> <dd>The optional context name typically identifies either the application, or a group of modules within an application or library. </dd> <dt><em>MyStat</em></dt> <dd>The class name is used (by default, or specified by name=value parameter in the Metrics annotation) as the metrics record name for which a set of metrics are to be reported. For example, you could have a record named "CacheStat" for reporting a number of statistics relating to the usage of some cache in your application.</dd> <dt><em>&#064;Metric</em></dt> <dd>The {@link org.apache.hadoop.metrics2.annotation.Metric} annotation identifies a particular metric, which in this case, is the result of the method call getMyMetric of the "gauge" (default) type, which means it can vary in both directions, compared with a "counter" type, which can only increase or stay the same. The name of the metric is "MyMetric" (inferred from getMyMetric method name by default.) The 42 here is the value of the metric which can be substituted with any valid java expressions. </dd> </dl> <p>Note, the {@link org.apache.hadoop.metrics2.MetricsSource} interface is more verbose but more flexible, allowing generated metrics names and multiple records. In fact, the annotation interface is implemented with the MetricsSource interface internally.</p> <h3>Implementing metrics sinks</h3> <pre> public class MySink implements MetricsSink { public void putMetrics(MetricsRecord record) { System.out.print(record); } public void init(SubsetConfiguration conf) {} public void flush() {} }</pre> <p>In this example there are three additional concepts:</p> <dl> <dt><em>record</em></dt> <dd>This object corresponds to the record created in metrics sources e.g., the "MyStat" in previous example. </dd> <dt><em>conf</em></dt> <dd>The configuration object for the sink instance with prefix removed. So you can get any sink specific configuration using the usual get* method. </dd> <dt><em>flush</em></dt> <dd>This method is called for each update cycle, which may involve more than one record. The sink should try to flush any buffered metrics to its backend upon the call. But it's not required that the implementation is synchronous. </dd> </dl> <p>In order to make use our <code>MyMetrics</code> and <code>MySink</code>, they need to be hooked up to a metrics system. In this case (and most cases), the <code>DefaultMetricsSystem</code> would suffice. 
</p> <pre> DefaultMetricsSystem.initialize("test"); // called once per application DefaultMetricsSystem.register(new MyStat());</pre> <h2><a name="config">Metrics system configuration</a></h2> <p>Sinks are usually specified in a configuration file, say, "hadoop-metrics2-test.properties", as: </p> <pre> test.sink.mysink0.class=com.example.hadoop.metrics.MySink</pre> <p>The configuration syntax is:</p> <pre> [prefix].[source|sink|jmx|].[instance].[option]</pre> <p>In the previous example, <code>test</code> is the prefix and <code>mysink0</code> is an instance name. <code>DefaultMetricsSystem</code> would try to load <code>hadoop-metrics2-[prefix].properties</code> first, and if not found, try the default <code>hadoop-metrics2.properties</code> in the class path. Note, the <code>[instance]</code> is an arbitrary name to uniquely identify a particular sink instance. The asterisk (<code>*</code>) can be used to specify default options. </p> <p>Consult the metrics instrumentation in jvm, rpc, hdfs and mapred, etc. for more examples. </p> <h2><a name="filtering">Metrics Filtering</a></h2> <p>One of the features of the default metrics system is metrics filtering configuration by source, context, record/tags and metrics. The least expensive way to filter out metrics would be at the source level, e.g., filtering out source named "MyMetrics". The most expensive way would be per metric filtering. </p> <p>Here are some examples:</p> <pre> test.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink test.sink.file0.context=foo</pre> <p>In this example, we configured one sink instance that would accept metrics from context <code>foo</code> only. </p> <pre> *.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter test.*.source.filter.include=foo test.*.source.filter.exclude=bar</pre> <p>In this example, we specify a source filter that includes source <code>foo</code> and excludes <code>bar</code>. When only include patterns are specified, the filter operates in the white listing mode, where only matched sources are included. Likewise, when only exclude patterns are specified, only matched sources are excluded. Sources that are not matched in either patterns are included as well when both patterns are present. Note, the include patterns have precedence over the exclude patterns. </p> <p>Similarly, you can specify the <code>record.filter</code> and <code>metric.filter</code> options, which operate at record and metric level, respectively. Filters can be combined to optimize the filtering efficiency.</p> <h2><a name="instrumentation">Metrics instrumentation strategy</a></h2> In previous examples, we showed a minimal example to use the metrics framework. In a larger system (like Hadoop) that allows custom metrics instrumentation, we recommend the following strategy: <pre> &#064;Metrics(about="My metrics description", context="MyContext") class MyMetrics extends MyInstrumentation { &#064;Metric("My gauge description") MutableGaugeInt gauge0; &#064;Metric("My counter description") MutableCounterLong counter0; &#064;Metric("My rate description") MutableRate rate0; &#064;Override public void setGauge0(int value) { gauge0.set(value); } &#064;Override public void incrCounter0() { counter0.incr(); } &#064;Override public void addRate0(long elapsed) { rate0.add(elapsed); } } </pre> Note, in this example we introduced the following: <dl> <dt><em>MyInstrumentation</em></dt> <dd>This is usually an abstract class (or interface) to define an instrumentation interface (incrCounter0 etc.) 
that allows different implementations. This could be a mechanism to allow different metrics systems to be used at runtime via configuration. </dd> <dt><em>Mutable[Gauge*|Counter*|Rate]</em></dt> <dd>These are library classes to manage mutable metrics for implementations of metrics sources. They produce immutable gauge and counters (Metric[Gauge*|Counter*]) for downstream consumption (sinks) upon <code>snapshot</code>. The <code>MutableRate</code> in particular, provides a way to measure latency and throughput of an operation. In this particular case, it produces a long counter "Rate0NumOps" and double gauge "Rate0AvgTime" when snapshotted. </dd> </dl> <h2><a name="migration">Migration from previous system</a></h2> <p>Users of the previous metrics system would notice the lack of <code>context</code> prefix in the configuration examples. The new metrics system decouples the concept for context (for grouping) with the implementation where a particular context object does the updating and publishing of metrics, which causes problems when you want to have a single context to be consumed by multiple backends. You would also have to configure an implementation instance per context, even if you have a backend that can handle multiple contexts (file, gangalia etc.): </p> <table width="99%" border="1" cellspacing="0" cellpadding="4"> <tbody> <tr> <th width="40%">Before</th><th>After</th> </tr> <tr> <td><pre> context1.class=org.hadoop.metrics.file.FileContext context2.class=org.hadoop.metrics.file.FileContext ... contextn.class=org.hadoop.metrics.file.FileContext</pre> </td> <td><pre> myprefix.sink.file.class=org.hadoop.metrics2.sink.FileSink</pre> </td> </tr> </tbody> </table> <p>In the new metrics system, you can simulate the previous behavior by using the context option in the sink options like the following: </p> <table width="99%" border="1" cellspacing="0" cellpadding="4"> <tbody> <tr> <th width="40%">Before</th><th>After</th> </tr> <tr> <td><pre> context0.class=org.hadoop.metrics.file.FileContext context0.fileName=context0.out context1.class=org.hadoop.metrics.file.FileContext context1.fileName=context1.out ... contextn.class=org.hadoop.metrics.file.FileContext contextn.fileName=contextn.out</pre> </td> <td><pre> myprefix.sink.*.class=org.apache.hadoop.metrics2.sink.FileSink myprefix.sink.file0.context=context0 myprefix.sink.file0.filename=context1.out myprefix.sink.file1.context=context1 myprefix.sink.file1.filename=context1.out ... myprefix.sink.filen.context=contextn myprefix.sink.filen.filename=contextn.out</pre> </td> </tr> </tbody> </table> <p>to send metrics of a particular context to a particular backend. Note, <code>myprefix</code> is an arbitrary prefix for configuration groupings, typically they are the name of a particular process (<code>namenode</code>, <code>jobtracker</code>, etc.) </p> */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
14,867
41.601719
80
java
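The snippets in the package javadoc above can be combined into one compile-ready sketch. The "test" prefix and the MyStat/MyContext names follow the javadoc; the registration call through MetricsSystem#register(name, description, source) is an assumption about the API surface rather than a quote from it.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

@Metrics(context="MyContext")
class MyStat {
  @Metric("My metric description")
  public int getMyMetric() { return 42; }
}

public class MetricsQuickstart {
  public static void main(String[] args) throws InterruptedException {
    // Loads hadoop-metrics2-test.properties (falling back to hadoop-metrics2.properties)
    MetricsSystem ms = DefaultMetricsSystem.initialize("test");
    ms.register("MyStat", "My stat record", new MyStat());
    Thread.sleep(60_000);   // allow at least one collection period to pass
  }
}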
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A general metrics exception wrapper */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MetricsException extends RuntimeException { private static final long serialVersionUID = 1L; /** * Construct the exception with a message * @param message for the exception */ public MetricsException(String message) { super(message); } /** * Construct the exception with a message and a cause * @param message for the exception * @param cause of the exception */ public MetricsException(String message, Throwable cause) { super(message, cause); } /** * Construct the exception with a cause * @param cause of the exception */ public MetricsException(Throwable cause) { super(cause); } }
1,731
29.385965
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsRecordBuilder.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics record builder interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MetricsRecordBuilder { /** * Add a metrics tag * @param info metadata of the tag * @param value of the tag * @return self */ public abstract MetricsRecordBuilder tag(MetricsInfo info, String value); /** * Add an immutable metrics tag object * @param tag a pre-made tag object (potentially save an object construction) * @return self */ public abstract MetricsRecordBuilder add(MetricsTag tag); /** * Add a pre-made immutable metric object * @param metric the pre-made metric to save an object construction * @return self */ public abstract MetricsRecordBuilder add(AbstractMetric metric); /** * Set the context tag * @param value of the context * @return self */ public abstract MetricsRecordBuilder setContext(String value); /** * Add an integer metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addCounter(MetricsInfo info, int value); /** * Add an long metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addCounter(MetricsInfo info, long value); /** * Add a integer gauge metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addGauge(MetricsInfo info, int value); /** * Add a long gauge metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addGauge(MetricsInfo info, long value); /** * Add a float gauge metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addGauge(MetricsInfo info, float value); /** * Add a double gauge metric * @param info metadata of the metric * @param value of the metric * @return self */ public abstract MetricsRecordBuilder addGauge(MetricsInfo info, double value); /** * @return the parent metrics collector object */ public abstract MetricsCollector parent(); /** * Syntactic sugar to add multiple records in a collector in a one liner. * @return the parent metrics collector object */ public MetricsCollector endRecord() { return parent(); } }
3,470
28.415254
80
java
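A sketch of how a MetricsSource exercises the builder above inside getMetrics(); the info() helper is assumed to be org.apache.hadoop.metrics2.lib.Interns#info, and the record, tag and metric names are illustrative.

import static org.apache.hadoop.metrics2.lib.Interns.info;

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;

public class QueueStats implements MetricsSource {
  private volatile int queued;       // current queue depth (gauge)
  private volatile long processed;   // total processed so far (counter)

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    collector.addRecord("QueueStats")
        .setContext("example")
        .tag(info("Queue", "Queue name"), "default")
        .addGauge(info("QueuedRequests", "Requests currently queued"), queued)
        .addCounter(info("ProcessedRequests", "Requests processed"), processed)
        .endRecord();
  }
}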
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystemMXBean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The JMX interface to the metrics system */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsSystemMXBean { /** * Start the metrics system * @throws MetricsException */ public void start(); /** * Stop the metrics system * @throws MetricsException */ public void stop(); /** * Start metrics MBeans * @throws MetricsException */ public void startMetricsMBeans(); /** * Stop metrics MBeans. * Note, it doesn't stop the metrics system control MBean, * i.e this interface. * @throws MetricsException */ public void stopMetricsMBeans(); /** * @return the current config * Avoided getConfig, as it'll turn into a "Config" attribute, * which doesn't support multiple line values in jconsole. * @throws MetricsException */ public String currentConfig(); }
1,830
27.609375
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsPlugin.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The plugin interface for the metrics framework */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsPlugin { /** * Initialize the plugin * @param conf the configuration object for the plugin */ void init(SubsetConfiguration conf); }
1,310
34.432432
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsVisitor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A visitor interface for metrics */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsVisitor { /** * Callback for integer value gauges * @param info the metric info * @param value of the metric */ public void gauge(MetricsInfo info, int value); /** * Callback for long value gauges * @param info the metric info * @param value of the metric */ public void gauge(MetricsInfo info, long value); /** * Callback for float value gauges * @param info the metric info * @param value of the metric */ public void gauge(MetricsInfo info, float value); /** * Callback for double value gauges * @param info the metric info * @param value of the metric */ public void gauge(MetricsInfo info, double value); /** * Callback for integer value counters * @param info the metric info * @param value of the metric */ public void counter(MetricsInfo info, int value); /** * Callback for long value counters * @param info the metric info * @param value of the metric */ public void counter(MetricsInfo info, long value); }
2,112
28.347222
75
java
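A toy implementation of the visitor interface above; dispatching to it via AbstractMetric#visit is the assumed entry point.

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

public class PrintingVisitor implements MetricsVisitor {
  @Override public void gauge(MetricsInfo info, int value)    { print(info, value); }
  @Override public void gauge(MetricsInfo info, long value)   { print(info, value); }
  @Override public void gauge(MetricsInfo info, float value)  { print(info, value); }
  @Override public void gauge(MetricsInfo info, double value) { print(info, value); }
  @Override public void counter(MetricsInfo info, int value)  { print(info, value); }
  @Override public void counter(MetricsInfo info, long value) { print(info, value); }

  private void print(MetricsInfo info, Object value) {
    System.out.println(info.name() + " = " + value);
  }
}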
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics source interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsSource { /** * Get metrics from the source * @param collector to contain the resulting metrics snapshot * @param all if true, return all metrics even if unchanged. */ void getMetrics(MetricsCollector collector, boolean all); }
1,327
34.891892
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsFilter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics filter interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MetricsFilter implements MetricsPlugin { /** * Whether to accept the name * @param name to filter on * @return true to accept; false otherwise. */ public abstract boolean accepts(String name); /** * Whether to accept the tag * @param tag to filter on * @return true to accept; false otherwise */ public abstract boolean accepts(MetricsTag tag); /** * Whether to accept the tags * @param tags to filter on * @return true to accept; false otherwise */ public abstract boolean accepts(Iterable<MetricsTag> tags); /** * Whether to accept the record * @param record to filter on * @return true to accept; false otherwise. */ public boolean accepts(MetricsRecord record) { return accepts(record.name()) && accepts(record.tags()); } }
1,891
29.516129
75
java
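The framework ships GlobFilter and RegexFilter for the class above; the subclass below is a hypothetical prefix filter meant only to show how the three abstract accepts() methods and the plugin init() fit together.

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsTag;

public class PrefixFilter extends MetricsFilter {
  private String prefix = "";

  @Override
  public void init(SubsetConfiguration conf) {
    prefix = conf.getString("prefix", "");   // "prefix" is an invented option name
  }

  @Override
  public boolean accepts(String name) {
    return name.startsWith(prefix);
  }

  @Override
  public boolean accepts(MetricsTag tag) {
    return accepts(tag.name());
  }

  @Override
  public boolean accepts(Iterable<MetricsTag> tags) {
    for (MetricsTag tag : tags) {
      if (!accepts(tag)) {
        return false;
      }
    }
    return true;
  }
}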
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsCollector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics collector interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsCollector { /** * Add a metrics record * @param name of the record * @return a metrics record builder for the record */ public MetricsRecordBuilder addRecord(String name); /** * Add a metrics record * @param info of the record * @return a metrics record builder for the record */ public MetricsRecordBuilder addRecord(MetricsInfo info); }
1,463
32.272727
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSink.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import java.io.Closeable; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics sink interface. <p> * Implementations of this interface consume the {@link MetricsRecord} generated * from {@link MetricsSource}. It registers with {@link MetricsSystem} which * periodically pushes the {@link MetricsRecord} to the sink using * {@link #putMetrics(MetricsRecord)} method. If the implementing class also * implements {@link Closeable}, then the MetricsSystem will close the sink when * it is stopped. */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsSink extends MetricsPlugin { /** * Put a metrics record in the sink * @param record the record to put */ void putMetrics(MetricsRecord record); /** * Flush any buffered metrics */ void flush(); }
1,737
34.469388
80
java
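A correspondingly small MetricsSink sketch showing the init/putMetrics/flush lifecycle. The StderrSink class is hypothetical; it simply prints each record and is meant only to illustrate the interface above.

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;

/** Hypothetical sink that prints every record it receives; useful only for debugging. */
public class StderrSink implements MetricsSink {

  @Override
  public void init(SubsetConfiguration conf) {
    // nothing to configure in this sketch
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    StringBuilder sb = new StringBuilder();
    sb.append(record.timestamp()).append(' ')
      .append(record.context()).append('.').append(record.name());
    for (AbstractMetric metric : record.metrics()) {
      sb.append(' ').append(metric.name()).append('=').append(metric.value());
    }
    System.err.println(sb);
  }

  @Override
  public void flush() {
    // records are written immediately, so there is nothing to flush
  }
}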
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Interface to provide immutable meta info for metrics */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsInfo { /** * @return the name of the metric/tag */ String name(); /** * @return the description of the metric/tag */ String description(); }
1,270
30.775
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricType.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; public enum MetricType { /** * A monotonically increasing metric that can be used * to calculate throughput */ COUNTER, /** * An arbitrary varying metric */ GAUGE }
1,032
31.28125
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import com.google.common.base.Objects; import static com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Immutable tag for metrics (for grouping on host/queue/username etc.) */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MetricsTag implements MetricsInfo { private final MetricsInfo info; private final String value; /** * Construct the tag with name, description and value * @param info of the tag * @param value of the tag */ public MetricsTag(MetricsInfo info, String value) { this.info = checkNotNull(info, "tag info"); this.value = value; } @Override public String name() { return info.name(); } @Override public String description() { return info.description(); } /** * @return the info object of the tag */ public MetricsInfo info() { return info; } /** * Get the value of the tag * @return the value */ public String value() { return value; } @Override public boolean equals(Object obj) { if (obj instanceof MetricsTag) { final MetricsTag other = (MetricsTag) obj; return Objects.equal(info, other.info()) && Objects.equal(value, other.value()); } return false; } @Override public int hashCode() { return Objects.hashCode(info, value); } @Override public String toString() { return Objects.toStringHelper(this) .add("info", info) .add("value", value()) .toString(); } }
2,436
26.382022
75
java
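A tiny usage sketch of MetricsTag built from a hand-rolled MetricsInfo. The Hostname info object and the host value are purely illustrative.

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag;

public class TagExample {
  public static void main(String[] args) {
    MetricsInfo hostInfo = new MetricsInfo() {
      @Override public String name() { return "Hostname"; }
      @Override public String description() { return "Host the metrics were collected on"; }
    };
    MetricsTag tag = new MetricsTag(hostInfo, "worker-17.example.com");
    // Tags are value objects: equality is based on the info and the value.
    System.out.println(tag.name() + "=" + tag.value());
  }
}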
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsRecord.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import java.util.Collection; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * An immutable snapshot of metrics with a timestamp */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface MetricsRecord { /** * Get the timestamp of the metrics * @return the timestamp */ long timestamp(); /** * @return the record name */ String name(); /** * @return the description of the record */ String description(); /** * @return the context name of the record */ String context(); /** * Get the tags of the record * Note: returning a collection instead of iterable as we * need to use tags as keys (hence Collection#hashCode etc.) in maps * @return an unmodifiable collection of tags */ Collection<MetricsTag> tags(); /** * Get the metrics of the record * @return an immutable iterable interface for metrics */ Iterable<AbstractMetric> metrics(); }
1,860
26.776119
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import com.google.common.base.Objects; import static com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The immutable metric */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class AbstractMetric implements MetricsInfo { private final MetricsInfo info; /** * Construct the metric * @param info about the metric */ protected AbstractMetric(MetricsInfo info) { this.info = checkNotNull(info, "metric info"); } @Override public String name() { return info.name(); } @Override public String description() { return info.description(); } protected MetricsInfo info() { return info; } /** * Get the value of the metric * @return the value of the metric */ public abstract Number value(); /** * Get the type of the metric * @return the type of the metric */ public abstract MetricType type(); /** * Accept a visitor interface * @param visitor of the metric */ public abstract void visit(MetricsVisitor visitor); @Override public boolean equals(Object obj) { if (obj instanceof AbstractMetric) { final AbstractMetric other = (AbstractMetric) obj; return Objects.equal(info, other.info()) && Objects.equal(value(), other.value()); } return false; } @Override public int hashCode() { return Objects.hashCode(info, value()); } @Override public String toString() { return Objects.toStringHelper(this) .add("info", info) .add("value", value()) .toString(); } }
2,510
26
75
java
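A concrete AbstractMetric might look roughly like this. The LongGauge class is hypothetical; the visitor callback mirrors the gauge(MetricsInfo, long) method that MetricsVisitor implementations elsewhere in this package (such as GangliaMetricVisitor) provide.

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

/** Hypothetical immutable long-valued gauge. */
public class LongGauge extends AbstractMetric {
  private final long value;

  public LongGauge(MetricsInfo info, long value) {
    super(info);
    this.value = value;
  }

  @Override public Number value() { return value; }

  @Override public MetricType type() { return MetricType.GAUGE; }

  @Override public void visit(MetricsVisitor visitor) {
    // AbstractMetric implements MetricsInfo, so the metric itself is passed as the info.
    visitor.gauge(this, value);
  }
}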
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * The metrics system interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MetricsSystem implements MetricsSystemMXBean { @InterfaceAudience.Private public abstract MetricsSystem init(String prefix); /** * Register a metrics source * @param <T> the actual type of the source object * @param source object to register * @param name of the source. Must be unique or null (then extracted from * the annotations of the source object.) * @param desc the description of the source (or null. See above.) * @return the source object * @exception MetricsException */ public abstract <T> T register(String name, String desc, T source); /** * Unregister a metrics source * @param name of the source. This is the name you use to call register() */ public abstract void unregisterSource(String name); /** * Register a metrics source (deriving name and description from the object) * @param <T> the actual type of the source object * @param source object to register * @return the source object * @exception MetricsException */ public <T> T register(T source) { return register(null, null, source); } /** * @param name of the metrics source * @return the metrics source (potentially wrapped) object */ @InterfaceAudience.Private public abstract MetricsSource getSource(String name); /** * Register a metrics sink * @param <T> the type of the sink * @param sink to register * @param name of the sink. Must be unique. * @param desc the description of the sink * @return the sink * @exception MetricsException */ public abstract <T extends MetricsSink> T register(String name, String desc, T sink); /** * Register a callback interface for JMX events * @param callback the callback object implementing the MBean interface. */ public abstract void register(Callback callback); /** * Requests an immediate publish of all metrics from sources to sinks. * * This is a "soft" request: the expectation is that a best effort will be * done to synchronously snapshot the metrics from all the sources and put * them in all the sinks (including flushing the sinks) before returning to * the caller. If this can't be accomplished in reasonable time it's OK to * return to the caller before everything is done. */ public abstract void publishMetricsNow(); /** * Shutdown the metrics system completely (usually during server shutdown.) * The MetricsSystemMXBean will be unregistered. * @return true if shutdown completed */ public abstract boolean shutdown(); /** * The metrics system callback interface (needed for proxies.) 
*/ public interface Callback { /** * Called before start() */ void preStart(); /** * Called after start() */ void postStart(); /** * Called before stop() */ void preStop(); /** * Called after stop() */ void postStop(); } /** * Convenient abstract class for implementing callback interface */ public static abstract class AbstractCallback implements Callback { @Override public void preStart() {} @Override public void postStart() {} @Override public void preStop() {} @Override public void postStop() {} } }
4,344
29.815603
78
java
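Registration against the MetricsSystem interface above usually goes through the prefix-initialized singleton. This is a sketch under the assumption that DefaultMetricsSystem (org.apache.hadoop.metrics2.lib) provides initialize(String), and it reuses the hypothetical JvmHeapSource and StderrSink classes from the earlier sketches.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrap {
  public static void main(String[] args) {
    // The prefix selects which hadoop-metrics2.properties entries apply.
    MetricsSystem ms = DefaultMetricsSystem.initialize("example");
    ms.register("JvmHeap", "JVM heap usage source", new JvmHeapSource());
    ms.register("stderr", "Debug sink", new StderrSink());
    ms.publishMetricsNow(); // force one synchronous snapshot through all sinks
    ms.shutdown();
  }
}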
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Builtin metrics sinks */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.metrics2.sink; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,055
38.111111
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.commons.io.Charsets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.metrics2.MetricsTag; import java.io.Closeable; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.Writer; import java.net.Socket; /** * A metrics sink that writes to a Graphite server */ @InterfaceAudience.Public @InterfaceStability.Evolving public class GraphiteSink implements MetricsSink, Closeable { private static final Log LOG = LogFactory.getLog(GraphiteSink.class); private static final String SERVER_HOST_KEY = "server_host"; private static final String SERVER_PORT_KEY = "server_port"; private static final String METRICS_PREFIX = "metrics_prefix"; private String metricsPrefix = null; private Graphite graphite = null; @Override public void init(SubsetConfiguration conf) { // Get Graphite host configurations. final String serverHost = conf.getString(SERVER_HOST_KEY); final int serverPort = Integer.parseInt(conf.getString(SERVER_PORT_KEY)); // Get Graphite metrics graph prefix. metricsPrefix = conf.getString(METRICS_PREFIX); if (metricsPrefix == null) metricsPrefix = ""; graphite = new Graphite(serverHost, serverPort); graphite.connect(); } @Override public void putMetrics(MetricsRecord record) { StringBuilder lines = new StringBuilder(); StringBuilder metricsPathPrefix = new StringBuilder(); // Configure the hierarchical place to display the graph. metricsPathPrefix.append(metricsPrefix).append(".") .append(record.context()).append(".").append(record.name()); for (MetricsTag tag : record.tags()) { if (tag.value() != null) { metricsPathPrefix.append("."); metricsPathPrefix.append(tag.name()); metricsPathPrefix.append("="); metricsPathPrefix.append(tag.value()); } } // The record timestamp is in milliseconds while Graphite expects an epoch time in seconds. long timestamp = record.timestamp() / 1000L; // Collect datapoints. for (AbstractMetric metric : record.metrics()) { lines.append( metricsPathPrefix.toString() + "."
+ metric.name().replace(' ', '.')).append(" ") .append(metric.value()).append(" ").append(timestamp) .append("\n"); } try { graphite.write(lines.toString()); } catch (Exception e) { LOG.warn("Error sending metrics to Graphite", e); try { graphite.close(); } catch (Exception e1) { throw new MetricsException("Error closing connection to Graphite", e1); } } } @Override public void flush() { try { graphite.flush(); } catch (Exception e) { LOG.warn("Error flushing metrics to Graphite", e); try { graphite.close(); } catch (Exception e1) { throw new MetricsException("Error closing connection to Graphite", e1); } } } @Override public void close() throws IOException { graphite.close(); } public static class Graphite { private final static int MAX_CONNECTION_FAILURES = 5; private String serverHost; private int serverPort; private Writer writer = null; private Socket socket = null; private int connectionFailures = 0; public Graphite(String serverHost, int serverPort) { this.serverHost = serverHost; this.serverPort = serverPort; } public void connect() { if (isConnected()) { throw new MetricsException("Already connected to Graphite"); } if (tooManyConnectionFailures()) { // return silently (there was ERROR in logs when we reached limit for the first time) return; } try { // Open a connection to Graphite server. socket = new Socket(serverHost, serverPort); writer = new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8); } catch (Exception e) { connectionFailures++; if (tooManyConnectionFailures()) { // first time when connection limit reached, report to logs LOG.error("Too many connection failures, would not try to connect again."); } throw new MetricsException("Error creating connection, " + serverHost + ":" + serverPort, e); } } public void write(String msg) throws IOException { if (!isConnected()) { connect(); } if (isConnected()) { writer.write(msg); } } public void flush() throws IOException { if (isConnected()) { writer.flush(); } } public boolean isConnected() { return socket != null && socket.isConnected() && !socket.isClosed(); } public void close() throws IOException { try { if (writer != null) { writer.close(); } } catch (IOException ex) { if (socket != null) { socket.close(); } } finally { socket = null; writer = null; } } private boolean tooManyConnectionFailures() { return connectionFailures > MAX_CONNECTION_FAILURES; } } }
6,828
32.150485
98
java
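The server_host, server_port and metrics_prefix keys read in init() above would typically be wired up in hadoop-metrics2.properties along these lines. The 'namenode' prefix, host name and port here are placeholders, and the prefix.sink.instance.option layout is the usual metrics2 convention rather than something defined by this class.

*.sink.graphite.class=org.apache.hadoop.metrics2.sink.GraphiteSink
namenode.sink.graphite.server_host=graphite.example.com
namenode.sink.graphite.server_port=2003
namenode.sink.graphite.metrics_prefix=prod.hadoop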
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink; import java.io.Closeable; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintStream; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.metrics2.MetricsTag; /** * A metrics sink that writes to a file */ @InterfaceAudience.Public @InterfaceStability.Evolving public class FileSink implements MetricsSink, Closeable { private static final String FILENAME_KEY = "filename"; private PrintStream writer; @Override public void init(SubsetConfiguration conf) { String filename = conf.getString(FILENAME_KEY); try { writer = filename == null ? System.out : new PrintStream(new FileOutputStream(new File(filename)), true, "UTF-8"); } catch (Exception e) { throw new MetricsException("Error creating "+ filename, e); } } @Override public void putMetrics(MetricsRecord record) { writer.print(record.timestamp()); writer.print(" "); writer.print(record.context()); writer.print("."); writer.print(record.name()); String separator = ": "; for (MetricsTag tag : record.tags()) { writer.print(separator); separator = ", "; writer.print(tag.name()); writer.print("="); writer.print(tag.value()); } for (AbstractMetric metric : record.metrics()) { writer.print(separator); separator = ", "; writer.print(metric.name()); writer.print("="); writer.print(metric.value()); } writer.println(); } @Override public void flush() { writer.flush(); } @Override public void close() throws IOException { writer.close(); } }
2,862
30.119565
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink.ganglia; import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Set; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.impl.MsInfo; import org.apache.hadoop.metrics2.util.MetricsCache; import org.apache.hadoop.metrics2.util.MetricsCache.Record; /** * This code supports Ganglia 3.0 * */ public class GangliaSink30 extends AbstractGangliaSink { public final Log LOG = LogFactory.getLog(this.getClass()); private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix."; private MetricsCache metricsCache = new MetricsCache(); // a key with a NULL value means ALL private Map<String,Set<String>> useTagsMap = new HashMap<String,Set<String>>(); @Override @SuppressWarnings("unchecked") public void init(SubsetConfiguration conf) { super.init(conf); conf.setListDelimiter(','); Iterator<String> it = (Iterator<String>) conf.getKeys(); while (it.hasNext()) { String propertyName = it.next(); if (propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) { String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length()); String[] tags = conf.getStringArray(propertyName); boolean useAllTags = false; Set<String> set = null; if (tags.length > 0) { set = new HashSet<String>(); for (String tag : tags) { tag = tag.trim(); useAllTags |= tag.equals("*"); if (tag.length() > 0) { set.add(tag); } } if (useAllTags) { set = null; } } useTagsMap.put(contextName, set); } } } @InterfaceAudience.Private public void appendPrefix(MetricsRecord record, StringBuilder sb) { String contextName = record.context(); Collection<MetricsTag> tags = record.tags(); if (useTagsMap.containsKey(contextName)) { Set<String> useTags = useTagsMap.get(contextName); for (MetricsTag t : tags) { if (useTags == null || useTags.contains(t.name())) { // the context is always skipped here because it is always added // the hostname is always skipped to avoid case-mismatches // from different DNSes. 
if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) { sb.append('.').append(t.name()).append('=').append(t.value()); } } } } } @Override public void putMetrics(MetricsRecord record) { // The method handles both cases: whether Ganglia supports dense publish // of metrics or sparse (only on change) publish of metrics try { String recordName = record.name(); String contextName = record.context(); StringBuilder sb = new StringBuilder(); sb.append(contextName); sb.append('.'); sb.append(recordName); appendPrefix(record, sb); String groupName = sb.toString(); sb.append('.'); int sbBaseLen = sb.length(); String type = null; GangliaSlope slopeFromMetric = null; GangliaSlope calculatedSlope = null; Record cachedMetrics = null; resetBuffer(); // reset the buffer to the beginning if (!isSupportSparseMetrics()) { // for sending dense metrics, update metrics cache // and get the updated data cachedMetrics = metricsCache.update(record); if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) { for (Map.Entry<String, AbstractMetric> entry : cachedMetrics .metricsEntrySet()) { AbstractMetric metric = entry.getValue(); sb.append(metric.name()); String name = sb.toString(); // visit the metric to identify the Ganglia type and // slope metric.visit(gangliaMetricVisitor); type = gangliaMetricVisitor.getType(); slopeFromMetric = gangliaMetricVisitor.getSlope(); GangliaConf gConf = getGangliaConfForMetric(name); calculatedSlope = calculateSlope(gConf, slopeFromMetric); // send metric to Ganglia emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope); // reset the length of the buffer for next iteration sb.setLength(sbBaseLen); } } } else { // we support sparse updates Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record .metrics(); if (metrics.size() > 0) { // we got metrics. so send the latest for (AbstractMetric metric : record.metrics()) { sb.append(metric.name()); String name = sb.toString(); // visit the metric to identify the Ganglia type and // slope metric.visit(gangliaMetricVisitor); type = gangliaMetricVisitor.getType(); slopeFromMetric = gangliaMetricVisitor.getSlope(); GangliaConf gConf = getGangliaConfForMetric(name); calculatedSlope = calculateSlope(gConf, slopeFromMetric); // send metric to Ganglia emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope); // reset the length of the buffer for next iteration sb.setLength(sbBaseLen); } } } } catch (IOException io) { throw new MetricsException("Failed to putMetrics", io); } } // Calculate the slope from properties and metric private GangliaSlope calculateSlope(GangliaConf gConf, GangliaSlope slopeFromMetric) { if (gConf.getSlope() != null) { // if slope has been specified in properties, use that return gConf.getSlope(); } else if (slopeFromMetric != null) { // slope not specified in properties, use the slope derived from the Metric return slopeFromMetric; } else { return DEFAULT_SLOPE; } } /** * The method sends metrics to Ganglia servers. The method has been taken from * org.apache.hadoop.metrics.ganglia.GangliaContext30 with minimal changes in * order to keep it in sync.
* @param groupName The group name of the metric * @param name The metric name * @param type The type of the metric * @param value The value of the metric * @param gConf The GangliaConf for this metric * @param gSlope The slope for this metric * @throws IOException */ protected void emitMetric(String groupName, String name, String type, String value, GangliaConf gConf, GangliaSlope gSlope) throws IOException { if (name == null) { LOG.warn("Metric was emitted with no name."); return; } else if (value == null) { LOG.warn("Metric name " + name + " was emitted with a null value."); return; } else if (type == null) { LOG.warn("Metric name " + name + ", value " + value + " has no type."); return; } if (LOG.isDebugEnabled()) { LOG.debug("Emitting metric " + name + ", type " + type + ", value " + value + ", slope " + gSlope.name() + " from hostname " + getHostName()); } xdr_int(0); // metric_user_defined xdr_string(type); xdr_string(name); xdr_string(value); xdr_string(gConf.getUnits()); xdr_int(gSlope.ordinal()); xdr_int(gConf.getTmax()); xdr_int(gConf.getDmax()); // send the metric to Ganglia hosts emitToGangliaHosts(); } }
8,862
33.621094
95
java
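The tagsForPrefix handling in init() above corresponds to properties of this shape. The contexts (jvm, dfs), the ProcessName tag and the gmond host are examples only; '*' keeps all tags for a context, and the 'servers' key comes from AbstractGangliaSink below.

*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
*.sink.ganglia.tagsForPrefix.jvm=ProcessName
*.sink.ganglia.tagsForPrefix.dfs=*
namenode.sink.ganglia.servers=gmond.example.com:8649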
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaConf.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink.ganglia; import org.apache.hadoop.metrics2.sink.ganglia.AbstractGangliaSink.GangliaSlope; /** * class which is used to store ganglia properties */ class GangliaConf { private String units = AbstractGangliaSink.DEFAULT_UNITS; private GangliaSlope slope; private int dmax = AbstractGangliaSink.DEFAULT_DMAX; private int tmax = AbstractGangliaSink.DEFAULT_TMAX; @Override public String toString() { StringBuilder buf = new StringBuilder(); buf.append("unit=").append(units).append(", slope=").append(slope) .append(", dmax=").append(dmax).append(", tmax=").append(tmax); return buf.toString(); } /** * @return the units */ String getUnits() { return units; } /** * @param units the units to set */ void setUnits(String units) { this.units = units; } /** * @return the slope */ GangliaSlope getSlope() { return slope; } /** * @param slope the slope to set */ void setSlope(GangliaSlope slope) { this.slope = slope; } /** * @return the dmax */ int getDmax() { return dmax; } /** * @param dmax the dmax to set */ void setDmax(int dmax) { this.dmax = dmax; } /** * @return the tmax */ int getTmax() { return tmax; } /** * @param tmax the tmax to set */ void setTmax(int tmax) { this.tmax = tmax; } }
2,215
22.326316
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink.ganglia; import java.io.IOException; import java.net.*; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.commons.io.Charsets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.metrics2.util.Servers; import org.apache.hadoop.net.DNS; /** * This is the base class for Ganglia sink classes using metrics2. A lot of the code * has been derived from org.apache.hadoop.metrics.ganglia.GangliaContext. * As per the documentation, sink implementations don't have to worry about * thread safety. Hence the code wasn't written for thread safety and should * be modified in case the above assumption changes in the future. */ public abstract class AbstractGangliaSink implements MetricsSink { public final Log LOG = LogFactory.getLog(this.getClass()); /* * Output of "gmetric --help" showing allowable values * -t, --type=STRING * Either string|int8|uint8|int16|uint16|int32|uint32|float|double * -u, --units=STRING Unit of measure for the value e.g. Kilobytes, Celcius * (default='') * -s, --slope=STRING Either zero|positive|negative|both * (default='both') * -x, --tmax=INT The maximum time in seconds between gmetric calls * (default='60') */ public static final String DEFAULT_UNITS = ""; public static final int DEFAULT_TMAX = 60; public static final int DEFAULT_DMAX = 0; public static final GangliaSlope DEFAULT_SLOPE = GangliaSlope.both; public static final int DEFAULT_PORT = 8649; public static final boolean DEFAULT_MULTICAST_ENABLED = false; public static final int DEFAULT_MULTICAST_TTL = 1; public static final String SERVERS_PROPERTY = "servers"; public static final String MULTICAST_ENABLED_PROPERTY = "multicast"; public static final String MULTICAST_TTL_PROPERTY = "multicast.ttl"; public static final int BUFFER_SIZE = 1500; // as per libgmond.c public static final String SUPPORT_SPARSE_METRICS_PROPERTY = "supportsparse"; public static final boolean SUPPORT_SPARSE_METRICS_DEFAULT = false; public static final String EQUAL = "="; private String hostName = "UNKNOWN.example.com"; private DatagramSocket datagramSocket; private List<?
extends SocketAddress> metricsServers; private boolean multicastEnabled; private int multicastTtl; private byte[] buffer = new byte[BUFFER_SIZE]; private int offset; private boolean supportSparseMetrics = SUPPORT_SPARSE_METRICS_DEFAULT; /** * Used for visiting Metrics */ protected final GangliaMetricVisitor gangliaMetricVisitor = new GangliaMetricVisitor(); private SubsetConfiguration conf; private Map<String, GangliaConf> gangliaConfMap; private GangliaConf DEFAULT_GANGLIA_CONF = new GangliaConf(); /** * ganglia slope values which equal the ordinal */ public enum GangliaSlope { zero, // 0 positive, // 1 negative, // 2 both // 3 }; /** * define enum for various type of conf */ public enum GangliaConfType { slope, units, dmax, tmax }; /* * (non-Javadoc) * * @see * org.apache.hadoop.metrics2.MetricsPlugin#init(org.apache.commons.configuration * .SubsetConfiguration) */ public void init(SubsetConfiguration conf) { LOG.debug("Initializing the GangliaSink for Ganglia metrics."); this.conf = conf; // Take the hostname from the DNS class. if (conf.getString("slave.host.name") != null) { hostName = conf.getString("slave.host.name"); } else { try { hostName = DNS.getDefaultHost( conf.getString("dfs.datanode.dns.interface", "default"), conf.getString("dfs.datanode.dns.nameserver", "default")); } catch (UnknownHostException uhe) { LOG.error(uhe); hostName = "UNKNOWN.example.com"; } } // load the ganglia servers from properties metricsServers = Servers.parse(conf.getString(SERVERS_PROPERTY), DEFAULT_PORT); multicastEnabled = conf.getBoolean(MULTICAST_ENABLED_PROPERTY, DEFAULT_MULTICAST_ENABLED); multicastTtl = conf.getInt(MULTICAST_TTL_PROPERTY, DEFAULT_MULTICAST_TTL); // extract the Ganglia conf per metrics gangliaConfMap = new HashMap<String, GangliaConf>(); loadGangliaConf(GangliaConfType.units); loadGangliaConf(GangliaConfType.tmax); loadGangliaConf(GangliaConfType.dmax); loadGangliaConf(GangliaConfType.slope); try { if (multicastEnabled) { LOG.info("Enabling multicast for Ganglia with TTL " + multicastTtl); datagramSocket = new MulticastSocket(); ((MulticastSocket) datagramSocket).setTimeToLive(multicastTtl); } else { datagramSocket = new DatagramSocket(); } } catch (IOException e) { LOG.error(e); } // see if sparseMetrics is supported. Default is false supportSparseMetrics = conf.getBoolean(SUPPORT_SPARSE_METRICS_PROPERTY, SUPPORT_SPARSE_METRICS_DEFAULT); } /* * (non-Javadoc) * * @see org.apache.hadoop.metrics2.MetricsSink#flush() */ public void flush() { // nothing to do as we are not buffering data } // Load the configurations for a conf type private void loadGangliaConf(GangliaConfType gtype) { String propertyarr[] = conf.getStringArray(gtype.name()); if (propertyarr != null && propertyarr.length > 0) { for (String metricNValue : propertyarr) { String metricNValueArr[] = metricNValue.split(EQUAL); if (metricNValueArr.length != 2 || metricNValueArr[0].length() == 0) { LOG.error("Invalid propertylist for " + gtype.name()); } String metricName = metricNValueArr[0].trim(); String metricValue = metricNValueArr[1].trim(); GangliaConf gconf = gangliaConfMap.get(metricName); if (gconf == null) { gconf = new GangliaConf(); gangliaConfMap.put(metricName, gconf); } switch (gtype) { case units: gconf.setUnits(metricValue); break; case dmax: gconf.setDmax(Integer.parseInt(metricValue)); break; case tmax: gconf.setTmax(Integer.parseInt(metricValue)); break; case slope: gconf.setSlope(GangliaSlope.valueOf(metricValue)); break; } } } } /** * Lookup GangliaConf from cache.
If not found, return default values * * @param metricName * @return looked up GangliaConf */ protected GangliaConf getGangliaConfForMetric(String metricName) { GangliaConf gconf = gangliaConfMap.get(metricName); return gconf != null ? gconf : DEFAULT_GANGLIA_CONF; } /** * @return the hostName */ protected String getHostName() { return hostName; } /** * Puts a string into the buffer by first writing the size of the string as an * int, followed by the bytes of the string, padded if necessary to a multiple * of 4. * @param s the string to be written to buffer at offset location */ protected void xdr_string(String s) { byte[] bytes = s.getBytes(Charsets.UTF_8); int len = bytes.length; xdr_int(len); System.arraycopy(bytes, 0, buffer, offset, len); offset += len; pad(); } // Pads the buffer with zero bytes up to the nearest multiple of 4. private void pad() { int newOffset = ((offset + 3) / 4) * 4; while (offset < newOffset) { buffer[offset++] = 0; } } /** * Puts an integer into the buffer as 4 bytes, big-endian. */ protected void xdr_int(int i) { buffer[offset++] = (byte) ((i >> 24) & 0xff); buffer[offset++] = (byte) ((i >> 16) & 0xff); buffer[offset++] = (byte) ((i >> 8) & 0xff); buffer[offset++] = (byte) (i & 0xff); } /** * Sends Ganglia Metrics to the configured hosts * @throws IOException */ protected void emitToGangliaHosts() throws IOException { try { for (SocketAddress socketAddress : metricsServers) { if (socketAddress == null || !(socketAddress instanceof InetSocketAddress)) throw new IllegalArgumentException("Unsupported Address type"); InetSocketAddress inetAddress = (InetSocketAddress)socketAddress; if(inetAddress.isUnresolved()) { throw new UnknownHostException("Unresolved host: " + inetAddress); } DatagramPacket packet = new DatagramPacket(buffer, offset, socketAddress); datagramSocket.send(packet); } } finally { // reset the buffer for the next metric to be built offset = 0; } } /** * Reset the buffer for the next metric to be built */ void resetBuffer() { offset = 0; } /** * @return whether sparse metrics are supported */ protected boolean isSupportSparseMetrics() { return supportSparseMetrics; } /** * Used only by unit test * @param datagramSocket the datagramSocket to set. */ void setDatagramSocket(DatagramSocket datagramSocket) { this.datagramSocket = datagramSocket; } /** * Used only by unit tests * @return the datagramSocket for this sink */ DatagramSocket getDatagramSocket() { return datagramSocket; } }
10,172
31.295238
83
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaMetricVisitor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink.ganglia; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsVisitor; import org.apache.hadoop.metrics2.sink.ganglia.AbstractGangliaSink.GangliaSlope; /** * Since implementations of Metric are not public, hence use a visitor to figure * out the type and slope of the metric. Counters have "positive" slope. */ class GangliaMetricVisitor implements MetricsVisitor { private static final String INT32 = "int32"; private static final String FLOAT = "float"; private static final String DOUBLE = "double"; private String type; private GangliaSlope slope; /** * @return the type of a visited metric */ String getType() { return type; } /** * @return the slope of a visited metric. Slope is positive for counters and * null for others */ GangliaSlope getSlope() { return slope; } @Override public void gauge(MetricsInfo info, int value) { // MetricGaugeInt.class ==> "int32" type = INT32; slope = null; // set to null as cannot figure out from Metric } @Override public void gauge(MetricsInfo info, long value) { // MetricGaugeLong.class ==> "float" type = FLOAT; slope = null; // set to null as cannot figure out from Metric } @Override public void gauge(MetricsInfo info, float value) { // MetricGaugeFloat.class ==> "float" type = FLOAT; slope = null; // set to null as cannot figure out from Metric } @Override public void gauge(MetricsInfo info, double value) { // MetricGaugeDouble.class ==> "double" type = DOUBLE; slope = null; // set to null as cannot figure out from Metric } @Override public void counter(MetricsInfo info, int value) { // MetricCounterInt.class ==> "int32" type = INT32; // counters have positive slope slope = GangliaSlope.positive; } @Override public void counter(MetricsInfo info, long value) { // MetricCounterLong.class ==> "float" type = FLOAT; // counters have positive slope slope = GangliaSlope.positive; } }
2,907
29.291667
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.sink.ganglia; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * This code supports Ganglia 3.1 * */ public class GangliaSink31 extends GangliaSink30 { public final Log LOG = LogFactory.getLog(this.getClass()); /** * The method sends metrics to Ganglia servers. The method has been taken from * org.apache.hadoop.metrics.ganglia.GangliaContext31 with minimal changes in * order to keep it in sync. * @param groupName The group name of the metric * @param name The metric name * @param type The type of the metric * @param value The value of the metric * @param gConf The GangliaConf for this metric * @param gSlope The slope for this metric * @throws IOException */ protected void emitMetric(String groupName, String name, String type, String value, GangliaConf gConf, GangliaSlope gSlope) throws IOException { if (name == null) { LOG.warn("Metric was emitted with no name."); return; } else if (value == null) { LOG.warn("Metric name " + name +" was emitted with a null value."); return; } else if (type == null) { LOG.warn("Metric name " + name + ", value " + value + " has no type."); return; } if (LOG.isDebugEnabled()) { LOG.debug("Emitting metric " + name + ", type " + type + ", value " + value + ", slope " + gSlope.name()+ " from hostname " + getHostName()); } // The following XDR recipe was done through a careful reading of // gm_protocol.x in Ganglia 3.1 and carefully examining the output of // the gmetric utility with strace. // First we send out a metadata message xdr_int(128); // metric_id = metadata_msg xdr_string(getHostName()); // hostname xdr_string(name); // metric name xdr_int(0); // spoof = False xdr_string(type); // metric type xdr_string(name); // metric name xdr_string(gConf.getUnits()); // units xdr_int(gSlope.ordinal()); // slope xdr_int(gConf.getTmax()); // tmax, the maximum time between metrics xdr_int(gConf.getDmax()); // dmax, the maximum data value xdr_int(1); /*Num of the entries in extra_value field for Ganglia 3.1.x*/ xdr_string("GROUP"); /*Group attribute*/ xdr_string(groupName); /*Group value*/ // send the metric to Ganglia hosts emitToGangliaHosts(); // Now we send out a message with the actual value. // Technically, we only need to send out the metadata message once for // each metric, but I don't want to have to record which metrics we did and // did not send. xdr_int(133); // we are sending a string value xdr_string(getHostName()); // hostName xdr_string(name); // metric name xdr_int(0); // spoof = False xdr_string("%s"); // format field xdr_string(value); // metric value // send the metric to Ganglia hosts emitToGangliaHosts(); } }
3,957
37.057692
81
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Builtin metrics filters (to be used in metrics config files) */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.metrics2.filter; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,096
39.62963
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/GlobFilter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.filter; import java.util.regex.Pattern; import org.apache.hadoop.fs.GlobPattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A glob pattern filter for metrics. * * The class name is used in metrics config files */ @InterfaceAudience.Public @InterfaceStability.Evolving public class GlobFilter extends AbstractPatternFilter { @Override protected Pattern compile(String s) { return GlobPattern.compile(s); } }
1,350
31.95122
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.filter; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.Maps; import org.apache.commons.configuration.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsTag; /** * Base class for pattern based filters */ @InterfaceAudience.Private public abstract class AbstractPatternFilter extends MetricsFilter { protected static final String INCLUDE_KEY = "include"; protected static final String EXCLUDE_KEY = "exclude"; protected static final String INCLUDE_TAGS_KEY = "include.tags"; protected static final String EXCLUDE_TAGS_KEY = "exclude.tags"; private Pattern includePattern; private Pattern excludePattern; private final Map<String, Pattern> includeTagPatterns; private final Map<String, Pattern> excludeTagPatterns; private final Pattern tagPattern = Pattern.compile("^(\\w+):(.*)"); AbstractPatternFilter() { includeTagPatterns = Maps.newHashMap(); excludeTagPatterns = Maps.newHashMap(); } @Override public void init(SubsetConfiguration conf) { String patternString = conf.getString(INCLUDE_KEY); if (patternString != null && !patternString.isEmpty()) { setIncludePattern(compile(patternString)); } patternString = conf.getString(EXCLUDE_KEY); if (patternString != null && !patternString.isEmpty()) { setExcludePattern(compile(patternString)); } String[] patternStrings = conf.getStringArray(INCLUDE_TAGS_KEY); if (patternStrings != null && patternStrings.length != 0) { for (String pstr : patternStrings) { Matcher matcher = tagPattern.matcher(pstr); if (!matcher.matches()) { throw new MetricsException("Illegal tag pattern: "+ pstr); } setIncludeTagPattern(matcher.group(1), compile(matcher.group(2))); } } patternStrings = conf.getStringArray(EXCLUDE_TAGS_KEY); if (patternStrings != null && patternStrings.length != 0) { for (String pstr : patternStrings) { Matcher matcher = tagPattern.matcher(pstr); if (!matcher.matches()) { throw new MetricsException("Illegal tag pattern: "+ pstr); } setExcludeTagPattern(matcher.group(1), compile(matcher.group(2))); } } } void setIncludePattern(Pattern includePattern) { this.includePattern = includePattern; } void setExcludePattern(Pattern excludePattern) { this.excludePattern = excludePattern; } void setIncludeTagPattern(String name, Pattern pattern) { includeTagPatterns.put(name, pattern); } void setExcludeTagPattern(String name, Pattern pattern) { excludeTagPatterns.put(name, pattern); } @Override public boolean accepts(MetricsTag tag) { // Accept if whitelisted Pattern ipat = includeTagPatterns.get(tag.name()); if (ipat != null && 
ipat.matcher(tag.value()).matches()) { return true; } // Reject if blacklisted Pattern epat = excludeTagPatterns.get(tag.name()); if (epat != null && epat.matcher(tag.value()).matches()) { return false; } // Reject if no match in whitelist only mode if (!includeTagPatterns.isEmpty() && excludeTagPatterns.isEmpty()) { return false; } return true; } @Override public boolean accepts(Iterable<MetricsTag> tags) { // Accept if any include tag pattern matches for (MetricsTag t : tags) { Pattern pat = includeTagPatterns.get(t.name()); if (pat != null && pat.matcher(t.value()).matches()) { return true; } } // Reject if any exclude tag pattern matches for (MetricsTag t : tags) { Pattern pat = excludeTagPatterns.get(t.name()); if (pat != null && pat.matcher(t.value()).matches()) { return false; } } // Reject if no match in whitelist only mode if (!includeTagPatterns.isEmpty() && excludeTagPatterns.isEmpty()) { return false; } return true; } @Override public boolean accepts(String name) { // Accept if whitelisted if (includePattern != null && includePattern.matcher(name).matches()) { return true; } // Reject if blacklisted if ((excludePattern != null && excludePattern.matcher(name).matches())) { return false; } // Reject if no match in whitelist only mode if (includePattern != null && excludePattern == null) { return false; } return true; } /** * Compile a string pattern in to a pattern object * @param s the string pattern to compile * @return the compiled pattern object */ protected abstract Pattern compile(String s); }
5,618
32.446429
77
java
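The include/exclude and *.tags keys parsed in init() above are normally attached per record/metric in hadoop-metrics2.properties. The 'namenode'/'file' names and the patterns below are placeholders, and the record.filter/metric.filter key layout is the usual metrics2 convention rather than something defined by this class; the Queue:root.* entry shows the TagName:pattern form expected by the tag-pattern keys.

namenode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
namenode.sink.file.filename=namenode-metrics.out
namenode.sink.file.metric.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
namenode.sink.file.metric.filter.include=*Bytes*
namenode.sink.file.metric.filter.exclude=*Failed*
namenode.sink.file.record.filter.class=org.apache.hadoop.metrics2.filter.RegexFilter
namenode.sink.file.record.filter.include.tags=Queue:root.*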
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/RegexFilter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.filter; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A regex pattern filter for metrics */ @InterfaceAudience.Public @InterfaceStability.Evolving public class RegexFilter extends AbstractPatternFilter { @Override protected Pattern compile(String s) { return Pattern.compile(s); } }
1,253
32
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * General helpers for implementing sources and sinks */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.metrics2.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,084
37.75
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.util; import java.util.Arrays; import java.util.LinkedList; import java.util.ListIterator; import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.classification.InterfaceAudience; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; /** * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm * for streaming calculation of targeted high-percentile epsilon-approximate * quantiles. * * This is a generalization of the earlier work by Greenwald and Khanna (GK), * which essentially allows different error bounds on the targeted quantiles, * which allows for far more efficient calculation of high-percentiles. * * See: Cormode, Korn, Muthukrishnan, and Srivastava * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 * * Greenwald and Khanna, * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 * */ @InterfaceAudience.Private public class SampleQuantiles { /** * Total number of items in stream */ private long count = 0; /** * Current list of sampled items, maintained in sorted order with error bounds */ private LinkedList<SampleItem> samples; /** * Buffers incoming items to be inserted in batch. Items are inserted into * the buffer linearly. When the buffer fills, it is flushed into the samples * array in its entirety. */ private long[] buffer = new long[500]; private int bufferCount = 0; /** * Array of Quantiles that we care about, along with desired error. */ private final Quantile quantiles[]; public SampleQuantiles(Quantile[] quantiles) { this.quantiles = quantiles; this.samples = new LinkedList<SampleItem>(); } /** * Specifies the allowable error for this rank, depending on which quantiles * are being targeted. * * This is the f(r_i, n) function from the CKMS paper. It's basically how wide * the range of this rank can be. * * @param rank * the index in the list of samples */ private double allowableError(int rank) { int size = samples.size(); double minError = size + 1; for (Quantile q : quantiles) { double error; if (rank <= q.quantile * size) { error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile); } else { error = (2.0 * q.error * rank) / q.quantile; } if (error < minError) { minError = error; } } return minError; } /** * Add a new value from the stream. * * @param v */ synchronized public void insert(long v) { buffer[bufferCount] = v; bufferCount++; count++; if (bufferCount == buffer.length) { insertBatch(); compress(); } } /** * Merges items from buffer into the samples array in one pass. * This is more efficient than doing an insert on every item. 
*/ private void insertBatch() { if (bufferCount == 0) { return; } Arrays.sort(buffer, 0, bufferCount); // Base case: no samples int start = 0; if (samples.size() == 0) { SampleItem newItem = new SampleItem(buffer[0], 1, 0); samples.add(newItem); start++; } ListIterator<SampleItem> it = samples.listIterator(); SampleItem item = it.next(); for (int i = start; i < bufferCount; i++) { long v = buffer[i]; while (it.nextIndex() < samples.size() && item.value < v) { item = it.next(); } // If we found that bigger item, back up so we insert ourselves before it if (item.value > v) { it.previous(); } // We use different indexes for the edge comparisons, because of the above // if statement that adjusts the iterator int delta; if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) { delta = 0; } else { delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1; } SampleItem newItem = new SampleItem(v, 1, delta); it.add(newItem); item = newItem; } bufferCount = 0; } /** * Try to remove extraneous items from the set of sampled items. This checks * if an item is unnecessary based on the desired error bounds, and merges it * with the adjacent item if it is. */ private void compress() { if (samples.size() < 2) { return; } ListIterator<SampleItem> it = samples.listIterator(); SampleItem prev = null; SampleItem next = it.next(); while (it.hasNext()) { prev = next; next = it.next(); if (prev.g + next.g + next.delta <= allowableError(it.previousIndex())) { next.g += prev.g; // Remove prev. it.remove() kills the last thing returned. it.previous(); it.previous(); it.remove(); // it.next() is now equal to next, skip it back forward again it.next(); } } } /** * Get the estimated value at the specified quantile. * * @param quantile Queried quantile, e.g. 0.50 or 0.99. * @return Estimated value at that quantile. */ private long query(double quantile) { Preconditions.checkState(!samples.isEmpty(), "no data in estimator"); int rankMin = 0; int desired = (int) (quantile * count); ListIterator<SampleItem> it = samples.listIterator(); SampleItem prev = null; SampleItem cur = it.next(); for (int i = 1; i < samples.size(); i++) { prev = cur; cur = it.next(); rankMin += prev.g; if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) { return prev.value; } } // edge case of wanting max value return samples.get(samples.size() - 1).value; } /** * Get a snapshot of the current values of all the tracked quantiles. * * @return snapshot of the tracked quantiles. If no items are added * to the estimator, returns null. 
*/ synchronized public Map<Quantile, Long> snapshot() { // flush the buffer first for best results insertBatch(); if (samples.isEmpty()) { return null; } Map<Quantile, Long> values = new TreeMap<Quantile, Long>(); for (int i = 0; i < quantiles.length; i++) { values.put(quantiles[i], query(quantiles[i].quantile)); } return values; } /** * Returns the number of items that the estimator has processed * * @return count total number of items processed */ synchronized public long getCount() { return count; } /** * Returns the number of samples kept by the estimator * * @return count current number of samples */ @VisibleForTesting synchronized public int getSampleCount() { return samples.size(); } /** * Resets the estimator, clearing out all previously inserted items */ synchronized public void clear() { count = 0; bufferCount = 0; samples.clear(); } @Override synchronized public String toString() { Map<Quantile, Long> data = snapshot(); if (data == null) { return "[no samples]"; } else { return Joiner.on("\n").withKeyValueSeparator(": ").join(data); } } /** * Describes a measured value passed to the estimator, tracking additional * metadata required by the CKMS algorithm. */ private static class SampleItem { /** * Value of the sampled item (e.g. a measured latency value) */ public final long value; /** * Difference between the lowest possible rank of the previous item, and * the lowest possible rank of this item. * * The sum of the g of all previous items yields this item's lower bound. */ public int g; /** * Difference between the item's greatest possible rank and lowest possible * rank. */ public final int delta; public SampleItem(long value, int lowerDelta, int delta) { this.value = value; this.g = lowerDelta; this.delta = delta; } @Override public String toString() { return String.format("%d, %d, %d", value, g, delta); } } }
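/**
 * Illustrative usage sketch only: targets the median and the 99th percentile
 * with different error bounds, which is the point of the CKMS generalization
 * described above. It assumes the Quantile companion class in this package has
 * a (quantile, error) constructor matching the q.quantile / q.error fields read
 * by allowableError(); the latency values are made up.
 */
class SampleQuantilesUsageSketch {
  static void demo() {
    Quantile[] targets = {
        new Quantile(0.50, 0.050),  // median, relatively loose bound
        new Quantile(0.99, 0.001)   // 99th percentile, much tighter bound
    };
    SampleQuantiles estimator = new SampleQuantiles(targets);
    for (long latencyMs = 1; latencyMs <= 10000; latencyMs++) {
      estimator.insert(latencyMs);  // e.g. observed operation latencies
    }
    // Returns null if nothing has been inserted yet.
    Map<Quantile, Long> snapshot = estimator.snapshot();
    System.out.println(snapshot);
  }
}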
8,987
26.486239
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.util; import java.net.InetSocketAddress; import java.util.List; import com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.net.NetUtils; /** * Helpers to handle server addresses */ @InterfaceAudience.Public @InterfaceStability.Evolving public class Servers { /** * This class is not intended to be instantiated */ private Servers() {} /** * Parses a space- and/or comma-separated sequence of server specifications * of the form <i>hostname</i> or <i>hostname:port</i>. If * the specs string is null, defaults to localhost:defaultPort. * * @param specs server specs (see description) * @param defaultPort the default port if not specified * @return a list of InetSocketAddress objects. */ public static List<InetSocketAddress> parse(String specs, int defaultPort) { List<InetSocketAddress> result = Lists.newArrayList(); if (specs == null) { result.add(new InetSocketAddress("localhost", defaultPort)); } else { String[] specStrings = specs.split("[ ,]+"); for (String specString : specStrings) { result.add(NetUtils.createSocketAddr(specString, defaultPort)); } } return result; } }
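/**
 * Illustrative usage sketch only: the host names and ports are arbitrary; they
 * just show the "hostname" and "hostname:port" forms that parse() accepts, and
 * the localhost fallback for a null spec string.
 */
class ServersUsageSketch {
  static void demo() {
    // First entry falls back to the default port, second carries its own.
    List<InetSocketAddress> addrs = Servers.parse("graphite1, graphite2:2004", 2003);
    for (InetSocketAddress addr : addrs) {
      System.out.println(addr.getHostName() + ":" + addr.getPort());
    }
    System.out.println(Servers.parse(null, 2003)); // single localhost:2003 entry
  }
}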
2,172
31.432836
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Contracts.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.util; import org.apache.hadoop.classification.InterfaceAudience; /** * Additional helpers (besides guava Preconditions) for programming by contract */ @InterfaceAudience.Private public class Contracts { private Contracts() {} /** * Check an argument for false conditions * @param <T> type of the argument * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static <T> T checkArg(T arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; } /** * Check an argument for false conditions * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static int checkArg(int arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; } /** * Check an argument for false conditions * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static long checkArg(long arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; } /** * Check an argument for false conditions * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static float checkArg(float arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; } /** * Check an argument for false conditions * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static double checkArg(double arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; } }
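/**
 * Illustrative usage sketch only (the bound and message are arbitrary): checkArg()
 * returns the argument unchanged when the condition holds, and otherwise throws
 * an IllegalArgumentException carrying the message and the offending value.
 */
class ContractsUsageSketch {
  static int positiveInterval(int intervalSecs) {
    return Contracts.checkArg(intervalSecs, intervalSecs > 0,
        "metrics interval should be positive");
  }
}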
3,437
32.705882
79
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.util; import java.util.Collection; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; import com.google.common.base.Objects; import com.google.common.collect.Maps; /** * A metrics cache for sinks that don't support sparse updates. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MetricsCache { static final Log LOG = LogFactory.getLog(MetricsCache.class); static final int MAX_RECS_PER_NAME_DEFAULT = 1000; private final Map<String, RecordCache> map = Maps.newHashMap(); private final int maxRecsPerName; class RecordCache extends LinkedHashMap<Collection<MetricsTag>, Record> { private static final long serialVersionUID = 1L; private boolean gotOverflow = false; @Override protected boolean removeEldestEntry(Map.Entry<Collection<MetricsTag>, Record> eldest) { boolean overflow = size() > maxRecsPerName; if (overflow && !gotOverflow) { LOG.warn("Metrics cache overflow at "+ size() +" for "+ eldest); gotOverflow = true; } return overflow; } } /** * Cached record */ public static class Record { final Map<String, String> tags = Maps.newHashMap(); final Map<String, AbstractMetric> metrics = Maps.newHashMap(); /** * Lookup a tag value * @param key name of the tag * @return the tag value */ public String getTag(String key) { return tags.get(key); } /** * Lookup a metric value * @param key name of the metric * @return the metric value */ public Number getMetric(String key) { AbstractMetric metric = metrics.get(key); return metric != null ? 
metric.value() : null; } /** * Lookup a metric instance * @param key name of the metric * @return the metric instance */ public AbstractMetric getMetricInstance(String key) { return metrics.get(key); } /** * @return the entry set of the tags of the record */ public Set<Map.Entry<String, String>> tags() { return tags.entrySet(); } /** * @deprecated use metricsEntrySet() instead * @return entry set of metrics */ @Deprecated public Set<Map.Entry<String, Number>> metrics() { Map<String, Number> map = new LinkedHashMap<String, Number>( metrics.size()); for (Map.Entry<String, AbstractMetric> mapEntry : metrics.entrySet()) { map.put(mapEntry.getKey(), mapEntry.getValue().value()); } return map.entrySet(); } /** * @return entry set of metrics */ public Set<Map.Entry<String, AbstractMetric>> metricsEntrySet() { return metrics.entrySet(); } @Override public String toString() { return Objects.toStringHelper(this) .add("tags", tags).add("metrics", metrics) .toString(); } } public MetricsCache() { this(MAX_RECS_PER_NAME_DEFAULT); } /** * Construct a metrics cache * @param maxRecsPerName limit of the number records per record name */ public MetricsCache(int maxRecsPerName) { this.maxRecsPerName = maxRecsPerName; } /** * Update the cache and return the current cached record * @param mr the update record * @param includingTags cache tag values (for later lookup by name) if true * @return the updated cache record */ public Record update(MetricsRecord mr, boolean includingTags) { String name = mr.name(); RecordCache recordCache = map.get(name); if (recordCache == null) { recordCache = new RecordCache(); map.put(name, recordCache); } Collection<MetricsTag> tags = mr.tags(); Record record = recordCache.get(tags); if (record == null) { record = new Record(); recordCache.put(tags, record); } for (AbstractMetric m : mr.metrics()) { record.metrics.put(m.name(), m); } if (includingTags) { // mostly for some sinks that include tags as part of a dense schema for (MetricsTag t : mr.tags()) { record.tags.put(t.name(), t.value()); } } return record; } /** * Update the cache and return the current cache record * @param mr the update record * @return the updated cache record */ public Record update(MetricsRecord mr) { return update(mr, false); } /** * Get the cached record * @param name of the record * @param tags of the record * @return the cached record or null */ public Record get(String name, Collection<MetricsTag> tags) { RecordCache rc = map.get(name); if (rc == null) return null; return rc.get(tags); } }
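/**
 * Illustrative usage sketch only: how a sink that needs a dense (non-sparse)
 * view of each record might fold updates through the cache before emitting.
 * The "hostname" tag and the println output are made-up stand-ins for a real
 * sink's destination.
 */
class MetricsCacheUsageSketch {
  private final MetricsCache cache = new MetricsCache();

  void onRecord(MetricsRecord mr) {
    // Merge this (possibly sparse) update into the cached view, keeping tag values.
    MetricsCache.Record cached = cache.update(mr, true);
    String host = cached.getTag("hostname"); // null if that tag was never reported
    for (Map.Entry<String, AbstractMetric> e : cached.metricsEntrySet()) {
      System.out.println(mr.name() + "." + e.getKey() + "=" + e.getValue().value()
          + (host != null ? " host=" + host : ""));
    }
  }
}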
5,877
28.39
77
java