Columns: repo (string, length 1–191) · file (string, length 23–351) · code (string, length 0–5.32M) · file_length (int64, 0–5.32M) · avg_line_length (float64, 0–2.9k) · max_line_length (int64, 0–288k) · extension_type (string, 1 class)
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.util;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Helper to compute running sample stats
 */
@InterfaceAudience.Private
public class SampleStat {
  private final MinMax minmax = new MinMax();
  private long numSamples = 0;
  private double a0, a1, s0, s1;

  /**
   * Construct a new running sample stat
   */
  public SampleStat() {
    a0 = s0 = 0.0;
  }

  public void reset() {
    numSamples = 0;
    a0 = s0 = 0.0;
    minmax.reset();
  }

  // We want to reuse the object, sometimes.
  void reset(long numSamples, double a0, double a1, double s0, double s1,
             MinMax minmax) {
    this.numSamples = numSamples;
    this.a0 = a0;
    this.a1 = a1;
    this.s0 = s0;
    this.s1 = s1;
    this.minmax.reset(minmax);
  }

  /**
   * Copy the values to another SampleStat (saves object creation and GC).
   * @param other the destination to hold our values
   */
  public void copyTo(SampleStat other) {
    other.reset(numSamples, a0, a1, s0, s1, minmax);
  }

  /**
   * Add a sample to the running stat.
   * @param x the sample number
   * @return self
   */
  public SampleStat add(double x) {
    minmax.add(x);
    return add(1, x);
  }

  /**
   * Add some samples and a partial sum to the running stat.
   * Note, min/max is not evaluated using this method.
   * @param nSamples number of samples
   * @param x the partial sum
   * @return self
   */
  public SampleStat add(long nSamples, double x) {
    numSamples += nSamples;
    if (numSamples == 1) {
      a0 = a1 = x;
      s0 = 0.0;
    } else {
      // The Welford method for numerical stability
      a1 = a0 + (x - a0) / numSamples;
      s1 = s0 + (x - a0) * (x - a1);
      a0 = a1;
      s0 = s1;
    }
    return this;
  }

  /**
   * @return the total number of samples
   */
  public long numSamples() {
    return numSamples;
  }

  /**
   * @return the arithmetic mean of the samples
   */
  public double mean() {
    return numSamples > 0 ? a1 : 0.0;
  }

  /**
   * @return the variance of the samples
   */
  public double variance() {
    return numSamples > 1 ? s1 / (numSamples - 1) : 0.0;
  }

  /**
   * @return the standard deviation of the samples
   */
  public double stddev() {
    return Math.sqrt(variance());
  }

  /**
   * @return the minimum value of the samples
   */
  public double min() {
    return minmax.min();
  }

  /**
   * @return the maximum value of the samples
   */
  public double max() {
    return minmax.max();
  }

  @Override
  public String toString() {
    try {
      return "Samples = " + numSamples() +
          " Min = " + min() +
          " Mean = " + mean() +
          " Std Dev = " + stddev() +
          " Max = " + max();
    } catch (Throwable t) {
      return super.toString();
    }
  }

  /**
   * Helper to keep running min/max
   */
  @SuppressWarnings("PublicInnerClass")
  public static class MinMax {
    // Float.MAX_VALUE is used rather than Double.MAX_VALUE, even though the
    // min and max variables are of type double.
    // Float.MAX_VALUE is big enough, and using Double.MAX_VALUE makes
    // Ganglia core-dump due to buffer overflow.
    // The same reasoning applies to the MIN_VALUE counterparts.
    static final double DEFAULT_MIN_VALUE = Float.MAX_VALUE;
    static final double DEFAULT_MAX_VALUE = Float.MIN_VALUE;

    private double min = DEFAULT_MIN_VALUE;
    private double max = DEFAULT_MAX_VALUE;

    public void add(double value) {
      if (value > max) max = value;
      if (value < min) min = value;
    }

    public double min() { return min; }
    public double max() { return max; }

    public void reset() {
      min = DEFAULT_MIN_VALUE;
      max = DEFAULT_MAX_VALUE;
    }

    public void reset(MinMax other) {
      min = other.min();
      max = other.max();
    }
  }
}
file_length: 4,639 · avg_line_length: 23.550265 · max_line_length: 76 · extension_type: java
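The add(long, double) method above is Welford's online update: a1 = a0 + (x - a0)/n accumulates the running mean, and s1 = s0 + (x - a0)(x - a1) accumulates the sum of squared deviations, avoiding the catastrophic cancellation of the naive sum-of-squares approach. A minimal standalone sketch of the same recurrence (the class name and sample values are illustrative, not part of Hadoop):

// Minimal sketch of the Welford recurrence used by SampleStat.add();
// class name and sample values are illustrative, not from Hadoop.
public class WelfordDemo {
  public static void main(String[] args) {
    double[] samples = {3.0, 5.0, 7.0, 7.0, 38.0};
    long n = 0;
    double mean = 0.0;
    double m2 = 0.0; // running sum of squared deviations, like s0/s1
    for (double x : samples) {
      n++;
      double delta = x - mean;
      mean += delta / n;        // a1 = a0 + (x - a0) / numSamples
      m2 += delta * (x - mean); // s1 = s0 + (x - a0) * (x - a1)
    }
    double variance = n > 1 ? m2 / (n - 1) : 0.0; // sample variance, as in variance()
    System.out.printf("mean=%.2f stddev=%.2f%n", mean, Math.sqrt(variance));
  }
}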
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java

package org.apache.hadoop.metrics2.util;

import org.apache.hadoop.classification.InterfaceAudience;

import com.google.common.collect.ComparisonChain;

/**
 * Specifies a quantile (with error bounds) to be watched by a
 * {@link SampleQuantiles} object.
 */
@InterfaceAudience.Private
public class Quantile implements Comparable<Quantile> {
  public final double quantile;
  public final double error;

  public Quantile(double quantile, double error) {
    this.quantile = quantile;
    this.error = error;
  }

  @Override
  public boolean equals(Object aThat) {
    if (this == aThat) {
      return true;
    }
    if (!(aThat instanceof Quantile)) {
      return false;
    }
    Quantile that = (Quantile) aThat;
    long qbits = Double.doubleToLongBits(quantile);
    long ebits = Double.doubleToLongBits(error);
    return qbits == Double.doubleToLongBits(that.quantile)
        && ebits == Double.doubleToLongBits(that.error);
  }

  @Override
  public int hashCode() {
    return (int) (Double.doubleToLongBits(quantile)
        ^ Double.doubleToLongBits(error));
  }

  @Override
  public int compareTo(Quantile other) {
    return ComparisonChain.start()
        .compare(quantile, other.quantile)
        .compare(error, other.error)
        .result();
  }

  @Override
  public String toString() {
    return String.format("%.2f %%ile +/- %.2f%%",
        quantile * 100, error * 100);
  }
}
file_length: 2,221 · avg_line_length: 27.857143 · max_line_length: 75 · extension_type: java
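Quantile's equals() compares raw bits via Double.doubleToLongBits, so two instances are equal exactly when their fields are bitwise-identical doubles, and compareTo orders by quantile first, then error. A small usage sketch (the 50th/99th percentile targets are made-up example values, not mandated by the class):

import org.apache.hadoop.metrics2.util.Quantile;

// Illustrative targets; values are an assumption, not part of the class.
public class QuantileDemo {
  public static void main(String[] args) {
    Quantile median = new Quantile(0.50, 0.050);
    Quantile p99 = new Quantile(0.99, 0.001);
    System.out.println(median.compareTo(p99) < 0);                // true: ordered by quantile first
    System.out.println(median.equals(new Quantile(0.50, 0.050))); // true: bitwise double equality
    System.out.println(p99);                                      // 99.00 %ile +/- 0.10%
  }
}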
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java

package org.apache.hadoop.metrics2.util;

import java.lang.management.ManagementFactory;

import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

/**
 * This util class provides a method to register an MBean using
 * our standard naming convention as described in the doc
 * for {@link #register(String, String, Object)}.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MBeans {
  private static final Log LOG = LogFactory.getLog(MBeans.class);

  /**
   * Register the MBean using our standard MBeanName format
   * "hadoop:service=<serviceName>,name=<nameName>",
   * where <serviceName> and <nameName> are the supplied parameters.
   *
   * @param serviceName the service name component of the MBean name
   * @param nameName the name component of the MBean name
   * @param theMbean the MBean to register
   * @return the name used to register the MBean
   */
  static public ObjectName register(String serviceName, String nameName,
                                    Object theMbean) {
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = getMBeanName(serviceName, nameName);
    try {
      mbs.registerMBean(theMbean, name);
      LOG.debug("Registered "+ name);
      return name;
    } catch (InstanceAlreadyExistsException iaee) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("Failed to register MBean \""+ name + "\"", iaee);
      } else {
        LOG.warn("Failed to register MBean \""+ name
            + "\": Instance already exists.");
      }
    } catch (Exception e) {
      LOG.warn("Failed to register MBean \""+ name + "\"", e);
    }
    return null;
  }

  static public void unregister(ObjectName mbeanName) {
    LOG.debug("Unregistering "+ mbeanName);
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    if (mbeanName == null) {
      LOG.debug("Stacktrace: ", new Throwable());
      return;
    }
    try {
      mbs.unregisterMBean(mbeanName);
    } catch (Exception e) {
      LOG.warn("Error unregistering "+ mbeanName, e);
    }
    DefaultMetricsSystem.removeMBeanName(mbeanName);
  }

  static private ObjectName getMBeanName(String serviceName, String nameName) {
    ObjectName name = null;
    String nameStr = "Hadoop:service="+ serviceName +",name="+ nameName;
    try {
      name = DefaultMetricsSystem.newMBeanName(nameStr);
    } catch (Exception e) {
      LOG.warn("Error creating MBean object name: "+ nameStr, e);
    }
    return name;
  }
}
file_length: 3,561 · avg_line_length: 34.979798 · max_line_length: 79 · extension_type: java
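MBeans.register builds names of the form "Hadoop:service=<serviceName>,name=<nameName>" and hands the bean to the platform MBeanServer. A self-contained sketch of the same naming convention using plain JMX (DemoService, DemoSource, and the Demo bean are hypothetical; the real method additionally routes the name through DefaultMetricsSystem.newMBeanName):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Hypothetical standard MBean, shown only to illustrate the
// "Hadoop:service=<serviceName>,name=<nameName>" name format.
public class MBeansDemo {
  public interface DemoMBean { int getValue(); }
  public static class Demo implements DemoMBean {
    @Override public int getValue() { return 42; }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Same format that MBeans.getMBeanName() builds internally.
    ObjectName name = new ObjectName("Hadoop:service=DemoService,name=DemoSource");
    mbs.registerMBean(new Demo(), name);
    System.out.println("Registered: " + mbs.isRegistered(name)); // true
    mbs.unregisterMBean(name);
  }
}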
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsBufferBuilder.java

package org.apache.hadoop.metrics2.impl;

import java.util.ArrayList;

/**
 * Builder for the immutable metrics buffers
 */
class MetricsBufferBuilder extends ArrayList<MetricsBuffer.Entry> {
  private static final long serialVersionUID = 1L;

  boolean add(String name, Iterable<MetricsRecordImpl> records) {
    return add(new MetricsBuffer.Entry(name, records));
  }

  MetricsBuffer get() {
    return new MetricsBuffer(this);
  }
}
file_length: 1,245 · avg_line_length: 32.675676 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

package org.apache.hadoop.metrics2.impl;

import java.io.StringWriter;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Timer;
import java.util.TimerTask;
import javax.management.ObjectName;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.annotations.VisibleForTesting;
import static com.google.common.base.Preconditions.*;

import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.util.ArithmeticUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MetricsAnnotations;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MetricsSourceBuilder;
import org.apache.hadoop.metrics2.lib.MutableStat;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;

/**
 * A base class for metrics system singletons
 */
@InterfaceAudience.Private
@Metrics(context="metricssystem")
public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
  static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
  static final String MS_NAME = "MetricsSystem";
  static final String MS_STATS_NAME = MS_NAME +",sub=Stats";
  static final String MS_STATS_DESC = "Metrics system metrics";
  static final String MS_CONTROL_NAME = MS_NAME +",sub=Control";
  static final String MS_INIT_MODE_KEY = "hadoop.metrics.init.mode";

  enum InitMode { NORMAL, STANDBY }

  private final Map<String, MetricsSourceAdapter> sources;
  private final Map<String, MetricsSource> allSources;
  private final Map<String, MetricsSinkAdapter> sinks;
  private final Map<String, MetricsSink> allSinks;

  // The callback list is used by register(Callback callback), while
  // the callback map is used by register(String name, String desc, T sink)
  private final List<Callback> callbacks;
  private final Map<String, Callback> namedCallbacks;

  private final MetricsCollectorImpl collector;
  private final MetricsRegistry registry = new MetricsRegistry(MS_NAME);
  @Metric({"Snapshot", "Snapshot stats"}) MutableStat snapshotStat;
  @Metric({"Publish", "Publishing stats"}) MutableStat publishStat;
  @Metric("Dropped updates by all sinks") MutableCounterLong droppedPubAll;

  private final List<MetricsTag> injectedTags;

  // Things that are changed by init()/start()/stop()
  private String prefix;
  private MetricsFilter sourceFilter;
  private MetricsConfig config;
  private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
  private boolean monitoring = false;
  private Timer timer;
  private int period; // seconds
  private long logicalTime; // number of timer invocations * period
  private ObjectName mbeanName;
  private boolean publishSelfMetrics = true;
  private MetricsSourceAdapter sysSource;
  private int refCount = 0; // for mini cluster mode

  /**
   * Construct the metrics system
   * @param prefix for the system
   */
  public MetricsSystemImpl(String prefix) {
    this.prefix = prefix;
    allSources = Maps.newHashMap();
    sources = Maps.newLinkedHashMap();
    allSinks = Maps.newHashMap();
    sinks = Maps.newLinkedHashMap();
    sourceConfigs = Maps.newHashMap();
    sinkConfigs = Maps.newHashMap();
    callbacks = Lists.newArrayList();
    namedCallbacks = Maps.newHashMap();
    injectedTags = Lists.newArrayList();
    collector = new MetricsCollectorImpl();
    if (prefix != null) {
      // prefix could be null for default ctor, which requires init later
      initSystemMBean();
    }
  }

  /**
   * Construct the system without initializing it (no config read etc.).
   */
  public MetricsSystemImpl() {
    this(null);
  }

  /**
   * Initialize the metrics system with a prefix.
   * @param prefix the system will look for configs with the prefix
   * @return the metrics system object itself
   */
  @Override
  public synchronized MetricsSystem init(String prefix) {
    if (monitoring && !DefaultMetricsSystem.inMiniClusterMode()) {
      LOG.warn(this.prefix +" metrics system already initialized!");
      return this;
    }
    this.prefix = checkNotNull(prefix, "prefix");
    ++refCount;
    if (monitoring) {
      // in mini cluster mode
      LOG.info(this.prefix +" metrics system started (again)");
      return this;
    }
    switch (initMode()) {
      case NORMAL:
        try {
          start();
        } catch (MetricsConfigException e) {
          // Configuration errors (e.g., typos) should not be fatal.
          // We can always start the metrics system later via JMX.
          LOG.warn("Metrics system not started: "+ e.getMessage());
          LOG.debug("Stacktrace: ", e);
        }
        break;
      case STANDBY:
        LOG.info(prefix +" metrics system started in standby mode");
    }
    initSystemMBean();
    return this;
  }

  @Override
  public synchronized void start() {
    checkNotNull(prefix, "prefix");
    if (monitoring) {
      LOG.warn(prefix +" metrics system already started!",
               new MetricsException("Illegal start"));
      return;
    }
    for (Callback cb : callbacks) cb.preStart();
    for (Callback cb : namedCallbacks.values()) cb.preStart();
    configure(prefix);
    startTimer();
    monitoring = true;
    LOG.info(prefix +" metrics system started");
    for (Callback cb : callbacks) cb.postStart();
    for (Callback cb : namedCallbacks.values()) cb.postStart();
  }

  @Override
  public synchronized void stop() {
    if (!monitoring && !DefaultMetricsSystem.inMiniClusterMode()) {
      LOG.warn(prefix +" metrics system not yet started!",
               new MetricsException("Illegal stop"));
      return;
    }
    if (!monitoring) {
      // in mini cluster mode
      LOG.info(prefix +" metrics system stopped (again)");
      return;
    }
    for (Callback cb : callbacks) cb.preStop();
    for (Callback cb : namedCallbacks.values()) cb.preStop();
    LOG.info("Stopping "+ prefix +" metrics system...");
    stopTimer();
    stopSources();
    stopSinks();
    clearConfigs();
    monitoring = false;
    LOG.info(prefix +" metrics system stopped.");
    for (Callback cb : callbacks) cb.postStop();
    for (Callback cb : namedCallbacks.values()) cb.postStop();
  }

  @Override
  public synchronized <T> T register(String name, String desc, T source) {
    MetricsSourceBuilder sb = MetricsAnnotations.newSourceBuilder(source);
    final MetricsSource s = sb.build();
    MetricsInfo si = sb.info();
    String name2 = name == null ? si.name() : name;
    final String finalDesc = desc == null ? si.description() : desc;
    final String finalName = // be friendly to non-metrics tests
        DefaultMetricsSystem.sourceName(name2, !monitoring);
    allSources.put(finalName, s);
    LOG.debug(finalName +", "+ finalDesc);
    if (monitoring) {
      registerSource(finalName, finalDesc, s);
    }
    // We want to re-register the source to pick up new config when the
    // metrics system restarts.
    register(finalName, new AbstractCallback() {
      @Override public void postStart() {
        registerSource(finalName, finalDesc, s);
      }
    });
    return source;
  }

  @Override
  public synchronized void unregisterSource(String name) {
    if (sources.containsKey(name)) {
      sources.get(name).stop();
      sources.remove(name);
    }
    if (allSources.containsKey(name)) {
      allSources.remove(name);
    }
    if (namedCallbacks.containsKey(name)) {
      namedCallbacks.remove(name);
    }
  }

  synchronized void registerSource(String name, String desc,
                                   MetricsSource source) {
    checkNotNull(config, "config");
    MetricsConfig conf = sourceConfigs.get(name);
    MetricsSourceAdapter sa = conf != null
        ? new MetricsSourceAdapter(prefix, name, desc, source,
                                   injectedTags, period, conf)
        : new MetricsSourceAdapter(prefix, name, desc, source,
                                   injectedTags, period,
                                   config.subset(SOURCE_KEY));
    sources.put(name, sa);
    sa.start();
    LOG.debug("Registered source "+ name);
  }

  @Override
  public synchronized <T extends MetricsSink>
  T register(final String name, final String description, final T sink) {
    LOG.debug(name +", "+ description);
    if (allSinks.containsKey(name)) {
      LOG.warn("Sink "+ name +" already exists!");
      return sink;
    }
    allSinks.put(name, sink);
    if (config != null) {
      registerSink(name, description, sink);
    }
    // We want to re-register the sink to pick up new config
    // when the metrics system restarts.
    register(name, new AbstractCallback() {
      @Override public void postStart() {
        register(name, description, sink);
      }
    });
    return sink;
  }

  synchronized void registerSink(String name, String desc, MetricsSink sink) {
    checkNotNull(config, "config");
    MetricsConfig conf = sinkConfigs.get(name);
    MetricsSinkAdapter sa = conf != null
        ? newSink(name, desc, sink, conf)
        : newSink(name, desc, sink, config.subset(SINK_KEY));
    sinks.put(name, sa);
    sa.start();
    LOG.info("Registered sink "+ name);
  }

  @Override
  public synchronized void register(final Callback callback) {
    callbacks.add((Callback) getProxyForCallback(callback));
  }

  private synchronized void register(String name, final Callback callback) {
    namedCallbacks.put(name, (Callback) getProxyForCallback(callback));
  }

  private Object getProxyForCallback(final Callback callback) {
    return Proxy.newProxyInstance(callback.getClass().getClassLoader(),
        new Class<?>[] { Callback.class }, new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] args)
              throws Throwable {
            try {
              return method.invoke(callback, args);
            } catch (Exception e) {
              // These are not considered fatal.
              LOG.warn("Caught exception in callback "+ method.getName(), e);
            }
            return null;
          }
        });
  }

  @Override
  public synchronized void startMetricsMBeans() {
    for (MetricsSourceAdapter sa : sources.values()) {
      sa.startMBeans();
    }
  }

  @Override
  public synchronized void stopMetricsMBeans() {
    for (MetricsSourceAdapter sa : sources.values()) {
      sa.stopMBeans();
    }
  }

  @Override
  public synchronized String currentConfig() {
    PropertiesConfiguration saver = new PropertiesConfiguration();
    StringWriter writer = new StringWriter();
    saver.copy(config);
    try {
      saver.save(writer);
    } catch (Exception e) {
      throw new MetricsConfigException("Error stringifying config", e);
    }
    return writer.toString();
  }

  private synchronized void startTimer() {
    if (timer != null) {
      LOG.warn(prefix +" metrics system timer already started!");
      return;
    }
    logicalTime = 0;
    long millis = period * 1000;
    timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
    timer.scheduleAtFixedRate(new TimerTask() {
      public void run() {
        try {
          onTimerEvent();
        } catch (Exception e) {
          LOG.warn("Error invoking metrics timer", e);
        }
      }
    }, millis, millis);
    LOG.info("Scheduled snapshot period at "+ period +" second(s).");
  }

  synchronized void onTimerEvent() {
    logicalTime += period;
    if (sinks.size() > 0) {
      publishMetrics(sampleMetrics(), false);
    }
  }

  /**
   * Requests an immediate publish of all metrics from sources to sinks.
   */
  @Override
  public synchronized void publishMetricsNow() {
    if (sinks.size() > 0) {
      publishMetrics(sampleMetrics(), true);
    }
  }

  /**
   * Sample all the sources for a snapshot of metrics/tags
   * @return the metrics buffer containing the snapshot
   */
  synchronized MetricsBuffer sampleMetrics() {
    collector.clear();
    MetricsBufferBuilder bufferBuilder = new MetricsBufferBuilder();
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
      if (sourceFilter == null || sourceFilter.accepts(entry.getKey())) {
        snapshotMetrics(entry.getValue(), bufferBuilder);
      }
    }
    if (publishSelfMetrics) {
      snapshotMetrics(sysSource, bufferBuilder);
    }
    MetricsBuffer buffer = bufferBuilder.get();
    return buffer;
  }

  private void snapshotMetrics(MetricsSourceAdapter sa,
                               MetricsBufferBuilder bufferBuilder) {
    long startTime = Time.now();
    bufferBuilder.add(sa.name(), sa.getMetrics(collector, true));
    collector.clear();
    snapshotStat.add(Time.now() - startTime);
    LOG.debug("Snapshotted source "+ sa.name());
  }

  /**
   * Publish a metrics snapshot to all the sinks
   * @param buffer the metrics snapshot to publish
   * @param immediate indicates that we should publish metrics immediately
   *                  instead of using a separate thread.
   */
  synchronized void publishMetrics(MetricsBuffer buffer, boolean immediate) {
    int dropped = 0;
    for (MetricsSinkAdapter sa : sinks.values()) {
      long startTime = Time.now();
      boolean result;
      if (immediate) {
        result = sa.putMetricsImmediate(buffer);
      } else {
        result = sa.putMetrics(buffer, logicalTime);
      }
      dropped += result ? 0 : 1;
      publishStat.add(Time.now() - startTime);
    }
    droppedPubAll.incr(dropped);
  }

  private synchronized void stopTimer() {
    if (timer == null) {
      LOG.warn(prefix +" metrics system timer already stopped!");
      return;
    }
    timer.cancel();
    timer = null;
  }

  private synchronized void stopSources() {
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
      MetricsSourceAdapter sa = entry.getValue();
      LOG.debug("Stopping metrics source "+ entry.getKey() +
          ": class="+ sa.source().getClass());
      sa.stop();
    }
    sysSource.stop();
    sources.clear();
  }

  private synchronized void stopSinks() {
    for (Entry<String, MetricsSinkAdapter> entry : sinks.entrySet()) {
      MetricsSinkAdapter sa = entry.getValue();
      LOG.debug("Stopping metrics sink "+ entry.getKey() +
          ": class="+ sa.sink().getClass());
      sa.stop();
    }
    sinks.clear();
  }

  private synchronized void configure(String prefix) {
    config = MetricsConfig.create(prefix);
    configureSinks();
    configureSources();
    configureSystem();
  }

  private synchronized void configureSystem() {
    injectedTags.add(Interns.tag(MsInfo.Hostname, getHostname()));
  }

  private synchronized void configureSinks() {
    sinkConfigs = config.getInstanceConfigs(SINK_KEY);
    int confPeriod = 0;
    for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
      MetricsConfig conf = entry.getValue();
      int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
      confPeriod = confPeriod == 0 ? sinkPeriod
          : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
      String clsName = conf.getClassName("");
      if (clsName == null) continue; // sink can be registered later on
      String sinkName = entry.getKey();
      try {
        MetricsSinkAdapter sa = newSink(sinkName,
            conf.getString(DESC_KEY, sinkName), conf);
        sa.start();
        sinks.put(sinkName, sa);
      } catch (Exception e) {
        LOG.warn("Error creating sink '"+ sinkName +"'", e);
      }
    }
    period = confPeriod > 0 ? confPeriod
        : config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
  }

  static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
                                    MetricsConfig conf) {
    return new MetricsSinkAdapter(name, desc, sink, conf.getString(CONTEXT_KEY),
        conf.getFilter(SOURCE_FILTER_KEY),
        conf.getFilter(RECORD_FILTER_KEY),
        conf.getFilter(METRIC_FILTER_KEY),
        conf.getInt(PERIOD_KEY, PERIOD_DEFAULT),
        conf.getInt(QUEUE_CAPACITY_KEY, QUEUE_CAPACITY_DEFAULT),
        conf.getInt(RETRY_DELAY_KEY, RETRY_DELAY_DEFAULT),
        conf.getFloat(RETRY_BACKOFF_KEY, RETRY_BACKOFF_DEFAULT),
        conf.getInt(RETRY_COUNT_KEY, RETRY_COUNT_DEFAULT));
  }

  static MetricsSinkAdapter newSink(String name, String desc,
                                    MetricsConfig conf) {
    return newSink(name, desc, (MetricsSink) conf.getPlugin(""), conf);
  }

  private void configureSources() {
    sourceFilter = config.getFilter(PREFIX_DEFAULT + SOURCE_FILTER_KEY);
    sourceConfigs = config.getInstanceConfigs(SOURCE_KEY);
    registerSystemSource();
  }

  private void clearConfigs() {
    sinkConfigs.clear();
    sourceConfigs.clear();
    injectedTags.clear();
    config = null;
  }

  static String getHostname() {
    try {
      return InetAddress.getLocalHost().getHostName();
    } catch (Exception e) {
      LOG.error("Error getting localhost name. Using 'localhost'...", e);
    }
    return "localhost";
  }

  private void registerSystemSource() {
    MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
    sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
        MetricsAnnotations.makeSource(this), injectedTags, period,
        sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
    sysSource.start();
  }

  @Override
  public synchronized void getMetrics(MetricsCollector builder, boolean all) {
    MetricsRecordBuilder rb = builder.addRecord(MS_NAME)
        .addGauge(MsInfo.NumActiveSources, sources.size())
        .addGauge(MsInfo.NumAllSources, allSources.size())
        .addGauge(MsInfo.NumActiveSinks, sinks.size())
        .addGauge(MsInfo.NumAllSinks, allSinks.size());
    for (MetricsSinkAdapter sa : sinks.values()) {
      sa.snapshot(rb, all);
    }
    registry.snapshot(rb, all);
  }

  private void initSystemMBean() {
    checkNotNull(prefix, "prefix should not be null here!");
    if (mbeanName == null) {
      mbeanName = MBeans.register(prefix, MS_CONTROL_NAME, this);
    }
  }

  @Override
  public synchronized boolean shutdown() {
    LOG.debug("refCount="+ refCount);
    if (refCount <= 0) {
      LOG.debug("Redundant shutdown", new Throwable());
      return true; // already shutdown
    }
    if (--refCount > 0) return false;
    if (monitoring) {
      try {
        stop();
      } catch (Exception e) {
        LOG.warn("Error stopping the metrics system", e);
      }
    }
    allSources.clear();
    allSinks.clear();
    callbacks.clear();
    namedCallbacks.clear();
    if (mbeanName != null) {
      MBeans.unregister(mbeanName);
      mbeanName = null;
    }
    LOG.info(prefix +" metrics system shutdown complete.");
    return true;
  }

  public MetricsSource getSource(String name) {
    return allSources.get(name);
  }

  @VisibleForTesting
  MetricsSourceAdapter getSourceAdapter(String name) {
    return sources.get(name);
  }

  private InitMode initMode() {
    LOG.debug("from system property: "+ System.getProperty(MS_INIT_MODE_KEY));
    LOG.debug("from environment variable: "+ System.getenv(MS_INIT_MODE_KEY));
    String m = System.getProperty(MS_INIT_MODE_KEY);
    String m2 = m == null ? System.getenv(MS_INIT_MODE_KEY) : m;
    return InitMode.valueOf(
        StringUtils.toUpperCase((m2 == null ? InitMode.NORMAL.name() : m2)));
  }
}
file_length: 21,244 · avg_line_length: 32.883573 · max_line_length: 81 · extension_type: java
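configureSinks() above derives the shared timer period as the greatest common divisor of all per-sink periods, so one timer can serve sinks with different cadences (each sink then fires on its own multiple of the timer period via the logicalTime % period check in MetricsSinkAdapter.putMetrics). A sketch of that arithmetic (the 10/60/25-second sink periods are made-up example values):

import org.apache.commons.math3.util.ArithmeticUtils;

// Sketch of the period derivation in configureSinks(); the sink
// periods below are illustrative, not from any real config.
public class PeriodDemo {
  public static void main(String[] args) {
    int[] sinkPeriods = {10, 60, 25}; // seconds, per sink
    int confPeriod = 0;
    for (int p : sinkPeriods) {
      confPeriod = confPeriod == 0 ? p : ArithmeticUtils.gcd(confPeriod, p);
    }
    System.out.println("timer period = " + confPeriod + "s"); // 5s
  }
}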
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/package-info.java

/**
 * A metrics system implementation
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
file_length: 1,066 · avg_line_length: 38.518519 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricGaugeLong.java

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricGaugeLong extends AbstractMetric {
  final long value;

  MetricGaugeLong(MetricsInfo info, long value) {
    super(info);
    this.value = value;
  }

  @Override
  public Long value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.GAUGE;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.gauge(this, value);
  }
}
file_length: 1,438 · avg_line_length: 28.367347 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java

package org.apache.hadoop.metrics2.impl;

import com.google.common.base.Objects;
import com.google.common.collect.Iterables;

import org.apache.hadoop.metrics2.MetricsRecord;

abstract class AbstractMetricsRecord implements MetricsRecord {

  @Override
  public boolean equals(Object obj) {
    if (obj instanceof MetricsRecord) {
      final MetricsRecord other = (MetricsRecord) obj;
      return Objects.equal(timestamp(), other.timestamp()) &&
             Objects.equal(name(), other.name()) &&
             Objects.equal(description(), other.description()) &&
             Objects.equal(tags(), other.tags()) &&
             Iterables.elementsEqual(metrics(), other.metrics());
    }
    return false;
  }

  // Should make sense most of the time when the record is used as a key
  @Override
  public int hashCode() {
    return Objects.hashCode(name(), description(), tags());
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("timestamp", timestamp())
        .add("name", name())
        .add("description", description())
        .add("tags", tags())
        .add("metrics", Iterables.toString(metrics()))
        .toString();
  }
}
file_length: 1,993 · avg_line_length: 35.254545 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/SinkQueue.java

package org.apache.hadoop.metrics2.impl;

import java.util.ConcurrentModificationException;

/**
 * A half-blocking (nonblocking for producers, blocking for consumers) queue
 * for metrics sinks.
 *
 * New elements are dropped when the queue is full to preserve "interesting"
 * elements at the onset of queue filling events.
 */
class SinkQueue<T> {

  interface Consumer<T> {
    void consume(T object) throws InterruptedException;
  }

  // A fixed size circular buffer to minimize garbage
  private final T[] data;
  private int head; // head position
  private int tail; // tail position
  private int size; // number of elements
  private Thread currentConsumer = null;

  @SuppressWarnings("unchecked")
  SinkQueue(int capacity) {
    this.data = (T[]) new Object[Math.max(1, capacity)];
    head = tail = size = 0;
  }

  synchronized boolean enqueue(T e) {
    if (data.length == size) {
      return false;
    }
    ++size;
    tail = (tail + 1) % data.length;
    data[tail] = e;
    notify();
    return true;
  }

  /**
   * Consume one element; will block if queue is empty.
   * Only one consumer at a time is allowed.
   * @param consumer the consumer callback object
   */
  void consume(Consumer<T> consumer) throws InterruptedException {
    T e = waitForData();
    try {
      consumer.consume(e); // can take forever
      _dequeue();
    } finally {
      clearConsumerLock();
    }
  }

  /**
   * Consume all the elements; will block if queue is empty.
   * @param consumer the consumer callback object
   * @throws InterruptedException
   */
  void consumeAll(Consumer<T> consumer) throws InterruptedException {
    waitForData();
    try {
      for (int i = size(); i-- > 0; ) {
        consumer.consume(front()); // can take forever
        _dequeue();
      }
    } finally {
      clearConsumerLock();
    }
  }

  /**
   * Dequeue one element from head of the queue; will block if queue is empty.
   * @return the first element
   * @throws InterruptedException
   */
  synchronized T dequeue() throws InterruptedException {
    checkConsumer();
    while (0 == size) {
      wait();
    }
    return _dequeue();
  }

  private synchronized T waitForData() throws InterruptedException {
    checkConsumer();
    while (0 == size) {
      wait();
    }
    setConsumerLock();
    return front();
  }

  private synchronized void checkConsumer() {
    if (currentConsumer != null) {
      throw new ConcurrentModificationException("The "+
          currentConsumer.getName() +" thread is consuming the queue.");
    }
  }

  private synchronized void setConsumerLock() {
    currentConsumer = Thread.currentThread();
  }

  private synchronized void clearConsumerLock() {
    currentConsumer = null;
  }

  private synchronized T _dequeue() {
    if (0 == size) {
      throw new IllegalStateException("Size must > 0 here.");
    }
    --size;
    head = (head + 1) % data.length;
    T ret = data[head];
    data[head] = null; // hint to gc
    return ret;
  }

  synchronized T front() {
    return data[(head + 1) % data.length];
  }

  synchronized T back() {
    return data[tail];
  }

  synchronized void clear() {
    checkConsumer();
    for (int i = data.length; i-- > 0; ) {
      data[i] = null;
    }
    size = 0;
  }

  synchronized int size() {
    return size;
  }

  int capacity() {
    return data.length;
  }
}
file_length: 4,184 · avg_line_length: 23.473684 · max_line_length: 77 · extension_type: java
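SinkQueue trades completeness for liveness: enqueue never blocks and simply returns false when the ring buffer is full, so a slow sink drops the newest buffers instead of stalling producers. A usage sketch (SinkQueue is package-private, so this demo class is assumed to live in org.apache.hadoop.metrics2.impl; the capacity and payloads are illustrative):

package org.apache.hadoop.metrics2.impl;

// Usage sketch only; placed in the impl package because SinkQueue
// is package-private. Capacity and string payloads are made up.
class SinkQueueDemo {
  public static void main(String[] args) throws InterruptedException {
    SinkQueue<String> q = new SinkQueue<String>(2);
    q.enqueue("a");                     // accepted
    q.enqueue("b");                     // accepted
    System.out.println(q.enqueue("c")); // false: a full queue drops new elements
    q.consumeAll(new SinkQueue.Consumer<String>() {
      @Override public void consume(String s) {
        System.out.println(s);          // prints "a" then "b"
      }
    });
  }
}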
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java

package org.apache.hadoop.metrics2.impl;

import com.google.common.base.Objects;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;

/**
 * Metrics system related metrics info instances
 */
@InterfaceAudience.Private
public enum MsInfo implements MetricsInfo {
  NumActiveSources("Number of active metrics sources"),
  NumAllSources("Number of all registered metrics sources"),
  NumActiveSinks("Number of active metrics sinks"),
  NumAllSinks("Number of all registered metrics sinks"),
  Context("Metrics context"),
  Hostname("Local hostname"),
  SessionId("Session ID"),
  ProcessName("Process name");

  private final String desc;

  MsInfo(String desc) {
    this.desc = desc;
  }

  @Override
  public String description() {
    return desc;
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("name", name()).add("description", desc)
        .toString();
  }
}
file_length: 1,775 · avg_line_length: 30.714286 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricGaugeInt.java

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricGaugeInt extends AbstractMetric {
  final int value;

  MetricGaugeInt(MetricsInfo info, int value) {
    super(info);
    this.value = value;
  }

  @Override
  public Integer value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.GAUGE;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.gauge(this, value);
  }
}
file_length: 1,437 · avg_line_length: 28.346939 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java

package org.apache.hadoop.metrics2.impl;

import java.util.Iterator;
import java.util.List;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter;
import static org.apache.hadoop.metrics2.lib.Interns.*;

@InterfaceAudience.Private
@VisibleForTesting
public class MetricsCollectorImpl implements MetricsCollector,
    Iterable<MetricsRecordBuilderImpl> {

  private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();
  private MetricsFilter recordFilter, metricFilter;

  @Override
  public MetricsRecordBuilderImpl addRecord(MetricsInfo info) {
    boolean acceptable = recordFilter == null ||
                         recordFilter.accepts(info.name());
    MetricsRecordBuilderImpl rb = new MetricsRecordBuilderImpl(this, info,
        recordFilter, metricFilter, acceptable);
    if (acceptable) rbs.add(rb);
    return rb;
  }

  @Override
  public MetricsRecordBuilderImpl addRecord(String name) {
    return addRecord(info(name, name +" record"));
  }

  public List<MetricsRecordImpl> getRecords() {
    List<MetricsRecordImpl> recs = Lists.newArrayListWithCapacity(rbs.size());
    for (MetricsRecordBuilderImpl rb : rbs) {
      MetricsRecordImpl mr = rb.getRecord();
      if (mr != null) {
        recs.add(mr);
      }
    }
    return recs;
  }

  @Override
  public Iterator<MetricsRecordBuilderImpl> iterator() {
    return rbs.iterator();
  }

  @InterfaceAudience.Private
  public void clear() {
    rbs.clear();
  }

  MetricsCollectorImpl setRecordFilter(MetricsFilter rf) {
    recordFilter = rf;
    return this;
  }

  MetricsCollectorImpl setMetricFilter(MetricsFilter mf) {
    metricFilter = mf;
    return this;
  }
}
file_length: 2,709 · avg_line_length: 30.882353 · max_line_length: 78 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricGaugeFloat.java

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricGaugeFloat extends AbstractMetric {
  final float value;

  MetricGaugeFloat(MetricsInfo info, float value) {
    super(info);
    this.value = value;
  }

  @Override
  public Float value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.GAUGE;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.gauge(this, value);
  }
}
file_length: 1,443 · avg_line_length: 28.469388 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsBuffer.java

package org.apache.hadoop.metrics2.impl;

import java.util.Iterator;

/**
 * An immutable element for the sink queues.
 */
class MetricsBuffer implements Iterable<MetricsBuffer.Entry> {

  private final Iterable<Entry> mutable;

  MetricsBuffer(Iterable<MetricsBuffer.Entry> mutable) {
    this.mutable = mutable;
  }

  @Override
  public Iterator<Entry> iterator() {
    return mutable.iterator();
  }

  static class Entry {
    private final String sourceName;
    private final Iterable<MetricsRecordImpl> records;

    Entry(String name, Iterable<MetricsRecordImpl> records) {
      sourceName = name;
      this.records = records;
    }

    String name() {
      return sourceName;
    }

    Iterable<MetricsRecordImpl> records() {
      return records;
    }
  }
}
file_length: 1,584 · avg_line_length: 26.327586 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfigException.java

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.MetricsException;

/**
 * The metrics configuration runtime exception
 */
class MetricsConfigException extends MetricsException {
  private static final long serialVersionUID = 1L;

  MetricsConfigException(String message) {
    super(message);
  }

  MetricsConfigException(String message, Throwable cause) {
    super(message, cause);
  }

  MetricsConfigException(Throwable cause) {
    super(cause);
  }
}
file_length: 1,295 · avg_line_length: 30.609756 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricGaugeDouble.java

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricGaugeDouble extends AbstractMetric {
  final double value;

  MetricGaugeDouble(MetricsInfo info, double value) {
    super(info);
    this.value = value;
  }

  @Override
  public Double value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.GAUGE;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.gauge(this, value);
  }
}
file_length: 1,448 · avg_line_length: 28.571429 · max_line_length: 75 · extension_type: java
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java

package org.apache.hadoop.metrics2.impl;

import java.io.Closeable;
import java.util.Random;
import java.util.concurrent.*;

import static com.google.common.base.Preconditions.*;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableStat;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.metrics2.util.Contracts.*;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.util.Time;

/**
 * An adapter class for metrics sink and associated filters
 */
class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
  private final Log LOG = LogFactory.getLog(MetricsSinkAdapter.class);
  private final String name, description, context;
  private final MetricsSink sink;
  private final MetricsFilter sourceFilter, recordFilter, metricFilter;
  private final SinkQueue<MetricsBuffer> queue;
  private final Thread sinkThread;
  private volatile boolean stopping = false;
  private volatile boolean inError = false;
  private final int period, firstRetryDelay, retryCount;
  private final long oobPutTimeout;
  private final float retryBackoff;
  private final MetricsRegistry registry = new MetricsRegistry("sinkadapter");
  private final MutableStat latency;
  private final MutableCounterInt dropped;
  private final MutableGaugeInt qsize;

  MetricsSinkAdapter(String name, String description, MetricsSink sink,
                     String context, MetricsFilter sourceFilter,
                     MetricsFilter recordFilter, MetricsFilter metricFilter,
                     int period, int queueCapacity, int retryDelay,
                     float retryBackoff, int retryCount) {
    this.name = checkNotNull(name, "name");
    this.description = description;
    this.sink = checkNotNull(sink, "sink object");
    this.context = context;
    this.sourceFilter = sourceFilter;
    this.recordFilter = recordFilter;
    this.metricFilter = metricFilter;
    this.period = checkArg(period, period > 0, "period");
    firstRetryDelay = checkArg(retryDelay, retryDelay > 0, "retry delay");
    this.retryBackoff = checkArg(retryBackoff, retryBackoff > 1,
                                 "retry backoff");
    oobPutTimeout = (long)
        (firstRetryDelay * Math.pow(retryBackoff, retryCount) * 1000);
    this.retryCount = retryCount;
    this.queue = new SinkQueue<MetricsBuffer>(
        checkArg(queueCapacity, queueCapacity > 0, "queue capacity"));
    latency = registry.newRate("Sink_"+ name, "Sink end to end latency",
                               false);
    dropped = registry.newCounter("Sink_"+ name +"Dropped",
                                  "Dropped updates per sink", 0);
    qsize = registry.newGauge("Sink_"+ name + "Qsize", "Queue size", 0);

    sinkThread = new Thread() {
      @Override public void run() {
        publishMetricsFromQueue();
      }
    };
    sinkThread.setName(name);
    sinkThread.setDaemon(true);
  }

  boolean putMetrics(MetricsBuffer buffer, long logicalTime) {
    if (logicalTime % period == 0) {
      LOG.debug("enqueue, logicalTime="+ logicalTime);
      if (queue.enqueue(buffer)) {
        refreshQueueSizeGauge();
        return true;
      }
      dropped.incr();
      return false;
    }
    return true; // OK
  }

  public boolean putMetricsImmediate(MetricsBuffer buffer) {
    WaitableMetricsBuffer waitableBuffer = new WaitableMetricsBuffer(buffer);
    if (queue.enqueue(waitableBuffer)) {
      refreshQueueSizeGauge();
    } else {
      LOG.warn(name + " has a full queue and can't consume the given metrics.");
      dropped.incr();
      return false;
    }
    if (!waitableBuffer.waitTillNotified(oobPutTimeout)) {
      LOG.warn(name +
          " couldn't fulfill an immediate putMetrics request in time." +
          " Abandoning.");
      return false;
    }
    return true;
  }

  void publishMetricsFromQueue() {
    int retryDelay = firstRetryDelay;
    int n = retryCount;
    int minDelay = Math.min(500, retryDelay * 1000); // millis
    Random rng = new Random(System.nanoTime());
    while (!stopping) {
      try {
        queue.consumeAll(this);
        refreshQueueSizeGauge();
        retryDelay = firstRetryDelay;
        n = retryCount;
        inError = false;
      } catch (InterruptedException e) {
        LOG.info(name +" thread interrupted.");
      } catch (Exception e) {
        if (n > 0) {
          int retryWindow = Math.max(0, 1000 / 2 * retryDelay - minDelay);
          int awhile = rng.nextInt(retryWindow) + minDelay;
          if (!inError) {
            LOG.error("Got sink exception, retry in "+ awhile +"ms", e);
          }
          retryDelay *= retryBackoff;
          try {
            Thread.sleep(awhile);
          } catch (InterruptedException e2) {
            LOG.info(name +" thread interrupted while waiting for retry", e2);
          }
          --n;
        } else {
          if (!inError) {
            LOG.error("Got sink exception and over retry limit, "+
                      "suppressing further error messages", e);
          }
          queue.clear();
          refreshQueueSizeGauge();
          inError = true; // Don't keep complaining ad infinitum
        }
      }
    }
  }

  private void refreshQueueSizeGauge() {
    qsize.set(queue.size());
  }

  @Override
  public void consume(MetricsBuffer buffer) {
    long ts = 0;
    for (MetricsBuffer.Entry entry : buffer) {
      if (sourceFilter == null || sourceFilter.accepts(entry.name())) {
        for (MetricsRecordImpl record : entry.records()) {
          if ((context == null || context.equals(record.context())) &&
              (recordFilter == null || recordFilter.accepts(record))) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Pushing record "+ entry.name() +"."+
                  record.context() +"."+ record.name() +" to "+ name);
            }
            sink.putMetrics(metricFilter == null
                ? record
                : new MetricsRecordFiltered(record, metricFilter));
            if (ts == 0) ts = record.timestamp();
          }
        }
      }
    }
    if (ts > 0) {
      sink.flush();
      latency.add(Time.now() - ts);
    }
    if (buffer instanceof WaitableMetricsBuffer) {
      ((WaitableMetricsBuffer)buffer).notifyAnyWaiters();
    }
    LOG.debug("Done");
  }

  void start() {
    sinkThread.start();
    LOG.info("Sink "+ name +" started");
  }

  void stop() {
    stopping = true;
    sinkThread.interrupt();
    if (sink instanceof Closeable) {
      IOUtils.cleanup(LOG, (Closeable)sink);
    }
    try {
      sinkThread.join();
    } catch (InterruptedException e) {
      LOG.warn("Stop interrupted", e);
    }
  }

  String name() {
    return name;
  }

  String description() {
    return description;
  }

  void snapshot(MetricsRecordBuilder rb, boolean all) {
    registry.snapshot(rb, all);
  }

  MetricsSink sink() {
    return sink;
  }

  static class WaitableMetricsBuffer extends MetricsBuffer {
    private final Semaphore notificationSemaphore = new Semaphore(0);

    public WaitableMetricsBuffer(MetricsBuffer metricsBuffer) {
      super(metricsBuffer);
    }

    public boolean waitTillNotified(long millisecondsToWait) {
      try {
        return notificationSemaphore.tryAcquire(millisecondsToWait,
            TimeUnit.MILLISECONDS);
      } catch (InterruptedException e) {
        return false;
      }
    }

    public void notifyAnyWaiters() {
      notificationSemaphore.release();
    }
  }
}
8,578
31.869732
80
java
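For context on the sink side of the adapter above: its consumer thread drains MetricsBuffers, applies the source/record/metric filters, and hands individual records to MetricsSink.putMetrics(), calling flush() once per consumed buffer. A minimal illustrative sink against the public MetricsSink/MetricsPlugin surface; the class name and output format are hypothetical, not part of the file above.

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;

// Hypothetical sink: prints each record the adapter's consumer thread delivers.
public class StdoutSink implements MetricsSink {
  @Override public void init(SubsetConfiguration conf) {
    // adapter-level options (period, filters, retries) are consumed by
    // MetricsSinkAdapter before this sink ever sees them
  }

  @Override public void putMetrics(MetricsRecord record) {
    StringBuilder sb = new StringBuilder(record.context())
        .append('.').append(record.name());
    for (AbstractMetric m : record.metrics()) {
      sb.append(' ').append(m.name()).append('=').append(m.value());
    }
    System.out.println(sb);
  }

  @Override public void flush() {
    // called once per consumed buffer, after all its records are pushed
  }
}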
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.util.List;

import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;

import com.google.common.collect.Lists;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.MetricsVisitor;

/**
 * Helper class to build MBeanInfo from metrics records
 */
class MBeanInfoBuilder implements MetricsVisitor {

  private final String name, description;
  private List<MBeanAttributeInfo> attrs;
  private Iterable<MetricsRecordImpl> recs;
  private int curRecNo;

  MBeanInfoBuilder(String name, String desc) {
    this.name = name;
    description = desc;
    attrs = Lists.newArrayList();
  }

  MBeanInfoBuilder reset(Iterable<MetricsRecordImpl> recs) {
    this.recs = recs;
    attrs.clear();
    return this;
  }

  MBeanAttributeInfo newAttrInfo(String name, String desc, String type) {
    return new MBeanAttributeInfo(getAttrName(name), type, desc,
                                  true, false, false); // read-only, non-is
  }

  MBeanAttributeInfo newAttrInfo(MetricsInfo info, String type) {
    return newAttrInfo(info.name(), info.description(), type);
  }

  @Override
  public void gauge(MetricsInfo info, int value) {
    attrs.add(newAttrInfo(info, "java.lang.Integer"));
  }

  @Override
  public void gauge(MetricsInfo info, long value) {
    attrs.add(newAttrInfo(info, "java.lang.Long"));
  }

  @Override
  public void gauge(MetricsInfo info, float value) {
    attrs.add(newAttrInfo(info, "java.lang.Float"));
  }

  @Override
  public void gauge(MetricsInfo info, double value) {
    attrs.add(newAttrInfo(info, "java.lang.Double"));
  }

  @Override
  public void counter(MetricsInfo info, int value) {
    attrs.add(newAttrInfo(info, "java.lang.Integer"));
  }

  @Override
  public void counter(MetricsInfo info, long value) {
    attrs.add(newAttrInfo(info, "java.lang.Long"));
  }

  String getAttrName(String name) {
    return curRecNo > 0 ? name +"."+ curRecNo : name;
  }

  MBeanInfo get() {
    curRecNo = 0;
    for (MetricsRecordImpl rec : recs) {
      for (MetricsTag t : rec.tags()) {
        attrs.add(newAttrInfo("tag."+ t.name(), t.description(),
                  "java.lang.String"));
      }
      for (AbstractMetric m : rec.metrics()) {
        m.visit(this);
      }
      ++curRecNo;
    }
    MetricsSystemImpl.LOG.debug(attrs);
    MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
    return new MBeanInfo(name, description, attrs.toArray(attrsArray),
                         null, null, null); // no ops/ctors/notifications
  }
}
3,503
29.469565
75
java
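A hedged sketch of what MBeanInfoBuilder's naming scheme looks like from the JMX side: tags surface as "tag.<name>" attributes, and every record after the first gets a ".<recNo>" suffix via getAttrName(). The ObjectName below assumes the usual "Hadoop:service=<prefix>,name=<source>" pattern produced by MBeans.register; both names are made up for illustration.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Reads an attribute that MBeanInfoBuilder exposed; adjust the ObjectName
// to whatever your process actually registers.
public class JmxPeek {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("Hadoop:service=test,name=MySource");
    // a tag attribute ("tag.Context"); a second record's copy would be
    // "tag.Context.1" per getAttrName()
    Object context = server.getAttribute(name, "tag.Context");
    System.out.println("tag.Context = " + context);
  }
}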
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.util.HashMap;

import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.DynamicMBean;
import javax.management.InvalidAttributeValueException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.ObjectName;
import javax.management.ReflectionException;

import static com.google.common.base.Preconditions.*;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
import static org.apache.hadoop.metrics2.util.Contracts.*;

/**
 * An adapter class for metrics source and associated filter and jmx impl
 */
class MetricsSourceAdapter implements DynamicMBean {

  private static final Log LOG = LogFactory.getLog(MetricsSourceAdapter.class);

  private final String prefix, name;
  private final MetricsSource source;
  private final MetricsFilter recordFilter, metricFilter;
  private final HashMap<String, Attribute> attrCache;
  private final MBeanInfoBuilder infoBuilder;
  private final Iterable<MetricsTag> injectedTags;

  private Iterable<MetricsRecordImpl> lastRecs;
  private long jmxCacheTS = 0;
  private int jmxCacheTTL;
  private MBeanInfo infoCache;
  private ObjectName mbeanName;
  private final boolean startMBeans;

  MetricsSourceAdapter(String prefix, String name, String description,
                       MetricsSource source, Iterable<MetricsTag> injectedTags,
                       MetricsFilter recordFilter, MetricsFilter metricFilter,
                       int jmxCacheTTL, boolean startMBeans) {
    this.prefix = checkNotNull(prefix, "prefix");
    this.name = checkNotNull(name, "name");
    this.source = checkNotNull(source, "source");
    attrCache = Maps.newHashMap();
    infoBuilder = new MBeanInfoBuilder(name, description);
    this.injectedTags = injectedTags;
    this.recordFilter = recordFilter;
    this.metricFilter = metricFilter;
    this.jmxCacheTTL = checkArg(jmxCacheTTL, jmxCacheTTL > 0, "jmxCacheTTL");
    this.startMBeans = startMBeans;
  }

  MetricsSourceAdapter(String prefix, String name, String description,
                       MetricsSource source, Iterable<MetricsTag> injectedTags,
                       int period, MetricsConfig conf) {
    this(prefix, name, description, source, injectedTags,
         conf.getFilter(RECORD_FILTER_KEY),
         conf.getFilter(METRIC_FILTER_KEY),
         period + 1, // hack to avoid most of the "innocuous" races.
         conf.getBoolean(START_MBEANS_KEY, true));
  }

  void start() {
    if (startMBeans) startMBeans();
  }

  @Override
  public Object getAttribute(String attribute)
      throws AttributeNotFoundException, MBeanException, ReflectionException {
    updateJmxCache();
    synchronized(this) {
      Attribute a = attrCache.get(attribute);
      if (a == null) {
        throw new AttributeNotFoundException(attribute +" not found");
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(attribute +": "+ a);
      }
      return a.getValue();
    }
  }

  @Override
  public void setAttribute(Attribute attribute)
      throws AttributeNotFoundException, InvalidAttributeValueException,
             MBeanException, ReflectionException {
    throw new UnsupportedOperationException("Metrics are read-only.");
  }

  @Override
  public AttributeList getAttributes(String[] attributes) {
    updateJmxCache();
    synchronized(this) {
      AttributeList ret = new AttributeList();
      for (String key : attributes) {
        Attribute attr = attrCache.get(key);
        if (LOG.isDebugEnabled()) {
          LOG.debug(key +": "+ attr);
        }
        ret.add(attr);
      }
      return ret;
    }
  }

  @Override
  public AttributeList setAttributes(AttributeList attributes) {
    throw new UnsupportedOperationException("Metrics are read-only.");
  }

  @Override
  public Object invoke(String actionName, Object[] params, String[] signature)
      throws MBeanException, ReflectionException {
    throw new UnsupportedOperationException("Not supported yet.");
  }

  @Override
  public MBeanInfo getMBeanInfo() {
    updateJmxCache();
    return infoCache;
  }

  private void updateJmxCache() {
    boolean getAllMetrics = false;
    synchronized (this) {
      if (Time.now() - jmxCacheTS >= jmxCacheTTL) {
        // temporarily advance the expiry while updating the cache
        jmxCacheTS = Time.now() + jmxCacheTTL;
        if (lastRecs == null) {
          getAllMetrics = true;
        }
      } else {
        return;
      }

      if (getAllMetrics) {
        MetricsCollectorImpl builder = new MetricsCollectorImpl();
        getMetrics(builder, true);
      }

      updateAttrCache();
      if (getAllMetrics) {
        updateInfoCache();
      }
      jmxCacheTS = Time.now();
      lastRecs = null; // in case regular interval update is not running
    }
  }

  Iterable<MetricsRecordImpl> getMetrics(MetricsCollectorImpl builder,
                                         boolean all) {
    builder.setRecordFilter(recordFilter).setMetricFilter(metricFilter);
    synchronized(this) {
      if (lastRecs == null && jmxCacheTS == 0) {
        all = true; // Get all the metrics to populate the sink caches
      }
    }
    try {
      source.getMetrics(builder, all);
    } catch (Exception e) {
      LOG.error("Error getting metrics from source "+ name, e);
    }
    for (MetricsRecordBuilderImpl rb : builder) {
      for (MetricsTag t : injectedTags) {
        rb.add(t);
      }
    }
    synchronized(this) {
      lastRecs = builder.getRecords();
      return lastRecs;
    }
  }

  synchronized void stop() {
    stopMBeans();
  }

  synchronized void startMBeans() {
    if (mbeanName != null) {
      LOG.warn("MBean "+ name +" already initialized!");
      LOG.debug("Stacktrace: ", new Throwable());
      return;
    }
    mbeanName = MBeans.register(prefix, name, this);
    LOG.debug("MBean for source "+ name +" registered.");
  }

  synchronized void stopMBeans() {
    if (mbeanName != null) {
      MBeans.unregister(mbeanName);
      mbeanName = null;
    }
  }

  @VisibleForTesting
  ObjectName getMBeanName() {
    return mbeanName;
  }

  private void updateInfoCache() {
    LOG.debug("Updating info cache...");
    infoCache = infoBuilder.reset(lastRecs).get();
    LOG.debug("Done");
  }

  private int updateAttrCache() {
    LOG.debug("Updating attr cache...");
    int recNo = 0;
    int numMetrics = 0;
    for (MetricsRecordImpl record : lastRecs) {
      for (MetricsTag t : record.tags()) {
        setAttrCacheTag(t, recNo);
        ++numMetrics;
      }
      for (AbstractMetric m : record.metrics()) {
        setAttrCacheMetric(m, recNo);
        ++numMetrics;
      }
      ++recNo;
    }
    LOG.debug("Done. # tags & metrics="+ numMetrics);
    return numMetrics;
  }

  private static String tagName(String name, int recNo) {
    StringBuilder sb = new StringBuilder(name.length() + 16);
    sb.append("tag.").append(name);
    if (recNo > 0) {
      sb.append('.').append(recNo);
    }
    return sb.toString();
  }

  private void setAttrCacheTag(MetricsTag tag, int recNo) {
    String key = tagName(tag.name(), recNo);
    attrCache.put(key, new Attribute(key, tag.value()));
  }

  private static String metricName(String name, int recNo) {
    if (recNo == 0) {
      return name;
    }
    StringBuilder sb = new StringBuilder(name.length() + 12);
    sb.append(name);
    if (recNo > 0) {
      sb.append('.').append(recNo);
    }
    return sb.toString();
  }

  private void setAttrCacheMetric(AbstractMetric metric, int recNo) {
    String key = metricName(metric.name(), recNo);
    attrCache.put(key, new Attribute(key, metric.value()));
  }

  String name() {
    return name;
  }

  MetricsSource source() {
    return source;
  }
}
9,125
29.624161
79
java
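The adapter above wraps any MetricsSource; a minimal sketch of one, using only the public MetricsSource/MetricsCollector API (the record, context, and metric names are illustrative):

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical source of the kind MetricsSourceAdapter wraps.
public class QueueDepthSource implements MetricsSource {
  private volatile int depth;

  public void setDepth(int d) { depth = d; }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // "all" is true when the adapter repopulates its JMX caches
    collector.addRecord("QueueDepth")
        .setContext("myapp")
        .addGauge(Interns.info("Depth", "Current queue depth"), depth);
  }
}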
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordFiltered.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.util.Iterator;
import java.util.Collection;

import com.google.common.collect.AbstractIterator;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;

class MetricsRecordFiltered extends AbstractMetricsRecord {
  private final MetricsRecord delegate;
  private final MetricsFilter filter;

  MetricsRecordFiltered(MetricsRecord delegate, MetricsFilter filter) {
    this.delegate = delegate;
    this.filter = filter;
  }

  @Override public long timestamp() {
    return delegate.timestamp();
  }

  @Override public String name() {
    return delegate.name();
  }

  @Override public String description() {
    return delegate.description();
  }

  @Override public String context() {
    return delegate.context();
  }

  @Override public Collection<MetricsTag> tags() {
    return delegate.tags();
  }

  @Override public Iterable<AbstractMetric> metrics() {
    return new Iterable<AbstractMetric>() {
      final Iterator<AbstractMetric> it = delegate.metrics().iterator();
      @Override public Iterator<AbstractMetric> iterator() {
        return new AbstractIterator<AbstractMetric>() {
          @Override public AbstractMetric computeNext() {
            while (it.hasNext()) {
              AbstractMetric next = it.next();
              if (filter.accepts(next.name())) {
                return next;
              }
            }
            return endOfData();
          }
        };
      }
    };
  }
}
2,424
29.696203
75
java
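A sketch of the filter type that MetricsRecordFiltered consults: its metrics() view calls filter.accepts(name) for each metric. This assumes MetricsFilter's abstract surface is accepts(String), accepts(MetricsTag), and accepts(Iterable<MetricsTag>) plus MetricsPlugin.init; the class and its "Rpc" prefix rule are hypothetical.

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsTag;

// Hypothetical metric filter: passes only metrics whose names start with "Rpc".
public class RpcOnlyFilter extends MetricsFilter {
  @Override public void init(SubsetConfiguration conf) {}

  @Override public boolean accepts(String name) {
    return name.startsWith("Rpc"); // consulted per metric name
  }

  @Override public boolean accepts(MetricsTag tag) { return true; }

  @Override public boolean accepts(Iterable<MetricsTag> tags) { return true; }
}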
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.util.Collections;
import java.util.List;

import com.google.common.collect.Lists;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.util.Time;

class MetricsRecordBuilderImpl extends MetricsRecordBuilder {
  private final MetricsCollector parent;
  private final long timestamp;
  private final MetricsInfo recInfo;
  private final List<AbstractMetric> metrics;
  private final List<MetricsTag> tags;
  private final MetricsFilter recordFilter, metricFilter;
  private final boolean acceptable;

  MetricsRecordBuilderImpl(MetricsCollector parent, MetricsInfo info,
                           MetricsFilter rf, MetricsFilter mf,
                           boolean acceptable) {
    this.parent = parent;
    timestamp = Time.now();
    recInfo = info;
    metrics = Lists.newArrayList();
    tags = Lists.newArrayList();
    recordFilter = rf;
    metricFilter = mf;
    this.acceptable = acceptable;
  }

  @Override
  public MetricsCollector parent() {
    return parent;
  }

  @Override
  public MetricsRecordBuilderImpl tag(MetricsInfo info, String value) {
    if (acceptable) {
      tags.add(Interns.tag(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl add(MetricsTag tag) {
    tags.add(tag);
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl add(AbstractMetric metric) {
    metrics.add(metric);
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addCounter(MetricsInfo info, int value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricCounterInt(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addCounter(MetricsInfo info, long value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricCounterLong(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addGauge(MetricsInfo info, int value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricGaugeInt(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addGauge(MetricsInfo info, long value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricGaugeLong(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addGauge(MetricsInfo info, float value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricGaugeFloat(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl addGauge(MetricsInfo info, double value) {
    if (acceptable && (metricFilter == null ||
        metricFilter.accepts(info.name()))) {
      metrics.add(new MetricGaugeDouble(info, value));
    }
    return this;
  }

  @Override
  public MetricsRecordBuilderImpl setContext(String value) {
    return tag(MsInfo.Context, value);
  }

  public MetricsRecordImpl getRecord() {
    if (acceptable && (recordFilter == null || recordFilter.accepts(tags))) {
      return new MetricsRecordImpl(recInfo, timestamp, tags(), metrics());
    }
    return null;
  }

  List<MetricsTag> tags() {
    return Collections.unmodifiableList(tags);
  }

  List<AbstractMetric> metrics() {
    return Collections.unmodifiableList(metrics);
  }
}
4,651
29.207792
77
java
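What the builder above looks like from a source's point of view, as a small fragment against the public MetricsCollector/MetricsRecordBuilder API; the record, tag, and metric names are made up for illustration.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical helper showing the fluent calls a source makes per record.
public class TrafficRecord {
  static void fill(MetricsCollector collector, long bytesIn, long bytesOut) {
    collector.addRecord("Traffic")            // returns the record builder
        .setContext("myapp")                  // tag(MsInfo.Context, "myapp")
        .tag(Interns.info("Port", "Listen port"), "8020")
        .addCounter(Interns.info("BytesIn", "Bytes received"), bytesIn)
        .addCounter(Interns.info("BytesOut", "Bytes sent"), bytesOut);
  }
}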
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.util.List;

import static com.google.common.base.Preconditions.*;

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.util.Contracts.*;

class MetricsRecordImpl extends AbstractMetricsRecord {
  protected static final String DEFAULT_CONTEXT = "default";

  private final long timestamp;
  private final MetricsInfo info;
  private final List<MetricsTag> tags;
  private final Iterable<AbstractMetric> metrics;

  /**
   * Construct a metrics record
   * @param info  {@link MetricsInfo} of the record
   * @param timestamp of the record
   * @param tags  of the record
   * @param metrics of the record
   */
  public MetricsRecordImpl(MetricsInfo info, long timestamp,
                           List<MetricsTag> tags,
                           Iterable<AbstractMetric> metrics) {
    this.timestamp = checkArg(timestamp, timestamp > 0, "timestamp");
    this.info = checkNotNull(info, "info");
    this.tags = checkNotNull(tags, "tags");
    this.metrics = checkNotNull(metrics, "metrics");
  }

  @Override public long timestamp() {
    return timestamp;
  }

  @Override public String name() {
    return info.name();
  }

  MetricsInfo info() {
    return info;
  }

  @Override public String description() {
    return info.description();
  }

  @Override public String context() {
    // usually the first tag
    for (MetricsTag t : tags) {
      if (t.info() == MsInfo.Context) {
        return t.value();
      }
    }
    return DEFAULT_CONTEXT;
  }

  @Override
  public List<MetricsTag> tags() {
    return tags; // already unmodifiable from MetricsRecordBuilderImpl#tags
  }

  @Override public Iterable<AbstractMetric> metrics() {
    return metrics;
  }
}
2,667
28.977528
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricCounterInt.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricCounterInt extends AbstractMetric {
  final int value;

  MetricCounterInt(MetricsInfo info, int value) {
    super(info);
    this.value = value;
  }

  @Override
  public Integer value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.COUNTER;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.counter(this, value);
  }
}
1,445
28.510204
75
java
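The visit() method above is one half of a visitor double dispatch; the other half is a MetricsVisitor whose overloads match the six signatures MBeanInfoBuilder implements earlier in this section. A small illustrative visitor that sums counter values; the class name is hypothetical.

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

// Each concrete metric calls back the overload matching its value type.
public class SumVisitor implements MetricsVisitor {
  long counterTotal;

  @Override public void gauge(MetricsInfo info, int value) {}
  @Override public void gauge(MetricsInfo info, long value) {}
  @Override public void gauge(MetricsInfo info, float value) {}
  @Override public void gauge(MetricsInfo info, double value) {}

  @Override public void counter(MetricsInfo info, int value) {
    counterTotal += value; // MetricCounterInt.visit() lands here
  }

  @Override public void counter(MetricsInfo info, long value) {
    counterTotal += value; // MetricCounterLong.visit() lands here
  }
}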
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.net.URL;
import java.net.URLClassLoader;
import static java.security.AccessController.*;
import java.security.PrivilegedAction;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
import org.apache.hadoop.util.StringUtils;

/**
 * Metrics configuration for MetricsSystemImpl
 */
class MetricsConfig extends SubsetConfiguration {
  static final Log LOG = LogFactory.getLog(MetricsConfig.class);

  static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
  static final String PREFIX_DEFAULT = "*.";

  static final String PERIOD_KEY = "period";
  static final int PERIOD_DEFAULT = 10; // seconds

  static final String QUEUE_CAPACITY_KEY = "queue.capacity";
  static final int QUEUE_CAPACITY_DEFAULT = 1;

  static final String RETRY_DELAY_KEY = "retry.delay";
  static final int RETRY_DELAY_DEFAULT = 10; // seconds
  static final String RETRY_BACKOFF_KEY = "retry.backoff";
  static final int RETRY_BACKOFF_DEFAULT = 2; // back off factor
  static final String RETRY_COUNT_KEY = "retry.count";
  static final int RETRY_COUNT_DEFAULT = 1;

  static final String JMX_CACHE_TTL_KEY = "jmx.cache.ttl";
  static final String START_MBEANS_KEY = "source.start_mbeans";
  static final String PLUGIN_URLS_KEY = "plugin.urls";

  static final String CONTEXT_KEY = "context";
  static final String NAME_KEY = "name";
  static final String DESC_KEY = "description";
  static final String SOURCE_KEY = "source";
  static final String SINK_KEY = "sink";
  static final String METRIC_FILTER_KEY = "metric.filter";
  static final String RECORD_FILTER_KEY = "record.filter";
  static final String SOURCE_FILTER_KEY = "source.filter";

  static final Pattern INSTANCE_REGEX = Pattern.compile("([^.*]+)\\..+");
  static final Splitter SPLITTER = Splitter.on(',').trimResults();
  private ClassLoader pluginLoader;

  MetricsConfig(Configuration c, String prefix) {
    super(c, StringUtils.toLowerCase(prefix), ".");
  }

  static MetricsConfig create(String prefix) {
    return loadFirst(prefix, "hadoop-metrics2-" +
                     StringUtils.toLowerCase(prefix) + ".properties",
                     DEFAULT_FILE_NAME);
  }

  static MetricsConfig create(String prefix, String... fileNames) {
    return loadFirst(prefix, fileNames);
  }

  /**
   * Load configuration from a list of files until the first successful load
   * @param prefix the metrics system prefix
   * @param fileNames the list of filenames to try
   * @return the configuration object
   */
  static MetricsConfig loadFirst(String prefix, String... fileNames) {
    for (String fname : fileNames) {
      try {
        Configuration cf = new PropertiesConfiguration(fname)
            .interpolatedConfiguration();
        LOG.info("loaded properties from "+ fname);
        LOG.debug(toString(cf));
        MetricsConfig mc = new MetricsConfig(cf, prefix);
        LOG.debug(mc);
        return mc;
      } catch (ConfigurationException e) {
        if (e.getMessage().startsWith("Cannot locate configuration")) {
          continue;
        }
        throw new MetricsConfigException(e);
      }
    }
    LOG.warn("Cannot locate configuration: tried "+
             Joiner.on(",").join(fileNames));
    // default to an empty configuration
    return new MetricsConfig(new PropertiesConfiguration(), prefix);
  }

  @Override
  public MetricsConfig subset(String prefix) {
    return new MetricsConfig(this, prefix);
  }

  /**
   * Return sub configs for instance specified in the config.
   * Assuming format specified as follows:<pre>
   *   [type].[instance].[option] = [value]</pre>
   * Note, '*' is a special default instance, which is excluded in the result.
   * @param type of the instance
   * @return a map with [instance] as key and config object as value
   */
  Map<String, MetricsConfig> getInstanceConfigs(String type) {
    Map<String, MetricsConfig> map = Maps.newHashMap();
    MetricsConfig sub = subset(type);

    for (String key : sub.keys()) {
      Matcher matcher = INSTANCE_REGEX.matcher(key);
      if (matcher.matches()) {
        String instance = matcher.group(1);
        if (!map.containsKey(instance)) {
          map.put(instance, sub.subset(instance));
        }
      }
    }
    return map;
  }

  Iterable<String> keys() {
    return new Iterable<String>() {
      @SuppressWarnings("unchecked")
      @Override
      public Iterator<String> iterator() {
        return (Iterator<String>) getKeys();
      }
    };
  }

  /**
   * Will poke parents for defaults
   * @param key to lookup
   * @return the value or null
   */
  @Override
  public Object getProperty(String key) {
    Object value = super.getProperty(key);
    if (value == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("poking parent '"+ getParent().getClass().getSimpleName() +
                  "' for key: "+ key);
      }
      return getParent().getProperty(key.startsWith(PREFIX_DEFAULT) ? key
                                     : PREFIX_DEFAULT + key);
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("returning '"+ value +"' for key: "+ key);
    }
    return value;
  }

  <T extends MetricsPlugin> T getPlugin(String name) {
    String clsName = getClassName(name);
    if (clsName == null) return null;
    try {
      Class<?> cls = Class.forName(clsName, true, getPluginLoader());
      @SuppressWarnings("unchecked")
      T plugin = (T) cls.newInstance();
      plugin.init(name.isEmpty() ? this : subset(name));
      return plugin;
    } catch (Exception e) {
      throw new MetricsConfigException("Error creating plugin: "+ clsName, e);
    }
  }

  String getClassName(String prefix) {
    String classKey = prefix.isEmpty() ? "class" : prefix +".class";
    String clsName = getString(classKey);
    LOG.debug(clsName);
    if (clsName == null || clsName.isEmpty()) {
      return null;
    }
    return clsName;
  }

  ClassLoader getPluginLoader() {
    if (pluginLoader != null) return pluginLoader;
    final ClassLoader defaultLoader = getClass().getClassLoader();
    Object purls = super.getProperty(PLUGIN_URLS_KEY);
    if (purls == null) return defaultLoader;
    Iterable<String> jars = SPLITTER.split((String) purls);
    int len = Iterables.size(jars);
    if (len > 0) {
      final URL[] urls = new URL[len];
      try {
        int i = 0;
        for (String jar : jars) {
          LOG.debug(jar);
          urls[i++] = new URL(jar);
        }
      } catch (Exception e) {
        throw new MetricsConfigException(e);
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("using plugin jars: "+ Iterables.toString(jars));
      }
      pluginLoader = doPrivileged(new PrivilegedAction<ClassLoader>() {
        @Override public ClassLoader run() {
          return new URLClassLoader(urls, defaultLoader);
        }
      });
      return pluginLoader;
    }
    if (parent instanceof MetricsConfig) {
      return ((MetricsConfig) parent).getPluginLoader();
    }
    return defaultLoader;
  }

  @Override
  public void clear() {
    super.clear();
    // pluginLoader.close(); // jdk7 is saner
  }

  MetricsFilter getFilter(String prefix) {
    // don't create filter instances without options
    MetricsConfig conf = subset(prefix);
    if (conf.isEmpty()) return null;
    MetricsFilter filter = getPlugin(prefix);
    if (filter != null) return filter;
    // glob filter is assumed if pattern is specified but class is not.
    filter = new GlobFilter();
    filter.init(conf);
    return filter;
  }

  @Override
  public String toString() {
    return toString(this);
  }

  static String toString(Configuration c) {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try {
      PrintStream ps = new PrintStream(buffer, false, "UTF-8");
      PropertiesConfiguration tmp = new PropertiesConfiguration();
      tmp.copy(c);
      tmp.save(ps);
      return buffer.toString("UTF-8");
    } catch (Exception e) {
      throw new MetricsConfigException(e);
    }
  }
}
9,586
32.404181
78
java
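The prefix/subset lookups in MetricsConfig map onto the familiar hadoop-metrics2.properties layout. An illustrative fragment follows; the sink instance name and output filename are invented for the example, while FileSink itself is a real sink class.

# '*' is the PREFIX_DEFAULT fallback that getProperty() pokes the parent for
*.period=10
*.queue.capacity=1

# [prefix].[type].[instance].[option], parsed via subset()/INSTANCE_REGEX
namenode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
namenode.sink.file.filename=namenode-metrics.out

# a glob filter is assumed when a pattern is given but no filter class
namenode.sink.file.metric.filter.exclude=*Latency*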
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricCounterLong.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.impl;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsVisitor;

class MetricCounterLong extends AbstractMetric {
  final long value;

  MetricCounterLong(MetricsInfo info, long value) {
    super(info);
    this.value = value;
  }

  @Override
  public Long value() {
    return value;
  }

  @Override
  public MetricType type() {
    return MetricType.COUNTER;
  }

  @Override
  public void visit(MetricsVisitor visitor) {
    visitor.counter(this, value);
  }
}
1,446
28.530612
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * A collection of library classes for implementing metrics sources
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
package org.apache.hadoop.metrics2.lib;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
1,097
39.666667
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/UniqueNames.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.lib;

import java.util.Map;

import com.google.common.base.Joiner;
import com.google.common.collect.Maps;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Generates predictable and user-friendly unique names
 */
@InterfaceAudience.Private
public class UniqueNames {

  static class Count {
    final String baseName;
    int value;

    Count(String name, int value) {
      baseName = name;
      this.value = value;
    }
  }

  static final Joiner joiner = Joiner.on('-');
  final Map<String, Count> map = Maps.newHashMap();

  public synchronized String uniqueName(String name) {
    Count c = map.get(name);
    if (c == null) {
      c = new Count(name, 0);
      map.put(name, c);
      return name;
    }
    if (!c.baseName.equals(name)) c = new Count(name, 0);
    do {
      String newName = joiner.join(name, ++c.value);
      Count c2 = map.get(newName);
      if (c2 == null) {
        map.put(newName, c);
        return newName;
      }
      // handle collisions, assumed to be rare,
      // e.g. people explicitly passed in name-\d+ names.
    } while (true);
  }
}
1,949
28.104478
75
java
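A short demonstration of the suffixing scheme implemented above; the name is invented for the example:

import org.apache.hadoop.metrics2.lib.UniqueNames;

// First caller keeps the plain name; later callers get "-1", "-2", ...
public class UniqueNamesDemo {
  public static void main(String[] args) {
    UniqueNames names = new UniqueNames();
    System.out.println(names.uniqueName("DataNode")); // DataNode
    System.out.println(names.uniqueName("DataNode")); // DataNode-1
    System.out.println(names.uniqueName("DataNode")); // DataNode-2
  }
}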
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/Interns.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.lib;

import java.util.Map;
import java.util.LinkedHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag;

/**
 * Helpers to create interned metrics info
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Interns {
  private static final Log LOG = LogFactory.getLog(Interns.class);

  // A simple intern cache with two keys
  // (to avoid creating new (combined) key objects for lookup)
  private static abstract class CacheWith2Keys<K1, K2, V> {
    private final Map<K1, Map<K2, V>> k1Map =
        new LinkedHashMap<K1, Map<K2, V>>() {
      private static final long serialVersionUID = 1L;
      private boolean gotOverflow = false;
      @Override
      protected boolean removeEldestEntry(Map.Entry<K1, Map<K2, V>> e) {
        boolean overflow = expireKey1At(size());
        if (overflow && !gotOverflow) {
          LOG.warn("Metrics intern cache overflow at "+ size() +" for "+ e);
          gotOverflow = true;
        }
        return overflow;
      }
    };

    abstract protected boolean expireKey1At(int size);
    abstract protected boolean expireKey2At(int size);
    abstract protected V newValue(K1 k1, K2 k2);

    synchronized V add(K1 k1, K2 k2) {
      Map<K2, V> k2Map = k1Map.get(k1);
      if (k2Map == null) {
        k2Map = new LinkedHashMap<K2, V>() {
          private static final long serialVersionUID = 1L;
          private boolean gotOverflow = false;
          @Override protected boolean removeEldestEntry(Map.Entry<K2, V> e) {
            boolean overflow = expireKey2At(size());
            if (overflow && !gotOverflow) {
              LOG.warn("Metrics intern cache overflow at "+ size() +" for "+ e);
              gotOverflow = true;
            }
            return overflow;
          }
        };
        k1Map.put(k1, k2Map);
      }
      V v = k2Map.get(k2);
      if (v == null) {
        v = newValue(k1, k2);
        k2Map.put(k2, v);
      }
      return v;
    }
  }

  // Sanity limits in case of misuse/abuse.
  static final int MAX_INFO_NAMES = 2010;
  static final int MAX_INFO_DESCS = 100;  // distinct per name

  enum Info {
    INSTANCE;

    final CacheWith2Keys<String, String, MetricsInfo> cache =
        new CacheWith2Keys<String, String, MetricsInfo>() {

      @Override protected boolean expireKey1At(int size) {
        return size > MAX_INFO_NAMES;
      }

      @Override protected boolean expireKey2At(int size) {
        return size > MAX_INFO_DESCS;
      }

      @Override protected MetricsInfo newValue(String name, String desc) {
        return new MetricsInfoImpl(name, desc);
      }
    };
  }

  /**
   * Get a metric info object
   * @param name of the metric info
   * @param description of the metric info
   * @return an interned metric info object
   */
  public static MetricsInfo info(String name, String description) {
    return Info.INSTANCE.cache.add(name, description);
  }

  // Sanity limits
  static final int MAX_TAG_NAMES = 100;
  static final int MAX_TAG_VALUES = 1000; // distinct per name

  enum Tags {
    INSTANCE;

    final CacheWith2Keys<MetricsInfo, String, MetricsTag> cache =
        new CacheWith2Keys<MetricsInfo, String, MetricsTag>() {

      @Override protected boolean expireKey1At(int size) {
        return size > MAX_TAG_NAMES;
      }

      @Override protected boolean expireKey2At(int size) {
        return size > MAX_TAG_VALUES;
      }

      @Override protected MetricsTag newValue(MetricsInfo info, String value) {
        return new MetricsTag(info, value);
      }
    };
  }

  /**
   * Get a metrics tag
   * @param info  of the tag
   * @param value of the tag
   * @return an interned metrics tag
   */
  public static MetricsTag tag(MetricsInfo info, String value) {
    return Tags.INSTANCE.cache.add(info, value);
  }

  /**
   * Get a metrics tag
   * @param name  of the tag
   * @param description of the tag
   * @param value of the tag
   * @return an interned metrics tag
   */
  public static MetricsTag tag(String name, String description, String value) {
    return Tags.INSTANCE.cache.add(info(name, description), value);
  }
}
5,159
30.084337
80
java
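A small usage sketch of the interning helpers: repeated lookups with equal keys return the same cached instance, which is the point of the two-key caches above. The names and values below are illustrative.

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.Interns;

// Hot paths avoid re-allocating info/tag objects by going through the cache.
public class InternsDemo {
  public static void main(String[] args) {
    MetricsInfo a = Interns.info("BytesRead", "Total bytes read");
    MetricsInfo b = Interns.info("BytesRead", "Total bytes read");
    System.out.println(a == b); // true: same interned object

    MetricsTag t1 = Interns.tag("Context", "context of the record", "dfs");
    MetricsTag t2 = Interns.tag("Context", "context of the record", "dfs");
    System.out.println(t1 == t2); // true
  }
}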
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.lib;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

import java.util.concurrent.atomic.AtomicLong;

/**
 * A mutable long counter
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableCounterLong extends MutableCounter {

  private AtomicLong value = new AtomicLong();

  MutableCounterLong(MetricsInfo info, long initValue) {
    super(info);
    this.value.set(initValue);
  }

  @Override
  public void incr() {
    incr(1);
  }

  /**
   * Increment the value by a delta
   * @param delta of the increment
   */
  public void incr(long delta) {
    value.addAndGet(delta);
    setChanged();
  }

  public long value() {
    return value.get();
  }

  @Override
  public void snapshot(MetricsRecordBuilder builder, boolean all) {
    if (all || changed()) {
      builder.addCounter(info(), value());
      clearChanged();
    }
  }
}
1,884
26.318841
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.lib;

import java.util.Collection;
import java.util.Map;

import com.google.common.collect.Maps;
import com.google.common.base.Objects;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MsInfo;

/**
 * An optional metrics registry class for creating and maintaining a
 * collection of mutable metrics, making it easier to write metrics sources.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MetricsRegistry {
  private final Map<String, MutableMetric> metricsMap = Maps.newLinkedHashMap();
  private final Map<String, MetricsTag> tagsMap = Maps.newLinkedHashMap();
  private final MetricsInfo metricsInfo;

  /**
   * Construct the registry with a record name
   * @param name of the record of the metrics
   */
  public MetricsRegistry(String name) {
    metricsInfo = Interns.info(name, name);
  }

  /**
   * Construct the registry with a metadata object
   * @param info the info object for the metrics record/group
   */
  public MetricsRegistry(MetricsInfo info) {
    metricsInfo = info;
  }

  /**
   * @return the info object of the metrics registry
   */
  public MetricsInfo info() {
    return metricsInfo;
  }

  /**
   * Get a metric by name
   * @param name of the metric
   * @return the metric object
   */
  public synchronized MutableMetric get(String name) {
    return metricsMap.get(name);
  }

  /**
   * Get a tag by name
   * @param name of the tag
   * @return the tag object
   */
  public synchronized MetricsTag getTag(String name) {
    return tagsMap.get(name);
  }

  /**
   * Create a mutable integer counter
   * @param name of the metric
   * @param desc metric description
   * @param iVal initial value
   * @return a new counter object
   */
  public MutableCounterInt newCounter(String name, String desc, int iVal) {
    return newCounter(Interns.info(name, desc), iVal);
  }

  /**
   * Create a mutable integer counter
   * @param info metadata of the metric
   * @param iVal initial value
   * @return a new counter object
   */
  public synchronized MutableCounterInt newCounter(MetricsInfo info, int iVal) {
    checkMetricName(info.name());
    MutableCounterInt ret = new MutableCounterInt(info, iVal);
    metricsMap.put(info.name(), ret);
    return ret;
  }

  /**
   * Create a mutable long integer counter
   * @param name of the metric
   * @param desc metric description
   * @param iVal initial value
   * @return a new counter object
   */
  public MutableCounterLong newCounter(String name, String desc, long iVal) {
    return newCounter(Interns.info(name, desc), iVal);
  }

  /**
   * Create a mutable long integer counter
   * @param info metadata of the metric
   * @param iVal initial value
   * @return a new counter object
   */
  public synchronized
  MutableCounterLong newCounter(MetricsInfo info, long iVal) {
    checkMetricName(info.name());
    MutableCounterLong ret = new MutableCounterLong(info, iVal);
    metricsMap.put(info.name(), ret);
    return ret;
  }

  /**
   * Create a mutable integer gauge
   * @param name of the metric
   * @param desc metric description
   * @param iVal initial value
   * @return a new gauge object
   */
  public MutableGaugeInt newGauge(String name, String desc, int iVal) {
    return newGauge(Interns.info(name, desc), iVal);
  }

  /**
   * Create a mutable integer gauge
   * @param info metadata of the metric
   * @param iVal initial value
   * @return a new gauge object
   */
  public synchronized MutableGaugeInt newGauge(MetricsInfo info, int iVal) {
    checkMetricName(info.name());
    MutableGaugeInt ret = new MutableGaugeInt(info, iVal);
    metricsMap.put(info.name(), ret);
    return ret;
  }

  /**
   * Create a mutable long integer gauge
   * @param name of the metric
   * @param desc metric description
   * @param iVal initial value
   * @return a new gauge object
   */
  public MutableGaugeLong newGauge(String name, String desc, long iVal) {
    return newGauge(Interns.info(name, desc), iVal);
  }

  /**
   * Create a mutable long integer gauge
   * @param info metadata of the metric
   * @param iVal initial value
   * @return a new gauge object
   */
  public synchronized MutableGaugeLong newGauge(MetricsInfo info, long iVal) {
    checkMetricName(info.name());
    MutableGaugeLong ret = new MutableGaugeLong(info, iVal);
    metricsMap.put(info.name(), ret);
    return ret;
  }

  /**
   * Create a mutable metric that estimates quantiles of a stream of values
   * @param name of the metric
   * @param desc metric description
   * @param sampleName of the metric (e.g., "Ops")
   * @param valueName of the metric (e.g., "Time" or "Latency")
   * @param interval rollover interval of estimator in seconds
   * @return a new quantile estimator object
   */
  public synchronized MutableQuantiles newQuantiles(String name, String desc,
      String sampleName, String valueName, int interval) {
    checkMetricName(name);
    MutableQuantiles ret =
        new MutableQuantiles(name, desc, sampleName, valueName, interval);
    metricsMap.put(name, ret);
    return ret;
  }

  /**
   * Create a mutable metric with stats
   * @param name of the metric
   * @param desc metric description
   * @param sampleName of the metric (e.g., "Ops")
   * @param valueName of the metric (e.g., "Time" or "Latency")
   * @param extended produce extended stat (stdev, min/max etc.) if true.
   * @return a new mutable stat metric object
   */
  public synchronized MutableStat newStat(String name, String desc,
      String sampleName, String valueName, boolean extended) {
    checkMetricName(name);
    MutableStat ret =
        new MutableStat(name, desc, sampleName, valueName, extended);
    metricsMap.put(name, ret);
    return ret;
  }

  /**
   * Create a mutable metric with stats
   * @param name of the metric
   * @param desc metric description
   * @param sampleName of the metric (e.g., "Ops")
   * @param valueName of the metric (e.g., "Time" or "Latency")
   * @return a new mutable metric object
   */
  public MutableStat newStat(String name, String desc,
                             String sampleName, String valueName) {
    return newStat(name, desc, sampleName, valueName, false);
  }

  /**
   * Create a mutable rate metric
   * @param name of the metric
   * @return a new mutable metric object
   */
  public MutableRate newRate(String name) {
    return newRate(name, name, false);
  }

  /**
   * Create a mutable rate metric
   * @param name of the metric
   * @param description of the metric
   * @return a new mutable rate metric object
   */
  public MutableRate newRate(String name, String description) {
    return newRate(name, description, false);
  }

  /**
   * Create a mutable rate metric (for throughput measurement)
   * @param name of the metric
   * @param desc description
   * @param extended produce extended stat (stdev/min/max etc.) if true
   * @return a new mutable rate metric object
   */
  public MutableRate newRate(String name, String desc, boolean extended) {
    return newRate(name, desc, extended, true);
  }

  @InterfaceAudience.Private
  public synchronized MutableRate newRate(String name, String desc,
      boolean extended, boolean returnExisting) {
    if (returnExisting) {
      MutableMetric rate = metricsMap.get(name);
      if (rate != null) {
        if (rate instanceof MutableRate) return (MutableRate) rate;
        throw new MetricsException("Unexpected metrics type "+
            rate.getClass() +" for "+ name);
      }
    }
    checkMetricName(name);
    MutableRate ret = new MutableRate(name, desc, extended);
    metricsMap.put(name, ret);
    return ret;
  }

  synchronized void add(String name, MutableMetric metric) {
    checkMetricName(name);
    metricsMap.put(name, metric);
  }

  /**
   * Add sample to a stat metric by name.
   * @param name of the metric
   * @param value of the snapshot to add
   */
  public synchronized void add(String name, long value) {
    MutableMetric m = metricsMap.get(name);

    if (m != null) {
      if (m instanceof MutableStat) {
        ((MutableStat) m).add(value);
      } else {
        throw new MetricsException("Unsupported add(value) for metric "+ name);
      }
    } else {
      metricsMap.put(name, newRate(name)); // default is a rate metric
      add(name, value);
    }
  }

  /**
   * Set the metrics context tag
   * @param name of the context
   * @return the registry itself as a convenience
   */
  public MetricsRegistry setContext(String name) {
    return tag(MsInfo.Context, name, true);
  }

  /**
   * Add a tag to the metrics
   * @param name of the tag
   * @param description of the tag
   * @param value of the tag
   * @return the registry (for chaining additional tags)
   */
  public MetricsRegistry tag(String name, String description, String value) {
    return tag(name, description, value, false);
  }

  /**
   * Add a tag to the metrics
   * @param name of the tag
   * @param description of the tag
   * @param value of the tag
   * @param override existing tag if true
   * @return the registry (for chaining additional tags)
   */
  public MetricsRegistry tag(String name, String description, String value,
                             boolean override) {
    return tag(Interns.info(name, description), value, override);
  }

  /**
   * Add a tag to the metrics
   * @param info  metadata of the tag
   * @param value of the tag
   * @param override existing tag if true
   * @return the registry (for chaining additional tags etc.)
   */
  public synchronized MetricsRegistry tag(MetricsInfo info, String value,
                                          boolean override) {
    if (!override) checkTagName(info.name());
    tagsMap.put(info.name(), Interns.tag(info, value));
    return this;
  }

  public MetricsRegistry tag(MetricsInfo info, String value) {
    return tag(info, value, false);
  }

  Collection<MetricsTag> tags() {
    return tagsMap.values();
  }

  Collection<MutableMetric> metrics() {
    return metricsMap.values();
  }

  private void checkMetricName(String name) {
    // Check for invalid characters in metric name
    boolean foundWhitespace = false;
    for (int i = 0; i < name.length(); i++) {
      char c = name.charAt(i);
      if (Character.isWhitespace(c)) {
        foundWhitespace = true;
        break;
      }
    }
    if (foundWhitespace) {
      throw new MetricsException("Metric name '"+ name +
          "' contains illegal whitespace character");
    }
    // Check if name has already been registered
    if (metricsMap.containsKey(name)) {
      throw new MetricsException("Metric name "+ name +" already exists!");
    }
  }

  private void checkTagName(String name) {
    if (tagsMap.containsKey(name)) {
      throw new MetricsException("Tag "+ name +" already exists!");
    }
  }

  /**
   * Sample all the mutable metrics and put the snapshot in the builder
   * @param builder to contain the metrics snapshot
   * @param all get all the metrics even if the values are not changed.
   */
  public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
    for (MetricsTag tag : tags()) {
      builder.add(tag);
    }
    for (MutableMetric metric : metrics()) {
      metric.snapshot(builder, all);
    }
  }

  @Override public String toString() {
    return Objects.toStringHelper(this)
        .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
        .toString();
  }
}
12,577
29.603406
80
java
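A typical way a source is built on this registry, sketched against the public API; the class, record, and metric names below are hypothetical.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Hypothetical source: the registry owns the mutable metrics and snapshots
// them into whatever record builder the metrics system hands us.
public class MyAppMetrics implements MetricsSource {
  private final MetricsRegistry registry =
      new MetricsRegistry("MyApp").setContext("myapp");
  private final MutableCounterLong requests =
      registry.newCounter("Requests", "Total requests", 0L);
  private final MutableRate requestTime =
      registry.newRate("RequestTime", "Request latency", true);

  public void onRequest(long millis) {
    requests.incr();
    requestTime.add(millis);
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    registry.snapshot(collector.addRecord(registry.info()), all);
  }
}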
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.metrics2.lib;

import java.util.concurrent.atomic.AtomicReference;
import javax.management.ObjectName;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;

/**
 * The default metrics system singleton
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum DefaultMetricsSystem {
  INSTANCE; // the singleton

  private AtomicReference<MetricsSystem> impl =
      new AtomicReference<MetricsSystem>(new MetricsSystemImpl());
  volatile boolean miniClusterMode = false;
  transient final UniqueNames mBeanNames = new UniqueNames();
  transient final UniqueNames sourceNames = new UniqueNames();

  /**
   * Convenience method to initialize the metrics system
   * @param prefix  for the metrics system configuration
   * @return the metrics system instance
   */
  public static MetricsSystem initialize(String prefix) {
    return INSTANCE.init(prefix);
  }

  MetricsSystem init(String prefix) {
    return impl.get().init(prefix);
  }

  /**
   * @return the metrics system object
   */
  public static MetricsSystem instance() {
    return INSTANCE.getImpl();
  }

  /**
   * Shutdown the metrics system
   */
  public static void shutdown() {
    INSTANCE.shutdownInstance();
  }

  void shutdownInstance() {
    boolean last = impl.get().shutdown();
    if (last) synchronized(this) {
      mBeanNames.map.clear();
      sourceNames.map.clear();
    }
  }

  @InterfaceAudience.Private
  public static MetricsSystem setInstance(MetricsSystem ms) {
    return INSTANCE.setImpl(ms);
  }

  MetricsSystem setImpl(MetricsSystem ms) {
    return impl.getAndSet(ms);
  }

  MetricsSystem getImpl() {
    return impl.get();
  }

  @InterfaceAudience.Private
  public static void setMiniClusterMode(boolean choice) {
    INSTANCE.miniClusterMode = choice;
  }

  @InterfaceAudience.Private
  public static boolean inMiniClusterMode() {
    return INSTANCE.miniClusterMode;
  }

  @InterfaceAudience.Private
  public static ObjectName newMBeanName(String name) {
    return INSTANCE.newObjectName(name);
  }

  @InterfaceAudience.Private
  public static void removeMBeanName(ObjectName name) {
    INSTANCE.removeObjectName(name.toString());
  }

  @InterfaceAudience.Private
  public static String sourceName(String name, boolean dupOK) {
    return INSTANCE.newSourceName(name, dupOK);
  }

  synchronized ObjectName newObjectName(String name) {
    try {
      if (mBeanNames.map.containsKey(name) && !miniClusterMode) {
        throw new MetricsException(name +" already exists!");
      }
      return new ObjectName(mBeanNames.uniqueName(name));
    } catch (Exception e) {
      throw new MetricsException(e);
    }
  }

  synchronized void removeObjectName(String name) {
    mBeanNames.map.remove(name);
  }

  synchronized String newSourceName(String name, boolean dupOK) {
    if (sourceNames.map.containsKey(name)) {
      if (dupOK) {
        return name;
      } else if (!miniClusterMode) {
        throw new MetricsException("Metrics source "+ name +" already exists!");
      }
    }
    return sourceNames.uniqueName(name);
  }
}
4,113
28.177305
80
java
hadoop
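A minimal usage sketch of the singleton above; the "DataNode" prefix and the surrounding daemon lifecycle are illustrative, not prescribed by this class:

  // imports assumed: org.apache.hadoop.metrics2.MetricsSystem,
  //                  org.apache.hadoop.metrics2.lib.DefaultMetricsSystem
  MetricsSystem ms = DefaultMetricsSystem.initialize("DataNode"); // config prefix
  // ... sources and sinks register against the same shared instance ...
  DefaultMetricsSystem.shutdown(); // last shutdown also clears cached MBean/source names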
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import static com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; /** * The mutable gauge metric interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MutableGauge extends MutableMetric { private final MetricsInfo info; protected MutableGauge(MetricsInfo info) { this.info = checkNotNull(info, "metric info"); } protected MetricsInfo info() { return info; } /** * Increment the value of the metric by 1 */ public abstract void incr(); /** * Decrement the value of the metric by 1 */ public abstract void decr(); }
1,599
29.188679
75
java
hadoop
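Since incr()/decr() are the whole contract, custom gauge types drop in easily; a hedged sketch of a double-valued gauge (not part of the library), relying only on the protected hooks inherited from MutableMetric and the usual org.apache.hadoop.metrics2 imports:

  class MutableGaugeDouble extends MutableGauge {
    private final java.util.concurrent.atomic.AtomicLong bits =
        new java.util.concurrent.atomic.AtomicLong(Double.doubleToLongBits(0.0));
    MutableGaugeDouble(MetricsInfo info) { super(info); }
    double value() { return Double.longBitsToDouble(bits.get()); }
    @Override public void incr() { add(1.0); }
    @Override public void decr() { add(-1.0); }
    private void add(double delta) {
      long prev, next;
      do { // CAS loop keeps the update lock-free
        prev = bits.get();
        next = Double.doubleToLongBits(Double.longBitsToDouble(prev) + delta);
      } while (!bits.compareAndSet(prev, next));
      setChanged();
    }
    @Override public void snapshot(MetricsRecordBuilder builder, boolean all) {
      if (all || changed()) {
        builder.addGauge(info(), value());
        clearChanged();
      }
    }
  }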
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetric.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsRecordBuilder; /** * The mutable metric interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MutableMetric { private volatile boolean changed = true; /** * Get a snapshot of the metric * @param builder the metrics record builder * @param all if true, snapshot unchanged metrics as well */ public abstract void snapshot(MetricsRecordBuilder builder, boolean all); /** * Get a snapshot of the metric if changed * @param builder the metrics record builder */ public void snapshot(MetricsRecordBuilder builder) { snapshot(builder, false); } /** * Set the changed flag in mutable operations */ protected void setChanged() { changed = true; } /** * Clear the changed flag in the snapshot operations */ protected void clearChanged() { changed = false; } /** * @return true if the metric changed since the last snapshot */ public boolean changed() { return changed; } }
1,984
30.507937
75
java
hadoop
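Because snapshot(builder, all) is the only abstract method, one-off metrics can be defined inline; a sketch with a made-up metric name (imports from org.apache.hadoop.metrics2 and org.apache.hadoop.metrics2.lib assumed):

  MutableMetric uptime = new MutableMetric() {
    final long startMillis = System.currentTimeMillis();
    @Override public void snapshot(MetricsRecordBuilder builder, boolean all) {
      // Value is derived on every call, so the changed flag is left alone.
      builder.addGauge(Interns.info("UptimeMillis", "Process uptime in milliseconds"),
          System.currentTimeMillis() - startMillis);
    }
  };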
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsException; /** * Experimental interface to extend metrics dynamically */ @InterfaceAudience.Private public enum DefaultMetricsFactory { INSTANCE; // the singleton private MutableMetricsFactory mmfImpl; public static MutableMetricsFactory getAnnotatedMetricsFactory() { return INSTANCE.getInstance(MutableMetricsFactory.class); } @SuppressWarnings("unchecked") public synchronized <T> T getInstance(Class<T> cls) { if (cls == MutableMetricsFactory.class) { if (mmfImpl == null) { mmfImpl = new MutableMetricsFactory(); } return (T) mmfImpl; } throw new MetricsException("Unknown metrics factory type: "+ cls.getName()); } public synchronized void setInstance(MutableMetricsFactory factory) { mmfImpl = factory; } }
1,733
32.346154
80
java
hadoop
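The singleton also allows the factory to be replaced, which is how the annotation machinery can be taught custom metric types; a hedged sketch:

  DefaultMetricsFactory.INSTANCE.setInstance(new MutableMetricsFactory() {
    @Override
    protected MutableMetric newForField(java.lang.reflect.Field field,
        org.apache.hadoop.metrics2.annotation.Metric annotation) {
      // Return a custom MutableMetric for field types the base factory does
      // not know, or null to fall back to the built-in handling.
      return null;
    }
  });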
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import java.lang.annotation.Annotation; import java.lang.reflect.Field; import java.lang.reflect.Method; import static com.google.common.base.Preconditions.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.util.ReflectionUtils; /** * Helper class to build metrics source object from annotations */ @InterfaceAudience.Private public class MetricsSourceBuilder { private static final Log LOG = LogFactory.getLog(MetricsSourceBuilder.class); private final Object source; private final MutableMetricsFactory factory; private final MetricsRegistry registry; private MetricsInfo info; private boolean hasAtMetric = false; private boolean hasRegistry = false; MetricsSourceBuilder(Object source, MutableMetricsFactory factory) { this.source = checkNotNull(source, "source"); this.factory = checkNotNull(factory, "mutable metrics factory"); Class<?> cls = source.getClass(); registry = initRegistry(source); for (Field field : ReflectionUtils.getDeclaredFieldsIncludingInherited(cls)) { add(source, field); } for (Method method : ReflectionUtils.getDeclaredMethodsIncludingInherited(cls)) { add(source, method); } } public MetricsSource build() { if (source instanceof MetricsSource) { if (hasAtMetric && !hasRegistry) { throw new MetricsException("Hybrid metrics: registry required."); } return (MetricsSource) source; } else if (!hasAtMetric) { throw new MetricsException("No valid @Metric annotation found."); } return new MetricsSource() { @Override public void getMetrics(MetricsCollector builder, boolean all) { registry.snapshot(builder.addRecord(registry.info()), all); } }; } public MetricsInfo info() { return info; } private MetricsRegistry initRegistry(Object source) { Class<?> cls = source.getClass(); MetricsRegistry r = null; // Get the registry if it already exists. 
for (Field field : ReflectionUtils.getDeclaredFieldsIncludingInherited(cls)) { if (field.getType() != MetricsRegistry.class) continue; try { field.setAccessible(true); r = (MetricsRegistry) field.get(source); hasRegistry = r != null; break; } catch (Exception e) { LOG.warn("Error accessing field "+ field, e); continue; } } // Create a new registry according to annotation for (Annotation annotation : cls.getAnnotations()) { if (annotation instanceof Metrics) { Metrics ma = (Metrics) annotation; info = factory.getInfo(cls, ma); if (r == null) { r = new MetricsRegistry(info); } r.setContext(ma.context()); } } if (r == null) return new MetricsRegistry(cls.getSimpleName()); return r; } private void add(Object source, Field field) { for (Annotation annotation : field.getAnnotations()) { if (!(annotation instanceof Metric)) continue; try { // skip fields already set field.setAccessible(true); if (field.get(source) != null) continue; } catch (Exception e) { LOG.warn("Error accessing field "+ field +" annotated with "+ annotation, e); continue; } MutableMetric mutable = factory.newForField(field, (Metric) annotation, registry); if (mutable != null) { try { field.set(source, mutable); hasAtMetric = true; } catch (Exception e) { throw new MetricsException("Error setting field "+ field + " annotated with "+ annotation, e); } } } } private void add(Object source, Method method) { for (Annotation annotation : method.getAnnotations()) { if (!(annotation instanceof Metric)) continue; factory.newForMethod(source, method, (Metric) annotation, registry); hasAtMetric = true; } } }
5,289
32.910256
85
java
hadoop
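A sketch of the kind of class this builder consumes (hypothetical names; outside this package such objects are usually passed to MetricsAnnotations.makeSource, shown below, since this constructor is package-private):

  @Metrics(name="MyAppMetrics", about="Example metrics", context="myapp")
  class MyAppMetrics {
    @Metric("Requests served") MutableCounterLong requests; // injected by the builder
    @Metric("Current queue depth")
    public int getQueueDepth() { return 0; }                // wrapped in a MethodMetric
  }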
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import static org.apache.hadoop.metrics2.lib.Interns.info; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.Quantile; import org.apache.hadoop.metrics2.util.SampleQuantiles; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Watches a stream of long values, maintaining online estimates of specific * quantiles with provably low error bounds. This is particularly useful for * accurate high-percentile (e.g. 95th, 99th) latency metrics. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableQuantiles extends MutableMetric { @VisibleForTesting public static final Quantile[] quantiles = { new Quantile(0.50, 0.050), new Quantile(0.75, 0.025), new Quantile(0.90, 0.010), new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) }; private final MetricsInfo numInfo; private final MetricsInfo[] quantileInfos; private final int interval; private SampleQuantiles estimator; private long previousCount = 0; @VisibleForTesting protected Map<Quantile, Long> previousSnapshot = null; private static final ScheduledExecutorService scheduler = Executors .newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("MutableQuantiles-%d").build()); /** * Instantiates a new {@link MutableQuantiles} for a metric that rolls itself * over on the specified time interval. 
* * @param name * of the metric * @param description * long-form textual description of the metric * @param sampleName * type of items in the stream (e.g., "Ops") * @param valueName * type of the values * @param interval * rollover interval (in seconds) of the estimator */ public MutableQuantiles(String name, String description, String sampleName, String valueName, int interval) { String ucName = StringUtils.capitalize(name); String usName = StringUtils.capitalize(sampleName); String uvName = StringUtils.capitalize(valueName); String desc = StringUtils.uncapitalize(description); String lsName = StringUtils.uncapitalize(sampleName); String lvName = StringUtils.uncapitalize(valueName); numInfo = info(ucName + "Num" + usName, String.format( "Number of %s for %s with %ds interval", lsName, desc, interval)); // Construct the MetricsInfos for the quantiles, converting to percentiles quantileInfos = new MetricsInfo[quantiles.length]; String nameTemplate = ucName + "%dthPercentile" + uvName; String descTemplate = "%d percentile " + lvName + " with " + interval + " second interval for " + desc; for (int i = 0; i < quantiles.length; i++) { int percentile = (int) (100 * quantiles[i].quantile); quantileInfos[i] = info(String.format(nameTemplate, percentile), String.format(descTemplate, percentile)); } estimator = new SampleQuantiles(quantiles); this.interval = interval; scheduler.scheduleAtFixedRate(new RolloverSample(this), interval, interval, TimeUnit.SECONDS); } @Override public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) { if (all || changed()) { builder.addGauge(numInfo, previousCount); for (int i = 0; i < quantiles.length; i++) { long newValue = 0; // If snapshot is null, we failed to update since the window was empty if (previousSnapshot != null) { newValue = previousSnapshot.get(quantiles[i]); } builder.addGauge(quantileInfos[i], newValue); } if (changed()) { clearChanged(); } } } public synchronized void add(long value) { estimator.insert(value); } public int getInterval() { return interval; } /** * Runnable used to periodically roll over the internal * {@link SampleQuantiles} every interval. */ private static class RolloverSample implements Runnable { MutableQuantiles parent; public RolloverSample(MutableQuantiles parent) { this.parent = parent; } @Override public void run() { synchronized (parent) { parent.previousCount = parent.estimator.getCount(); parent.previousSnapshot = parent.estimator.snapshot(); parent.estimator.clear(); } parent.setChanged(); } } }
5,651
34.10559
80
java
hadoop
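A usage sketch; the metric name and the 60-second rollover are illustrative:

  MutableQuantiles rpcLatency = new MutableQuantiles("rpcLatency",
      "RPC processing latency", "Ops", "Latency", 60);
  rpcLatency.add(42); // one observed latency sample (units chosen by the caller)
  // After each 60s window the previous window's estimates are published as
  // gauges such as RpcLatency90thPercentileLatency, plus RpcLatencyNumOps.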
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsAnnotations.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsSource; /** * Metrics annotation helpers. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class MetricsAnnotations { /** * Make an metrics source from an annotated object. * @param source the annotated object. * @return a metrics source */ public static MetricsSource makeSource(Object source) { return new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory()).build(); } public static MetricsSourceBuilder newSourceBuilder(Object source) { return new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory()); } }
1,643
34.73913
75
java
hadoop
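Putting the annotation pieces together, assuming the hypothetical MyAppMetrics class sketched earlier:

  MetricsSource src = MetricsAnnotations.makeSource(new MyAppMetrics());
  DefaultMetricsSystem.instance().register("MyAppMetrics", "Example metrics", src);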
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import java.util.concurrent.atomic.AtomicInteger; /** * A mutable int gauge */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableGaugeInt extends MutableGauge { private AtomicInteger value = new AtomicInteger(); MutableGaugeInt(MetricsInfo info, int initValue) { super(info); this.value.set(initValue); } public int value() { return value.get(); } @Override public void incr() { incr(1); } /** * Increment by delta * @param delta of the increment */ public void incr(int delta) { value.addAndGet(delta); setChanged(); } @Override public void decr() { decr(1); } /** * decrement by delta * @param delta of the decrement */ public void decr(int delta) { value.addAndGet(-delta); setChanged(); } /** * Set the value of the metric * @param value to set */ public void set(int value) { this.value.set(value); setChanged(); } @Override public void snapshot(MetricsRecordBuilder builder, boolean all) { if (all || changed()) { builder.addGauge(info(), value()); clearChanged(); } } }
2,223
23.43956
75
java
hadoop
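Gauges are normally created through a MetricsRegistry rather than the package-private constructor; a sketch with made-up names:

  MetricsRegistry registry = new MetricsRegistry("MyAppMetrics");
  MutableGaugeInt openFiles = registry.newGauge("OpenFiles", "Currently open files", 0);
  openFiles.incr();  // a file was opened
  openFiles.decr();  // a file was closed
  openFiles.set(0);  // hard reset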
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import java.lang.reflect.Method; import java.util.Set; import static com.google.common.base.Preconditions.*; import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsRecordBuilder; /** * Helper class to manage a group of mutable rate metrics */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableRates extends MutableMetric { static final Log LOG = LogFactory.getLog(MutableRates.class); private final MetricsRegistry registry; private final Set<Class<?>> protocolCache = Sets.newHashSet(); MutableRates(MetricsRegistry registry) { this.registry = checkNotNull(registry, "metrics registry"); } /** * Initialize the registry with all the methods in a protocol * so they all show up in the first snapshot. * Convenient for JMX implementations. * @param protocol the protocol class */ public void init(Class<?> protocol) { if (protocolCache.contains(protocol)) return; protocolCache.add(protocol); for (Method method : protocol.getDeclaredMethods()) { String name = method.getName(); LOG.debug(name); try { registry.newRate(name, name, false, true); } catch (Exception e) { LOG.error("Error creating rate metrics for "+ method.getName(), e); } } } /** * Add a rate sample for a rate metric * @param name of the rate metric * @param elapsed time */ public void add(String name, long elapsed) { registry.add(name, elapsed); } @Override public void snapshot(MetricsRecordBuilder rb, boolean all) { registry.snapshot(rb, all); } }
2,655
31.790123
75
java
hadoop
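Since the constructor is package-private, MutableRates is normally declared as an annotated field and injected; a hedged sketch, loosely modeled on RPC metrics (protocol and method names are hypothetical):

  class RpcDetailedMetrics {
    @Metric MutableRates rates; // injected by the annotation machinery
    void init(Class<?> protocol) { rates.init(protocol); }
    void addProcessingTime(String method, long elapsedMs) {
      rates.add(method, elapsedMs); // e.g. rates.add("getBlockLocations", 12)
    }
  }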
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import com.google.common.base.Objects; import static com.google.common.base.Preconditions.*; import org.apache.hadoop.metrics2.MetricsInfo; /** * Making implementing metric info a little easier */ class MetricsInfoImpl implements MetricsInfo { private final String name, description; MetricsInfoImpl(String name, String description) { this.name = checkNotNull(name, "name"); this.description = checkNotNull(description, "description"); } @Override public String name() { return name; } @Override public String description() { return description; } @Override public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } @Override public int hashCode() { return Objects.hashCode(name, description); } @Override public String toString() { return Objects.toStringHelper(this) .add("name", name).add("description", description) .toString(); } }
1,960
30.126984
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import java.lang.reflect.Method; import static com.google.common.base.Preconditions.*; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; import static org.apache.hadoop.metrics2.util.Contracts.*; /** * Metric generated from a method, mostly used by annotation */ class MethodMetric extends MutableMetric { private static final Log LOG = LogFactory.getLog(MethodMetric.class); private final Object obj; private final Method method; private final MetricsInfo info; private final MutableMetric impl; MethodMetric(Object obj, Method method, MetricsInfo info, Metric.Type type) { this.obj = checkNotNull(obj, "object"); this.method = checkArg(method, method.getParameterTypes().length == 0, "Metric method should have no arguments"); this.method.setAccessible(true); this.info = checkNotNull(info, "info"); impl = newImpl(checkNotNull(type, "metric type")); } private MutableMetric newImpl(Metric.Type metricType) { Class<?> resType = method.getReturnType(); switch (metricType) { case COUNTER: return newCounter(resType); case GAUGE: return newGauge(resType); case DEFAULT: return resType == String.class ? 
newTag(resType) : newGauge(resType); case TAG: return newTag(resType); default: checkArg(metricType, false, "unsupported metric type"); return null; } } MutableMetric newCounter(final Class<?> type) { if (isInt(type) || isLong(type)) { return new MutableMetric() { @Override public void snapshot(MetricsRecordBuilder rb, boolean all) { try { Object ret = method.invoke(obj, (Object[])null); if (isInt(type)) rb.addCounter(info, ((Integer) ret).intValue()); else rb.addCounter(info, ((Long) ret).longValue()); } catch (Exception ex) { LOG.error("Error invoking method "+ method.getName(), ex); } } }; } throw new MetricsException("Unsupported counter type: "+ type.getName()); } static boolean isInt(Class<?> type) { boolean ret = type == Integer.TYPE || type == Integer.class; return ret; } static boolean isLong(Class<?> type) { return type == Long.TYPE || type == Long.class; } static boolean isFloat(Class<?> type) { return type == Float.TYPE || type == Float.class; } static boolean isDouble(Class<?> type) { return type == Double.TYPE || type == Double.class; } MutableMetric newGauge(final Class<?> t) { if (isInt(t) || isLong(t) || isFloat(t) || isDouble(t)) { return new MutableMetric() { @Override public void snapshot(MetricsRecordBuilder rb, boolean all) { try { Object ret = method.invoke(obj, (Object[]) null); if (isInt(t)) rb.addGauge(info, ((Integer) ret).intValue()); else if (isLong(t)) rb.addGauge(info, ((Long) ret).longValue()); else if (isFloat(t)) rb.addGauge(info, ((Float) ret).floatValue()); else rb.addGauge(info, ((Double) ret).doubleValue()); } catch (Exception ex) { LOG.error("Error invoking method "+ method.getName(), ex); } } }; } throw new MetricsException("Unsupported gauge type: "+ t.getName()); } MutableMetric newTag(Class<?> resType) { if (resType == String.class) { return new MutableMetric() { @Override public void snapshot(MetricsRecordBuilder rb, boolean all) { try { Object ret = method.invoke(obj, (Object[]) null); rb.tag(info, (String) ret); } catch (Exception ex) { LOG.error("Error invoking method "+ method.getName(), ex); } } }; } throw new MetricsException("Unsupported tag type: "+ resType.getName()); } @Override public void snapshot(MetricsRecordBuilder builder, boolean all) { impl.snapshot(builder, all); } static MetricsInfo metricInfo(Method method) { return Interns.info(nameFrom(method), "Metric for "+ method.getName()); } static String nameFrom(Method method) { String methodName = method.getName(); if (methodName.startsWith("get")) { return StringUtils.capitalize(methodName.substring(3)); } return StringUtils.capitalize(methodName); } }
5,504
33.622642
79
java
hadoop
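A sketch of the method-level annotations this class backs (class and getter names are made up):

  @Metrics(context="myapp")
  class QueueStats {
    private final java.util.Queue<Runnable> queue =
        new java.util.concurrent.ConcurrentLinkedQueue<Runnable>();
    @Metric("Current queue length")             // DEFAULT type + int return => gauge
    public int getQueueLength() { return queue.size(); }
    @Metric(value="Tasks completed", type=Metric.Type.COUNTER)
    public long getTasksCompleted() { return 0L; } // should be monotonic in real use
  }
  // nameFrom() strips the "get" prefix, so these publish as QueueLength
  // and TasksCompleted.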
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import java.util.concurrent.atomic.AtomicInteger; /** * A mutable int counter for implementing metrics sources */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableCounterInt extends MutableCounter { private AtomicInteger value = new AtomicInteger(); MutableCounterInt(MetricsInfo info, int initValue) { super(info); this.value.set(initValue); } @Override public void incr() { incr(1); } /** * Increment the value by a delta * @param delta of the increment */ public synchronized void incr(int delta) { value.addAndGet(delta); setChanged(); } public int value() { return value.get(); } @Override public void snapshot(MetricsRecordBuilder builder, boolean all) { if (all || changed()) { builder.addCounter(info(), value()); clearChanged(); } } }
1,932
27.426471
75
java
hadoop
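Counters are likewise created through a registry; a sketch with made-up names:

  MetricsRegistry registry = new MetricsRegistry("MyAppMetrics");
  MutableCounterInt failedLogins =
      registry.newCounter("FailedLogins", "Failed login attempts", 0);
  failedLogins.incr();   // by one
  failedLogins.incr(3);  // or by a delta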
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import static com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; /** * The mutable counter (monotonically increasing) metric interface */ @InterfaceAudience.Public @InterfaceStability.Evolving public abstract class MutableCounter extends MutableMetric { private final MetricsInfo info; protected MutableCounter(MetricsInfo info) { this.info = checkNotNull(info, "counter info"); } protected MetricsInfo info() { return info; } /** * Increment the metric value by 1. */ public abstract void incr(); }
1,540
31.104167
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import java.lang.reflect.Field; import java.lang.reflect.Method; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsException; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; @InterfaceAudience.Private @InterfaceStability.Evolving public class MutableMetricsFactory { private static final Log LOG = LogFactory.getLog(MutableMetricsFactory.class); MutableMetric newForField(Field field, Metric annotation, MetricsRegistry registry) { if (LOG.isDebugEnabled()) { LOG.debug("field "+ field +" with annotation "+ annotation); } MetricsInfo info = getInfo(annotation, field); MutableMetric metric = newForField(field, annotation); if (metric != null) { registry.add(info.name(), metric); return metric; } final Class<?> cls = field.getType(); if (cls == MutableCounterInt.class) { return registry.newCounter(info, 0); } if (cls == MutableCounterLong.class) { return registry.newCounter(info, 0L); } if (cls == MutableGaugeInt.class) { return registry.newGauge(info, 0); } if (cls == MutableGaugeLong.class) { return registry.newGauge(info, 0L); } if (cls == MutableRate.class) { return registry.newRate(info.name(), info.description(), annotation.always()); } if (cls == MutableRates.class) { return new MutableRates(registry); } if (cls == MutableStat.class) { return registry.newStat(info.name(), info.description(), annotation.sampleName(), annotation.valueName(), annotation.always()); } throw new MetricsException("Unsupported metric field "+ field.getName() + " of type "+ field.getType().getName()); } MutableMetric newForMethod(Object source, Method method, Metric annotation, MetricsRegistry registry) { if (LOG.isDebugEnabled()) { LOG.debug("method "+ method +" with annotation "+ annotation); } MetricsInfo info = getInfo(annotation, method); MutableMetric metric = newForMethod(source, method, annotation); metric = metric != null ? 
metric : new MethodMetric(source, method, info, annotation.type()); registry.add(info.name(), metric); return metric; } /** * Override to handle custom mutable metrics for fields * @param field of the metric * @param annotation of the field * @return a new metric object or null */ protected MutableMetric newForField(Field field, Metric annotation) { return null; } /** * Override to handle custom mutable metrics for methods * @param source the metrics source object * @param method to return the metric * @param annotation of the method * @return a new metric object or null */ protected MutableMetric newForMethod(Object source, Method method, Metric annotation) { return null; } protected MetricsInfo getInfo(Metric annotation, Field field) { return getInfo(annotation, getName(field)); } protected String getName(Field field) { return StringUtils.capitalize(field.getName()); } protected MetricsInfo getInfo(Metric annotation, Method method) { return getInfo(annotation, getName(method)); } protected MetricsInfo getInfo(Class<?> cls, Metrics annotation) { String name = annotation.name(); String about = annotation.about(); String name2 = name.isEmpty() ? cls.getSimpleName() : name; return Interns.info(name2, about.isEmpty() ? name2 : about); } protected String getName(Method method) { String methodName = method.getName(); if (methodName.startsWith("get")) { return StringUtils.capitalize(methodName.substring(3)); } return StringUtils.capitalize(methodName); } protected MetricsInfo getInfo(Metric annotation, String defaultName) { String[] value = annotation.value(); if (value.length == 2) { return Interns.info(value[0], value[1]); } if (value.length == 1) { return Interns.info(defaultName, value[0]); } return Interns.info(defaultName, defaultName); } }
5,397
34.513158
80
java
hadoop
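The getInfo(Metric, String) overload at the end gives @Metric its shorthand forms; sketched on hypothetical fields:

  @Metric({"ActiveSessions", "Number of active sessions"})
  MutableGaugeInt sessions;     // explicit {name, description}
  @Metric("Number of flush operations")
  MutableCounterLong flushes;   // description only; name "Flushes" from the field
  @Metric MutableRate syncTime; // no value; name and description both "SyncTime"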
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.SampleStat; import static org.apache.hadoop.metrics2.lib.Interns.*; /** * A mutable metric with stats. * * Useful for keeping throughput/latency stats. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableStat extends MutableMetric { private final MetricsInfo numInfo; private final MetricsInfo avgInfo; private final MetricsInfo stdevInfo; private final MetricsInfo iMinInfo; private final MetricsInfo iMaxInfo; private final MetricsInfo minInfo; private final MetricsInfo maxInfo; private final SampleStat intervalStat = new SampleStat(); private final SampleStat prevStat = new SampleStat(); private final SampleStat.MinMax minMax = new SampleStat.MinMax(); private long numSamples = 0; private boolean extended = false; /** * Construct a sample statistics metric * @param name of the metric * @param description of the metric * @param sampleName of the metric (e.g. "Ops") * @param valueName of the metric (e.g. "Time", "Latency") * @param extended create extended stats (stdev, min/max etc.) by default. */ public MutableStat(String name, String description, String sampleName, String valueName, boolean extended) { String ucName = StringUtils.capitalize(name); String usName = StringUtils.capitalize(sampleName); String uvName = StringUtils.capitalize(valueName); String desc = StringUtils.uncapitalize(description); String lsName = StringUtils.uncapitalize(sampleName); String lvName = StringUtils.uncapitalize(valueName); numInfo = info(ucName +"Num"+ usName, "Number of "+ lsName +" for "+ desc); avgInfo = info(ucName +"Avg"+ uvName, "Average "+ lvName +" for "+ desc); stdevInfo = info(ucName +"Stdev"+ uvName, "Standard deviation of "+ lvName +" for "+ desc); iMinInfo = info(ucName +"IMin"+ uvName, "Interval min "+ lvName +" for "+ desc); iMaxInfo = info(ucName + "IMax"+ uvName, "Interval max "+ lvName +" for "+ desc); minInfo = info(ucName +"Min"+ uvName, "Min "+ lvName +" for "+ desc); maxInfo = info(ucName +"Max"+ uvName, "Max "+ lvName +" for "+ desc); this.extended = extended; } /** * Construct a snapshot stat metric with extended stat off by default * @param name of the metric * @param description of the metric * @param sampleName of the metric (e.g. "Ops") * @param valueName of the metric (e.g. 
"Time", "Latency") */ public MutableStat(String name, String description, String sampleName, String valueName) { this(name, description, sampleName, valueName, false); } /** * Set whether to display the extended stats (stdev, min/max etc.) or not * @param extended enable/disable displaying extended stats */ public synchronized void setExtended(boolean extended) { this.extended = extended; } /** * Add a number of samples and their sum to the running stat * @param numSamples number of samples * @param sum of the samples */ public synchronized void add(long numSamples, long sum) { intervalStat.add(numSamples, sum); setChanged(); } /** * Add a snapshot to the metric * @param value of the metric */ public synchronized void add(long value) { intervalStat.add(value); minMax.add(value); setChanged(); } public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) { if (all || changed()) { numSamples += intervalStat.numSamples(); builder.addCounter(numInfo, numSamples) .addGauge(avgInfo, lastStat().mean()); if (extended) { builder.addGauge(stdevInfo, lastStat().stddev()) .addGauge(iMinInfo, lastStat().min()) .addGauge(iMaxInfo, lastStat().max()) .addGauge(minInfo, minMax.min()) .addGauge(maxInfo, minMax.max()); } if (changed()) { if (numSamples > 0) { intervalStat.copyTo(prevStat); intervalStat.reset(); } clearChanged(); } } } private SampleStat lastStat() { return changed() ? intervalStat : prevStat; } /** * Reset the all time min max of the metric */ public void resetMinMax() { minMax.reset(); } @Override public String toString() { return lastStat().toString(); } }
5,560
34.196203
80
java
hadoop
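A usage sketch via MetricsRegistry.newStat, which the annotation factory above also calls (names are illustrative):

  MetricsRegistry registry = new MetricsRegistry("MyAppMetrics");
  MutableStat flushTime = registry.newStat("Flush",
      "time spent flushing", "Ops", "Time", true); // extended stats on
  flushTime.add(37); // one 37 ms sample (units are the caller's choice)
  // Publishes FlushNumOps and FlushAvgTime, and with extended=true also
  // FlushStdevTime, FlushIMinTime, FlushIMaxTime, FlushMinTime, FlushMaxTime.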
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRate.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A convenient mutable metric for throughput measurement */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableRate extends MutableStat { MutableRate(String name, String description, boolean extended) { super(name, description, "Ops", "Time", extended); } }
1,266
35.2
75
java
hadoop
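So a rate named "SendHeartbeat" publishes SendHeartbeatNumOps and SendHeartbeatAvgTime; a sketch, reusing a registry as in the earlier snippets:

  MutableRate heartbeats = registry.newRate("SendHeartbeat");
  heartbeats.add(8); // 8 ms for one heartbeat (illustrative)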
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import java.util.concurrent.atomic.AtomicLong; /** * A mutable long gauge */ @InterfaceAudience.Public @InterfaceStability.Evolving public class MutableGaugeLong extends MutableGauge { private AtomicLong value = new AtomicLong(); MutableGaugeLong(MetricsInfo info, long initValue) { super(info); this.value.set(initValue); } public long value() { return value.get(); } @Override public void incr() { incr(1); } /** * Increment by delta * @param delta of the increment */ public void incr(long delta) { value.addAndGet(delta); setChanged(); } @Override public void decr() { decr(1); } /** * decrement by delta * @param delta of the decrement */ public void decr(long delta) { value.addAndGet(-delta); setChanged(); } /** * Set the value of the metric * @param value to set */ public void set(long value) { this.value.set(value); setChanged(); } public void snapshot(MetricsRecordBuilder builder, boolean all) { if (all || changed()) { builder.addGauge(info(), value()); clearChanged(); } } }
2,211
23.307692
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.source; import com.google.common.base.Objects; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; /** * JVM and logging related metrics info instances */ @InterfaceAudience.Private public enum JvmMetricsInfo implements MetricsInfo { JvmMetrics("JVM related metrics etc."), // record info // metrics MemNonHeapUsedM("Non-heap memory used in MB"), MemNonHeapCommittedM("Non-heap memory committed in MB"), MemNonHeapMaxM("Non-heap memory max in MB"), MemHeapUsedM("Heap memory used in MB"), MemHeapCommittedM("Heap memory committed in MB"), MemHeapMaxM("Heap memory max in MB"), MemMaxM("Max memory size in MB"), GcCount("Total GC count"), GcTimeMillis("Total GC time in milliseconds"), ThreadsNew("Number of new threads"), ThreadsRunnable("Number of runnable threads"), ThreadsBlocked("Number of blocked threads"), ThreadsWaiting("Number of waiting threads"), ThreadsTimedWaiting("Number of timed waiting threads"), ThreadsTerminated("Number of terminated threads"), LogFatal("Total number of fatal log events"), LogError("Total number of error log events"), LogWarn("Total number of warning log events"), LogInfo("Total number of info log events"), GcNumWarnThresholdExceeded("Number of times that the GC warn threshold is exceeded"), GcNumInfoThresholdExceeded("Number of times that the GC info threshold is exceeded"), GcTotalExtraSleepTime("Total GC extra sleep time in milliseconds"); private final String desc; JvmMetricsInfo(String desc) { this.desc = desc; } @Override public String description() { return desc; } @Override public String toString() { return Objects.toStringHelper(this) .add("name", name()).add("description", desc) .toString(); } }
2,634
37.75
87
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.source; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.lang.management.MemoryUsage; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.lang.management.GarbageCollectorMXBean; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.log.metrics.EventCounter; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.Interns; import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*; import static org.apache.hadoop.metrics2.impl.MsInfo.*; import org.apache.hadoop.util.JvmPauseMonitor; /** * JVM and logging related metrics. * Mostly used by various servers as a part of the metrics they export. 
*/ @InterfaceAudience.Private public class JvmMetrics implements MetricsSource { enum Singleton { INSTANCE; JvmMetrics impl; synchronized JvmMetrics init(String processName, String sessionId) { if (impl == null) { impl = create(processName, sessionId, DefaultMetricsSystem.instance()); } return impl; } } static final float M = 1024*1024; final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); final List<GarbageCollectorMXBean> gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); final String processName, sessionId; private JvmPauseMonitor pauseMonitor = null; final ConcurrentHashMap<String, MetricsInfo[]> gcInfoCache = new ConcurrentHashMap<String, MetricsInfo[]>(); JvmMetrics(String processName, String sessionId) { this.processName = processName; this.sessionId = sessionId; } public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) { this.pauseMonitor = pauseMonitor; } public static JvmMetrics create(String processName, String sessionId, MetricsSystem ms) { return ms.register(JvmMetrics.name(), JvmMetrics.description(), new JvmMetrics(processName, sessionId)); } public static JvmMetrics initSingleton(String processName, String sessionId) { return Singleton.INSTANCE.init(processName, sessionId); } @Override public void getMetrics(MetricsCollector collector, boolean all) { MetricsRecordBuilder rb = collector.addRecord(JvmMetrics) .setContext("jvm").tag(ProcessName, processName) .tag(SessionId, sessionId); getMemoryUsage(rb); getGcUsage(rb); getThreadUsage(rb); getEventCounters(rb); } private void getMemoryUsage(MetricsRecordBuilder rb) { MemoryUsage memNonHeap = memoryMXBean.getNonHeapMemoryUsage(); MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage(); Runtime runtime = Runtime.getRuntime(); rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M) .addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M) .addGauge(MemNonHeapMaxM, memNonHeap.getMax() / M) .addGauge(MemHeapUsedM, memHeap.getUsed() / M) .addGauge(MemHeapCommittedM, memHeap.getCommitted() / M) .addGauge(MemHeapMaxM, memHeap.getMax() / M) .addGauge(MemMaxM, runtime.maxMemory() / M); } private void getGcUsage(MetricsRecordBuilder rb) { long count = 0; long timeMillis = 0; for (GarbageCollectorMXBean gcBean : gcBeans) { long c = gcBean.getCollectionCount(); long t = gcBean.getCollectionTime(); MetricsInfo[] gcInfo = getGcInfo(gcBean.getName()); rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t); count += c; timeMillis += t; } rb.addCounter(GcCount, count) .addCounter(GcTimeMillis, timeMillis); if (pauseMonitor != null) { rb.addCounter(GcNumWarnThresholdExceeded, pauseMonitor.getNumGcWarnThreadholdExceeded()); rb.addCounter(GcNumInfoThresholdExceeded, pauseMonitor.getNumGcInfoThresholdExceeded()); rb.addCounter(GcTotalExtraSleepTime, pauseMonitor.getTotalGcExtraSleepTime()); } } private MetricsInfo[] getGcInfo(String gcName) { MetricsInfo[] gcInfo = gcInfoCache.get(gcName); if (gcInfo == null) { gcInfo = new MetricsInfo[2]; gcInfo[0] = Interns.info("GcCount" + gcName, "GC Count for " + gcName); gcInfo[1] = Interns .info("GcTimeMillis" + gcName, "GC Time for " + gcName); MetricsInfo[] previousGcInfo = gcInfoCache.putIfAbsent(gcName, gcInfo); if (previousGcInfo != null) { return previousGcInfo; } } return gcInfo; } private void getThreadUsage(MetricsRecordBuilder rb) { int threadsNew = 0; int threadsRunnable = 0; int threadsBlocked = 0; int threadsWaiting = 0; int threadsTimedWaiting = 0; int threadsTerminated = 0; long 
threadIds[] = threadMXBean.getAllThreadIds(); for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) { if (threadInfo == null) continue; // race protection switch (threadInfo.getThreadState()) { case NEW: threadsNew++; break; case RUNNABLE: threadsRunnable++; break; case BLOCKED: threadsBlocked++; break; case WAITING: threadsWaiting++; break; case TIMED_WAITING: threadsTimedWaiting++; break; case TERMINATED: threadsTerminated++; break; } } rb.addGauge(ThreadsNew, threadsNew) .addGauge(ThreadsRunnable, threadsRunnable) .addGauge(ThreadsBlocked, threadsBlocked) .addGauge(ThreadsWaiting, threadsWaiting) .addGauge(ThreadsTimedWaiting, threadsTimedWaiting) .addGauge(ThreadsTerminated, threadsTerminated); } private void getEventCounters(MetricsRecordBuilder rb) { rb.addCounter(LogFatal, EventCounter.getFatal()) .addCounter(LogError, EventCounter.getError()) .addCounter(LogWarn, EventCounter.getWarn()) .addCounter(LogInfo, EventCounter.getInfo()); } }
7,163
36.904762
80
java
hadoop
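A creation sketch; the process name and session id are illustrative:

  MetricsSystem ms = DefaultMetricsSystem.initialize("NameNode");
  JvmMetrics jvm = JvmMetrics.create("NameNode", "session-1", ms);
  // Optionally attach a pause monitor so the Gc*ThresholdExceeded and
  // GcTotalExtraSleepTime counters are populated:
  // jvm.setPauseMonitor(pauseMonitor);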
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Annotation interfaces for metrics instrumentation */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.metrics2.annotation; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,090
37.964286
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.annotation; import java.lang.annotation.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Annotation interface for a single metric */ @InterfaceAudience.Public @InterfaceStability.Evolving @Documented @Target({ElementType.FIELD, ElementType.METHOD}) @Retention(RetentionPolicy.RUNTIME) public @interface Metric { public enum Type { DEFAULT, COUNTER, GAUGE, TAG } /** * Shorthand for optional name and description * @return {description} or {name, description} */ String[] value() default {}; /** * @return optional description of the metric */ String about() default ""; /** * @return optional sample name for MutableStat/Rate/Rates */ String sampleName() default "Ops"; /** * @return optional value name for MutableStat/Rate/Rates */ String valueName() default "Time"; /** * @return true to create a metric snapshot even if unchanged. */ boolean always() default false; /** * @return optional type (counter|gauge) of the metric */ Type type() default Type.DEFAULT; }
1,972
26.788732
75
java
hadoop
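A hedged sketch of applying the @Metric annotation above; ExampleSource and its members are hypothetical, and the mutable metric types are assumed from org.apache.hadoop.metrics2.lib.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;

public class ExampleSource {  // hypothetical instrumented class
  // A single-element value() is treated as the description; the metric
  // name defaults to the capitalized field name.
  @Metric("Requests served") MutableCounterLong requests;

  // An explicit type overrides the DEFAULT inference.
  @Metric(about = "Current queue depth", type = Metric.Type.GAUGE)
  MutableGaugeInt queueDepth;

  // Annotated getters are also legal (Target includes METHOD) and are
  // sampled at each snapshot.
  @Metric("Cache entries") public int getCacheSize() { return 42; }
}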
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.metrics2.annotation; import java.lang.annotation.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Annotation interface for a group of metrics */ @InterfaceAudience.Public @InterfaceStability.Evolving @Documented @Target({ElementType.TYPE}) @Retention(RetentionPolicy.RUNTIME) public @interface Metrics { /** * @return the (record) name of the metrics */ String name() default ""; /** * @return the optional description of metrics */ String about() default ""; /** * @return the context name for a group of metrics */ String context(); }
1,488
28.196078
75
java
hadoop
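A hedged sketch pairing @Metrics with @Metric; ExampleStats is hypothetical, and the register(name, description, source) call is the MetricsSystem API this sketch assumes. Note that context() declares no default, so it must always be supplied.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

@Metrics(name = "ExampleStats", about = "Example record", context = "example")
public class ExampleStats {
  @Metric("Operations attempted") MutableCounterLong ops;

  public static ExampleStats create() {
    // Registration builds a MetricsSource from the annotations and
    // returns the annotated object for direct use.
    return DefaultMetricsSystem.instance()
        .register("ExampleStats", "Example record", new ExampleStats());
  }
}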
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordInput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Interface that all the Deserializers have to implement. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public interface RecordInput { /** * Read a byte from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ byte readByte(String tag) throws IOException; /** * Read a boolean from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ boolean readBool(String tag) throws IOException; /** * Read an integer from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ int readInt(String tag) throws IOException; /** * Read a long integer from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ long readLong(String tag) throws IOException; /** * Read a single-precision float from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ float readFloat(String tag) throws IOException; /** * Read a double-precision number from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ double readDouble(String tag) throws IOException; /** * Read a UTF-8 encoded string from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ String readString(String tag) throws IOException; /** * Read byte array from serialized record. * @param tag Used by tagged serialization formats (such as XML) * @return value read from serialized record. */ Buffer readBuffer(String tag) throws IOException; /** * Check the mark for start of the serialized record. * @param tag Used by tagged serialization formats (such as XML) */ void startRecord(String tag) throws IOException; /** * Check the mark for end of the serialized record. * @param tag Used by tagged serialization formats (such as XML) */ void endRecord(String tag) throws IOException; /** * Check the mark for start of the serialized vector. * @param tag Used by tagged serialization formats (such as XML) * @return Index that is used to count the number of elements. */ Index startVector(String tag) throws IOException; /** * Check the mark for end of the serialized vector. 
* @param tag Used by tagged serialization formats (such as XML) */ void endVector(String tag) throws IOException; /** * Check the mark for start of the serialized map. * @param tag Used by tagged serialization formats (such as XML) * @return Index that is used to count the number of map entries. */ Index startMap(String tag) throws IOException; /** * Check the mark for end of the serialized map. * @param tag Used by tagged serialization formats (such as XML) */ void endMap(String tag) throws IOException; }
4,320
32.496124
77
java
hadoop
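A sketch of the read-side pattern this interface implies: a hypothetical person record whose fields are read back in exactly the order they were written, with an Index driving the vector loop. The tags only matter to tagged formats such as XML.

import java.io.IOException;
import org.apache.hadoop.record.Index;
import org.apache.hadoop.record.RecordInput;

public class PersonReader {  // hypothetical record layout
  static void read(RecordInput rin) throws IOException {
    rin.startRecord("person");
    String name = rin.readString("name");
    long id = rin.readLong("id");
    Index emails = rin.startVector("emails");
    while (!emails.done()) {          // done() reports end-of-vector
      String email = rin.readString("email");
      emails.incr();                  // advance the element counter
    }
    rin.endVector("emails");
    rin.endRecord("person");
  }
}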
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordOutput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.IOException; import java.util.TreeMap; import java.util.ArrayList; import java.io.PrintStream; import java.io.OutputStream; import java.io.UnsupportedEncodingException; import java.util.Stack; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * XML Serializer. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class XmlRecordOutput implements RecordOutput { private PrintStream stream; private int indent = 0; private Stack<String> compoundStack; private void putIndent() { StringBuilder sb = new StringBuilder(""); for (int idx = 0; idx < indent; idx++) { sb.append(" "); } stream.print(sb.toString()); } private void addIndent() { indent++; } private void closeIndent() { indent--; } private void printBeginEnvelope(String tag) { if (!compoundStack.empty()) { String s = compoundStack.peek(); if ("struct".equals(s)) { putIndent(); stream.print("<member>\n"); addIndent(); putIndent(); stream.print("<name>"+tag+"</name>\n"); putIndent(); stream.print("<value>"); } else if ("vector".equals(s)) { stream.print("<value>"); } else if ("map".equals(s)) { stream.print("<value>"); } } else { stream.print("<value>"); } } private void printEndEnvelope(String tag) { if (!compoundStack.empty()) { String s = compoundStack.peek(); if ("struct".equals(s)) { stream.print("</value>\n"); closeIndent(); putIndent(); stream.print("</member>\n"); } else if ("vector".equals(s)) { stream.print("</value>\n"); } else if ("map".equals(s)) { stream.print("</value>\n"); } } else { stream.print("</value>\n"); } } private void insideVector(String tag) { printBeginEnvelope(tag); compoundStack.push("vector"); } private void outsideVector(String tag) throws IOException { String s = compoundStack.pop(); if (!"vector".equals(s)) { throw new IOException("Error serializing vector."); } printEndEnvelope(tag); } private void insideMap(String tag) { printBeginEnvelope(tag); compoundStack.push("map"); } private void outsideMap(String tag) throws IOException { String s = compoundStack.pop(); if (!"map".equals(s)) { throw new IOException("Error serializing map."); } printEndEnvelope(tag); } private void insideRecord(String tag) { printBeginEnvelope(tag); compoundStack.push("struct"); } private void outsideRecord(String tag) throws IOException { String s = compoundStack.pop(); if (!"struct".equals(s)) { throw new IOException("Error serializing record."); } printEndEnvelope(tag); } /** Creates a new instance of XmlRecordOutput */ public XmlRecordOutput(OutputStream out) { try { stream = new PrintStream(out, true, "UTF-8"); compoundStack = new 
Stack<String>(); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } } @Override public void writeByte(byte b, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<ex:i1>"); stream.print(Byte.toString(b)); stream.print("</ex:i1>"); printEndEnvelope(tag); } @Override public void writeBool(boolean b, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<boolean>"); stream.print(b ? "1" : "0"); stream.print("</boolean>"); printEndEnvelope(tag); } @Override public void writeInt(int i, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<i4>"); stream.print(Integer.toString(i)); stream.print("</i4>"); printEndEnvelope(tag); } @Override public void writeLong(long l, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<ex:i8>"); stream.print(Long.toString(l)); stream.print("</ex:i8>"); printEndEnvelope(tag); } @Override public void writeFloat(float f, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<ex:float>"); stream.print(Float.toString(f)); stream.print("</ex:float>"); printEndEnvelope(tag); } @Override public void writeDouble(double d, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<double>"); stream.print(Double.toString(d)); stream.print("</double>"); printEndEnvelope(tag); } @Override public void writeString(String s, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<string>"); stream.print(Utils.toXMLString(s)); stream.print("</string>"); printEndEnvelope(tag); } @Override public void writeBuffer(Buffer buf, String tag) throws IOException { printBeginEnvelope(tag); stream.print("<string>"); stream.print(Utils.toXMLBuffer(buf)); stream.print("</string>"); printEndEnvelope(tag); } @Override public void startRecord(Record r, String tag) throws IOException { insideRecord(tag); stream.print("<struct>\n"); addIndent(); } @Override public void endRecord(Record r, String tag) throws IOException { closeIndent(); putIndent(); stream.print("</struct>"); outsideRecord(tag); } @Override public void startVector(ArrayList v, String tag) throws IOException { insideVector(tag); stream.print("<array>\n"); addIndent(); } @Override public void endVector(ArrayList v, String tag) throws IOException { closeIndent(); putIndent(); stream.print("</array>"); outsideVector(tag); } @Override public void startMap(TreeMap v, String tag) throws IOException { insideMap(tag); stream.print("<array>\n"); addIndent(); } @Override public void endMap(TreeMap v, String tag) throws IOException { closeIndent(); putIndent(); stream.print("</array>"); outsideMap(tag); } }
7,125
25.295203
77
java
hadoop
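A minimal sketch of serializing a flat record with XmlRecordOutput; the person fields are illustrative, and null is passed for the Record parameter since this implementation never inspects it.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.record.XmlRecordOutput;

public class XmlOutExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    XmlRecordOutput xout = new XmlRecordOutput(bos);
    xout.startRecord(null, "person");  // opens <value><struct>
    xout.writeString("Ada", "name");   // emitted as <string> inside <member>
    xout.writeInt(36, "age");          // emitted as <i4>
    xout.endRecord(null, "person");    // closes </struct></value>
    System.out.println(bos.toString("UTF-8"));
  }
}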
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.IOException; import java.util.TreeMap; import java.util.ArrayList; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.OutputStream; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class BinaryRecordOutput implements RecordOutput { private DataOutput out; private BinaryRecordOutput() {} private void setDataOutput(DataOutput out) { this.out = out; } private static final ThreadLocal<BinaryRecordOutput> B_OUT = new ThreadLocal<BinaryRecordOutput>() { @Override protected BinaryRecordOutput initialValue() { return new BinaryRecordOutput(); } }; /** * Get a thread-local record output for the supplied DataOutput. * @param out data output stream * @return binary record output corresponding to the supplied DataOutput. 
*/ public static BinaryRecordOutput get(DataOutput out) { BinaryRecordOutput bout = B_OUT.get(); bout.setDataOutput(out); return bout; } /** Creates a new instance of BinaryRecordOutput */ public BinaryRecordOutput(OutputStream out) { this.out = new DataOutputStream(out); } /** Creates a new instance of BinaryRecordOutput */ public BinaryRecordOutput(DataOutput out) { this.out = out; } @Override public void writeByte(byte b, String tag) throws IOException { out.writeByte(b); } @Override public void writeBool(boolean b, String tag) throws IOException { out.writeBoolean(b); } @Override public void writeInt(int i, String tag) throws IOException { Utils.writeVInt(out, i); } @Override public void writeLong(long l, String tag) throws IOException { Utils.writeVLong(out, l); } @Override public void writeFloat(float f, String tag) throws IOException { out.writeFloat(f); } @Override public void writeDouble(double d, String tag) throws IOException { out.writeDouble(d); } @Override public void writeString(String s, String tag) throws IOException { Utils.toBinaryString(out, s); } @Override public void writeBuffer(Buffer buf, String tag) throws IOException { byte[] barr = buf.get(); int len = buf.getCount(); Utils.writeVInt(out, len); out.write(barr, 0, len); } @Override public void startRecord(Record r, String tag) throws IOException {} @Override public void endRecord(Record r, String tag) throws IOException {} @Override public void startVector(ArrayList v, String tag) throws IOException { writeInt(v.size(), tag); } @Override public void endVector(ArrayList v, String tag) throws IOException {} @Override public void startMap(TreeMap v, String tag) throws IOException { writeInt(v.size(), tag); } @Override public void endMap(TreeMap v, String tag) throws IOException {} }
3,950
26.4375
77
java
hadoop
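A minimal sketch of the binary writer; the field values are illustrative. Note the thread-local get() path, which reuses one instance per thread instead of allocating per call.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.record.BinaryRecordOutput;

public class BinaryOutExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    BinaryRecordOutput bout = BinaryRecordOutput.get(dos);
    bout.writeInt(42, "answer");        // zero-compressed vint, 1 byte here
    bout.writeString("hi", "greeting"); // vint length prefix + UTF-8 bytes
    System.out.println(bos.size());     // 4: 1 + (1 + 2)
  }
}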
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.IOException; import java.util.TreeMap; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Interface that all the serializers have to implement. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public interface RecordOutput { /** * Write a byte to serialized record. * @param b Byte to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeByte(byte b, String tag) throws IOException; /** * Write a boolean to serialized record. * @param b Boolean to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeBool(boolean b, String tag) throws IOException; /** * Write an integer to serialized record. * @param i Integer to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeInt(int i, String tag) throws IOException; /** * Write a long integer to serialized record. * @param l Long to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeLong(long l, String tag) throws IOException; /** * Write a single-precision float to serialized record. * @param f Float to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeFloat(float f, String tag) throws IOException; /** * Write a double precision floating point number to serialized record. * @param d Double to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeDouble(double d, String tag) throws IOException; /** * Write a unicode string to serialized record. * @param s String to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeString(String s, String tag) throws IOException; /** * Write a buffer to serialized record. * @param buf Buffer to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void writeBuffer(Buffer buf, String tag) throws IOException; /** * Mark the start of a record to be serialized. 
* @param r Record to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void startRecord(Record r, String tag) throws IOException; /** * Mark the end of a serialized record. * @param r Record to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void endRecord(Record r, String tag) throws IOException; /** * Mark the start of a vector to be serialized. * @param v Vector to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void startVector(ArrayList v, String tag) throws IOException; /** * Mark the end of a serialized vector. * @param v Vector to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void endVector(ArrayList v, String tag) throws IOException; /** * Mark the start of a map to be serialized. * @param m Map to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void startMap(TreeMap m, String tag) throws IOException; /** * Mark the end of a serialized map. * @param m Map to be serialized * @param tag Used by tagged serialization formats (such as XML) * @throws IOException Indicates error in serialization */ public void endMap(TreeMap m, String tag) throws IOException; }
5,380
34.873333
77
java
hadoop
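A sketch of the serialize pattern that generated record classes follow against this interface; the person fields are hypothetical, and null is passed for the Record parameter, which the serializers shown in this section do not inspect. The same field sequence works against any RecordOutput, whether binary, CSV, or XML.

import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.record.RecordOutput;

public class PersonWriter {  // hypothetical record layout
  static void write(RecordOutput rout, String name, long id,
                    ArrayList<String> emails) throws IOException {
    rout.startRecord(null, "person");
    rout.writeString(name, "name");
    rout.writeLong(id, "id");
    rout.startVector(emails, "emails");  // binary writes the size here
    for (String email : emails) {
      rout.writeString(email, "email");
    }
    rout.endVector(emails, "emails");
    rout.endRecord(null, "person");
  }
}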
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Utils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableUtils; /** * Various utility functions for Hadoop record I/O runtime. * * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class Utils { /** Cannot create a new instance of Utils */ private Utils() { } public static final char[] hexchars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; /** * * @param s * @return */ static String toXMLString(String s) { StringBuilder sb = new StringBuilder(); for (int idx = 0; idx < s.length(); idx++) { char ch = s.charAt(idx); if (ch == '<') { sb.append("&lt;"); } else if (ch == '&') { sb.append("&amp;"); } else if (ch == '%') { sb.append("%0025"); } else if (ch < 0x20 || (ch > 0xD7FF && ch < 0xE000) || (ch > 0xFFFD)) { sb.append("%"); sb.append(hexchars[(ch & 0xF000) >> 12]); sb.append(hexchars[(ch & 0x0F00) >> 8]); sb.append(hexchars[(ch & 0x00F0) >> 4]); sb.append(hexchars[(ch & 0x000F)]); } else { sb.append(ch); } } return sb.toString(); } static private int h2c(char ch) { if (ch >= '0' && ch <= '9') { return ch - '0'; } else if (ch >= 'A' && ch <= 'F') { return ch - 'A' + 10; } else if (ch >= 'a' && ch <= 'f') { return ch - 'a' + 10; } return 0; } /** * * @param s * @return */ static String fromXMLString(String s) { StringBuilder sb = new StringBuilder(); for (int idx = 0; idx < s.length();) { char ch = s.charAt(idx++); if (ch == '%') { int ch1 = h2c(s.charAt(idx++)) << 12; int ch2 = h2c(s.charAt(idx++)) << 8; int ch3 = h2c(s.charAt(idx++)) << 4; int ch4 = h2c(s.charAt(idx++)); char res = (char)(ch1 | ch2 | ch3 | ch4); sb.append(res); } else { sb.append(ch); } } return sb.toString(); } /** * * @param s * @return */ static String toCSVString(String s) { StringBuilder sb = new StringBuilder(s.length()+1); sb.append('\''); int len = s.length(); for (int i = 0; i < len; i++) { char c = s.charAt(i); switch(c) { case '\0': sb.append("%00"); break; case '\n': sb.append("%0A"); break; case '\r': sb.append("%0D"); break; case ',': sb.append("%2C"); break; case '}': sb.append("%7D"); break; case '%': sb.append("%25"); break; default: sb.append(c); } } return sb.toString(); } /** * * @param s * @throws java.io.IOException * @return */ static String fromCSVString(String s) throws IOException { if (s.charAt(0) != '\'') { throw new IOException("Error deserializing string."); } int len = s.length(); StringBuilder sb = new StringBuilder(len-1); for (int i = 
1; i < len; i++) { char c = s.charAt(i); if (c == '%') { char ch1 = s.charAt(i+1); char ch2 = s.charAt(i+2); i += 2; if (ch1 == '0' && ch2 == '0') { sb.append('\0'); } else if (ch1 == '0' && ch2 == 'A') { sb.append('\n'); } else if (ch1 == '0' && ch2 == 'D') { sb.append('\r'); } else if (ch1 == '2' && ch2 == 'C') { sb.append(','); } else if (ch1 == '7' && ch2 == 'D') { sb.append('}'); } else if (ch1 == '2' && ch2 == '5') { sb.append('%'); } else { throw new IOException("Error deserializing string."); } } else { sb.append(c); } } return sb.toString(); } /** * * @param s * @return */ static String toXMLBuffer(Buffer s) { return s.toString(); } /** * * @param s * @throws java.io.IOException * @return */ static Buffer fromXMLBuffer(String s) throws IOException { if (s.length() == 0) { return new Buffer(); } int blen = s.length()/2; byte[] barr = new byte[blen]; for (int idx = 0; idx < blen; idx++) { char c1 = s.charAt(2*idx); char c2 = s.charAt(2*idx+1); barr[idx] = (byte)Integer.parseInt(""+c1+c2, 16); } return new Buffer(barr); } /** * * @param buf * @return */ static String toCSVBuffer(Buffer buf) { StringBuilder sb = new StringBuilder("#"); sb.append(buf.toString()); return sb.toString(); } /** * Converts a CSV-serialized representation of buffer to a new * Buffer * @param s CSV-serialized representation of buffer * @throws java.io.IOException * @return Deserialized Buffer */ static Buffer fromCSVBuffer(String s) throws IOException { if (s.charAt(0) != '#') { throw new IOException("Error deserializing buffer."); } if (s.length() == 1) { return new Buffer(); } int blen = (s.length()-1)/2; byte[] barr = new byte[blen]; for (int idx = 0; idx < blen; idx++) { char c1 = s.charAt(2*idx+1); char c2 = s.charAt(2*idx+2); barr[idx] = (byte)Integer.parseInt(""+c1+c2, 16); } return new Buffer(barr); } private static int utf8LenForCodePoint(final int cpt) throws IOException { if (cpt >=0 && cpt <= 0x7F) { return 1; } if (cpt >= 0x80 && cpt <= 0x07FF) { return 2; } if ((cpt >= 0x0800 && cpt < 0xD800) || (cpt > 0xDFFF && cpt <= 0xFFFD)) { return 3; } if (cpt >= 0x10000 && cpt <= 0x10FFFF) { return 4; } throw new IOException("Illegal Unicode Codepoint "+ Integer.toHexString(cpt)+" in string."); } private static final int B10 = Integer.parseInt("10000000", 2); private static final int B110 = Integer.parseInt("11000000", 2); private static final int B1110 = Integer.parseInt("11100000", 2); private static final int B11110 = Integer.parseInt("11110000", 2); private static final int B11 = Integer.parseInt("11000000", 2); private static final int B111 = Integer.parseInt("11100000", 2); private static final int B1111 = Integer.parseInt("11110000", 2); private static final int B11111 = Integer.parseInt("11111000", 2); private static int writeUtf8(int cpt, final byte[] bytes, final int offset) throws IOException { if (cpt >=0 && cpt <= 0x7F) { bytes[offset] = (byte) cpt; return 1; } if (cpt >= 0x80 && cpt <= 0x07FF) { bytes[offset+1] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 6; bytes[offset] = (byte) (B110 | (cpt & 0x1F)); return 2; } if ((cpt >= 0x0800 && cpt < 0xD800) || (cpt > 0xDFFF && cpt <= 0xFFFD)) { bytes[offset+2] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 6; bytes[offset+1] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 6; bytes[offset] = (byte) (B1110 | (cpt & 0x0F)); return 3; } if (cpt >= 0x10000 && cpt <= 0x10FFFF) { bytes[offset+3] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 6; bytes[offset+2] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 6; bytes[offset+1] = (byte) (B10 | (cpt & 0x3F)); cpt = cpt >> 
6; bytes[offset] = (byte) (B11110 | (cpt & 0x07)); return 4; } throw new IOException("Illegal Unicode Codepoint "+ Integer.toHexString(cpt)+" in string."); } static void toBinaryString(final DataOutput out, final String str) throws IOException { final int strlen = str.length(); byte[] bytes = new byte[strlen*4]; // Codepoints expand to 4 bytes max int utf8Len = 0; int idx = 0; while(idx < strlen) { final int cpt = str.codePointAt(idx); idx += Character.isSupplementaryCodePoint(cpt) ? 2 : 1; utf8Len += writeUtf8(cpt, bytes, utf8Len); } writeVInt(out, utf8Len); out.write(bytes, 0, utf8Len); } static boolean isValidCodePoint(int cpt) { return !((cpt > 0x10FFFF) || (cpt >= 0xD800 && cpt <= 0xDFFF) || (cpt >= 0xFFFE && cpt <= 0xFFFF)); } private static int utf8ToCodePoint(int b1, int b2, int b3, int b4) { int cpt = 0; cpt = (((b1 & ~B11111) << 18) | ((b2 & ~B11) << 12) | ((b3 & ~B11) << 6) | (b4 & ~B11)); return cpt; } private static int utf8ToCodePoint(int b1, int b2, int b3) { int cpt = 0; cpt = (((b1 & ~B1111) << 12) | ((b2 & ~B11) << 6) | (b3 & ~B11)); return cpt; } private static int utf8ToCodePoint(int b1, int b2) { int cpt = 0; cpt = (((b1 & ~B111) << 6) | (b2 & ~B11)); return cpt; } private static void checkB10(int b) throws IOException { if ((b & B11) != B10) { throw new IOException("Invalid UTF-8 representation."); } } static String fromBinaryString(final DataInput din) throws IOException { final int utf8Len = readVInt(din); final byte[] bytes = new byte[utf8Len]; din.readFully(bytes); int len = 0; // For the most common case, i.e. ASCII, numChars = utf8Len StringBuilder sb = new StringBuilder(utf8Len); while(len < utf8Len) { int cpt = 0; final int b1 = bytes[len++] & 0xFF; if (b1 <= 0x7F) { cpt = b1; } else if ((b1 & B11111) == B11110) { int b2 = bytes[len++] & 0xFF; checkB10(b2); int b3 = bytes[len++] & 0xFF; checkB10(b3); int b4 = bytes[len++] & 0xFF; checkB10(b4); cpt = utf8ToCodePoint(b1, b2, b3, b4); } else if ((b1 & B1111) == B1110) { int b2 = bytes[len++] & 0xFF; checkB10(b2); int b3 = bytes[len++] & 0xFF; checkB10(b3); cpt = utf8ToCodePoint(b1, b2, b3); } else if ((b1 & B111) == B110) { int b2 = bytes[len++] & 0xFF; checkB10(b2); cpt = utf8ToCodePoint(b1, b2); } else { throw new IOException("Invalid UTF-8 byte "+Integer.toHexString(b1)+ " at offset "+(len-1)+" in length of "+utf8Len); } if (!isValidCodePoint(cpt)) { throw new IOException("Illegal Unicode Codepoint "+ Integer.toHexString(cpt)+" in stream."); } sb.appendCodePoint(cpt); } return sb.toString(); } /** Parse a float from a byte array. */ public static float readFloat(byte[] bytes, int start) { return WritableComparator.readFloat(bytes, start); } /** Parse a double from a byte array. */ public static double readDouble(byte[] bytes, int start) { return WritableComparator.readDouble(bytes, start); } /** * Reads a zero-compressed encoded long from a byte array and returns it. * @param bytes byte array with the encoded long * @param start starting index * @throws java.io.IOException * @return deserialized long */ public static long readVLong(byte[] bytes, int start) throws IOException { return WritableComparator.readVLong(bytes, start); } /** * Reads a zero-compressed encoded integer from a byte array and returns it.
* @param bytes byte array with the encoded integer * @param start start index * @throws java.io.IOException * @return deserialized integer */ public static int readVInt(byte[] bytes, int start) throws IOException { return WritableComparator.readVInt(bytes, start); } /** * Reads a zero-compressed encoded long from a stream and return it. * @param in input stream * @throws java.io.IOException * @return deserialized long */ public static long readVLong(DataInput in) throws IOException { return WritableUtils.readVLong(in); } /** * Reads a zero-compressed encoded integer from a stream and returns it. * @param in input stream * @throws java.io.IOException * @return deserialized integer */ public static int readVInt(DataInput in) throws IOException { return WritableUtils.readVInt(in); } /** * Get the encoded length if an integer is stored in a variable-length format * @return the encoded length */ public static int getVIntSize(long i) { return WritableUtils.getVIntSize(i); } /** * Serializes a long to a binary stream with zero-compressed encoding. * For {@literal -112 <= i <= 127}, only one byte is used with the actual * value. For other values of i, the first byte value indicates whether the * long is positive or negative, and the number of bytes that follow. * If the first byte value v is between -113 and -120, the following long * is positive, with number of bytes that follow are -(v+112). * If the first byte value v is between -121 and -128, the following long * is negative, with number of bytes that follow are -(v+120). Bytes are * stored in the high-non-zero-byte-first order. * * @param stream Binary output stream * @param i Long to be serialized * @throws java.io.IOException */ public static void writeVLong(DataOutput stream, long i) throws IOException { WritableUtils.writeVLong(stream, i); } /** * Serializes an int to a binary stream with zero-compressed encoding. * * @param stream Binary output stream * @param i int to be serialized * @throws java.io.IOException */ public static void writeVInt(DataOutput stream, int i) throws IOException { WritableUtils.writeVInt(stream, i); } /** Lexicographic order of binary data. */ public static int compareBytes(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2); } }
14,962
28.985972
79
java
hadoop
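A round-trip sketch for the zero-compressed vint helpers above; the values 127 and 300 are chosen to show the one-byte direct range and the marker-plus-payload encoding.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.record.Utils;

public class VIntExample {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    Utils.writeVLong(dos, 127L);  // fits the one-byte direct range
    Utils.writeVLong(dos, 300L);  // marker byte plus two payload bytes
    System.out.println(Utils.getVIntSize(127L)); // 1
    System.out.println(Utils.getVIntSize(300L)); // 3
    DataInputStream dis =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    System.out.println(Utils.readVLong(dis)); // 127
    System.out.println(Utils.readVLong(dis)); // 300
  }
}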
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/XmlRecordInput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.InputStream; import java.io.IOException; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.xml.sax.*; import org.xml.sax.helpers.DefaultHandler; import javax.xml.parsers.SAXParserFactory; import javax.xml.parsers.SAXParser; /** * XML Deserializer. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class XmlRecordInput implements RecordInput { static private class Value { private String type; private StringBuffer sb; public Value(String t) { type = t; sb = new StringBuffer(); } public void addChars(char[] buf, int offset, int len) { sb.append(buf, offset, len); } public String getValue() { return sb.toString(); } public String getType() { return type; } } private static class XMLParser extends DefaultHandler { private boolean charsValid = false; private ArrayList<Value> valList; private XMLParser(ArrayList<Value> vlist) { valList = vlist; } @Override public void startDocument() throws SAXException {} @Override public void endDocument() throws SAXException {} @Override public void startElement(String ns, String sname, String qname, Attributes attrs) throws SAXException { charsValid = false; if ("boolean".equals(qname) || "i4".equals(qname) || "int".equals(qname) || "string".equals(qname) || "double".equals(qname) || "ex:i1".equals(qname) || "ex:i8".equals(qname) || "ex:float".equals(qname)) { charsValid = true; valList.add(new Value(qname)); } else if ("struct".equals(qname) || "array".equals(qname)) { valList.add(new Value(qname)); } } @Override public void endElement(String ns, String sname, String qname) throws SAXException { charsValid = false; if ("struct".equals(qname) || "array".equals(qname)) { valList.add(new Value("/"+qname)); } } @Override public void characters(char buf[], int offset, int len) throws SAXException { if (charsValid) { Value v = valList.get(valList.size()-1); v.addChars(buf, offset, len); } } } private class XmlIndex implements Index { @Override public boolean done() { Value v = valList.get(vIdx); if ("/array".equals(v.getType())) { valList.set(vIdx, null); vIdx++; return true; } else { return false; } } @Override public void incr() {} } private ArrayList<Value> valList; private int vLen; private int vIdx; private Value next() throws IOException { if (vIdx < vLen) { Value v = valList.get(vIdx); valList.set(vIdx, null); vIdx++; return v; } else { throw new IOException("Error in deserialization."); } } /** Creates a new instance of XmlRecordInput */ public XmlRecordInput(InputStream in) { try{ valList = new ArrayList<Value>(); DefaultHandler 
handler = new XMLParser(valList); SAXParserFactory factory = SAXParserFactory.newInstance(); SAXParser parser = factory.newSAXParser(); parser.parse(in, handler); vLen = valList.size(); vIdx = 0; } catch (Exception ex) { throw new RuntimeException(ex); } } @Override public byte readByte(String tag) throws IOException { Value v = next(); if (!"ex:i1".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Byte.parseByte(v.getValue()); } @Override public boolean readBool(String tag) throws IOException { Value v = next(); if (!"boolean".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return "1".equals(v.getValue()); } @Override public int readInt(String tag) throws IOException { Value v = next(); if (!"i4".equals(v.getType()) && !"int".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Integer.parseInt(v.getValue()); } @Override public long readLong(String tag) throws IOException { Value v = next(); if (!"ex:i8".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Long.parseLong(v.getValue()); } @Override public float readFloat(String tag) throws IOException { Value v = next(); if (!"ex:float".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Float.parseFloat(v.getValue()); } @Override public double readDouble(String tag) throws IOException { Value v = next(); if (!"double".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Double.parseDouble(v.getValue()); } @Override public String readString(String tag) throws IOException { Value v = next(); if (!"string".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Utils.fromXMLString(v.getValue()); } @Override public Buffer readBuffer(String tag) throws IOException { Value v = next(); if (!"string".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return Utils.fromXMLBuffer(v.getValue()); } @Override public void startRecord(String tag) throws IOException { Value v = next(); if (!"struct".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } } @Override public void endRecord(String tag) throws IOException { Value v = next(); if (!"/struct".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } } @Override public Index startVector(String tag) throws IOException { Value v = next(); if (!"array".equals(v.getType())) { throw new IOException("Error deserializing "+tag+"."); } return new XmlIndex(); } @Override public void endVector(String tag) throws IOException {} @Override public Index startMap(String tag) throws IOException { return startVector(tag); } @Override public void endMap(String tag) throws IOException { endVector(tag); } }
7,606
26.966912
77
java
hadoop
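A round-trip sketch pairing XmlRecordInput with the XmlRecordOutput shown earlier in this section; the person fields are illustrative. The whole document is SAX-parsed up front in the constructor; read calls then walk the buffered value list in order.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.record.XmlRecordInput;
import org.apache.hadoop.record.XmlRecordOutput;

public class XmlRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    XmlRecordOutput xout = new XmlRecordOutput(bos);
    xout.startRecord(null, "person");
    xout.writeString("Ada", "name");
    xout.writeInt(36, "age");
    xout.endRecord(null, "person");

    XmlRecordInput xin =
        new XmlRecordInput(new ByteArrayInputStream(bos.toByteArray()));
    xin.startRecord("person");
    System.out.println(xin.readString("name")); // Ada
    System.out.println(xin.readInt("age"));     // 36
    xin.endRecord("person");
  }
}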
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordOutput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.IOException; import java.util.TreeMap; import java.util.ArrayList; import java.io.PrintStream; import java.io.OutputStream; import java.io.UnsupportedEncodingException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class CsvRecordOutput implements RecordOutput { private PrintStream stream; private boolean isFirst = true; private void throwExceptionOnError(String tag) throws IOException { if (stream.checkError()) { throw new IOException("Error serializing "+tag); } } private void printCommaUnlessFirst() { if (!isFirst) { stream.print(","); } isFirst = false; } /** Creates a new instance of CsvRecordOutput */ public CsvRecordOutput(OutputStream out) { try { stream = new PrintStream(out, true, "UTF-8"); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } } @Override public void writeByte(byte b, String tag) throws IOException { writeLong((long)b, tag); } @Override public void writeBool(boolean b, String tag) throws IOException { printCommaUnlessFirst(); String val = b ? "T" : "F"; stream.print(val); throwExceptionOnError(tag); } @Override public void writeInt(int i, String tag) throws IOException { writeLong((long)i, tag); } @Override public void writeLong(long l, String tag) throws IOException { printCommaUnlessFirst(); stream.print(l); throwExceptionOnError(tag); } @Override public void writeFloat(float f, String tag) throws IOException { writeDouble((double)f, tag); } @Override public void writeDouble(double d, String tag) throws IOException { printCommaUnlessFirst(); stream.print(d); throwExceptionOnError(tag); } @Override public void writeString(String s, String tag) throws IOException { printCommaUnlessFirst(); stream.print(Utils.toCSVString(s)); throwExceptionOnError(tag); } @Override public void writeBuffer(Buffer buf, String tag) throws IOException { printCommaUnlessFirst(); stream.print(Utils.toCSVBuffer(buf)); throwExceptionOnError(tag); } @Override public void startRecord(Record r, String tag) throws IOException { if (tag != null && ! 
tag.isEmpty()) { printCommaUnlessFirst(); stream.print("s{"); isFirst = true; } } @Override public void endRecord(Record r, String tag) throws IOException { if (tag == null || tag.isEmpty()) { stream.print("\n"); isFirst = true; } else { stream.print("}"); isFirst = false; } } @Override public void startVector(ArrayList v, String tag) throws IOException { printCommaUnlessFirst(); stream.print("v{"); isFirst = true; } @Override public void endVector(ArrayList v, String tag) throws IOException { stream.print("}"); isFirst = false; } @Override public void startMap(TreeMap v, String tag) throws IOException { printCommaUnlessFirst(); stream.print("m{"); isFirst = true; } @Override public void endMap(TreeMap v, String tag) throws IOException { stream.print("}"); isFirst = false; } }
4,306
25.58642
77
java
hadoop
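A minimal sketch of the CSV writer; the values are illustrative. An empty tag marks a top-level record, so there is no "s{" ... "}" wrapper and endRecord terminates the line instead.

import java.io.IOException;
import org.apache.hadoop.record.CsvRecordOutput;

public class CsvOutExample {
  public static void main(String[] args) throws IOException {
    CsvRecordOutput cout = new CsvRecordOutput(System.out);
    cout.startRecord(null, "");
    cout.writeString("Ada", "name"); // printed as 'Ada (leading-quote escape)
    cout.writeInt(36, "age");
    cout.writeBool(true, "active");  // printed as T
    cout.endRecord(null, "");        // ends the line with a newline
    // Output: 'Ada,36,T
  }
}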
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/BinaryRecordInput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.DataInput; import java.io.IOException; import java.io.DataInputStream; import java.io.InputStream; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class BinaryRecordInput implements RecordInput { private DataInput in; static private class BinaryIndex implements Index { private int nelems; private BinaryIndex(int nelems) { this.nelems = nelems; } @Override public boolean done() { return (nelems <= 0); } @Override public void incr() { nelems--; } } private BinaryRecordInput() {} private void setDataInput(DataInput inp) { this.in = inp; } private static final ThreadLocal<BinaryRecordInput> B_IN = new ThreadLocal<BinaryRecordInput>() { @Override protected BinaryRecordInput initialValue() { return new BinaryRecordInput(); } }; /** * Get a thread-local record input for the supplied DataInput. * @param inp data input stream * @return binary record input corresponding to the supplied DataInput. 
*/ public static BinaryRecordInput get(DataInput inp) { BinaryRecordInput bin = B_IN.get(); bin.setDataInput(inp); return bin; } /** Creates a new instance of BinaryRecordInput */ public BinaryRecordInput(InputStream strm) { this.in = new DataInputStream(strm); } /** Creates a new instance of BinaryRecordInput */ public BinaryRecordInput(DataInput din) { this.in = din; } @Override public byte readByte(final String tag) throws IOException { return in.readByte(); } @Override public boolean readBool(final String tag) throws IOException { return in.readBoolean(); } @Override public int readInt(final String tag) throws IOException { return Utils.readVInt(in); } @Override public long readLong(final String tag) throws IOException { return Utils.readVLong(in); } @Override public float readFloat(final String tag) throws IOException { return in.readFloat(); } @Override public double readDouble(final String tag) throws IOException { return in.readDouble(); } @Override public String readString(final String tag) throws IOException { return Utils.fromBinaryString(in); } @Override public Buffer readBuffer(final String tag) throws IOException { final int len = Utils.readVInt(in); final byte[] barr = new byte[len]; in.readFully(barr); return new Buffer(barr); } @Override public void startRecord(final String tag) throws IOException { // no-op } @Override public void endRecord(final String tag) throws IOException { // no-op } @Override public Index startVector(final String tag) throws IOException { return new BinaryIndex(readInt(tag)); } @Override public void endVector(final String tag) throws IOException { // no-op } @Override public Index startMap(final String tag) throws IOException { return new BinaryIndex(readInt(tag)); } @Override public void endMap(final String tag) throws IOException { // no-op } }
4,228
25.104938
77
java
hadoop
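A round-trip sketch pairing the binary writer and reader; since the binary format is untagged and has no record markers, the reads must mirror the writes exactly.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.record.BinaryRecordInput;
import org.apache.hadoop.record.BinaryRecordOutput;

public class BinaryRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    BinaryRecordOutput bout =
        BinaryRecordOutput.get(new DataOutputStream(bos));
    bout.writeInt(42, "answer");
    bout.writeString("hello", "greeting");

    BinaryRecordInput bin =
        new BinaryRecordInput(new ByteArrayInputStream(bos.toByteArray()));
    System.out.println(bin.readInt("answer"));      // 42
    System.out.println(bin.readString("greeting")); // hello
  }
}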
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.UnsupportedEncodingException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A byte sequence that is used as a Java native type for buffer. * It is resizable and distinguishes between the count of the sequence and * the current capacity. * * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class Buffer implements Comparable, Cloneable { /** Number of valid bytes in this.bytes. */ private int count; /** Backing store for Buffer. */ private byte[] bytes = null; /** * Create a zero-count sequence. */ public Buffer() { this.count = 0; } /** * Create a Buffer using the byte array as the initial value. * * @param bytes This array becomes the backing storage for the object. */ public Buffer(byte[] bytes) { this.bytes = bytes; this.count = (bytes == null) ? 0 : bytes.length; } /** * Create a Buffer using the byte range as the initial value. * * @param bytes Copy of this array becomes the backing storage for the object. * @param offset offset into byte array * @param length length of data */ public Buffer(byte[] bytes, int offset, int length) { copy(bytes, offset, length); } /** * Use the specified bytes array as underlying sequence. * * @param bytes byte sequence */ public void set(byte[] bytes) { this.count = (bytes == null) ? 0 : bytes.length; this.bytes = bytes; } /** * Copy the specified byte array to the Buffer. Replaces the current buffer. * * @param bytes byte array to be assigned * @param offset offset into byte array * @param length length of data */ public final void copy(byte[] bytes, int offset, int length) { if (this.bytes == null || this.bytes.length < length) { this.bytes = new byte[length]; } System.arraycopy(bytes, offset, this.bytes, 0, length); this.count = length; } /** * Get the data from the Buffer. * * @return The data is only valid between 0 and getCount() - 1. */ public byte[] get() { if (bytes == null) { bytes = new byte[0]; } return bytes; } /** * Get the current count of the buffer. */ public int getCount() { return count; } /** * Get the capacity, which is the maximum count that could handled without * resizing the backing storage. * * @return The number of bytes */ public int getCapacity() { return this.get().length; } /** * Change the capacity of the backing storage. * The data is preserved if newCapacity {@literal >=} getCount(). * @param newCapacity The new capacity in bytes. 
*/ public void setCapacity(int newCapacity) { if (newCapacity < 0) { throw new IllegalArgumentException("Invalid capacity argument "+newCapacity); } if (newCapacity == 0) { this.bytes = null; this.count = 0; return; } if (newCapacity != getCapacity()) { byte[] data = new byte[newCapacity]; if (newCapacity < count) { count = newCapacity; } if (count != 0) { System.arraycopy(this.get(), 0, data, 0, count); } bytes = data; } } /** * Reset the buffer to 0 size. */ public void reset() { setCapacity(0); } /** * Change the capacity of the backing store to be the same as the current * count of buffer. */ public void truncate() { setCapacity(count); } /** * Append specified bytes to the buffer. * * @param bytes byte array to be appended * @param offset offset into byte array * @param length length of data */ public void append(byte[] bytes, int offset, int length) { setCapacity(count+length); System.arraycopy(bytes, offset, this.get(), count, length); count = count + length; } /** * Append specified bytes to the buffer. * * @param bytes byte array to be appended */ public void append(byte[] bytes) { append(bytes, 0, bytes.length); } // inherit javadoc @Override public int hashCode() { int hash = 1; byte[] b = this.get(); for (int i = 0; i < count; i++) hash = (31 * hash) + b[i]; return hash; } /** * Define the sort order of the Buffer. * * @param other The other buffer * @return Positive if this is bigger than other, 0 if they are equal, and * negative if this is smaller than other. */ @Override public int compareTo(Object other) { Buffer right = ((Buffer) other); byte[] lb = this.get(); byte[] rb = right.get(); for (int i = 0; i < count && i < right.count; i++) { int a = (lb[i] & 0xff); int b = (rb[i] & 0xff); if (a != b) { return a - b; } } return count - right.count; } // inherit javadoc @Override public boolean equals(Object other) { if (other instanceof Buffer && this != other) { return compareTo(other) == 0; } return (this == other); } // inherit javadoc @Override public String toString() { StringBuilder sb = new StringBuilder(2*count); for(int idx = 0; idx < count; idx++) { sb.append(Character.forDigit((bytes[idx] & 0xF0) >> 4, 16)); sb.append(Character.forDigit(bytes[idx] & 0x0F, 16)); } return sb.toString(); } /** * Convert the byte buffer to a string in a specific character encoding. * * @param charsetName Valid Java Character Set Name */ public String toString(String charsetName) throws UnsupportedEncodingException { return new String(this.get(), 0, this.getCount(), charsetName); } // inherit javadoc @Override public Object clone() throws CloneNotSupportedException { Buffer result = (Buffer) super.clone(); result.copy(this.get(), 0, this.getCount()); return result; } }
6,821
25.339768
84
java
hadoop
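A short sketch of Buffer's resizable-byte-sequence behavior; the bytes are arbitrary.

import org.apache.hadoop.record.Buffer;

public class BufferExample {
  public static void main(String[] args) {
    Buffer buf = new Buffer();
    // append() grows the backing array via setCapacity() as needed.
    buf.append(new byte[] {(byte) 0xCA, (byte) 0xFE});
    buf.append(new byte[] {(byte) 0xBA, (byte) 0xBE});
    System.out.println(buf.getCount());    // 4
    System.out.println(buf.getCapacity()); // at least 4
    System.out.println(buf);               // cafebabe (lowercase hex dump)
    buf.truncate();                        // shrink capacity to the count
  }
}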
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/CsvRecordInput.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.InputStreamReader; import java.io.InputStream; import java.io.IOException; import java.io.PushbackReader; import java.io.UnsupportedEncodingException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class CsvRecordInput implements RecordInput { private PushbackReader stream; private class CsvIndex implements Index { @Override public boolean done() { char c = '\0'; try { c = (char) stream.read(); stream.unread(c); } catch (IOException ex) { } return c == '}'; } @Override public void incr() {} } private void throwExceptionOnError(String tag) throws IOException { throw new IOException("Error deserializing "+tag); } private String readField(String tag) throws IOException { try { StringBuilder buf = new StringBuilder(); while (true) { char c = (char) stream.read(); switch (c) { case ',': return buf.toString(); case '}': case '\n': case '\r': stream.unread(c); return buf.toString(); default: buf.append(c); } } } catch (IOException ex) { throw new IOException("Error reading "+tag); } } /** Creates a new instance of CsvRecordInput */ public CsvRecordInput(InputStream in) { try { stream = new PushbackReader(new InputStreamReader(in, "UTF-8")); } catch (UnsupportedEncodingException ex) { throw new RuntimeException(ex); } } @Override public byte readByte(String tag) throws IOException { return (byte) readLong(tag); } @Override public boolean readBool(String tag) throws IOException { String sval = readField(tag); return "T".equals(sval); }
@Override public int readInt(String tag) throws IOException { return (int) readLong(tag); } @Override public long readLong(String tag) throws IOException { String sval = readField(tag); try { long lval = Long.parseLong(sval); return lval; } catch (NumberFormatException ex) { throw new IOException("Error deserializing "+tag); } } @Override public float readFloat(String tag) throws IOException { return (float) readDouble(tag); } @Override public double readDouble(String tag) throws IOException { String sval = readField(tag); try { double dval = Double.parseDouble(sval); return dval; } catch (NumberFormatException ex) { throw new IOException("Error deserializing "+tag); } } @Override public String readString(String tag) throws IOException { String sval = readField(tag); return Utils.fromCSVString(sval); } @Override public Buffer readBuffer(String tag) throws IOException { String sval = readField(tag); return Utils.fromCSVBuffer(sval); } @Override public void startRecord(String tag) throws IOException { if (tag != null && !tag.isEmpty()) { char c1 = (char) stream.read(); char c2 = (char) stream.read(); if (c1 != 's' || c2 != '{') { throw new IOException("Error deserializing "+tag); } } } @Override public void endRecord(String tag) throws IOException { char c = (char) stream.read(); if (tag == null || tag.isEmpty()) { if (c != '\n' && c != '\r') { throw new IOException("Error deserializing record."); } else { return; } } if (c != '}') { throw new IOException("Error deserializing "+tag); } c = (char) stream.read(); if (c != ',') { stream.unread(c); } return; } @Override public Index startVector(String tag) throws IOException { char c1 = (char) stream.read(); char c2 = (char) stream.read(); if (c1 != 'v' || c2 != '{') { throw new IOException("Error deserializing "+tag); } return new CsvIndex(); } @Override public void endVector(String tag) throws IOException { char c = (char) stream.read(); if (c != '}') { throw new IOException("Error deserializing "+tag); } c = (char) stream.read(); if (c != ',') { stream.unread(c); } return; } @Override public Index startMap(String tag) throws IOException { char c1 = (char) stream.read(); char c2 = (char) stream.read(); if (c1 != 'm' || c2 != '{') { throw new IOException("Error deserializing "+tag); } return new CsvIndex(); } @Override public void endMap(String tag) throws IOException { char c = (char) stream.read(); if (c != '}') { throw new IOException("Error deserializing "+tag); } c = (char) stream.read(); if (c != ',') { stream.unread(c); } return; } }
5,913
25.401786
77
java
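A hand-rolled reading sketch for CsvRecordInput. The input literal is crafted to match the parsing rules above (fields end at ',', '}', or a newline; strings carry a leading ' that Utils.fromCSVString strips); it is not output captured from CsvRecordOutput.

import java.io.ByteArrayInputStream;
import org.apache.hadoop.record.CsvRecordInput;

public class CsvReadDemo {
  public static void main(String[] args) throws Exception {
    byte[] csv = "12,'hello\n".getBytes("UTF-8");
    CsvRecordInput in = new CsvRecordInput(new ByteArrayInputStream(csv));
    in.startRecord("");                  // empty tag: no "s{" header is expected
    long id = in.readLong("id");         // consumes "12" up to the comma
    String name = in.readString("name"); // reads "'hello", decoded to "hello"
    in.endRecord("");                    // consumes the trailing newline
    System.out.println(id + " " + name); // 12 hello
  }
}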
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordComparator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; /** * A raw record comparator base class * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public abstract class RecordComparator extends WritableComparator { /** * Construct a raw {@link Record} comparison implementation. */ protected RecordComparator(Class<? extends WritableComparable> recordClass) { super(recordClass); } // inherit JavaDoc @Override public abstract int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2); /** * Register an optimized comparator for a {@link Record} implementation. * * @param c record class for which a raw comparator is provided * @param comparator Raw comparator instance for class c */ public static synchronized void define(Class c, RecordComparator comparator) { WritableComparator.define(c, comparator); } }
1,979
34.357143
84
java
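A sketch of subclassing and registering a raw comparator. MyRecord is hypothetical here, standing in for a compiler-generated record class, so this does not compile as-is; the fallback uses WritableComparator's static compareBytes() helper.

import org.apache.hadoop.record.RecordComparator;

public class MyRecordComparator extends RecordComparator {
  public MyRecordComparator() {
    super(MyRecord.class);               // MyRecord: hypothetical generated record
  }
  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    return compareBytes(b1, s1, l1, b2, s2, l2);  // plain lexicographic byte comparison
  }
  static {
    // make the raw comparator discoverable for MyRecord
    RecordComparator.define(MyRecord.class, new MyRecordComparator());
  }
}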
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Index.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Interface that acts as an iterator for deserializing maps. * The deserializer returns an instance that the record uses to * read vectors and maps. An example of usage is as follows: * * <code> * Index idx = startVector(...); * while (!idx.done()) { * .... // read element of a vector * idx.incr(); * } * </code> * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public interface Index { boolean done(); void incr(); }
1,507
31.782609
77
java
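The same iterator drives map deserialization, with two reads per entry; a sketch with illustrative key/value types (compare the MAP branch of Utils.skip further down in this section).

import java.io.IOException;
import org.apache.hadoop.record.Index;
import org.apache.hadoop.record.RecordInput;

public class MapScanDemo {
  // Counts the entries of a serialized map<string,long> without keeping them.
  static int countEntries(RecordInput rin) throws IOException {
    int n = 0;
    Index midx = rin.startMap("counts");
    for (; !midx.done(); midx.incr()) {
      rin.readString("counts");  // one key...
      rin.readLong("counts");    // ...then one value per iteration
      n++;
    }
    rin.endMap("counts");
    return n;
  }
}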
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Record.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record; import java.io.DataInput; import java.io.DataOutput; import java.io.ByteArrayOutputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.WritableComparable; /** * Abstract class that is extended by generated classes. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public abstract class Record implements WritableComparable, Cloneable { /** * Serialize a record with tag (usually field name) * @param rout Record output destination * @param tag record tag (Used only in tagged serialization e.g. XML) */ public abstract void serialize(RecordOutput rout, String tag) throws IOException; /** * Deserialize a record with a tag (usually field name) * @param rin Record input source * @param tag Record tag (Used only in tagged serialization e.g. XML) */ public abstract void deserialize(RecordInput rin, String tag) throws IOException; // inherit javadoc @Override public abstract int compareTo (final Object peer) throws ClassCastException; /** * Serialize a record without a tag * @param rout Record output destination */ public void serialize(RecordOutput rout) throws IOException { this.serialize(rout, ""); } /** * Deserialize a record without a tag * @param rin Record input source */ public void deserialize(RecordInput rin) throws IOException { this.deserialize(rin, ""); } // inherit javadoc @Override public void write(final DataOutput out) throws java.io.IOException { BinaryRecordOutput bout = BinaryRecordOutput.get(out); this.serialize(bout); } // inherit javadoc @Override public void readFields(final DataInput din) throws java.io.IOException { BinaryRecordInput rin = BinaryRecordInput.get(din); this.deserialize(rin); } // inherit javadoc @Override public String toString() { try { ByteArrayOutputStream s = new ByteArrayOutputStream(); CsvRecordOutput a = new CsvRecordOutput(s); this.serialize(a); return new String(s.toByteArray(), "UTF-8"); } catch (Throwable ex) { throw new RuntimeException(ex); } } }
3,189
29.970874
78
java
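A binary round-trip sketch built on the write()/readFields() bridge above. MyRecord is again a hypothetical generated record class, so this is illustrative rather than compilable.

import java.io.*;

public class RoundTripDemo {
  public static void main(String[] args) throws IOException {
    MyRecord original = new MyRecord();            // hypothetical generated record
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bos));     // serialize via BinaryRecordOutput

    MyRecord copy = new MyRecord();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));  // deserialize via BinaryRecordInput
    System.out.println(copy);                      // CSV rendering from Record.toString()
  }
}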
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/MapTypeID.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordOutput; /** * Represents typeID for a Map * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class MapTypeID extends TypeID { private TypeID typeIDKey; private TypeID typeIDValue; public MapTypeID(TypeID typeIDKey, TypeID typeIDValue) { super(RIOType.MAP); this.typeIDKey = typeIDKey; this.typeIDValue = typeIDValue; } /** * get the TypeID of the map's key element */ public TypeID getKeyTypeID() { return this.typeIDKey; } /** * get the TypeID of the map's value element */ public TypeID getValueTypeID() { return this.typeIDValue; } @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); typeIDKey.write(rout, tag); typeIDValue.write(rout, tag); } /** * Two map typeIDs are equal if their constituent elements have the * same type */ @Override public boolean equals(Object o) { if (!super.equals(o)) return false; MapTypeID mti = (MapTypeID) o; return this.typeIDKey.equals(mti.typeIDKey) && this.typeIDValue.equals(mti.typeIDValue); } /** * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ @Override public int hashCode() { return 37*17+typeIDKey.hashCode() + 37*17+typeIDValue.hashCode(); } }
2,511
26.604396
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/VectorTypeID.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordOutput; /** * Represents typeID for vector. * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class VectorTypeID extends TypeID { private TypeID typeIDElement; public VectorTypeID(TypeID typeIDElement) { super(RIOType.VECTOR); this.typeIDElement = typeIDElement; } public TypeID getElementTypeID() { return this.typeIDElement; } @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); typeIDElement.write(rout, tag); } /** * Two vector typeIDs are equal if their constituent elements have the * same type */ @Override public boolean equals(Object o) { if (!super.equals (o)) return false; VectorTypeID vti = (VectorTypeID) o; return this.typeIDElement.equals(vti.typeIDElement); } /** * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ @Override public int hashCode() { return 37*17+typeIDElement.hashCode(); } }
2,170
27.946667
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/StructTypeID.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordInput; import org.apache.hadoop.record.RecordOutput; /** * Represents typeID for a struct * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class StructTypeID extends TypeID { private ArrayList<FieldTypeInfo> typeInfos = new ArrayList<FieldTypeInfo>(); StructTypeID() { super(RIOType.STRUCT); } /** * Create a StructTypeID based on the RecordTypeInfo of some record */ public StructTypeID(RecordTypeInfo rti) { super(RIOType.STRUCT); typeInfos.addAll(rti.getFieldTypeInfos()); } void add (FieldTypeInfo ti) { typeInfos.add(ti); } public Collection<FieldTypeInfo> getFieldTypeInfos() { return typeInfos; } /* * return the StructTypeID, if any, of the given field */ StructTypeID findStruct(String name) { // walk through the list, searching. Not the most efficient way, but this // is intended to be used rarely, so we keep it simple. // As an optimization, we can keep a hashmap of record name to its RTI, for later. for (FieldTypeInfo ti : typeInfos) { if ((0 == ti.getFieldID().compareTo(name)) && (ti.getTypeID().getTypeVal() == RIOType.STRUCT)) { return (StructTypeID) ti.getTypeID(); } } return null; } @Override void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); writeRest(rout, tag); } /* * Writes rest of the struct (excluding type value). * As an optimization, this method is directly called by RTI * for the top level record so that we don't write out the byte * indicating that this is a struct (since top level records are * always structs). */ void writeRest(RecordOutput rout, String tag) throws IOException { rout.writeInt(typeInfos.size(), tag); for (FieldTypeInfo ti : typeInfos) { ti.write(rout, tag); } } /* * deserialize ourselves. Called by RTI. 
*/ void read(RecordInput rin, String tag) throws IOException { // number of elements int numElems = rin.readInt(tag); for (int i=0; i<numElems; i++) { typeInfos.add(genericReadTypeInfo(rin, tag)); } } // generic reader: reads the next TypeInfo object from stream and returns it private FieldTypeInfo genericReadTypeInfo(RecordInput rin, String tag) throws IOException { String fieldName = rin.readString(tag); TypeID id = genericReadTypeID(rin, tag); return new FieldTypeInfo(fieldName, id); } // generic reader: reads the next TypeID object from stream and returns it private TypeID genericReadTypeID(RecordInput rin, String tag) throws IOException { byte typeVal = rin.readByte(tag); switch (typeVal) { case TypeID.RIOType.BOOL: return TypeID.BoolTypeID; case TypeID.RIOType.BUFFER: return TypeID.BufferTypeID; case TypeID.RIOType.BYTE: return TypeID.ByteTypeID; case TypeID.RIOType.DOUBLE: return TypeID.DoubleTypeID; case TypeID.RIOType.FLOAT: return TypeID.FloatTypeID; case TypeID.RIOType.INT: return TypeID.IntTypeID; case TypeID.RIOType.LONG: return TypeID.LongTypeID; case TypeID.RIOType.MAP: { TypeID tIDKey = genericReadTypeID(rin, tag); TypeID tIDValue = genericReadTypeID(rin, tag); return new MapTypeID(tIDKey, tIDValue); } case TypeID.RIOType.STRING: return TypeID.StringTypeID; case TypeID.RIOType.STRUCT: { StructTypeID stID = new StructTypeID(); int numElems = rin.readInt(tag); for (int i=0; i<numElems; i++) { stID.add(genericReadTypeInfo(rin, tag)); } return stID; } case TypeID.RIOType.VECTOR: { TypeID tID = genericReadTypeID(rin, tag); return new VectorTypeID(tID); } default: // shouldn't be here throw new IOException("Unknown type read"); } } @Override public boolean equals(Object o) { return super.equals(o); } @Override public int hashCode() { return super.hashCode(); } }
5,163
29.922156
102
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/Utils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import java.util.Iterator; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordInput; /** * Various utility functions for Hadoop record I/O platform. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class Utils { /** Cannot create a new instance of Utils */ private Utils() { } /** * read/skip bytes from stream based on a type */ public static void skip(RecordInput rin, String tag, TypeID typeID) throws IOException { switch (typeID.typeVal) { case TypeID.RIOType.BOOL: rin.readBool(tag); break; case TypeID.RIOType.BUFFER: rin.readBuffer(tag); break; case TypeID.RIOType.BYTE: rin.readByte(tag); break; case TypeID.RIOType.DOUBLE: rin.readDouble(tag); break; case TypeID.RIOType.FLOAT: rin.readFloat(tag); break; case TypeID.RIOType.INT: rin.readInt(tag); break; case TypeID.RIOType.LONG: rin.readLong(tag); break; case TypeID.RIOType.MAP: org.apache.hadoop.record.Index midx1 = rin.startMap(tag); MapTypeID mtID = (MapTypeID) typeID; for (; !midx1.done(); midx1.incr()) { skip(rin, tag, mtID.getKeyTypeID()); skip(rin, tag, mtID.getValueTypeID()); } rin.endMap(tag); break; case TypeID.RIOType.STRING: rin.readString(tag); break; case TypeID.RIOType.STRUCT: rin.startRecord(tag); // read past each field in the struct StructTypeID stID = (StructTypeID) typeID; Iterator<FieldTypeInfo> it = stID.getFieldTypeInfos().iterator(); while (it.hasNext()) { FieldTypeInfo tInfo = it.next(); skip(rin, tag, tInfo.getTypeID()); } rin.endRecord(tag); break; case TypeID.RIOType.VECTOR: org.apache.hadoop.record.Index vidx1 = rin.startVector(tag); VectorTypeID vtID = (VectorTypeID) typeID; for (; !vidx1.done(); vidx1.incr()) { skip(rin, tag, vtID.getElementTypeID()); } rin.endVector(tag); break; default: // shouldn't be here throw new IOException("Unknown typeID when skipping bytes"); } } }
3,241
29.87619
90
java
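A small sketch of the intended use of Utils.skip: reading past every field described by a peer's type information, using only accessors shown in this section.

import java.io.IOException;
import org.apache.hadoop.record.RecordInput;
import org.apache.hadoop.record.meta.*;

public class SkipDemo {
  /** Read past all fields described by peerInfo without materializing them. */
  static void skipAll(RecordInput rin, RecordTypeInfo peerInfo) throws IOException {
    for (FieldTypeInfo fti : peerInfo.getFieldTypeInfos()) {
      Utils.skip(rin, fti.getFieldID(), fti.getTypeID());
    }
  }
}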
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/TypeID.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordOutput; /** * Represents typeID for basic types. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class TypeID { /** * constants representing the IDL types we support */ public static final class RIOType { public static final byte BOOL = 1; public static final byte BUFFER = 2; public static final byte BYTE = 3; public static final byte DOUBLE = 4; public static final byte FLOAT = 5; public static final byte INT = 6; public static final byte LONG = 7; public static final byte MAP = 8; public static final byte STRING = 9; public static final byte STRUCT = 10; public static final byte VECTOR = 11; } /** * Constant classes for the basic types, so we can share them. */ public static final TypeID BoolTypeID = new TypeID(RIOType.BOOL); public static final TypeID BufferTypeID = new TypeID(RIOType.BUFFER); public static final TypeID ByteTypeID = new TypeID(RIOType.BYTE); public static final TypeID DoubleTypeID = new TypeID(RIOType.DOUBLE); public static final TypeID FloatTypeID = new TypeID(RIOType.FLOAT); public static final TypeID IntTypeID = new TypeID(RIOType.INT); public static final TypeID LongTypeID = new TypeID(RIOType.LONG); public static final TypeID StringTypeID = new TypeID(RIOType.STRING); protected byte typeVal; /** * Create a TypeID object */ TypeID(byte typeVal) { this.typeVal = typeVal; } /** * Get the type value. One of the constants in RIOType. */ public byte getTypeVal() { return typeVal; } /** * Serialize the TypeID object */ void write(RecordOutput rout, String tag) throws IOException { rout.writeByte(typeVal, tag); } /** * Two base typeIDs are equal if they refer to the same type */ @Override public boolean equals(Object o) { if (this == o) return true; if (o == null) return false; if (this.getClass() != o.getClass()) return false; TypeID oTypeID = (TypeID) o; return (this.typeVal == oTypeID.typeVal); } /** * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ @Override public int hashCode() { // See 'Effectve Java' by Joshua Bloch return 37*17+(int)typeVal; } }
3,450
28.245763
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/RecordTypeInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordInput; import org.apache.hadoop.record.RecordOutput; /** * A record's Type Information object which can read/write itself. * * Type information for a record comprises metadata about the record, * as well as a collection of type information for each field in the record. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class RecordTypeInfo extends org.apache.hadoop.record.Record { private String name; // A RecordTypeInfo is really just a wrapper around StructTypeID StructTypeID sTid; // A RecordTypeInfo object is just a collection of TypeInfo objects for each of its fields. //private ArrayList<FieldTypeInfo> typeInfos = new ArrayList<FieldTypeInfo>(); // we keep a hashmap of struct/record names and their type information, as we need it to // set filters when reading nested structs. This map is used during deserialization. //private Map<String, RecordTypeInfo> structRTIs = new HashMap<String, RecordTypeInfo>(); /** * Create an empty RecordTypeInfo object. */ public RecordTypeInfo() { sTid = new StructTypeID(); } /** * Create a RecordTypeInfo object representing a record with the given name * @param name Name of the record */ public RecordTypeInfo(String name) { this.name = name; sTid = new StructTypeID(); } /* * private constructor */ private RecordTypeInfo(String name, StructTypeID stid) { this.sTid = stid; this.name = name; } /** * return the name of the record */ public String getName() { return name; } /** * set the name of the record */ public void setName(String name) { this.name = name; } /** * Add a field. * @param fieldName Name of the field * @param tid Type ID of the field */ public void addField(String fieldName, TypeID tid) { sTid.getFieldTypeInfos().add(new FieldTypeInfo(fieldName, tid)); } private void addAll(Collection<FieldTypeInfo> tis) { sTid.getFieldTypeInfos().addAll(tis); } /** * Return a collection of field type infos */ public Collection<FieldTypeInfo> getFieldTypeInfos() { return sTid.getFieldTypeInfos(); } /** * Return the type info of a nested record. We only consider nesting * to one level. 
* @param name Name of the nested record */ public RecordTypeInfo getNestedStructTypeInfo(String name) { StructTypeID stid = sTid.findStruct(name); if (null == stid) return null; return new RecordTypeInfo(name, stid); } /** * Serialize the type information for a record */ @Override public void serialize(RecordOutput rout, String tag) throws IOException { // write out any header, version info, here rout.startRecord(this, tag); rout.writeString(name, tag); sTid.writeRest(rout, tag); rout.endRecord(this, tag); } /** * Deserialize the type information for a record */ @Override public void deserialize(RecordInput rin, String tag) throws IOException { // read in any header, version info rin.startRecord(tag); // name this.name = rin.readString(tag); sTid.read(rin, tag); rin.endRecord(tag); } /** * This class is not meant to be compared; it exists only for * de/serializing type information, so compareTo() is not implemented * and always throws: a ClassCastException if the peer is not a * RecordTypeInfo, and an UnsupportedOperationException otherwise. */ @Override public int compareTo (final Object peer_) throws ClassCastException { if (!(peer_ instanceof RecordTypeInfo)) { throw new ClassCastException("Comparing different types of records."); } throw new UnsupportedOperationException("compareTo() is not supported"); } }
4,849
28.938272
96
java
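A sketch of building type information by hand with the classes above; the record name and field layout are made up for illustration.

import org.apache.hadoop.record.meta.*;

public class TypeInfoDemo {
  public static void main(String[] args) {
    // illustrative shape: { long id; vector<string> tags; map<string,long> counts; }
    RecordTypeInfo rti = new RecordTypeInfo("Employee");
    rti.addField("id", TypeID.LongTypeID);
    rti.addField("tags", new VectorTypeID(TypeID.StringTypeID));
    rti.addField("counts", new MapTypeID(TypeID.StringTypeID, TypeID.LongTypeID));
    System.out.println(rti.getFieldTypeInfos().size());  // 3
    // rti.serialize(...) would now write this description via the write() methods above
  }
}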
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/meta/FieldTypeInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.meta; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.RecordOutput; /** * Represents a type information for a field, which is made up of its * ID (name) and its type (a TypeID object). * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class FieldTypeInfo { private String fieldID; private TypeID typeID; /** * Construct a FieldTypeInfo with the given field name and the type */ FieldTypeInfo(String fieldID, TypeID typeID) { this.fieldID = fieldID; this.typeID = typeID; } /** * get the field's TypeID object */ public TypeID getTypeID() { return typeID; } /** * get the field's id (name) */ public String getFieldID() { return fieldID; } void write(RecordOutput rout, String tag) throws IOException { rout.writeString(fieldID, tag); typeID.write(rout, tag); } /** * Two FieldTypeInfos are equal if each of their fields matches */ @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof FieldTypeInfo)) return false; FieldTypeInfo fti = (FieldTypeInfo) o; // first check if fieldID matches if (!this.fieldID.equals(fti.fieldID)) { return false; } // now see if typeID matches return (this.typeID.equals(fti.typeID)); } /** * We use a basic hashcode implementation, since this class will likely not * be used as a hashmap key */ @Override public int hashCode() { return 37*17+typeID.hashCode() + 37*17+fieldID.hashCode(); } public boolean equals(FieldTypeInfo ti) { // first check if fieldID matches if (!this.fieldID.equals(ti.fieldID)) { return false; } // now see if typeID matches return (this.typeID.equals(ti.typeID)); } }
2,855
25.444444
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JVector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JVector extends JCompType { static private int level = 0; static private String getId(String id) { return id+getLevel(); } static private String getLevel() { return Integer.toString(level); } static private void incrLevel() { level++; } static private void decrLevel() { level--; } private JType type; class JavaVector extends JavaCompType { private JType.JavaType element; JavaVector(JType.JavaType t) { super("java.util.ArrayList<"+t.getWrapperType()+">", "Vector", "java.util.ArrayList<"+t.getWrapperType()+">", "TypeID.RIOType.VECTOR"); element = t; } @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.VectorTypeID(" + element.getTypeIDObjectString() + ")"; } @Override void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) { element.genSetRTIFilter(cb, nestedStructMap); } @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append("{\n"); incrLevel(); cb.append("int "+getId(Consts.RIO_PREFIX + "len1")+" = "+fname+ ".size();\n"); cb.append("int "+getId(Consts.RIO_PREFIX + "len2")+" = "+other+ ".size();\n"); cb.append("for(int "+getId(Consts.RIO_PREFIX + "vidx")+" = 0; "+ getId(Consts.RIO_PREFIX + "vidx")+"<"+getId(Consts.RIO_PREFIX + "len1")+ " && "+getId(Consts.RIO_PREFIX + "vidx")+"<"+ getId(Consts.RIO_PREFIX + "len2")+"; "+ getId(Consts.RIO_PREFIX + "vidx")+"++) {\n"); cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e1")+ " = "+fname+ ".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n"); cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e2")+ " = "+other+ ".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n"); element.genCompareTo(cb, getId(Consts.RIO_PREFIX + "e1"), getId(Consts.RIO_PREFIX + "e2")); cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) { return " + Consts.RIO_PREFIX + "ret; }\n"); cb.append("}\n"); cb.append(Consts.RIO_PREFIX + "ret = ("+getId(Consts.RIO_PREFIX + "len1")+ " - "+getId(Consts.RIO_PREFIX + "len2")+");\n"); decrLevel(); cb.append("}\n"); } @Override void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(getType()+" "+fname+";\n"); } cb.append("{\n"); incrLevel(); cb.append("org.apache.hadoop.record.Index "+ getId(Consts.RIO_PREFIX + "vidx")+" = " + Consts.RECORD_INPUT + ".startVector(\""+tag+"\");\n"); cb.append(fname+"=new "+getType()+"();\n"); cb.append("for (; !"+getId(Consts.RIO_PREFIX + "vidx")+".done(); " + 
getId(Consts.RIO_PREFIX + "vidx")+".incr()) {\n"); element.genReadMethod(cb, getId(Consts.RIO_PREFIX + "e"), getId(Consts.RIO_PREFIX + "e"), true); cb.append(fname+".add("+getId(Consts.RIO_PREFIX + "e")+");\n"); cb.append("}\n"); cb.append(Consts.RECORD_INPUT + ".endVector(\""+tag+"\");\n"); decrLevel(); cb.append("}\n"); } @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { cb.append("{\n"); incrLevel(); cb.append(Consts.RECORD_OUTPUT + ".startVector("+fname+",\""+tag+"\");\n"); cb.append("int "+getId(Consts.RIO_PREFIX + "len")+" = "+fname+".size();\n"); cb.append("for(int "+getId(Consts.RIO_PREFIX + "vidx")+" = 0; " + getId(Consts.RIO_PREFIX + "vidx")+"<"+getId(Consts.RIO_PREFIX + "len")+ "; "+getId(Consts.RIO_PREFIX + "vidx")+"++) {\n"); cb.append(element.getType()+" "+getId(Consts.RIO_PREFIX + "e")+" = "+ fname+".get("+getId(Consts.RIO_PREFIX + "vidx")+");\n"); element.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "e"), getId(Consts.RIO_PREFIX + "e")); cb.append("}\n"); cb.append(Consts.RECORD_OUTPUT + ".endVector("+fname+",\""+tag+"\");\n"); cb.append("}\n"); decrLevel(); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); incrLevel(); cb.append("int "+getId("vi")+ " = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); cb.append("int "+getId("vz")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi")+");\n"); cb.append(s+"+="+getId("vz")+"; "+l+"-="+getId("vz")+";\n"); cb.append("for (int "+getId("vidx")+" = 0; "+getId("vidx")+ " < "+getId("vi")+"; "+getId("vidx")+"++)"); element.genSlurpBytes(cb, b, s, l); decrLevel(); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); incrLevel(); cb.append("int "+getId("vi1")+ " = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); cb.append("int "+getId("vi2")+ " = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n"); cb.append("int "+getId("vz1")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi1")+");\n"); cb.append("int "+getId("vz2")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("vi2")+");\n"); cb.append("s1+="+getId("vz1")+"; s2+="+getId("vz2")+ "; l1-="+getId("vz1")+"; l2-="+getId("vz2")+";\n"); cb.append("for (int "+getId("vidx")+" = 0; "+getId("vidx")+ " < "+getId("vi1")+" && "+getId("vidx")+" < "+getId("vi2")+ "; "+getId("vidx")+"++)"); element.genCompareBytes(cb); cb.append("if ("+getId("vi1")+" != "+getId("vi2")+ ") { return ("+getId("vi1")+"<"+getId("vi2")+")?-1:0; }\n"); decrLevel(); cb.append("}\n"); } } class CppVector extends CppCompType { private JType.CppType element; CppVector(JType.CppType t) { super("::std::vector< "+t.getType()+" >"); element = t; } @Override String getTypeIDObjectString() { return "new ::hadoop::VectorTypeID(" + element.getTypeIDObjectString() + ")"; } @Override void genSetRTIFilter(CodeBuffer cb) { element.genSetRTIFilter(cb); } } /** Creates a new instance of JVector */ public JVector(JType t) { type = t; setJavaType(new JavaVector(t.getJavaType())); setCppType(new CppVector(t.getCppType())); setCType(new CCompType()); } @Override String getSignature() { return "[" + type.getSignature() + "]"; } }
7,872
35.618605
86
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JLong.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Code generator for "long" type * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JLong extends JType { class JavaLong extends JavaType { JavaLong() { super("long", "Long", "Long", "TypeID.RIOType.LONG"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.LongTypeID"; } @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = (int) ("+fname+"^("+ fname+">>>32));\n"); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("long i = org.apache.hadoop.record.Utils.readVLong("+b+", "+s+");\n"); cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n"); cb.append(s+"+=z; "+l+"-=z;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);\n"); cb.append("long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);\n"); cb.append("if (i1 != i2) {\n"); cb.append("return ((i1-i2) < 0) ? -1 : 0;\n"); cb.append("}\n"); cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n"); cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n"); cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n"); cb.append("}\n"); } } class CppLong extends CppType { CppLong() { super("int64_t"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_LONG)"; } } /** Creates a new instance of JLong */ public JLong() { setJavaType(new JavaLong()); setCppType(new CppLong()); setCType(new CType()); } @Override String getSignature() { return "l"; } }
3,005
29.363636
86
java
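For readability, the raw-comparison fragment that JavaLong.genCompareBytes emits, assembled from the cb.append calls above (b1/s1/l1 and b2/s2/l2 are the generated comparator's buffer/offset/length variables):

{
long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
if (i1 != i2) {
return ((i1-i2) < 0) ? -1 : 0;
}
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1+=z1; s2+=z2; l1-=z1; l2-=z2;
}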
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JavaGenerator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.ArrayList; import java.io.IOException; import java.util.Iterator; /** * Java Code generator front-end for Hadoop record I/O. */ class JavaGenerator extends CodeGenerator { JavaGenerator() { } /** * Generate Java code for records. This method is only a front-end to * JRecord, since one file is generated for each record. * * @param name possibly full pathname to the file * @param ilist included files (as JFile) * @param rlist List of records defined within this file * @param destDir output directory */ @Override void genCode(String name, ArrayList<JFile> ilist, ArrayList<JRecord> rlist, String destDir, ArrayList<String> options) throws IOException { for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) { JRecord rec = iter.next(); rec.genJavaCode(destDir, options); } } }
1,744
32.557692
83
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JByte.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Code generator for "byte" type. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JByte extends JType { class JavaByte extends JavaType { JavaByte() { super("byte", "Byte", "Byte", "TypeID.RIOType.BYTE"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.ByteTypeID"; } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<1) {\n"); cb.append("throw new java.io.IOException(\"Byte is exactly 1 byte."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append(s+"++; "+l+"--;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<1 || l2<1) {\n"); cb.append("throw new java.io.IOException(\"Byte is exactly 1 byte."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append("if (b1[s1] != b2[s2]) {\n"); cb.append("return (b1[s1]<b2[s2])?-1:0;\n"); cb.append("}\n"); cb.append("s1++; s2++; l1--; l2--;\n"); cb.append("}\n"); } } class CppByte extends CppType { CppByte() { super("int8_t"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BYTE)"; } } public JByte() { setJavaType(new JavaByte()); setCppType(new CppByte()); setCType(new CType()); } @Override String getSignature() { return "b"; } }
2,691
27.638298
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JString.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JString extends JCompType { class JavaString extends JavaCompType { JavaString() { super("String", "String", "String", "TypeID.RIOType.STRING"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.StringTypeID"; } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n"); cb.append(s+"+=(z+i); "+l+"-= (z+i);\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n"); cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n"); cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n"); cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n"); cb.append("int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);\n"); cb.append("if (r1 != 0) { return (r1<0)?-1:0; }\n"); cb.append("s1+=i1; s2+=i2; l1-=i1; l2-=i2;\n"); cb.append("}\n"); } @Override void genClone(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "other."+fname+" = this."+fname+";\n"); } } class CppString extends CppCompType { CppString() { super("::std::string"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_STRING)"; } } /** Creates a new instance of JString */ public JString() { setJavaType(new JavaString()); setCppType(new CppString()); setCType(new CCompType()); } @Override String getSignature() { return "s"; } }
3,100
30.969072
94
java
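Assembled from JavaString.genCompareBytes above (with the length bookkeeping corrected so that l2, not l1, is reduced by i2), the emitted fragment reads:

{
int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1+=z1; s2+=z2; l1-=z1; l2-=z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);
if (r1 != 0) { return (r1<0)?-1:0; }
s1+=i1; s2+=i2; l1-=i1; l2-=i2;
}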
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JDouble.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JDouble extends JType { class JavaDouble extends JavaType { JavaDouble() { super("double", "Double", "Double", "TypeID.RIOType.DOUBLE"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.DoubleTypeID"; } @Override void genHashCode(CodeBuffer cb, String fname) { String tmp = "Double.doubleToLongBits("+fname+")"; cb.append(Consts.RIO_PREFIX + "ret = (int)("+tmp+"^("+tmp+">>>32));\n"); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<8) {\n"); cb.append("throw new java.io.IOException(\"Double is exactly 8 bytes."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append(s+"+=8; "+l+"-=8;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<8 || l2<8) {\n"); cb.append("throw new java.io.IOException(\"Double is exactly 8 bytes."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append("double d1 = org.apache.hadoop.record.Utils.readDouble(b1, s1);\n"); cb.append("double d2 = org.apache.hadoop.record.Utils.readDouble(b2, s2);\n"); cb.append("if (d1 != d2) {\n"); cb.append("return ((d1-d2) < 0) ? -1 : 0;\n"); cb.append("}\n"); cb.append("s1+=8; s2+=8; l1-=8; l2-=8;\n"); cb.append("}\n"); } } class CppDouble extends CppType { CppDouble() { super("double"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_DOUBLE)"; } } /** Creates a new instance of JDouble */ public JDouble() { setJavaType(new JavaDouble()); setCppType(new CppDouble()); setCType(new CType()); } @Override String getSignature() { return "d"; } }
3,114
29.242718
84
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBoolean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JBoolean extends JType { class JavaBoolean extends JType.JavaType { JavaBoolean() { super("boolean", "Bool", "Boolean", "TypeID.RIOType.BOOL"); } @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+" == "+other+")? 0 : ("+ fname+"?1:-1);\n"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.BoolTypeID"; } @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+")?0:1;\n"); } // In Binary format, boolean is written as byte. true = 1, false = 0 @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<1) {\n"); cb.append("throw new java.io.IOException(\"Boolean is exactly 1 byte."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append(s+"++; "+l+"--;\n"); cb.append("}\n"); } // In Binary format, boolean is written as byte. true = 1, false = 0 @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<1 || l2<1) {\n"); cb.append("throw new java.io.IOException(\"Boolean is exactly 1 byte."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append("if (b1[s1] != b2[s2]) {\n"); cb.append("return (b1[s1]<b2[s2])? -1 : 0;\n"); cb.append("}\n"); cb.append("s1++; s2++; l1--; l2--;\n"); cb.append("}\n"); } } class CppBoolean extends CppType { CppBoolean() { super("bool"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BOOL)"; } } /** Creates a new instance of JBoolean */ public JBoolean() { setJavaType(new JavaBoolean()); setCppType(new CppBoolean()); setCType(new CType()); } @Override String getSignature() { return "z"; } }
3,227
29.168224
77
java
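Assembled from JavaBoolean.genCompareBytes above, the emitted fragment shows the one-byte binary encoding at work:

{
if (l1<1 || l2<1) {
throw new java.io.IOException("Boolean is exactly 1 byte. Provided buffer is smaller.");
}
if (b1[s1] != b2[s2]) {
return (b1[s1]<b2[s2])? -1 : 0;
}
s1++; s2++; l1--; l2--;
}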
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JRecord.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JRecord extends JCompType { class JavaRecord extends JavaCompType { private String fullName; private String name; private String module; private ArrayList<JField<JavaType>> fields = new ArrayList<JField<JavaType>>(); JavaRecord(String name, ArrayList<JField<JType>> flist) { super(name, "Record", name, "TypeID.RIOType.STRUCT"); this.fullName = name; int idx = name.lastIndexOf('.'); this.name = name.substring(idx+1); this.module = name.substring(0, idx); for (Iterator<JField<JType>> iter = flist.iterator(); iter.hasNext();) { JField<JType> f = iter.next(); fields.add(new JField<JavaType>(f.getName(), f.getType().getJavaType())); } } @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.StructTypeID(" + fullName + ".getTypeInfo())"; } @Override void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) { // ignore, if we've already set the type filter for this record if (!nestedStructMap.containsKey(fullName)) { // we set the RTI filter here cb.append(fullName + ".setTypeFilter(rti.getNestedStructTypeInfo(\""+ name + "\"));\n"); nestedStructMap.put(fullName, null); } } // for each typeInfo in the filter, we see if there's a similar one in the record. // Since we store typeInfos in ArrayLists, this search is O(n squared). We could do it faster // if we also stored a map (of TypeInfo to index), but since setupRtiFields() is called // only once when deserializing, we're sticking with the former, as the code is easier. void genSetupRtiFields(CodeBuffer cb) { cb.append("private static void setupRtiFields()\n{\n"); cb.append("if (null == " + Consts.RTI_FILTER + ") return;\n"); cb.append("// we may already have done this\n"); cb.append("if (null != " + Consts.RTI_FILTER_FIELDS + ") return;\n"); cb.append("int " + Consts.RIO_PREFIX + "i, " + Consts.RIO_PREFIX + "j;\n"); cb.append(Consts.RTI_FILTER_FIELDS + " = new int [" + Consts.RIO_PREFIX + "rtiFilter.getFieldTypeInfos().size()];\n"); cb.append("for (" + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<"+ Consts.RTI_FILTER_FIELDS + ".length; " + Consts.RIO_PREFIX + "i++) {\n"); cb.append(Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = 0;\n"); cb.append("}\n"); cb.append("java.util.Iterator<org.apache.hadoop.record.meta." 
+ "FieldTypeInfo> " + Consts.RIO_PREFIX + "itFilter = " + Consts.RIO_PREFIX + "rtiFilter.getFieldTypeInfos().iterator();\n"); cb.append(Consts.RIO_PREFIX + "i=0;\n"); cb.append("while (" + Consts.RIO_PREFIX + "itFilter.hasNext()) {\n"); cb.append("org.apache.hadoop.record.meta.FieldTypeInfo " + Consts.RIO_PREFIX + "tInfoFilter = " + Consts.RIO_PREFIX + "itFilter.next();\n"); cb.append("java.util.Iterator<org.apache.hadoop.record.meta." + "FieldTypeInfo> " + Consts.RIO_PREFIX + "it = " + Consts.RTI_VAR + ".getFieldTypeInfos().iterator();\n"); cb.append(Consts.RIO_PREFIX + "j=1;\n"); cb.append("while (" + Consts.RIO_PREFIX + "it.hasNext()) {\n"); cb.append("org.apache.hadoop.record.meta.FieldTypeInfo " + Consts.RIO_PREFIX + "tInfo = " + Consts.RIO_PREFIX + "it.next();\n"); cb.append("if (" + Consts.RIO_PREFIX + "tInfo.equals(" + Consts.RIO_PREFIX + "tInfoFilter)) {\n"); cb.append(Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = " + Consts.RIO_PREFIX + "j;\n"); cb.append("break;\n"); cb.append("}\n"); cb.append(Consts.RIO_PREFIX + "j++;\n"); cb.append("}\n"); /*int ct = 0; for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { ct++; JField<JavaType> jf = i.next(); JavaType type = jf.getType(); String name = jf.getName(); if (ct != 1) { cb.append("else "); } type.genRtiFieldCondition(cb, name, ct); } if (ct != 0) { cb.append("else {\n"); cb.append("rtiFilterFields[i] = 0;\n"); cb.append("}\n"); }*/ cb.append(Consts.RIO_PREFIX + "i++;\n"); cb.append("}\n"); cb.append("}\n"); } @Override void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(fullName+" "+fname+";\n"); } cb.append(fname+"= new "+fullName+"();\n"); cb.append(fname+".deserialize(" + Consts.RECORD_INPUT + ",\""+tag+"\");\n"); } @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { cb.append(fname+".serialize(" + Consts.RECORD_OUTPUT + ",\""+tag+"\");\n"); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int r = "+fullName+ ".Comparator.slurpRaw("+b+","+s+","+l+");\n"); cb.append(s+"+=r; "+l+"-=r;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int r1 = "+fullName+ ".Comparator.compareRaw(b1,s1,l1,b2,s2,l2);\n"); cb.append("if (r1 <= 0) { return r1; }\n"); cb.append("s1+=r1; s2+=r1; l1-=r1; l2-=r1;\n"); cb.append("}\n"); } void genCode(String destDir, ArrayList<String> options) throws IOException { String pkg = module; String pkgpath = pkg.replaceAll("\\.", "/"); File pkgdir = new File(destDir, pkgpath); final File jfile = new File(pkgdir, name+".java"); if (!pkgdir.exists()) { // create the pkg directory boolean ret = pkgdir.mkdirs(); if (!ret) { throw new IOException("Cannnot create directory: "+pkgpath); } } else if (!pkgdir.isDirectory()) { // not a directory throw new IOException(pkgpath+" is not a directory."); } CodeBuffer cb = new CodeBuffer(); cb.append("// File generated by hadoop record compiler. 
Do not edit.\n"); cb.append("package "+module+";\n\n"); cb.append("public class "+name+ " extends org.apache.hadoop.record.Record {\n"); // type information declarations cb.append("private static final " + "org.apache.hadoop.record.meta.RecordTypeInfo " + Consts.RTI_VAR + ";\n"); cb.append("private static " + "org.apache.hadoop.record.meta.RecordTypeInfo " + Consts.RTI_FILTER + ";\n"); cb.append("private static int[] " + Consts.RTI_FILTER_FIELDS + ";\n"); // static init for type information cb.append("static {\n"); cb.append(Consts.RTI_VAR + " = " + "new org.apache.hadoop.record.meta.RecordTypeInfo(\"" + name + "\");\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genStaticTypeInfo(cb, name); } cb.append("}\n\n"); // field definitions for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genDecl(cb, name); } // default constructor cb.append("public "+name+"() { }\n"); // constructor cb.append("public "+name+"(\n"); int fIdx = 0; for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext(); fIdx++) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genConstructorParam(cb, name); cb.append((!i.hasNext())?"":",\n"); } cb.append(") {\n"); fIdx = 0; for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext(); fIdx++) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genConstructorSet(cb, name); } cb.append("}\n"); // getter/setter for type info cb.append("public static org.apache.hadoop.record.meta.RecordTypeInfo" + " getTypeInfo() {\n"); cb.append("return " + Consts.RTI_VAR + ";\n"); cb.append("}\n"); cb.append("public static void setTypeFilter(" + "org.apache.hadoop.record.meta.RecordTypeInfo rti) {\n"); cb.append("if (null == rti) return;\n"); cb.append(Consts.RTI_FILTER + " = rti;\n"); cb.append(Consts.RTI_FILTER_FIELDS + " = null;\n"); // set RTIFilter for nested structs. // To prevent setting up the type filter for the same struct more than once, // we use a hash map to keep track of what we've set. 
Map<String, Integer> nestedStructMap = new HashMap<String, Integer>(); for (JField<JavaType> jf : fields) { JavaType type = jf.getType(); type.genSetRTIFilter(cb, nestedStructMap); } cb.append("}\n"); // setupRtiFields() genSetupRtiFields(cb); // getters/setters for member variables for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genGetSet(cb, name); } // serialize() cb.append("public void serialize("+ "final org.apache.hadoop.record.RecordOutput " + Consts.RECORD_OUTPUT + ", final String " + Consts.TAG + ")\n"+ "throws java.io.IOException {\n"); cb.append(Consts.RECORD_OUTPUT + ".startRecord(this," + Consts.TAG + ");\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genWriteMethod(cb, name, name); } cb.append(Consts.RECORD_OUTPUT + ".endRecord(this," + Consts.TAG+");\n"); cb.append("}\n"); // deserializeWithoutFilter() cb.append("private void deserializeWithoutFilter("+ "final org.apache.hadoop.record.RecordInput " + Consts.RECORD_INPUT + ", final String " + Consts.TAG + ")\n"+ "throws java.io.IOException {\n"); cb.append(Consts.RECORD_INPUT + ".startRecord(" + Consts.TAG + ");\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genReadMethod(cb, name, name, false); } cb.append(Consts.RECORD_INPUT + ".endRecord(" + Consts.TAG+");\n"); cb.append("}\n"); // deserialize() cb.append("public void deserialize(final " + "org.apache.hadoop.record.RecordInput " + Consts.RECORD_INPUT + ", final String " + Consts.TAG + ")\n"+ "throws java.io.IOException {\n"); cb.append("if (null == " + Consts.RTI_FILTER + ") {\n"); cb.append("deserializeWithoutFilter(" + Consts.RECORD_INPUT + ", " + Consts.TAG + ");\n"); cb.append("return;\n"); cb.append("}\n"); cb.append("// if we're here, we need to read based on version info\n"); cb.append(Consts.RECORD_INPUT + ".startRecord(" + Consts.TAG + ");\n"); cb.append("setupRtiFields();\n"); cb.append("for (int " + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<" + Consts.RTI_FILTER + ".getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n"); int ct = 0; for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); ct++; if (1 != ct) { cb.append("else "); } cb.append("if (" + ct + " == " + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i]) {\n"); type.genReadMethod(cb, name, name, false); cb.append("}\n"); } if (0 != ct) { cb.append("else {\n"); cb.append("java.util.ArrayList<" + "org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = " + "(java.util.ArrayList<" + "org.apache.hadoop.record.meta.FieldTypeInfo>)" + "(" + Consts.RTI_FILTER + ".getFieldTypeInfos());\n"); cb.append("org.apache.hadoop.record.meta.Utils.skip(" + Consts.RECORD_INPUT + ", " + "typeInfos.get(" + Consts.RIO_PREFIX + "i).getFieldID(), typeInfos.get(" + Consts.RIO_PREFIX + "i).getTypeID());\n"); cb.append("}\n"); } cb.append("}\n"); cb.append(Consts.RECORD_INPUT + ".endRecord(" + Consts.TAG+");\n"); cb.append("}\n"); // compareTo() cb.append("public int compareTo (final Object " + Consts.RIO_PREFIX + "peer_) throws ClassCastException {\n"); cb.append("if (!(" + Consts.RIO_PREFIX + "peer_ instanceof "+name+")) {\n"); 
cb.append("throw new ClassCastException(\"Comparing different types of records.\");\n"); cb.append("}\n"); cb.append(name+" " + Consts.RIO_PREFIX + "peer = ("+name+") " + Consts.RIO_PREFIX + "peer_;\n"); cb.append("int " + Consts.RIO_PREFIX + "ret = 0;\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genCompareTo(cb, name, Consts.RIO_PREFIX + "peer."+name); cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) return " + Consts.RIO_PREFIX + "ret;\n"); } cb.append("return " + Consts.RIO_PREFIX + "ret;\n"); cb.append("}\n"); // equals() cb.append("public boolean equals(final Object " + Consts.RIO_PREFIX + "peer_) {\n"); cb.append("if (!(" + Consts.RIO_PREFIX + "peer_ instanceof "+name+")) {\n"); cb.append("return false;\n"); cb.append("}\n"); cb.append("if (" + Consts.RIO_PREFIX + "peer_ == this) {\n"); cb.append("return true;\n"); cb.append("}\n"); cb.append(name+" " + Consts.RIO_PREFIX + "peer = ("+name+") " + Consts.RIO_PREFIX + "peer_;\n"); cb.append("boolean " + Consts.RIO_PREFIX + "ret = false;\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genEquals(cb, name, Consts.RIO_PREFIX + "peer."+name); cb.append("if (!" + Consts.RIO_PREFIX + "ret) return " + Consts.RIO_PREFIX + "ret;\n"); } cb.append("return " + Consts.RIO_PREFIX + "ret;\n"); cb.append("}\n"); // clone() cb.append("public Object clone() throws CloneNotSupportedException {\n"); cb.append(name+" " + Consts.RIO_PREFIX + "other = new "+name+"();\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genClone(cb, name); } cb.append("return " + Consts.RIO_PREFIX + "other;\n"); cb.append("}\n"); cb.append("public int hashCode() {\n"); cb.append("int " + Consts.RIO_PREFIX + "result = 17;\n"); cb.append("int " + Consts.RIO_PREFIX + "ret;\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genHashCode(cb, name); cb.append(Consts.RIO_PREFIX + "result = 37*" + Consts.RIO_PREFIX + "result + " + Consts.RIO_PREFIX + "ret;\n"); } cb.append("return " + Consts.RIO_PREFIX + "result;\n"); cb.append("}\n"); cb.append("public static String signature() {\n"); cb.append("return \""+getSignature()+"\";\n"); cb.append("}\n"); cb.append("public static class Comparator extends"+ " org.apache.hadoop.record.RecordComparator {\n"); cb.append("public Comparator() {\n"); cb.append("super("+name+".class);\n"); cb.append("}\n"); cb.append("static public int slurpRaw(byte[] b, int s, int l) {\n"); cb.append("try {\n"); cb.append("int os = s;\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = jf.getName(); JavaType type = jf.getType(); type.genSlurpBytes(cb, "b","s","l"); } cb.append("return (os - s);\n"); cb.append("} catch(java.io.IOException e) {\n"); cb.append("throw new RuntimeException(e);\n"); cb.append("}\n"); cb.append("}\n"); cb.append("static public int compareRaw(byte[] b1, int s1, int l1,\n"); cb.append(" byte[] b2, int s2, int l2) {\n"); cb.append("try {\n"); cb.append("int os1 = s1;\n"); for (Iterator<JField<JavaType>> i = fields.iterator(); i.hasNext();) { JField<JavaType> jf = i.next(); String name = 
jf.getName(); JavaType type = jf.getType(); type.genCompareBytes(cb); } cb.append("return (os1 - s1);\n"); cb.append("} catch(java.io.IOException e) {\n"); cb.append("throw new RuntimeException(e);\n"); cb.append("}\n"); cb.append("}\n"); cb.append("public int compare(byte[] b1, int s1, int l1,\n"); cb.append(" byte[] b2, int s2, int l2) {\n"); cb.append("int ret = compareRaw(b1,s1,l1,b2,s2,l2);\n"); cb.append("return (ret == -1)? -1 : ((ret==0)? 1 : 0);"); cb.append("}\n"); cb.append("}\n\n"); cb.append("static {\n"); cb.append("org.apache.hadoop.record.RecordComparator.define(" +name+".class, new Comparator());\n"); cb.append("}\n"); cb.append("}\n"); FileWriter jj = new FileWriter(jfile); try { jj.write(cb.toString()); } finally { jj.close(); } } } class CppRecord extends CppCompType { private String fullName; private String name; private String module; private ArrayList<JField<CppType>> fields = new ArrayList<JField<CppType>>(); CppRecord(String name, ArrayList<JField<JType>> flist) { super(name.replaceAll("\\.","::")); this.fullName = name.replaceAll("\\.", "::"); int idx = name.lastIndexOf('.'); this.name = name.substring(idx+1); this.module = name.substring(0, idx).replaceAll("\\.", "::"); for (Iterator<JField<JType>> iter = flist.iterator(); iter.hasNext();) { JField<JType> f = iter.next(); fields.add(new JField<CppType>(f.getName(), f.getType().getCppType())); } } @Override String getTypeIDObjectString() { return "new ::hadoop::StructTypeID(" + fullName + "::getTypeInfo().getFieldTypeInfos())"; } String genDecl(String fname) { return " "+name+" "+fname+";\n"; } @Override void genSetRTIFilter(CodeBuffer cb) { // we set the RTI filter here cb.append(fullName + "::setTypeFilter(rti.getNestedStructTypeInfo(\""+ name + "\"));\n"); } void genSetupRTIFields(CodeBuffer cb) { cb.append("void " + fullName + "::setupRtiFields() {\n"); cb.append("if (NULL == p" + Consts.RTI_FILTER + ") return;\n"); cb.append("if (NULL != p" + Consts.RTI_FILTER_FIELDS + ") return;\n"); cb.append("p" + Consts.RTI_FILTER_FIELDS + " = new int[p" + Consts.RTI_FILTER + "->getFieldTypeInfos().size()];\n"); cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n"); cb.append("p" + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = 0;\n"); cb.append("}\n"); cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n"); cb.append("for (unsigned int " + Consts.RIO_PREFIX + "j=0; " + Consts.RIO_PREFIX + "j<p" + Consts.RTI_VAR + "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "j++) {\n"); cb.append("if (*(p" + Consts.RTI_FILTER + "->getFieldTypeInfos()[" + Consts.RIO_PREFIX + "i]) == *(p" + Consts.RTI_VAR + "->getFieldTypeInfos()[" + Consts.RIO_PREFIX + "j])) {\n"); cb.append("p" + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i] = " + Consts.RIO_PREFIX + "j+1;\n"); cb.append("break;\n"); cb.append("}\n"); cb.append("}\n"); cb.append("}\n"); cb.append("}\n"); } void genCode(FileWriter hh, FileWriter cc, ArrayList<String> options) throws IOException { CodeBuffer hb = new CodeBuffer(); String[] ns = module.split("::"); for (int i = 0; i < ns.length; i++) { hb.append("namespace "+ns[i]+" {\n"); } hb.append("class "+name+" : public ::hadoop::Record {\n"); hb.append("private:\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = 
i.next(); String name = jf.getName(); CppType type = jf.getType(); type.genDecl(hb, name); } // type info vars hb.append("static ::hadoop::RecordTypeInfo* p" + Consts.RTI_VAR + ";\n"); hb.append("static ::hadoop::RecordTypeInfo* p" + Consts.RTI_FILTER + ";\n"); hb.append("static int* p" + Consts.RTI_FILTER_FIELDS + ";\n"); hb.append("static ::hadoop::RecordTypeInfo* setupTypeInfo();\n"); hb.append("static void setupRtiFields();\n"); hb.append("virtual void deserializeWithoutFilter(::hadoop::IArchive& " + Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ");\n"); hb.append("public:\n"); hb.append("static const ::hadoop::RecordTypeInfo& getTypeInfo() " + "{return *p" + Consts.RTI_VAR + ";}\n"); hb.append("static void setTypeFilter(const ::hadoop::RecordTypeInfo& rti);\n"); hb.append("static void setTypeFilter(const ::hadoop::RecordTypeInfo* prti);\n"); hb.append("virtual void serialize(::hadoop::OArchive& " + Consts.RECORD_OUTPUT + ", const char* " + Consts.TAG + ") const;\n"); hb.append("virtual void deserialize(::hadoop::IArchive& " + Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ");\n"); hb.append("virtual const ::std::string& type() const;\n"); hb.append("virtual const ::std::string& signature() const;\n"); hb.append("virtual bool operator<(const "+name+"& peer_) const;\n"); hb.append("virtual bool operator==(const "+name+"& peer_) const;\n"); hb.append("virtual ~"+name+"() {};\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); CppType type = jf.getType(); type.genGetSet(hb, name); } hb.append("}; // end record "+name+"\n"); for (int i=ns.length-1; i>=0; i--) { hb.append("} // end namespace "+ns[i]+"\n"); } hh.write(hb.toString()); CodeBuffer cb = new CodeBuffer(); // initialize type info vars cb.append("::hadoop::RecordTypeInfo* " + fullName + "::p" + Consts.RTI_VAR + " = " + fullName + "::setupTypeInfo();\n"); cb.append("::hadoop::RecordTypeInfo* " + fullName + "::p" + Consts.RTI_FILTER + " = NULL;\n"); cb.append("int* " + fullName + "::p" + Consts.RTI_FILTER_FIELDS + " = NULL;\n\n"); // setupTypeInfo() cb.append("::hadoop::RecordTypeInfo* "+fullName+"::setupTypeInfo() {\n"); cb.append("::hadoop::RecordTypeInfo* p = new ::hadoop::RecordTypeInfo(\"" + name + "\");\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); CppType type = jf.getType(); type.genStaticTypeInfo(cb, name); } cb.append("return p;\n"); cb.append("}\n"); // setTypeFilter() cb.append("void "+fullName+"::setTypeFilter(const " + "::hadoop::RecordTypeInfo& rti) {\n"); cb.append("if (NULL != p" + Consts.RTI_FILTER + ") {\n"); cb.append("delete p" + Consts.RTI_FILTER + ";\n"); cb.append("}\n"); cb.append("p" + Consts.RTI_FILTER + " = new ::hadoop::RecordTypeInfo(rti);\n"); cb.append("if (NULL != p" + Consts.RTI_FILTER_FIELDS + ") {\n"); cb.append("delete p" + Consts.RTI_FILTER_FIELDS + ";\n"); cb.append("}\n"); cb.append("p" + Consts.RTI_FILTER_FIELDS + " = NULL;\n"); // set RTIFilter for nested structs. We may end up with multiple lines that // do the same thing, if the same struct is nested in more than one field, // but that's OK. 
for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); CppType type = jf.getType(); type.genSetRTIFilter(cb); } cb.append("}\n"); // setTypeFilter() cb.append("void "+fullName+"::setTypeFilter(const " + "::hadoop::RecordTypeInfo* prti) {\n"); cb.append("if (NULL != prti) {\n"); cb.append("setTypeFilter(*prti);\n"); cb.append("}\n"); cb.append("}\n"); // setupRtiFields() genSetupRTIFields(cb); // serialize() cb.append("void "+fullName+"::serialize(::hadoop::OArchive& " + Consts.RECORD_OUTPUT + ", const char* " + Consts.TAG + ") const {\n"); cb.append(Consts.RECORD_OUTPUT + ".startRecord(*this," + Consts.TAG + ");\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); CppType type = jf.getType(); if (type instanceof JBuffer.CppBuffer) { cb.append(Consts.RECORD_OUTPUT + ".serialize("+name+","+name+ ".length(),\""+name+"\");\n"); } else { cb.append(Consts.RECORD_OUTPUT + ".serialize("+name+",\""+ name+"\");\n"); } } cb.append(Consts.RECORD_OUTPUT + ".endRecord(*this," + Consts.TAG + ");\n"); cb.append("return;\n"); cb.append("}\n"); // deserializeWithoutFilter() cb.append("void "+fullName+"::deserializeWithoutFilter(::hadoop::IArchive& " + Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ") {\n"); cb.append(Consts.RECORD_INPUT + ".startRecord(*this," + Consts.TAG + ");\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); CppType type = jf.getType(); if (type instanceof JBuffer.CppBuffer) { cb.append("{\nsize_t len=0; " + Consts.RECORD_INPUT + ".deserialize("+ name+",len,\""+name+"\");\n}\n"); } else { cb.append(Consts.RECORD_INPUT + ".deserialize("+name+",\""+ name+"\");\n"); } } cb.append(Consts.RECORD_INPUT + ".endRecord(*this," + Consts.TAG + ");\n"); cb.append("return;\n"); cb.append("}\n"); // deserialize() cb.append("void "+fullName+"::deserialize(::hadoop::IArchive& " + Consts.RECORD_INPUT + ", const char* " + Consts.TAG + ") {\n"); cb.append("if (NULL == p" + Consts.RTI_FILTER + ") {\n"); cb.append("deserializeWithoutFilter(" + Consts.RECORD_INPUT + ", " + Consts.TAG + ");\n"); cb.append("return;\n"); cb.append("}\n"); cb.append("// if we're here, we need to read based on version info\n"); cb.append(Consts.RECORD_INPUT + ".startRecord(*this," + Consts.TAG + ");\n"); cb.append("setupRtiFields();\n"); cb.append("for (unsigned int " + Consts.RIO_PREFIX + "i=0; " + Consts.RIO_PREFIX + "i<p" + Consts.RTI_FILTER + "->getFieldTypeInfos().size(); " + Consts.RIO_PREFIX + "i++) {\n"); int ct = 0; for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); CppType type = jf.getType(); ct++; if (1 != ct) { cb.append("else "); } cb.append("if (" + ct + " == p" + Consts.RTI_FILTER_FIELDS + "[" + Consts.RIO_PREFIX + "i]) {\n"); if (type instanceof JBuffer.CppBuffer) { cb.append("{\nsize_t len=0; " + Consts.RECORD_INPUT + ".deserialize("+ name+",len,\""+name+"\");\n}\n"); } else { cb.append(Consts.RECORD_INPUT + ".deserialize("+name+",\""+ name+"\");\n"); } cb.append("}\n"); } if (0 != ct) { cb.append("else {\n"); cb.append("const std::vector< ::hadoop::FieldTypeInfo* >& typeInfos = p" + Consts.RTI_FILTER + "->getFieldTypeInfos();\n"); cb.append("::hadoop::Utils::skip(" + Consts.RECORD_INPUT + ", typeInfos[" + Consts.RIO_PREFIX + "i]->getFieldID()->c_str()" + ", *(typeInfos[" + Consts.RIO_PREFIX + "i]->getTypeID()));\n"); 
cb.append("}\n"); } cb.append("}\n"); cb.append(Consts.RECORD_INPUT + ".endRecord(*this, " + Consts.TAG+");\n"); cb.append("}\n"); // operator < cb.append("bool "+fullName+"::operator< (const "+fullName+"& peer_) const {\n"); cb.append("return (1\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); cb.append("&& ("+name+" < peer_."+name+")\n"); } cb.append(");\n"); cb.append("}\n"); cb.append("bool "+fullName+"::operator== (const "+fullName+"& peer_) const {\n"); cb.append("return (1\n"); for (Iterator<JField<CppType>> i = fields.iterator(); i.hasNext();) { JField<CppType> jf = i.next(); String name = jf.getName(); cb.append("&& ("+name+" == peer_."+name+")\n"); } cb.append(");\n"); cb.append("}\n"); cb.append("const ::std::string&"+fullName+"::type() const {\n"); cb.append("static const ::std::string type_(\""+name+"\");\n"); cb.append("return type_;\n"); cb.append("}\n"); cb.append("const ::std::string&"+fullName+"::signature() const {\n"); cb.append("static const ::std::string sig_(\""+getSignature()+"\");\n"); cb.append("return sig_;\n"); cb.append("}\n"); cc.write(cb.toString()); } } class CRecord extends CCompType { } private String signature; /** * Creates a new instance of JRecord */ public JRecord(String name, ArrayList<JField<JType>> flist) { setJavaType(new JavaRecord(name, flist)); setCppType(new CppRecord(name, flist)); setCType(new CRecord()); // precompute signature int idx = name.lastIndexOf('.'); String recName = name.substring(idx+1); StringBuilder sb = new StringBuilder(); sb.append("L").append(recName).append("("); for (Iterator<JField<JType>> i = flist.iterator(); i.hasNext();) { String s = i.next().getType().getSignature(); sb.append(s); } sb.append(")"); signature = sb.toString(); } @Override String getSignature() { return signature; } void genCppCode(FileWriter hh, FileWriter cc, ArrayList<String> options) throws IOException { ((CppRecord)getCppType()).genCode(hh, cc, options); } void genJavaCode(String destDir, ArrayList<String> options) throws IOException { ((JavaRecord)getJavaType()).genCode(destDir, options); } }
33,693
39.940462
94
java
hadoop
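A note on the RTI-filter matching that JRecord's genSetupRtiFields() emits: the generated method performs, in effect, the plain-Java search below. This is a sketch, not part of the repository; matchFields and the two list parameters are hypothetical names, and only FieldTypeInfo.equals() from org.apache.hadoop.record.meta is assumed. For each field in the writer's filter RTI it records the 1-based index of an equal field in the reader's own RTI, with 0 meaning "unknown field, skip it during deserialize()":

import java.util.List;
import org.apache.hadoop.record.meta.FieldTypeInfo;

class RtiMatchSketch {
  // 0 in the result means "no matching field in this record's type info".
  static int[] matchFields(List<FieldTypeInfo> filter,
                           List<FieldTypeInfo> record) {
    int[] map = new int[filter.size()];
    for (int i = 0; i < filter.size(); i++) {
      int j = 1;                      // generated code numbers fields from 1
      for (FieldTypeInfo t : record) {
        if (t.equals(filter.get(i))) { map[i] = j; break; }
        j++;
      }
    }
    return map;
  }
}

As the comment in genSetupRtiFields() notes, this search is quadratic, which is acceptable because it runs at most once per record class.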
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CppGenerator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.ArrayList; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.Iterator; import org.apache.hadoop.util.StringUtils; /** * C++ Code generator front-end for Hadoop record I/O. */ class CppGenerator extends CodeGenerator { CppGenerator() { } /** * Generate C++ code. This method creates only the requested file(s) * and spits out file-level elements (such as include statements); * record-level code is generated by JRecord. */ @Override void genCode(String name, ArrayList<JFile> ilist, ArrayList<JRecord> rlist, String destDir, ArrayList<String> options) throws IOException { name = new File(destDir, (new File(name)).getName()).getAbsolutePath(); FileWriter cc = new FileWriter(name+".cc"); try { FileWriter hh = new FileWriter(name+".hh"); try { String fileName = (new File(name)).getName(); hh.write("#ifndef __"+ StringUtils.toUpperCase(fileName).replace('.','_')+"__\n"); hh.write("#define __"+ StringUtils.toUpperCase(fileName).replace('.','_')+"__\n"); hh.write("#include \"recordio.hh\"\n"); hh.write("#include \"recordTypeInfo.hh\"\n"); for (Iterator<JFile> iter = ilist.iterator(); iter.hasNext();) { hh.write("#include \""+iter.next().getName()+".hh\"\n"); } cc.write("#include \""+fileName+".hh\"\n"); cc.write("#include \"utils.hh\"\n"); for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) { iter.next().genCppCode(hh, cc, options); } hh.write("#endif //"+ StringUtils.toUpperCase(fileName).replace('.','_')+"__\n"); } finally { hh.close(); } } finally { cc.close(); } } }
2,709
32.875
83
java
hadoop
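For a concrete sense of the file-level scaffolding this generator writes, consider a hypothetical input named test.jr with no includes and no records (the name is made up; the output below is derived directly from the hh.write calls above). The emitted test.jr.hh would contain exactly:

#ifndef __TEST_JR__
#define __TEST_JR__
#include "recordio.hh"
#include "recordTypeInfo.hh"
#endif //TEST_JR__

and the companion test.jr.cc would begin with #include "test.jr.hh" followed by #include "utils.hh". Note that the emitted #endif comment drops the leading underscores of the guard macro; that is visible in the code above and harmless, since the text after // is only a comment.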
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JInt.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Code generator for "int" type * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JInt extends JType { class JavaInt extends JavaType { JavaInt() { super("int", "Int", "Integer", "TypeID.RIOType.INT"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.IntTypeID"; } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n"); cb.append(s+"+=z; "+l+"-=z;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n"); cb.append("if (i1 != i2) {\n"); cb.append("return ((i1-i2) < 0) ? -1 : 0;\n"); cb.append("}\n"); cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n"); cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n"); cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n"); cb.append("}\n"); } } class CppInt extends CppType { CppInt() { super("int32_t"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_INT)"; } } /** Creates a new instance of JInt */ public JInt() { setJavaType(new JavaInt()); setCppType(new CppInt()); setCType(new CType()); } @Override String getSignature() { return "i"; } }
2,816
28.968085
84
java
hadoop
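The slurp and compare bodies that JInt emits lean on the variable-length-int helpers in org.apache.hadoop.record.Utils: readVInt decodes from a byte array and getVIntSize reports the encoded width, which is exactly how far the generated code advances its cursor (s += z; l -= z). A minimal sketch of that interaction (VIntSketch is an invented name; it assumes only that the deprecated record runtime is on the classpath, and that writeVInt's DataOutput overload is the encoder the binary archive uses):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.record.Utils;

class VIntSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Utils.writeVInt(new DataOutputStream(bos), 300);   // encode one vint
    byte[] b = bos.toByteArray();
    int i = Utils.readVInt(b, 0);   // decodes 300
    int z = Utils.getVIntSize(i);   // encoded width in bytes
    // generated slurp code advances by exactly z: s += z; l -= z;
    System.out.println("value=" + i + " width=" + z);
  }
}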
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JBuffer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Code generator for "buffer" type. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JBuffer extends JCompType { class JavaBuffer extends JavaCompType { JavaBuffer() { super("org.apache.hadoop.record.Buffer", "Buffer", "org.apache.hadoop.record.Buffer", "TypeID.RIOType.BUFFER"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.BufferTypeID"; } @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n"); } @Override void genEquals(CodeBuffer cb, String fname, String peer) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n"); } @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n"); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("int i = org.apache.hadoop.record.Utils.readVInt("+ b+", "+s+");\n"); cb.append("int z = org.apache.hadoop.record.Utils.getVIntSize(i);\n"); cb.append(s+" += z+i; "+l+" -= (z+i);\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); cb.append("int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n"); cb.append("int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);\n"); cb.append("int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);\n"); cb.append("s1+=z1; s2+=z2; l1-=z1; l2-=z2;\n"); cb.append("int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);\n"); cb.append("if (r1 != 0) { return (r1<0)?-1:0; }\n"); cb.append("s1+=i1; s2+=i2; l1-=i1; l2-=i2;\n"); cb.append("}\n"); } } class CppBuffer extends CppCompType { CppBuffer() { super(" ::std::string"); } @Override void genGetSet(CodeBuffer cb, String fname) { cb.append("virtual const "+getType()+"& get"+toCamelCase(fname)+"() const {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); cb.append("virtual "+getType()+"& get"+toCamelCase(fname)+"() {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_BUFFER)"; } } /** Creates a new instance of JBuffer */ public JBuffer() { setJavaType(new JavaBuffer()); setCppType(new CppBuffer()); setCType(new CCompType()); } @Override String getSignature() { return "B"; } }
3,915
31.363636
94
java
hadoop
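On the wire, a buffer is a zig-zag vint length followed by that many raw bytes, which is why JBuffer's genSlurpBytes advances the cursor by z+i (length-of-length plus payload). A standalone restatement of the raw comparison the generated comparator performs (a sketch; compareBufferRaw is a hypothetical name, while Utils.readVInt, Utils.getVIntSize, and Utils.compareBytes are the helpers the generated code itself calls):

import org.apache.hadoop.record.Utils;

class BufferCompareSketch {
  // Compare two length-prefixed buffers in place: decode both lengths,
  // skip past the length prefixes, then compare the payload bytes.
  static int compareBufferRaw(byte[] b1, int s1, byte[] b2, int s2)
      throws java.io.IOException {
    int i1 = Utils.readVInt(b1, s1);
    int i2 = Utils.readVInt(b2, s2);
    s1 += Utils.getVIntSize(i1);
    s2 += Utils.getVIntSize(i2);
    return Utils.compareBytes(b1, s1, i1, b2, s2, i2);
  }
}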
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFloat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JFloat extends JType { class JavaFloat extends JavaType { JavaFloat() { super("float", "Float", "Float", "TypeID.RIOType.FLOAT"); } @Override String getTypeIDObjectString() { return "org.apache.hadoop.record.meta.TypeID.FloatTypeID"; } @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = Float.floatToIntBits("+fname+");\n"); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); cb.append("if ("+l+"<4) {\n"); cb.append("throw new java.io.IOException(\"Float is exactly 4 bytes."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append(s+"+=4; "+l+"-=4;\n"); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); cb.append("if (l1<4 || l2<4) {\n"); cb.append("throw new java.io.IOException(\"Float is exactly 4 bytes."+ " Provided buffer is smaller.\");\n"); cb.append("}\n"); cb.append("float f1 = org.apache.hadoop.record.Utils.readFloat(b1, s1);\n"); cb.append("float f2 = org.apache.hadoop.record.Utils.readFloat(b2, s2);\n"); cb.append("if (f1 != f2) {\n"); cb.append("return ((f1-f2) < 0) ? -1 : 0;\n"); cb.append("}\n"); cb.append("s1+=4; s2+=4; l1-=4; l2-=4;\n"); cb.append("}\n"); } } class CppFloat extends CppType { CppFloat() { super("float"); } @Override String getTypeIDObjectString() { return "new ::hadoop::TypeID(::hadoop::RIOTYPE_FLOAT)"; } } /** Creates a new instance of JFloat */ public JFloat() { setJavaType(new JavaFloat()); setCppType(new CppFloat()); setCType(new CType()); } @Override String getSignature() { return "f"; } }
3,033
29.34
82
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeBuffer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A wrapper around StringBuffer that automatically does indentation * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class CodeBuffer { static private ArrayList<Character> startMarkers = new ArrayList<Character>(); static private ArrayList<Character> endMarkers = new ArrayList<Character>(); static { addMarkers('{', '}'); addMarkers('(', ')'); } static void addMarkers(char ch1, char ch2) { startMarkers.add(ch1); endMarkers.add(ch2); } private int level = 0; private int numSpaces = 2; private boolean firstChar = true; private StringBuffer sb; /** Creates a new instance of CodeBuffer */ CodeBuffer() { this(2, ""); } CodeBuffer(String s) { this(2, s); } CodeBuffer(int numSpaces, String s) { sb = new StringBuffer(); this.numSpaces = numSpaces; this.append(s); } void append(String s) { int length = s.length(); for (int idx = 0; idx < length; idx++) { char ch = s.charAt(idx); append(ch); } } void append(char ch) { if (endMarkers.contains(ch)) { level--; } if (firstChar) { for (int idx = 0; idx < level; idx++) { for (int num = 0; num < numSpaces; num++) { rawAppend(' '); } } } rawAppend(ch); firstChar = false; if (startMarkers.contains(ch)) { level++; } if (ch == '\n') { firstChar = true; } } private void rawAppend(char ch) { sb.append(ch); } @Override public String toString() { return sb.toString(); } }
2,679
24.283019
80
java
hadoop
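CodeBuffer is what keeps the generators' long runs of append(...) calls readable in the output: it tracks {}/() nesting and indents every fresh line by two spaces per level. A small usage sketch (the class and its constructor are package-private, so this would have to live inside org.apache.hadoop.record.compiler; CodeBufferDemo is an invented name):

class CodeBufferDemo {
  static void demo() {
    CodeBuffer cb = new CodeBuffer();
    cb.append("public void f() {\n");
    cb.append("int x = 0;\n");
    cb.append("}\n");
    System.out.print(cb);
    // Prints, with the indentation added automatically:
    // public void f() {
    //   int x = 0;
    // }
  }
}

Note that '(' and ')' also adjust the level, so a call spanning several appended lines gets its continuation lines indented as well.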
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CGenerator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.ArrayList; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.Iterator; import org.apache.hadoop.util.StringUtils; /** * C Code generator front-end for Hadoop record I/O. */ class CGenerator extends CodeGenerator { CGenerator() { } /** * Generate C code. This method creates only the requested file(s) * and spits out file-level elements (such as include statements); * record-level code is generated by JRecord. */ @Override void genCode(String name, ArrayList<JFile> ilist, ArrayList<JRecord> rlist, String destDir, ArrayList<String> options) throws IOException { name = new File(destDir, (new File(name)).getName()).getAbsolutePath(); FileWriter cc = new FileWriter(name+".c"); try { FileWriter hh = new FileWriter(name+".h"); try { hh.write("#ifndef __"+ StringUtils.toUpperCase(name).replace('.','_')+"__\n"); hh.write("#define __"+ StringUtils.toUpperCase(name).replace('.','_')+"__\n"); hh.write("#include \"recordio.h\"\n"); for (Iterator<JFile> iter = ilist.iterator(); iter.hasNext();) { hh.write("#include \""+iter.next().getName()+".h\"\n"); } cc.write("#include \""+name+".h\"\n"); /* for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) { iter.next().genCppCode(hh, cc); } */ hh.write("#endif //"+ StringUtils.toUpperCase(name).replace('.','_')+"__\n"); } finally { hh.close(); } } finally { cc.close(); } } }
2,507
31.571429
83
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JFile.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.io.IOException; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Container for the Hadoop Record DDL. * The main components of the file are filename, list of included files, * and records defined in that file. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JFile { /** Possibly full name of the file */ private String mName; /** Ordered list of included files */ private ArrayList<JFile> mInclFiles; /** Ordered list of records declared in this file */ private ArrayList<JRecord> mRecords; /** Creates a new instance of JFile * * @param name possibly full pathname to the file * @param inclFiles included files (as JFile) * @param recList List of records defined within this file */ public JFile(String name, ArrayList<JFile> inclFiles, ArrayList<JRecord> recList) { mName = name; mInclFiles = inclFiles; mRecords = recList; } /** Strip the other pathname components and return the basename */ String getName() { int idx = mName.lastIndexOf('/'); return (idx > 0) ? mName.substring(idx) : mName; } /** Generate record code in given language. Language should be all * lowercase. */ public int genCode(String language, String destDir, ArrayList<String> options) throws IOException { CodeGenerator gen = CodeGenerator.get(language); if (gen != null) { gen.genCode(mName, mInclFiles, mRecords, destDir, options); } else { System.err.println("Cannot recognize language: "+language); return 1; } return 0; } }
2,639
32.417722
80
java
hadoop
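End to end, JFile.genCode is the public entry point into the compiler. A minimal driver sketch (GenDemo, the record name demo.Point, the input name point.jr, and the out directory are all made-up for illustration; it assumes the deprecated compiler classes are on the classpath, in which case the backend registered under "java" would write out/demo/Point.java):

import java.util.ArrayList;
import org.apache.hadoop.record.compiler.*;

public class GenDemo {
  public static void main(String[] args) throws Exception {
    ArrayList<JField<JType>> fields = new ArrayList<JField<JType>>();
    fields.add(new JField<JType>("x", new JInt()));        // one int field
    ArrayList<JRecord> records = new ArrayList<JRecord>();
    records.add(new JRecord("demo.Point", fields));        // module demo, class Point
    JFile jfile = new JFile("point.jr", new ArrayList<JFile>(), records);
    // Dispatches to the generator registered under "java"; returns 0 on success.
    System.exit(jfile.genCode("java", "out", new ArrayList<String>()));
  }
}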
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JField.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A thin wrapper around a record field. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JField<T> { private String name; private T type; /** * Creates a new instance of JField */ public JField(String name, T type) { this.type = type; this.name = name; } String getName() { return name; } T getType() { return type; } }
1,463
26.622642
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/CodeGenerator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; /** * CodeGenerator is a Factory and a base class for Hadoop Record I/O translators. * Different translators register creation methods with this factory. */ abstract class CodeGenerator { private static HashMap<String, CodeGenerator> generators = new HashMap<String, CodeGenerator>(); static { register("c", new CGenerator()); register("c++", new CppGenerator()); register("java", new JavaGenerator()); } static void register(String lang, CodeGenerator gen) { generators.put(lang, gen); } static CodeGenerator get(String lang) { return generators.get(lang); } abstract void genCode(String file, ArrayList<JFile> inclFiles, ArrayList<JRecord> records, String destDir, ArrayList<String> options) throws IOException; }
1,805
32.444444
81
java
hadoop
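Adding another backend means subclassing this factory's base class and registering an instance under a language key. A hypothetical sketch (PythonGenerator is invented, not part of the repository; CodeGenerator is package-private, so this would live in org.apache.hadoop.record.compiler, and real registration happens in the static block shown above):

import java.io.IOException;
import java.util.ArrayList;

class PythonGenerator extends CodeGenerator {
  @Override
  void genCode(String file, ArrayList<JFile> inclFiles,
               ArrayList<JRecord> records, String destDir,
               ArrayList<String> options) throws IOException {
    // file-level preamble would be written here; per-record code would be
    // delegated to JRecord, as the C, C++, and Java backends do
  }
}
// registered once, e.g. alongside the existing entries in the static block:
//   register("python", new PythonGenerator());

After registration, JFile.genCode("python", ...) would resolve the new backend via CodeGenerator.get.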
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JMap.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class JMap extends JCompType { static private int level = 0; static private String getLevel() { return Integer.toString(level); } static private void incrLevel() { level++; } static private void decrLevel() { level--; } static private String getId(String id) { return id+getLevel(); } private JType keyType; private JType valueType; class JavaMap extends JavaCompType { JType.JavaType key; JType.JavaType value; JavaMap(JType.JavaType key, JType.JavaType value) { super("java.util.TreeMap<"+key.getWrapperType()+","+value.getWrapperType()+">", "Map", "java.util.TreeMap<"+key.getWrapperType()+","+value.getWrapperType()+">", "TypeID.RIOType.MAP"); this.key = key; this.value = value; } @Override String getTypeIDObjectString() { return "new org.apache.hadoop.record.meta.MapTypeID(" + key.getTypeIDObjectString() + ", " + value.getTypeIDObjectString() + ")"; } @Override void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) { key.genSetRTIFilter(cb, nestedStructMap); value.genSetRTIFilter(cb, nestedStructMap); } @Override void genCompareTo(CodeBuffer cb, String fname, String other) { String setType = "java.util.Set<"+key.getWrapperType()+"> "; String iterType = "java.util.Iterator<"+key.getWrapperType()+"> "; cb.append("{\n"); cb.append(setType+getId(Consts.RIO_PREFIX + "set1")+" = "+ fname+".keySet();\n"); cb.append(setType+getId(Consts.RIO_PREFIX + "set2")+" = "+ other+".keySet();\n"); cb.append(iterType+getId(Consts.RIO_PREFIX + "miter1")+" = "+ getId(Consts.RIO_PREFIX + "set1")+".iterator();\n"); cb.append(iterType+getId(Consts.RIO_PREFIX + "miter2")+" = "+ getId(Consts.RIO_PREFIX + "set2")+".iterator();\n"); cb.append("for(; "+getId(Consts.RIO_PREFIX + "miter1")+".hasNext() && "+ getId(Consts.RIO_PREFIX + "miter2")+".hasNext();) {\n"); cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k1")+ " = "+getId(Consts.RIO_PREFIX + "miter1")+".next();\n"); cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k2")+ " = "+getId(Consts.RIO_PREFIX + "miter2")+".next();\n"); key.genCompareTo(cb, getId(Consts.RIO_PREFIX + "k1"), getId(Consts.RIO_PREFIX + "k2")); cb.append("if (" + Consts.RIO_PREFIX + "ret != 0) { return " + Consts.RIO_PREFIX + "ret; }\n"); cb.append("}\n"); cb.append(Consts.RIO_PREFIX + "ret = ("+getId(Consts.RIO_PREFIX + "set1")+ ".size() - "+getId(Consts.RIO_PREFIX + "set2")+".size());\n"); cb.append("}\n"); } @Override void genReadMethod(CodeBuffer cb, String 
fname, String tag, boolean decl) { if (decl) { cb.append(getType()+" "+fname+";\n"); } cb.append("{\n"); incrLevel(); cb.append("org.apache.hadoop.record.Index " + getId(Consts.RIO_PREFIX + "midx")+" = " + Consts.RECORD_INPUT + ".startMap(\""+tag+"\");\n"); cb.append(fname+"=new "+getType()+"();\n"); cb.append("for (; !"+getId(Consts.RIO_PREFIX + "midx")+".done(); "+ getId(Consts.RIO_PREFIX + "midx")+".incr()) {\n"); key.genReadMethod(cb, getId(Consts.RIO_PREFIX + "k"), getId(Consts.RIO_PREFIX + "k"), true); value.genReadMethod(cb, getId(Consts.RIO_PREFIX + "v"), getId(Consts.RIO_PREFIX + "v"), true); cb.append(fname+".put("+getId(Consts.RIO_PREFIX + "k")+","+ getId(Consts.RIO_PREFIX + "v")+");\n"); cb.append("}\n"); cb.append(Consts.RECORD_INPUT + ".endMap(\""+tag+"\");\n"); decrLevel(); cb.append("}\n"); } @Override void genWriteMethod(CodeBuffer cb, String fname, String tag) { String setType = "java.util.Set<java.util.Map.Entry<"+ key.getWrapperType()+","+value.getWrapperType()+">> "; String entryType = "java.util.Map.Entry<"+ key.getWrapperType()+","+value.getWrapperType()+"> "; String iterType = "java.util.Iterator<java.util.Map.Entry<"+ key.getWrapperType()+","+value.getWrapperType()+">> "; cb.append("{\n"); incrLevel(); cb.append(Consts.RECORD_OUTPUT + ".startMap("+fname+",\""+tag+"\");\n"); cb.append(setType+getId(Consts.RIO_PREFIX + "es")+" = "+ fname+".entrySet();\n"); cb.append("for("+iterType+getId(Consts.RIO_PREFIX + "midx")+" = "+ getId(Consts.RIO_PREFIX + "es")+".iterator(); "+ getId(Consts.RIO_PREFIX + "midx")+".hasNext();) {\n"); cb.append(entryType+getId(Consts.RIO_PREFIX + "me")+" = "+ getId(Consts.RIO_PREFIX + "midx")+".next();\n"); cb.append(key.getType()+" "+getId(Consts.RIO_PREFIX + "k")+" = "+ getId(Consts.RIO_PREFIX + "me")+".getKey();\n"); cb.append(value.getType()+" "+getId(Consts.RIO_PREFIX + "v")+" = "+ getId(Consts.RIO_PREFIX + "me")+".getValue();\n"); key.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "k"), getId(Consts.RIO_PREFIX + "k")); value.genWriteMethod(cb, getId(Consts.RIO_PREFIX + "v"), getId(Consts.RIO_PREFIX + "v")); cb.append("}\n"); cb.append(Consts.RECORD_OUTPUT + ".endMap("+fname+",\""+tag+"\");\n"); cb.append("}\n"); decrLevel(); } @Override void genSlurpBytes(CodeBuffer cb, String b, String s, String l) { cb.append("{\n"); incrLevel(); cb.append("int "+getId("mi")+ " = org.apache.hadoop.record.Utils.readVInt("+b+", "+s+");\n"); cb.append("int "+getId("mz")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi")+");\n"); cb.append(s+"+="+getId("mz")+"; "+l+"-="+getId("mz")+";\n"); cb.append("for (int "+getId("midx")+" = 0; "+getId("midx")+ " < "+getId("mi")+"; "+getId("midx")+"++) {"); key.genSlurpBytes(cb, b, s, l); value.genSlurpBytes(cb, b, s, l); cb.append("}\n"); decrLevel(); cb.append("}\n"); } @Override void genCompareBytes(CodeBuffer cb) { cb.append("{\n"); incrLevel(); cb.append("int "+getId("mi1")+ " = org.apache.hadoop.record.Utils.readVInt(b1, s1);\n"); cb.append("int "+getId("mi2")+ " = org.apache.hadoop.record.Utils.readVInt(b2, s2);\n"); cb.append("int "+getId("mz1")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi1")+");\n"); cb.append("int "+getId("mz2")+ " = org.apache.hadoop.record.Utils.getVIntSize("+getId("mi2")+");\n"); cb.append("s1+="+getId("mz1")+"; s2+="+getId("mz2")+ "; l1-="+getId("mz1")+"; l2-="+getId("mz2")+";\n"); cb.append("for (int "+getId("midx")+" = 0; "+getId("midx")+ " < "+getId("mi1")+" && "+getId("midx")+" < "+getId("mi2")+ "; "+getId("midx")+"++) {"); key.genCompareBytes(cb); 
value.genSlurpBytes(cb, "b1", "s1", "l1"); value.genSlurpBytes(cb, "b2", "s2", "l2"); cb.append("}\n"); cb.append("if ("+getId("mi1")+" != "+getId("mi2")+ ") { return ("+getId("mi1")+"<"+getId("mi2")+")?-1:0; }\n"); decrLevel(); cb.append("}\n"); } } class CppMap extends CppCompType { JType.CppType key; JType.CppType value; CppMap(JType.CppType key, JType.CppType value) { super("::std::map< "+key.getType()+", "+ value.getType()+" >"); this.key = key; this.value = value; } @Override String getTypeIDObjectString() { return "new ::hadoop::MapTypeID(" + key.getTypeIDObjectString() + ", " + value.getTypeIDObjectString() + ")"; } @Override void genSetRTIFilter(CodeBuffer cb) { key.genSetRTIFilter(cb); value.genSetRTIFilter(cb); } } /** Creates a new instance of JMap */ public JMap(JType t1, JType t2) { setJavaType(new JavaMap(t1.getJavaType(), t2.getJavaType())); setCppType(new CppMap(t1.getCppType(), t2.getCppType())); setCType(new CType()); keyType = t1; valueType = t2; } @Override String getSignature() { return "{" + keyType.getSignature() + valueType.getSignature() +"}"; } }
9,553
37.680162
86
java
hadoop
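The static level counter in JMap exists so that temporaries generated for nested maps never collide: getId appends the current depth to every identifier. A sketch of the effect (the calls shown are JMap's own private helpers, so this fragment is illustrative only, as if it ran inside genReadMethod for a field of type map<k1, map<k2, v>>):

incrLevel();                                          // outer map: level == 1
String outerIdx = getId(Consts.RIO_PREFIX + "midx");  // "_rio_midx1"
incrLevel();                                          // nested value map: level == 2
String innerIdx = getId(Consts.RIO_PREFIX + "midx");  // "_rio_midx2"
decrLevel();
decrLevel();

Because level is static mutable state, code generation is implicitly single-threaded; that holds for the rcc tool, which processes one DDL file at a time.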
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/Consts.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * const definitions for Record I/O compiler * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class Consts { /** Cannot create a new instance */ private Consts() { } // prefix to use for variables in generated classes public static final String RIO_PREFIX = "_rio_"; // other vars used in generated classes public static final String RTI_VAR = RIO_PREFIX + "recTypeInfo"; public static final String RTI_FILTER = RIO_PREFIX + "rtiFilter"; public static final String RTI_FILTER_FIELDS = RIO_PREFIX + "rtiFilterFields"; public static final String RECORD_OUTPUT = RIO_PREFIX + "a"; public static final String RECORD_INPUT = RIO_PREFIX + "a"; public static final String TAG = RIO_PREFIX + "tag"; }
1,811
35.979592
80
java
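A quick demonstration of the naming scheme these constants produce: every member a generated record class declares internally carries the _rio_ prefix, so user-chosen DDL field names cannot collide with it. The demo class itself is made up; only the constants are from Consts.

import org.apache.hadoop.record.compiler.Consts;

public class ConstsDemo {
  public static void main(String[] args) {
    System.out.println(Consts.RIO_PREFIX);   // _rio_
    System.out.println(Consts.RTI_VAR);      // _rio_recTypeInfo
    System.out.println(Consts.RECORD_INPUT); // _rio_a
    System.out.println(Consts.TAG);          // _rio_tag
  }
}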
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JCompType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; /** * Abstract base class for all the "compound" types such as ustring, * buffer, vector, map, and record. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated abstract class JCompType extends JType { abstract class JavaCompType extends JavaType { JavaCompType(String type, String suffix, String wrapper, String typeIDByteString) { super(type, suffix, wrapper, typeIDByteString); } @Override void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".compareTo("+other+");\n"); } @Override void genEquals(CodeBuffer cb, String fname, String peer) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".equals("+peer+");\n"); } @Override void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = "+fname+".hashCode();\n"); } @Override void genClone(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "other."+fname+" = ("+getType()+") this."+ fname+".clone();\n"); } } abstract class CppCompType extends CppType { CppCompType(String type) { super(type); } @Override void genGetSet(CodeBuffer cb, String fname) { cb.append("virtual const "+getType()+"& get"+toCamelCase(fname)+"() const {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); cb.append("virtual "+getType()+"& get"+toCamelCase(fname)+"() {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); } } class CCompType extends CType { } }
2,516
30.074074
86
java
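The three gen* methods in JavaCompType simply delegate to the wrapped object's own compareTo/equals/hashCode. A hand-written sketch of the statements they splice into a generated record, assuming a compound field "name" (a ustring, i.e. String in Java); the class, the method wrappers, and the peer variable are invented for illustration.

// Illustrative shape of the fragments JavaCompType emits for a compound
// field "name"; in real generated code _rio_ret is declared by the
// enclosing generated method.
public class CompTypeSketch {
  private String name = "alpha";

  int compareTo(CompTypeSketch _rio_peer) {
    int _rio_ret;
    _rio_ret = name.compareTo(_rio_peer.name); // from genCompareTo
    return _rio_ret;
  }

  boolean sameAs(CompTypeSketch _rio_peer) {
    boolean _rio_ret;
    _rio_ret = name.equals(_rio_peer.name);    // from genEquals
    return _rio_ret;
  }

  int hash() {
    int _rio_ret;
    _rio_ret = name.hashCode();                // from genHashCode
    return _rio_ret;
  }
}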
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/JType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Abstract Base class for all types supported by Hadoop Record I/O. * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable abstract public class JType { static String toCamelCase(String name) { char firstChar = name.charAt(0); if (Character.isLowerCase(firstChar)) { return ""+Character.toUpperCase(firstChar) + name.substring(1); } return name; } JavaType javaType; CppType cppType; CType cType; abstract class JavaType { private String name; private String methodSuffix; private String wrapper; private String typeIDByteString; // points to TypeID.RIOType JavaType(String javaname, String suffix, String wrapper, String typeIDByteString) { this.name = javaname; this.methodSuffix = suffix; this.wrapper = wrapper; this.typeIDByteString = typeIDByteString; } void genDecl(CodeBuffer cb, String fname) { cb.append("private "+name+" "+fname+";\n"); } void genStaticTypeInfo(CodeBuffer cb, String fname) { cb.append(Consts.RTI_VAR + ".addField(\"" + fname + "\", " + getTypeIDObjectString() + ");\n"); } abstract String getTypeIDObjectString(); void genSetRTIFilter(CodeBuffer cb, Map<String, Integer> nestedStructMap) { // do nothing by default return; } /*void genRtiFieldCondition(CodeBuffer cb, String fname, int ct) { cb.append("if ((tInfo.fieldID.equals(\"" + fname + "\")) && (typeVal ==" + " org.apache.hadoop.record.meta." + getTypeIDByteString() + ")) {\n"); cb.append("rtiFilterFields[i] = " + ct + ";\n"); cb.append("}\n"); } void genRtiNestedFieldCondition(CodeBuffer cb, String varName, int ct) { cb.append("if (" + varName + ".getElementTypeID().getTypeVal() == " + "org.apache.hadoop.record.meta." 
+ getTypeIDByteString() + ") {\n"); cb.append("rtiFilterFields[i] = " + ct + ";\n"); cb.append("}\n"); }*/ void genConstructorParam(CodeBuffer cb, String fname) { cb.append("final "+name+" "+fname); } void genGetSet(CodeBuffer cb, String fname) { cb.append("public "+name+" get"+toCamelCase(fname)+"() {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); cb.append("public void set"+toCamelCase(fname)+"(final "+name+" "+fname+") {\n"); cb.append("this."+fname+"="+fname+";\n"); cb.append("}\n"); } String getType() { return name; } String getWrapperType() { return wrapper; } String getMethodSuffix() { return methodSuffix; } String getTypeIDByteString() { return typeIDByteString; } void genWriteMethod(CodeBuffer cb, String fname, String tag) { cb.append(Consts.RECORD_OUTPUT + ".write"+methodSuffix + "("+fname+",\""+tag+"\");\n"); } void genReadMethod(CodeBuffer cb, String fname, String tag, boolean decl) { if (decl) { cb.append(name+" "+fname+";\n"); } cb.append(fname+"=" + Consts.RECORD_INPUT + ".read" + methodSuffix+"(\""+tag+"\");\n"); } void genCompareTo(CodeBuffer cb, String fname, String other) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+" == "+other+")? 0 :(("+ fname+"<"+other+")?-1:1);\n"); } abstract void genCompareBytes(CodeBuffer cb); abstract void genSlurpBytes(CodeBuffer cb, String b, String s, String l); void genEquals(CodeBuffer cb, String fname, String peer) { cb.append(Consts.RIO_PREFIX + "ret = ("+fname+"=="+peer+");\n"); } void genHashCode(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "ret = (int)"+fname+";\n"); } void genConstructorSet(CodeBuffer cb, String fname) { cb.append("this."+fname+" = "+fname+";\n"); } void genClone(CodeBuffer cb, String fname) { cb.append(Consts.RIO_PREFIX + "other."+fname+" = this."+fname+";\n"); } } abstract class CppType { private String name; CppType(String cppname) { name = cppname; } void genDecl(CodeBuffer cb, String fname) { cb.append(name+" "+fname+";\n"); } void genStaticTypeInfo(CodeBuffer cb, String fname) { cb.append("p->addField(new ::std::string(\"" + fname + "\"), " + getTypeIDObjectString() + ");\n"); } void genGetSet(CodeBuffer cb, String fname) { cb.append("virtual "+name+" get"+toCamelCase(fname)+"() const {\n"); cb.append("return "+fname+";\n"); cb.append("}\n"); cb.append("virtual void set"+toCamelCase(fname)+"("+name+" m_) {\n"); cb.append(fname+"=m_;\n"); cb.append("}\n"); } abstract String getTypeIDObjectString(); void genSetRTIFilter(CodeBuffer cb) { // do nothing by default return; } String getType() { return name; } } class CType { } abstract String getSignature(); void setJavaType(JavaType jType) { this.javaType = jType; } JavaType getJavaType() { return javaType; } void setCppType(CppType cppType) { this.cppType = cppType; } CppType getCppType() { return cppType; } void setCType(CType cType) { this.cType = cType; } CType getCType() { return cType; } }
6,496
27.125541
87
java
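toCamelCase drives accessor naming in JavaType.genGetSet: a DDL field "int count;" becomes getCount/setCount. A minimal sketch of the resulting members; the class wrapper is invented.

public class GetSetSketch {
  private int count;                    // from JavaType.genDecl

  public int getCount() {               // from JavaType.genGetSet
    return count;
  }

  public void setCount(final int count) {
    this.count = count;
  }
}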
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/ant/RccTask.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler.ant; import java.io.File; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.compiler.generated.Rcc; import org.apache.tools.ant.BuildException; import org.apache.tools.ant.DirectoryScanner; import org.apache.tools.ant.Project; import org.apache.tools.ant.Task; import org.apache.tools.ant.types.FileSet; /** * Hadoop record compiler ant Task *<p> This task takes the given record definition files and compiles them into * java or c++ * files. It is then up to the user to compile the generated files. * * <p> The task requires the <code>file</code> or the nested fileset element to be * specified. Optional attributes are <code>language</code> (set the output * language, default is "java"), * <code>destdir</code> (name of the destination directory for generated java/c++ * code, default is ".") and <code>failonerror</code> (specifies error handling * behavior. default is true). * <p><h4>Usage</h4> * <pre> * &lt;recordcc * destdir="${basedir}/gensrc" * language="java"&gt; * &lt;fileset include="**\/*.jr" /&gt; * &lt;/recordcc&gt; * </pre> * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. 
*/ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class RccTask extends Task { private String language = "java"; private File src; private File dest = new File("."); private final ArrayList<FileSet> filesets = new ArrayList<FileSet>(); private boolean failOnError = true; /** Creates a new instance of RccTask */ public RccTask() { } /** * Sets the output language option * @param language "java"/"c++" */ public void setLanguage(String language) { this.language = language; } /** * Sets the record definition file attribute * @param file record definition file */ public void setFile(File file) { this.src = file; } /** * Given multiple files (via fileset), set the error handling behavior * @param flag true will throw build exception in case of failure (default) */ public void setFailonerror(boolean flag) { this.failOnError = flag; } /** * Sets directory where output files will be generated * @param dir output directory */ public void setDestdir(File dir) { this.dest = dir; } /** * Adds a fileset that can consist of one or more files * @param set Set of record definition files */ public void addFileset(FileSet set) { filesets.add(set); } /** * Invoke the Hadoop record compiler on each record definition file */ @Override public void execute() throws BuildException { if (src == null && filesets.size()==0) { throw new BuildException("There must be a file attribute or a fileset child element"); } if (src != null) { doCompile(src); } Project myProject = getProject(); for (int i = 0; i < filesets.size(); i++) { FileSet fs = filesets.get(i); DirectoryScanner ds = fs.getDirectoryScanner(myProject); File dir = fs.getDir(myProject); String[] srcs = ds.getIncludedFiles(); for (int j = 0; j < srcs.length; j++) { doCompile(new File(dir, srcs[j])); } } } private void doCompile(File file) throws BuildException { String[] args = new String[5]; args[0] = "--language"; args[1] = this.language; args[2] = "--destdir"; args[3] = this.dest.getPath(); args[4] = file.getPath(); int retVal = Rcc.driver(args); if (retVal != 0 && failOnError) { throw new BuildException("Hadoop record compiler returned error code "+retVal); } } }
4,582
30.390411
92
java
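Besides the build.xml usage shown in the class javadoc, RccTask can be driven programmatically. A minimal sketch, where "gensrc" and "records.jr" are made-up paths; execute() throws BuildException on a compiler failure while failonerror is left at its default of true.

import java.io.File;
import org.apache.hadoop.record.compiler.ant.RccTask;

public class RccTaskSketch {
  public static void main(String[] args) {
    RccTask task = new RccTask();
    task.setLanguage("java");             // or "c++"
    task.setDestdir(new File("gensrc"));  // hypothetical output directory
    task.setFile(new File("records.jr")); // hypothetical DDL file
    task.execute();
  }
}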
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccConstants.java
/* Generated By:JavaCC: Do not edit this line. RccConstants.java */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler.generated; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public interface RccConstants { int EOF = 0; int MODULE_TKN = 11; int RECORD_TKN = 12; int INCLUDE_TKN = 13; int BYTE_TKN = 14; int BOOLEAN_TKN = 15; int INT_TKN = 16; int LONG_TKN = 17; int FLOAT_TKN = 18; int DOUBLE_TKN = 19; int USTRING_TKN = 20; int BUFFER_TKN = 21; int VECTOR_TKN = 22; int MAP_TKN = 23; int LBRACE_TKN = 24; int RBRACE_TKN = 25; int LT_TKN = 26; int GT_TKN = 27; int SEMICOLON_TKN = 28; int COMMA_TKN = 29; int DOT_TKN = 30; int CSTRING_TKN = 31; int IDENT_TKN = 32; int DEFAULT = 0; int WithinOneLineComment = 1; int WithinMultiLineComment = 2; String[] tokenImage = { "<EOF>", "\" \"", "\"\\t\"", "\"\\n\"", "\"\\r\"", "\"//\"", "<token of kind 6>", "<token of kind 7>", "\"/*\"", "\"*/\"", "<token of kind 10>", "\"module\"", "\"class\"", "\"include\"", "\"byte\"", "\"boolean\"", "\"int\"", "\"long\"", "\"float\"", "\"double\"", "\"ustring\"", "\"buffer\"", "\"vector\"", "\"map\"", "\"{\"", "\"}\"", "\"<\"", "\">\"", "\";\"", "\",\"", "\".\"", "<CSTRING_TKN>", "<IDENT_TKN>", }; }
2,404
23.540816
77
java
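Each token kind indexes into tokenImage; note that RECORD_TKN corresponds to the literal DDL keyword "class", not "record". A small demo (the class is hypothetical; the constants are from RccConstants above):

import org.apache.hadoop.record.compiler.generated.RccConstants;

public class TokenImageDemo implements RccConstants {
  public static void main(String[] args) {
    System.out.println(tokenImage[MAP_TKN]);    // prints "map" (with quotes)
    System.out.println(tokenImage[RECORD_TKN]); // prints "class" (with quotes)
  }
}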
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Rcc.java
/* Generated By:JavaCC: Do not edit this line. Rcc.java */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler.generated; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.record.compiler.*; import java.util.ArrayList; import java.util.Hashtable; import java.io.File; import java.io.FileReader; import java.io.FileNotFoundException; import java.io.IOException; import org.apache.hadoop.util.StringUtils; /** * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class Rcc implements RccConstants { private static String language = "java"; private static String destDir = "."; private static ArrayList<String> recFiles = new ArrayList<String>(); private static ArrayList<String> cmdargs = new ArrayList<String>(); private static JFile curFile; private static Hashtable<String,JRecord> recTab; private static String curDir = "."; private static String curFileName; private static String curModuleName; public static void main(String[] args) { System.exit(driver(args)); } public static void usage() { System.err.println("Usage: rcc --language [java|c++] ddl-files"); } public static int driver(String[] args) { for (int i=0; i<args.length; i++) { if ("-l".equalsIgnoreCase(args[i]) || "--language".equalsIgnoreCase(args[i])) { language = StringUtils.toLowerCase(args[i+1]); i++; } else if ("-d".equalsIgnoreCase(args[i]) || "--destdir".equalsIgnoreCase(args[i])) { destDir = args[i+1]; i++; } else if (args[i].startsWith("-")) { String arg = args[i].substring(1); if (arg.startsWith("-")) { arg = arg.substring(1); } cmdargs.add(StringUtils.toLowerCase(arg)); } else { recFiles.add(args[i]); } } if (recFiles.size() == 0) { usage(); return 1; } for (int i=0; i<recFiles.size(); i++) { curFileName = recFiles.get(i); File file = new File(curFileName); try { FileReader reader = new FileReader(file); Rcc parser = new Rcc(reader); try { recTab = new Hashtable<String,JRecord>(); curFile = parser.Input(); } catch (ParseException e) { System.err.println(e.toString()); return 1; } try { reader.close(); } catch (IOException e) { } } catch (FileNotFoundException e) { System.err.println("File " + recFiles.get(i) + " Not found."); return 1; } try { int retCode = curFile.genCode(language, destDir, cmdargs); if (retCode != 0) { return retCode; } } catch (IOException e) { System.err.println(e.toString()); return 1; } } return 0; } final public JFile Input() throws ParseException { ArrayList<JFile> ilist = new ArrayList<JFile>(); ArrayList<JRecord> rlist = new ArrayList<JRecord>(); JFile i; ArrayList<JRecord> l; label_1: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 
INCLUDE_TKN: i = Include(); ilist.add(i); break; case MODULE_TKN: l = Module(); rlist.addAll(l); break; default: jj_la1[0] = jj_gen; jj_consume_token(-1); throw new ParseException(); } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case MODULE_TKN: case INCLUDE_TKN: ; break; default: jj_la1[1] = jj_gen; break label_1; } } jj_consume_token(0); {if (true) return new JFile(curFileName, ilist, rlist);} throw new Error("Missing return statement in function"); } final public JFile Include() throws ParseException { String fname; Token t; jj_consume_token(INCLUDE_TKN); t = jj_consume_token(CSTRING_TKN); JFile ret = null; fname = t.image.replaceAll("^\"", "").replaceAll("\"$",""); File file = new File(curDir, fname); String tmpDir = curDir; String tmpFile = curFileName; curDir = file.getParent(); curFileName = file.getName(); try { FileReader reader = new FileReader(file); Rcc parser = new Rcc(reader); try { ret = parser.Input(); System.out.println(fname + " Parsed Successfully"); } catch (ParseException e) { System.out.println(e.toString()); System.exit(1); } try { reader.close(); } catch (IOException e) { } } catch (FileNotFoundException e) { System.out.println("File " + fname + " Not found."); System.exit(1); } curDir = tmpDir; curFileName = tmpFile; {if (true) return ret;} throw new Error("Missing return statement in function"); } final public ArrayList<JRecord> Module() throws ParseException { String mName; ArrayList<JRecord> rlist; jj_consume_token(MODULE_TKN); mName = ModuleName(); curModuleName = mName; jj_consume_token(LBRACE_TKN); rlist = RecordList(); jj_consume_token(RBRACE_TKN); {if (true) return rlist;} throw new Error("Missing return statement in function"); } final public String ModuleName() throws ParseException { String name = ""; Token t; t = jj_consume_token(IDENT_TKN); name += t.image; label_2: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DOT_TKN: ; break; default: jj_la1[2] = jj_gen; break label_2; } jj_consume_token(DOT_TKN); t = jj_consume_token(IDENT_TKN); name += "." + t.image; } {if (true) return name;} throw new Error("Missing return statement in function"); } final public ArrayList<JRecord> RecordList() throws ParseException { ArrayList<JRecord> rlist = new ArrayList<JRecord>(); JRecord r; label_3: while (true) { r = Record(); rlist.add(r); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case RECORD_TKN: ; break; default: jj_la1[3] = jj_gen; break label_3; } } {if (true) return rlist;} throw new Error("Missing return statement in function"); } final public JRecord Record() throws ParseException { String rname; ArrayList<JField<JType>> flist = new ArrayList<JField<JType>>(); Token t; JField<JType> f; jj_consume_token(RECORD_TKN); t = jj_consume_token(IDENT_TKN); rname = t.image; jj_consume_token(LBRACE_TKN); label_4: while (true) { f = Field(); flist.add(f); jj_consume_token(SEMICOLON_TKN); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case BYTE_TKN: case BOOLEAN_TKN: case INT_TKN: case LONG_TKN: case FLOAT_TKN: case DOUBLE_TKN: case USTRING_TKN: case BUFFER_TKN: case VECTOR_TKN: case MAP_TKN: case IDENT_TKN: ; break; default: jj_la1[4] = jj_gen; break label_4; } } jj_consume_token(RBRACE_TKN); String fqn = curModuleName + "." 
+ rname; JRecord r = new JRecord(fqn, flist); recTab.put(fqn, r); {if (true) return r;} throw new Error("Missing return statement in function"); } final public JField<JType> Field() throws ParseException { JType jt; Token t; jt = Type(); t = jj_consume_token(IDENT_TKN); {if (true) return new JField<JType>(t.image, jt);} throw new Error("Missing return statement in function"); } final public JType Type() throws ParseException { JType jt; Token t; String rname; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case MAP_TKN: jt = Map(); {if (true) return jt;} break; case VECTOR_TKN: jt = Vector(); {if (true) return jt;} break; case BYTE_TKN: jj_consume_token(BYTE_TKN); {if (true) return new JByte();} break; case BOOLEAN_TKN: jj_consume_token(BOOLEAN_TKN); {if (true) return new JBoolean();} break; case INT_TKN: jj_consume_token(INT_TKN); {if (true) return new JInt();} break; case LONG_TKN: jj_consume_token(LONG_TKN); {if (true) return new JLong();} break; case FLOAT_TKN: jj_consume_token(FLOAT_TKN); {if (true) return new JFloat();} break; case DOUBLE_TKN: jj_consume_token(DOUBLE_TKN); {if (true) return new JDouble();} break; case USTRING_TKN: jj_consume_token(USTRING_TKN); {if (true) return new JString();} break; case BUFFER_TKN: jj_consume_token(BUFFER_TKN); {if (true) return new JBuffer();} break; case IDENT_TKN: rname = ModuleName(); if (rname.indexOf('.', 0) < 0) { rname = curModuleName + "." + rname; } JRecord r = recTab.get(rname); if (r == null) { System.out.println("Type " + rname + " not known. Exiting."); System.exit(1); } {if (true) return r;} break; default: jj_la1[5] = jj_gen; jj_consume_token(-1); throw new ParseException(); } throw new Error("Missing return statement in function"); } final public JMap Map() throws ParseException { JType jt1; JType jt2; jj_consume_token(MAP_TKN); jj_consume_token(LT_TKN); jt1 = Type(); jj_consume_token(COMMA_TKN); jt2 = Type(); jj_consume_token(GT_TKN); {if (true) return new JMap(jt1, jt2);} throw new Error("Missing return statement in function"); } final public JVector Vector() throws ParseException { JType jt; jj_consume_token(VECTOR_TKN); jj_consume_token(LT_TKN); jt = Type(); jj_consume_token(GT_TKN); {if (true) return new JVector(jt);} throw new Error("Missing return statement in function"); } public RccTokenManager token_source; SimpleCharStream jj_input_stream; public Token token, jj_nt; private int jj_ntk; private int jj_gen; final private int[] jj_la1 = new int[6]; static private int[] jj_la1_0; static private int[] jj_la1_1; static { jj_la1_0(); jj_la1_1(); } private static void jj_la1_0() { jj_la1_0 = new int[] {0x2800, 0x2800, 0x40000000, 0x1000, 0xffc000, 0xffc000,}; } private static void jj_la1_1() { jj_la1_1 = new int[] {0x0, 0x0, 0x0, 0x0, 0x1, 0x1,}; } public Rcc(java.io.InputStream stream) { this(stream, null); } public Rcc(java.io.InputStream stream, String encoding) { try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source = new RccTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; i++) jj_la1[i] = -1; } public void ReInit(java.io.InputStream stream) { ReInit(stream, null); } public void ReInit(java.io.InputStream stream, String encoding) { try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; 
i++) jj_la1[i] = -1; } public Rcc(java.io.Reader stream) { jj_input_stream = new SimpleCharStream(stream, 1, 1); token_source = new RccTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; i++) jj_la1[i] = -1; } public void ReInit(java.io.Reader stream) { jj_input_stream.ReInit(stream, 1, 1); token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; i++) jj_la1[i] = -1; } public Rcc(RccTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; i++) jj_la1[i] = -1; } public void ReInit(RccTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 6; i++) jj_la1[i] = -1; } final private Token jj_consume_token(int kind) throws ParseException { Token oldToken; if ((oldToken = token).next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; if (token.kind == kind) { jj_gen++; return token; } token = oldToken; jj_kind = kind; throw generateParseException(); } final public Token getNextToken() { if (token.next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; jj_gen++; return token; } final public Token getToken(int index) { Token t = token; for (int i = 0; i < index; i++) { if (t.next != null) t = t.next; else t = t.next = token_source.getNextToken(); } return t; } final private int jj_ntk() { if ((jj_nt=token.next) == null) return (jj_ntk = (token.next=token_source.getNextToken()).kind); else return (jj_ntk = jj_nt.kind); } private java.util.Vector<int[]> jj_expentries = new java.util.Vector<int[]>(); private int[] jj_expentry; private int jj_kind = -1; public ParseException generateParseException() { jj_expentries.removeAllElements(); boolean[] la1tokens = new boolean[33]; for (int i = 0; i < 33; i++) { la1tokens[i] = false; } if (jj_kind >= 0) { la1tokens[jj_kind] = true; jj_kind = -1; } for (int i = 0; i < 6; i++) { if (jj_la1[i] == jj_gen) { for (int j = 0; j < 32; j++) { if ((jj_la1_0[i] & (1<<j)) != 0) { la1tokens[j] = true; } if ((jj_la1_1[i] & (1<<j)) != 0) { la1tokens[32+j] = true; } } } } for (int i = 0; i < 33; i++) { if (la1tokens[i]) { jj_expentry = new int[1]; jj_expentry[0] = i; jj_expentries.addElement(jj_expentry); } } int[][] exptokseq = new int[jj_expentries.size()][]; for (int i = 0; i < jj_expentries.size(); i++) { exptokseq[i] = jj_expentries.elementAt(i); } return new ParseException(token, exptokseq, tokenImage); } final public void enable_tracing() { } final public void disable_tracing() { } }
15,342
27.204044
156
java
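The driver can also be called directly, mirroring what RccTask.doCompile does. A sketch, where "gensrc" and "test.jr" are made-up arguments:

import org.apache.hadoop.record.compiler.generated.Rcc;

public class RccDriverSketch {
  public static void main(String[] args) {
    // Equivalent to the command line: rcc --language java --destdir gensrc test.jr
    int rc = Rcc.driver(new String[] {
        "--language", "java",
        "--destdir", "gensrc",  // hypothetical destination directory
        "test.jr"               // hypothetical record DDL file
    });
    System.exit(rc);
  }
}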
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/SimpleCharStream.java
/* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 4.0 */ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.record.compiler.generated; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * An implementation of interface CharStream, where the stream is assumed to * contain only ASCII characters (without unicode processing). * * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class SimpleCharStream { public static final boolean staticFlag = false; int bufsize; int available; int tokenBegin; public int bufpos = -1; protected int bufline[]; protected int bufcolumn[]; protected int column = 0; protected int line = 1; protected boolean prevCharIsCR = false; protected boolean prevCharIsLF = false; protected java.io.Reader inputStream; protected char[] buffer; protected int maxNextCharInd = 0; protected int inBuf = 0; protected int tabSize = 8; protected void setTabSize(int i) { tabSize = i; } protected int getTabSize(int i) { return tabSize; } protected void ExpandBuff(boolean wrapAround) { char[] newbuffer = new char[bufsize + 2048]; int newbufline[] = new int[bufsize + 2048]; int newbufcolumn[] = new int[bufsize + 2048]; try { if (wrapAround) { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); System.arraycopy(buffer, 0, newbuffer, bufsize - tokenBegin, bufpos); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos += (bufsize - tokenBegin)); } else { System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); buffer = newbuffer; System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); bufline = newbufline; System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); bufcolumn = newbufcolumn; maxNextCharInd = (bufpos -= tokenBegin); } } catch (Throwable t) { throw new Error(t.getMessage()); } bufsize += 2048; available = bufsize; tokenBegin = 0; } protected void FillBuff() throws java.io.IOException { if (maxNextCharInd == available) { if (available == bufsize) { if (tokenBegin > 2048) { bufpos = maxNextCharInd = 0; available = tokenBegin; } else if (tokenBegin < 0) bufpos = maxNextCharInd = 0; else ExpandBuff(false); } else if (available > tokenBegin) available = bufsize; else if ((tokenBegin - available) < 2048) ExpandBuff(true); else 
available = tokenBegin; } int i; try { if ((i = inputStream.read(buffer, maxNextCharInd, available - maxNextCharInd)) == -1) { inputStream.close(); throw new java.io.IOException(); } else maxNextCharInd += i; return; } catch(java.io.IOException e) { --bufpos; backup(0); if (tokenBegin == -1) tokenBegin = bufpos; throw e; } } public char BeginToken() throws java.io.IOException { tokenBegin = -1; char c = readChar(); tokenBegin = bufpos; return c; } protected void UpdateLineColumn(char c) { column++; if (prevCharIsLF) { prevCharIsLF = false; line += (column = 1); } else if (prevCharIsCR) { prevCharIsCR = false; if (c == '\n') { prevCharIsLF = true; } else line += (column = 1); } switch (c) { case '\r' : prevCharIsCR = true; break; case '\n' : prevCharIsLF = true; break; case '\t' : column--; column += (tabSize - (column % tabSize)); break; default : break; } bufline[bufpos] = line; bufcolumn[bufpos] = column; } public char readChar() throws java.io.IOException { if (inBuf > 0) { --inBuf; if (++bufpos == bufsize) bufpos = 0; return buffer[bufpos]; } if (++bufpos >= maxNextCharInd) FillBuff(); char c = buffer[bufpos]; UpdateLineColumn(c); return (c); } public int getEndColumn() { return bufcolumn[bufpos]; } public int getEndLine() { return bufline[bufpos]; } public int getBeginColumn() { return bufcolumn[tokenBegin]; } public int getBeginLine() { return bufline[tokenBegin]; } public void backup(int amount) { inBuf += amount; if ((bufpos -= amount) < 0) bufpos += bufsize; } public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { inputStream = dstream; line = startline; column = startcolumn - 1; available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } public SimpleCharStream(java.io.Reader dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } public SimpleCharStream(java.io.Reader dstream) { this(dstream, 1, 1, 4096); } public void ReInit(java.io.Reader dstream, int startline, int startcolumn, int buffersize) { inputStream = dstream; line = startline; column = startcolumn - 1; if (buffer == null || buffersize != buffer.length) { available = bufsize = buffersize; buffer = new char[buffersize]; bufline = new int[buffersize]; bufcolumn = new int[buffersize]; } prevCharIsLF = prevCharIsCR = false; tokenBegin = inBuf = maxNextCharInd = 0; bufpos = -1; } public void ReInit(java.io.Reader dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } public void ReInit(java.io.Reader dstream) { ReInit(dstream, 1, 1, 4096); } public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { this(encoding == null ? 
new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { this(dstream, encoding, startline, startcolumn, 4096); } public SimpleCharStream(java.io.InputStream dstream, int startline, int startcolumn) { this(dstream, startline, startcolumn, 4096); } public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { this(dstream, encoding, 1, 1, 4096); } public SimpleCharStream(java.io.InputStream dstream) { this(dstream, 1, 1, 4096); } public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException { ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); } public void ReInit(java.io.InputStream dstream, int startline, int startcolumn, int buffersize) { ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); } public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, 1, 1, 4096); } public void ReInit(java.io.InputStream dstream) { ReInit(dstream, 1, 1, 4096); } public void ReInit(java.io.InputStream dstream, String encoding, int startline, int startcolumn) throws java.io.UnsupportedEncodingException { ReInit(dstream, encoding, startline, startcolumn, 4096); } public void ReInit(java.io.InputStream dstream, int startline, int startcolumn) { ReInit(dstream, startline, startcolumn, 4096); } public String GetImage() { if (bufpos >= tokenBegin) return new String(buffer, tokenBegin, bufpos - tokenBegin + 1); else return new String(buffer, tokenBegin, bufsize - tokenBegin) + new String(buffer, 0, bufpos + 1); } public char[] GetSuffix(int len) { char[] ret = new char[len]; if ((bufpos + 1) >= len) System.arraycopy(buffer, bufpos - len + 1, ret, 0, len); else { System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0, len - bufpos - 1); System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1); } return ret; } public void Done() { buffer = null; bufline = null; bufcolumn = null; } /** * Method to adjust line and column numbers for the start of a token. */ public void adjustBeginLineColumn(int newLine, int newCol) { int start = tokenBegin; int len; if (bufpos >= tokenBegin) { len = bufpos - tokenBegin + inBuf + 1; } else { len = bufsize - tokenBegin + bufpos + 1 + inBuf; } int i = 0, j = 0, k = 0; int nextColDiff = 0, columnDiff = 0; while (i < len && bufline[j = start % bufsize] == bufline[k = ++start % bufsize]) { bufline[j] = newLine; nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j]; bufcolumn[j] = newCol + columnDiff; columnDiff = nextColDiff; i++; } if (i < len) { bufline[j] = newLine++; bufcolumn[j] = newCol + columnDiff; while (i++ < len) { if (bufline[j = start % bufsize] != bufline[++start % bufsize]) bufline[j] = newLine++; else bufline[j] = newLine; } } line = bufline[j]; column = bufcolumn[j]; } }
11,994
25.834452
157
java
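A short sketch of the stream's position bookkeeping: each readChar() runs UpdateLineColumn, so the line and column of the character just read are queryable afterwards. The demo class is invented; only the SimpleCharStream calls come from the file above.

import java.io.IOException;
import java.io.StringReader;
import org.apache.hadoop.record.compiler.generated.SimpleCharStream;

public class CharStreamSketch {
  public static void main(String[] args) throws IOException {
    SimpleCharStream in = new SimpleCharStream(new StringReader("ab\ncd"));
    for (int i = 0; i < 5; i++) {
      char c = in.readChar();
      // The '\n' itself is still reported on line 1; 'c' starts line 2.
      System.out.println((int) c + " -> line " + in.getEndLine()
          + ", column " + in.getEndColumn());
    }
  }
}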
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/TokenMgrError.java
/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 3.0 */
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record.compiler.generated;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
 */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TokenMgrError extends Error {

  /*
   * Ordinals for various reasons why an Error of this type can be thrown.
   */

  /**
   * Lexical error occurred.
   */
  static final int LEXICAL_ERROR = 0;

  /**
   * An attempt was made to create a second instance of a static token manager.
   */
  static final int STATIC_LEXER_ERROR = 1;

  /**
   * Tried to change to an invalid lexical state.
   */
  static final int INVALID_LEXICAL_STATE = 2;

  /**
   * Detected (and bailed out of) an infinite loop in the token manager.
   */
  static final int LOOP_DETECTED = 3;

  /**
   * Indicates the reason why the exception is thrown. It will have
   * one of the above 4 values.
   */
  int errorCode;

  /**
   * Replaces unprintable characters by their escaped (or Unicode escaped)
   * equivalents in the given string.
   */
  protected static final String addEscapes(String str) {
    StringBuffer retval = new StringBuffer();
    char ch;
    for (int i = 0; i < str.length(); i++) {
      switch (str.charAt(i)) {
      case 0 :
        continue;
      case '\b':
        retval.append("\\b");
        continue;
      case '\t':
        retval.append("\\t");
        continue;
      case '\n':
        retval.append("\\n");
        continue;
      case '\f':
        retval.append("\\f");
        continue;
      case '\r':
        retval.append("\\r");
        continue;
      case '\"':
        retval.append("\\\"");
        continue;
      case '\'':
        retval.append("\\\'");
        continue;
      case '\\':
        retval.append("\\\\");
        continue;
      default:
        if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
          String s = "0000" + Integer.toString(ch, 16);
          retval.append("\\u" + s.substring(s.length() - 4, s.length()));
        } else {
          retval.append(ch);
        }
        continue;
      }
    }
    return retval.toString();
  }

  /**
   * Returns a detailed message for the Error when it is thrown by the
   * token manager to indicate a lexical error.
   * Parameters :
   *    EOFSeen     : indicates if EOF caused the lexical error
   *    curLexState : lexical state in which this error occurred
   *    errorLine   : line number when the error occurred
   *    errorColumn : column number when the error occurred
   *    errorAfter  : prefix that was seen before this error occurred
   *    curchar     : the offending character
   * Note: You can customize the lexical error message by modifying this method.
   */
  protected static String LexicalError(boolean EOFSeen, int lexState,
      int errorLine, int errorColumn, String errorAfter, char curChar) {
    return("Lexical error at line " + errorLine + ", column " + errorColumn +
        ". Encountered: " +
        (EOFSeen ?
            "<EOF> " :
            ("\"" + addEscapes(String.valueOf(curChar)) + "\"") +
            " (" + (int)curChar + "), ") +
        "after : \"" + addEscapes(errorAfter) + "\"");
  }

  /**
   * You can also modify the body of this method to customize your error
   * messages.
   * For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
   * of end-users' concern, so you can return something like :
   *
   *     "Internal Error : Please file a bug report .... "
   *
   * from this method for such cases in the release version of your parser.
   */
  @Override
  public String getMessage() {
    return super.getMessage();
  }

  /*
   * Constructors of various flavors follow.
   */

  public TokenMgrError() {
  }

  public TokenMgrError(String message, int reason) {
    super(message);
    errorCode = reason;
  }

  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine,
      int errorColumn, String errorAfter, char curChar, int reason) {
    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter,
        curChar), reason);
  }
}
5,110
30.549383
136
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/ParseException.java
/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 3.0 */
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record.compiler.generated;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * This exception is thrown when parse errors are encountered.
 * You can explicitly create objects of this exception type by
 * calling the method generateParseException in the generated
 * parser.
 *
 * You can modify this class to customize your error reporting
 * mechanisms so long as you retain the public fields.
 *
 * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
 */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ParseException extends Exception {

  /**
   * This constructor is used by the method "generateParseException"
   * in the generated parser. Calling this constructor generates
   * a new object of this type with the fields "currentToken",
   * "expectedTokenSequences", and "tokenImage" set. The boolean
   * flag "specialConstructor" is also set to true to indicate that
   * this constructor was used to create this object.
   * This constructor calls its super class with the empty string
   * to force the "toString" method of parent class "Throwable" to
   * print the error message in the form:
   *     ParseException: <result of getMessage>
   */
  public ParseException(Token currentTokenVal,
      int[][] expectedTokenSequencesVal,
      String[] tokenImageVal
      ) {
    super("");
    specialConstructor = true;
    currentToken = currentTokenVal;
    expectedTokenSequences = expectedTokenSequencesVal;
    tokenImage = tokenImageVal;
  }

  /**
   * The following constructors are for use by you for whatever
   * purpose you can think of. Constructing the exception in this
   * manner makes the exception behave in the normal way - i.e., as
   * documented in the class "Throwable". The fields "errorToken",
   * "expectedTokenSequences", and "tokenImage" do not contain
   * relevant information. The JavaCC generated code does not use
   * these constructors.
   */
  public ParseException() {
    super();
    specialConstructor = false;
  }

  public ParseException(String message) {
    super(message);
    specialConstructor = false;
  }

  /**
   * This variable determines which constructor was used to create
   * this object and thereby affects the semantics of the
   * "getMessage" method (see below).
   */
  protected boolean specialConstructor;

  /**
   * This is the last token that has been consumed successfully. If
   * this object has been created due to a parse error, the token
   * following this token will (therefore) be the first error token.
   */
  public Token currentToken;

  /**
   * Each entry in this array is an array of integers. Each array
   * of integers represents a sequence of tokens (by their ordinal
   * values) that is expected at this point of the parse.
   */
  public int[][] expectedTokenSequences;

  /**
   * This is a reference to the "tokenImage" array of the generated
   * parser within which the parse error occurred. This array is
   * defined in the generated ...Constants interface.
   */
  public String[] tokenImage;

  /**
   * This method has the standard behavior when this object has been
   * created using the standard constructors. Otherwise, it uses
   * "currentToken" and "expectedTokenSequences" to generate a parse
   * error message and returns it. If this object has been created
   * due to a parse error, and you do not catch it (it gets thrown
   * from the parser), then this method is called during the printing
   * of the final stack trace, and hence the correct error message
   * gets displayed.
   */
  @Override
  public String getMessage() {
    if (!specialConstructor) {
      return super.getMessage();
    }
    StringBuffer expected = new StringBuffer();
    int maxSize = 0;
    for (int i = 0; i < expectedTokenSequences.length; i++) {
      if (maxSize < expectedTokenSequences[i].length) {
        maxSize = expectedTokenSequences[i].length;
      }
      for (int j = 0; j < expectedTokenSequences[i].length; j++) {
        expected.append(tokenImage[expectedTokenSequences[i][j]]).append(" ");
      }
      if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
        expected.append("...");
      }
      expected.append(eol).append(" ");
    }
    String retval = "Encountered \"";
    Token tok = currentToken.next;
    for (int i = 0; i < maxSize; i++) {
      if (i != 0) retval += " ";
      if (tok.kind == 0) {
        retval += tokenImage[0];
        break;
      }
      retval += add_escapes(tok.image);
      tok = tok.next;
    }
    retval += "\" at line " + currentToken.next.beginLine + ", column " +
        currentToken.next.beginColumn;
    retval += "." + eol;
    if (expectedTokenSequences.length == 1) {
      retval += "Was expecting:" + eol + " ";
    } else {
      retval += "Was expecting one of:" + eol + " ";
    }
    retval += expected.toString();
    return retval;
  }

  /**
   * The end of line string for this machine.
   */
  protected String eol = System.getProperty("line.separator", "\n");

  /**
   * Used to convert raw characters to their escaped version
   * when these raw versions cannot be used as part of an ASCII
   * string literal.
   */
  protected String add_escapes(String str) {
    StringBuffer retval = new StringBuffer();
    char ch;
    for (int i = 0; i < str.length(); i++) {
      switch (str.charAt(i)) {
      case 0 :
        continue;
      case '\b':
        retval.append("\\b");
        continue;
      case '\t':
        retval.append("\\t");
        continue;
      case '\n':
        retval.append("\\n");
        continue;
      case '\f':
        retval.append("\\f");
        continue;
      case '\r':
        retval.append("\\r");
        continue;
      case '\"':
        retval.append("\\\"");
        continue;
      case '\'':
        retval.append("\\\'");
        continue;
      case '\\':
        retval.append("\\\\");
        continue;
      default:
        if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
          String s = "0000" + Integer.toString(ch, 16);
          retval.append("\\u" + s.substring(s.length() - 4, s.length()));
        } else {
          retval.append(ch);
        }
        continue;
      }
    }
    return retval.toString();
  }
}
7,394
32.613636
104
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/Token.java
/* Generated By:JavaCC: Do not edit this line. Token.java Version 3.0 */
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.record.compiler.generated;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Describes the input token stream.
 *
 * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
 */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Token {

  /**
   * An integer that describes the kind of this token. This numbering
   * system is determined by JavaCCParser, and a table of these numbers is
   * stored in the file ...Constants.java.
   */
  public int kind;

  /**
   * beginLine and beginColumn describe the position of the first character
   * of this token; endLine and endColumn describe the position of the
   * last character of this token.
   */
  public int beginLine, beginColumn, endLine, endColumn;

  /**
   * The string image of the token.
   */
  public String image;

  /**
   * A reference to the next regular (non-special) token from the input
   * stream. If this is the last token from the input stream, or if the
   * token manager has not read tokens beyond this one, this field is
   * set to null. This is true only if this token is also a regular
   * token. Otherwise, see below for a description of the contents of
   * this field.
   */
  public Token next;

  /**
   * This field is used to access special tokens that occur prior to this
   * token, but after the immediately preceding regular (non-special) token.
   * If there are no such special tokens, this field is set to null.
   * When there is more than one such special token, this field refers
   * to the last of these special tokens, which in turn refers to the next
   * previous special token through its specialToken field, and so on
   * until the first special token (whose specialToken field is null).
   * The next fields of special tokens refer to other special tokens that
   * immediately follow it (without an intervening regular token). If there
   * is no such token, this field is null.
   */
  public Token specialToken;

  /**
   * Returns the image.
   */
  @Override
  public String toString() {
    return image;
  }

  /**
   * Returns a new Token object, by default. However, if you want, you
   * can create and return subclass objects based on the value of ofKind.
   * Simply add the cases to the switch for all those special cases.
   * For example, if you have a subclass of Token called IDToken that
   * you want to create if ofKind is ID, simply add something like :
   *
   *     case MyParserConstants.ID : return new IDToken();
   *
   * to the following switch statement. Then you can cast matchedToken
   * variable to the appropriate type and use it in your lexical actions.
   */
  public static final Token newToken(int ofKind) {
    switch(ofKind) {
    default : return new Token();
    }
  }
}
33.962963
77
java