code
stringlengths 10
749k
| repo_name
stringlengths 5
108
| path
stringlengths 7
333
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 10
749k
|
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query.aggregation.datasketches.theta;
import com.google.common.base.Preconditions;
import com.google.common.collect.Ordering;
import com.google.common.primitives.Doubles;
import com.google.common.primitives.Longs;
import org.apache.datasketches.Family;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.theta.AnotB;
import org.apache.datasketches.theta.Intersection;
import org.apache.datasketches.theta.SetOperation;
import org.apache.datasketches.theta.Sketch;
import org.apache.datasketches.theta.Sketches;
import org.apache.datasketches.theta.Union;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Comparator;
/**
 * Holder around the possible in-memory representations of a theta sketch:
 * a materialized {@link Sketch}, a {@link Union} still being built, or
 * serialized sketch bytes wrapped in a {@link Memory}. The materialized
 * sketch and its estimate are computed lazily and cached.
 */
public class SketchHolder
{
  /** Shared holder around an empty, compact, ordered sketch. */
  public static final SketchHolder EMPTY = SketchHolder.of(
      Sketches.updateSketchBuilder()
              .build()
              .compact(true, null)
  );

  /**
   * Orders holders with nulls first. Sketch/Union-backed holders compare by
   * estimate and always sort before Memory-backed holders, which compare by
   * their raw bytes (see {@link #MEMORY_COMPARATOR}).
   */
  public static final Comparator<Object> COMPARATOR = Ordering.from(
      new Comparator<Object>()
      {
        @Override
        public int compare(Object o1, Object o2)
        {
          SketchHolder h1 = (SketchHolder) o1;
          SketchHolder h2 = (SketchHolder) o2;

          if (h1.obj instanceof Sketch || h1.obj instanceof Union) {
            if (h2.obj instanceof Sketch || h2.obj instanceof Union) {
              return SKETCH_COMPARATOR.compare(h1.getSketch(), h2.getSketch());
            } else {
              // sketch/union sorts before memory
              return -1;
            }
          }

          if (h1.obj instanceof Memory) {
            if (h2.obj instanceof Memory) {
              return MEMORY_COMPARATOR.compare((Memory) h1.obj, (Memory) h2.obj);
            } else {
              // memory sorts after sketch/union
              return 1;
            }
          }

          // Fixed typo in error message ("Unknwon" -> "Unknown").
          throw new IAE("Unknown types [%s] and [%s]", h1.obj.getClass().getName(), h2.obj.getClass().getName());
        }
      }
  ).nullsFirst();

  /** Compares materialized sketches by their distinct-count estimate. */
  private static final Comparator<Sketch> SKETCH_COMPARATOR = new Comparator<Sketch>()
  {
    @Override
    public int compare(Sketch o1, Sketch o2)
    {
      return Doubles.compare(o1.getEstimate(), o2.getEstimate());
    }
  };

  /** Compares serialized sketches by capacity, then by their last 8 bytes. */
  private static final Comparator<Memory> MEMORY_COMPARATOR = new Comparator<Memory>()
  {
    @SuppressWarnings("SubtractionInCompareTo")
    @Override
    public int compare(Memory o1, Memory o2)
    {
      // We have two Ordered Compact sketches, so just compare their last entry if they have the same size.
      // This is to produce a deterministic ordering, though it might not match the actual estimate
      // ordering, but that's ok because this comparator is only used by GenericIndexed
      int retVal = Longs.compare(o1.getCapacity(), o2.getCapacity());
      if (retVal == 0) {
        // Capacities are equal here; read each memory's own last long
        // (original code read o1's long at o2's capacity, which only worked
        // because the capacities matched -- made consistent for clarity).
        retVal = Longs.compare(o1.getLong(o1.getCapacity() - 8), o2.getLong(o2.getCapacity() - 8));
      }
      return retVal;
    }
  };

  // Underlying representation: Sketch, Union, or Memory (enforced in constructor).
  private final Object obj;

  // Lazily-computed caches; volatile so concurrent readers see a fully-written value.
  @Nullable
  private volatile Double cachedEstimate = null;
  @Nullable
  private volatile Sketch cachedSketch = null;

  private SketchHolder(Object obj)
  {
    Preconditions.checkArgument(
        obj instanceof Sketch || obj instanceof Union || obj instanceof Memory,
        "unknown sketch representation type [%s]", obj.getClass().getName()
    );
    this.obj = obj;
  }

  /**
   * Wraps the given sketch representation (Sketch, Union or Memory).
   *
   * @throws IllegalArgumentException if {@code obj} is not a supported type
   */
  public static SketchHolder of(Object obj)
  {
    return new SketchHolder(obj);
  }

  /**
   * Folds this holder's sketch into the given union. Memory-backed holders
   * are fed directly so the union can avoid an intermediate heapify.
   */
  public void updateUnion(Union union)
  {
    if (obj instanceof Memory) {
      union.update((Memory) obj);
    } else {
      union.update(getSketch());
    }
  }

  /**
   * Materializes (and caches) the sketch from whatever representation is held.
   */
  public Sketch getSketch()
  {
    if (cachedSketch != null) {
      return cachedSketch;
    }
    if (obj instanceof Sketch) {
      cachedSketch = (Sketch) obj;
    } else if (obj instanceof Union) {
      cachedSketch = ((Union) obj).getResult();
    } else if (obj instanceof Memory) {
      cachedSketch = deserializeFromMemory((Memory) obj);
    } else {
      throw new ISE("Can't get sketch from object of type [%s]", obj.getClass().getName());
    }
    return cachedSketch;
  }

  /** Returns (and caches) the distinct-count estimate of the held sketch. */
  public double getEstimate()
  {
    if (cachedEstimate == null) {
      cachedEstimate = getSketch().getEstimate();
    }
    return cachedEstimate.doubleValue();
  }

  /**
   * Returns the estimate together with upper/lower bounds at the given number
   * of standard deviations.
   */
  public SketchEstimateWithErrorBounds getEstimateWithErrorBounds(int errorBoundsStdDev)
  {
    Sketch sketch = getSketch();
    SketchEstimateWithErrorBounds result = new SketchEstimateWithErrorBounds(
        getEstimate(),
        sketch.getUpperBound(errorBoundsStdDev),
        sketch.getLowerBound(errorBoundsStdDev),
        errorBoundsStdDev);
    return result;
  }

  /**
   * Combines two holders, reusing an existing Union when either side already
   * holds one (the reused holder's caches are invalidated); otherwise builds
   * a fresh union with the given nominal entries.
   */
  public static SketchHolder combine(Object o1, Object o2, int nomEntries)
  {
    SketchHolder holder1 = (SketchHolder) o1;
    SketchHolder holder2 = (SketchHolder) o2;

    if (holder1.obj instanceof Union) {
      Union union = (Union) holder1.obj;
      holder2.updateUnion(union);
      holder1.invalidateCache();
      return holder1;
    } else if (holder2.obj instanceof Union) {
      Union union = (Union) holder2.obj;
      holder1.updateUnion(union);
      holder2.invalidateCache();
      return holder2;
    } else {
      Union union = (Union) SetOperation.builder().setNominalEntries(nomEntries).build(Family.UNION);
      holder1.updateUnion(union);
      holder2.updateUnion(union);
      return SketchHolder.of(union);
    }
  }

  /** Drops cached sketch/estimate after the underlying Union was mutated. */
  void invalidateCache()
  {
    cachedEstimate = null;
    cachedSketch = null;
  }

  /**
   * Deserializes any supported representation (base64 String, byte[],
   * SketchHolder, Sketch, Union or Memory) into a SketchHolder.
   */
  public static SketchHolder deserialize(Object serializedSketch)
  {
    if (serializedSketch instanceof String) {
      return SketchHolder.of(deserializeFromBase64EncodedString((String) serializedSketch));
    } else if (serializedSketch instanceof byte[]) {
      return SketchHolder.of(deserializeFromByteArray((byte[]) serializedSketch));
    } else if (serializedSketch instanceof SketchHolder) {
      return (SketchHolder) serializedSketch;
    } else if (serializedSketch instanceof Sketch
        || serializedSketch instanceof Union
        || serializedSketch instanceof Memory) {
      return SketchHolder.of(serializedSketch);
    }

    throw new ISE(
        "Object is not of a type[%s] that can be deserialized to sketch.",
        serializedSketch.getClass()
    );
  }

  private static Sketch deserializeFromBase64EncodedString(String str)
  {
    return deserializeFromByteArray(StringUtils.decodeBase64(StringUtils.toUtf8(str)));
  }

  private static Sketch deserializeFromByteArray(byte[] data)
  {
    return deserializeFromMemory(Memory.wrap(data));
  }

  private static Sketch deserializeFromMemory(Memory mem)
  {
    // Pre-v3 serialization formats must be heapified; v3+ can be wrapped in place.
    if (Sketch.getSerializationVersion(mem) < 3) {
      return Sketches.heapifySketch(mem);
    } else {
      return Sketches.wrapSketch(mem);
    }
  }

  /** Supported set operations for {@link #sketchSetOperation}. */
  public enum Func
  {
    UNION,
    INTERSECT,
    NOT
  }

  /**
   * Applies the given set operation across all holders and returns the result.
   *
   * @throws IllegalArgumentException for NOT with no operands or an unknown func
   */
  public static SketchHolder sketchSetOperation(Func func, int sketchSize, Object... holders)
  {
    //in the code below, I am returning SetOp.getResult(false, null)
    //"false" gets us an unordered sketch which is faster to build
    //"true" returns an ordered sketch but slower to compute. advantage of ordered sketch
    //is that they are faster to "union" later but given that this method is used in
    //the final stages of query processing, ordered sketch would be of no use.
    switch (func) {
      case UNION:
        Union union = (Union) SetOperation.builder().setNominalEntries(sketchSize).build(Family.UNION);
        for (Object o : holders) {
          ((SketchHolder) o).updateUnion(union);
        }
        return SketchHolder.of(union);
      case INTERSECT:
        Intersection intersection = (Intersection) SetOperation.builder().setNominalEntries(sketchSize).build(Family.INTERSECTION);
        for (Object o : holders) {
          intersection.update(((SketchHolder) o).getSketch());
        }
        return SketchHolder.of(intersection.getResult(false, null));
      case NOT:
        if (holders.length < 1) {
          throw new IllegalArgumentException("A-Not-B requires at least 1 sketch");
        }
        if (holders.length == 1) {
          return (SketchHolder) holders[0];
        }
        // Left-fold A-NOT-B over the operand list.
        Sketch result = ((SketchHolder) holders[0]).getSketch();
        for (int i = 1; i < holders.length; i++) {
          AnotB anotb = (AnotB) SetOperation.builder().setNominalEntries(sketchSize).build(Family.A_NOT_B);
          anotb.update(result, ((SketchHolder) holders[i]).getSketch());
          result = anotb.getResult(false, null);
        }
        return SketchHolder.of(result);
      default:
        throw new IllegalArgumentException("Unknown sketch operation " + func);
    }
  }

  /**
   * Ideally make use of Sketch's equals and hashCode methods but which are not value based implementations.
   * And yet need value based equals and hashCode implementations for SketchHolder.
   * Hence using Arrays.equals() and Arrays.hashCode().
   */
  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    return Arrays.equals(this.getSketch().toByteArray(), ((SketchHolder) o).getSketch().toByteArray());
  }

  @Override
  public int hashCode()
  {
    return 31 * Arrays.hashCode(this.getSketch().toByteArray());
  }
}
| himanshug/druid | extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/theta/SketchHolder.java | Java | apache-2.0 | 10,187 |
package web;
/*
* Copyright 2002-2007 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Mock implementation of the {@link javax.servlet.RequestDispatcher} interface.
*
* <p>Used for testing the web framework; typically not necessary for
* testing application controllers.
*
* @author Rod Johnson
* @author Juergen Hoeller
* @since 1.0.2
*/
public class MockRequestDispatcher implements RequestDispatcher {
private final Log logger = LogFactory.getLog(getClass());
private final String url;
/**
* Create a new MockRequestDispatcher for the given URL.
* @param url the URL to dispatch to.
*/
public MockRequestDispatcher(String url) {
this.url = url;
}
@Override
public void forward(ServletRequest request, ServletResponse response) {
if (response.isCommitted()) {
throw new IllegalStateException("Cannot perform forward - response is already committed");
}
getMockHttpServletResponse(response).setForwardedUrl(this.url);
if (logger.isDebugEnabled()) {
logger.debug("MockRequestDispatcher: forwarding to URL [" + this.url + "]");
}
}
@Override
public void include(ServletRequest request, ServletResponse response) {
getMockHttpServletResponse(response).setIncludedUrl(this.url);
if (logger.isDebugEnabled()) {
logger.debug("MockRequestDispatcher: including URL [" + this.url + "]");
}
}
/**
* Obtain the underlying MockHttpServletResponse,
* unwrapping {@link HttpServletResponseWrapper} decorators if necessary.
*/
protected MockHttpServletResponse getMockHttpServletResponse(ServletResponse response) {
if (response instanceof MockHttpServletResponse) {
return (MockHttpServletResponse) response;
}
if (response instanceof HttpServletResponseWrapper) {
return getMockHttpServletResponse(((HttpServletResponseWrapper) response).getResponse());
}
throw new IllegalArgumentException("MockRequestDispatcher requires MockHttpServletResponse");
}
} | edolganov/live-chat-engine | common/util/test-src/web/MockRequestDispatcher.java | Java | apache-2.0 | 2,739 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.core.metrics;
import static junit.framework.TestCase.assertEquals;
import static org.apache.beam.runners.core.metrics.MonitoringInfoEncodings.decodeInt64Counter;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.beam.model.pipeline.v1.MetricsApi.MonitoringInfo;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for {@link SimpleMonitoringInfoBuilder}. */
@RunWith(JUnit4.class)
public class SimpleMonitoringInfoBuilderTest {

  @Test
  public void testReturnsNullIfSpecRequirementsNotMet() {
    SimpleMonitoringInfoBuilder infoBuilder = new SimpleMonitoringInfoBuilder();
    infoBuilder.setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT);
    // A urn alone does not satisfy the ELEMENT_COUNT spec.
    assertNull(infoBuilder.build());
    infoBuilder.setInt64SumValue(1);
    // A payload without the required PCOLLECTION label is still incomplete.
    assertNull(infoBuilder.build());
  }

  @Test
  public void testReturnsExpectedMonitoringInfo() throws Exception {
    SimpleMonitoringInfoBuilder infoBuilder = new SimpleMonitoringInfoBuilder();
    infoBuilder.setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT);
    infoBuilder.setInt64SumValue(1);
    infoBuilder.setLabel(MonitoringInfoConstants.Labels.PCOLLECTION, "myPcollection");
    // Pass now that the spec is fully met.
    MonitoringInfo info = infoBuilder.build();
    assertTrue(info != null);
    assertEquals(
        "myPcollection",
        info.getLabelsOrDefault(MonitoringInfoConstants.Labels.PCOLLECTION, null));
    assertEquals(MonitoringInfoConstants.Urns.ELEMENT_COUNT, info.getUrn());
    assertEquals(MonitoringInfoConstants.TypeUrns.SUM_INT64_TYPE, info.getType());
    assertEquals(1L, decodeInt64Counter(info.getPayload()));
    assertEquals(
        "myPcollection",
        info.getLabelsMap().get(MonitoringInfoConstants.Labels.PCOLLECTION));
  }

  @Test
  public void testUserDistribution() throws Exception {
    SimpleMonitoringInfoBuilder infoBuilder = new SimpleMonitoringInfoBuilder();
    infoBuilder.setUrn(MonitoringInfoConstants.Urns.USER_DISTRIBUTION_INT64);
    infoBuilder.setLabel(MonitoringInfoConstants.Labels.NAME, "myName");
    infoBuilder.setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "myNamespace");
    infoBuilder.setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "myStep");
    // Labels alone are not enough: the distribution payload is still missing.
    assertNull(infoBuilder.build());
    infoBuilder.setInt64DistributionValue(DistributionData.create(10, 2, 1, 9));
    // Pass now that the spec is fully met.
    MonitoringInfo info = infoBuilder.build();
    assertTrue(info != null);
    assertEquals(MonitoringInfoConstants.Urns.USER_DISTRIBUTION_INT64, info.getUrn());
    assertEquals(
        "myName", info.getLabelsOrDefault(MonitoringInfoConstants.Labels.NAME, ""));
    assertEquals(
        "myNamespace",
        info.getLabelsOrDefault(MonitoringInfoConstants.Labels.NAMESPACE, ""));
    assertEquals(
        MonitoringInfoConstants.TypeUrns.DISTRIBUTION_INT64_TYPE, info.getType());
    DistributionData decoded =
        MonitoringInfoEncodings.decodeInt64Distribution(info.getPayload());
    assertEquals(10L, decoded.sum());
    assertEquals(2L, decoded.count());
    assertEquals(9L, decoded.max());
    assertEquals(1L, decoded.min());
  }
}
| lukecwik/incubator-beam | runners/core-java/src/test/java/org/apache/beam/runners/core/metrics/SimpleMonitoringInfoBuilderTest.java | Java | apache-2.0 | 4,076 |
package org.hibernate.validator.referenceguide.chapter11.constraintapi;
public class Person {
    // Person's name. The class intentionally carries no constraint annotations;
    // presumably constraints are attached externally via the programmatic
    // constraint-definition API (package name suggests the "constraintapi"
    // documentation examples) — TODO confirm against the enclosing test.
    private String name;
}
| fazerish/hibernate-validator | documentation/src/test/java/org/hibernate/validator/referenceguide/chapter11/constraintapi/Person.java | Java | apache-2.0 | 120 |
package bndtools.editor.pages;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.ui.forms.events.ExpansionAdapter;
import org.eclipse.ui.forms.events.ExpansionEvent;
/**
 * Expansion listener that swaps a control's layout data between the
 * "expanded" and "collapsed" variants and re-runs the parent layout
 * whenever an expandable section is toggled.
 */
public class ResizeExpansionAdapter extends ExpansionAdapter {

    private final Composite parentToLayout;
    private final Control target;

    /** Convenience constructor: lays out the control's own parent. */
    public ResizeExpansionAdapter(Control control) {
        this(control.getParent(), control);
    }

    public ResizeExpansionAdapter(Composite layoutParent, Control control) {
        this.parentToLayout = layoutParent;
        this.target = control;
    }

    @Override
    public void expansionStateChanged(ExpansionEvent e) {
        // e.data carries the new expansion state as a Boolean.
        final Object layoutData;
        if (Boolean.TRUE.equals(e.data)) {
            layoutData = PageLayoutUtils.createExpanded();
        } else {
            layoutData = PageLayoutUtils.createCollapsed();
        }
        target.setLayoutData(layoutData);
        parentToLayout.layout(true, true);
    }
}
| psoreide/bnd | bndtools.core/src/bndtools/editor/pages/ResizeExpansionAdapter.java | Java | apache-2.0 | 858 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.synapse.util.resolver;
import java.util.List;
import org.apache.synapse.config.SynapseConfiguration;
import org.apache.synapse.mediators.Value;
import org.apache.ws.commons.schema.resolver.URIResolver;
import org.w3c.dom.ls.LSResourceResolver;
/**
* This interface lets users write their own XmlSchemaURIResolver rather than
* using {@link CustomXmlSchemaURIResolver}.
* Using the WSDL key or schema key, users can perform their own mapping between a relative
* location and a registry path. The implementation class must be registered via the
* synapse property "synapse.schema.resolver="
*/
public interface UserDefinedXmlSchemaURIResolver extends URIResolver, LSResourceResolver {
    /**
     * Initializes the resolver for schemas referenced from a WSDL.
     *
     * @param resourceMap {@link ResourceMap} object mapping relative locations to registry entries
     * @param synCfg the active {@link SynapseConfiguration}
     * @param wsdlKey the registry key of the WSDL file whose schema imports are being resolved
     */
    void init(ResourceMap resourceMap, SynapseConfiguration synCfg, String wsdlKey);
    /**
     * Initializes the resolver for external schema references declared in a
     * Validate mediator configuration using
     *
     * <pre>
     * <resource location="location" key="key"/>
     * </pre>
     *
     * inside the Validate mediator configuration.
     *
     * @param resourceMap
     *            {@link ResourceMap} object mapping relative locations to registry entries
     * @param synCfg
     *            the active {@link SynapseConfiguration}
     * @param schemaRegKey
     *            list of registry keys of the base schemas
     */
    void init(ResourceMap resourceMap, SynapseConfiguration synCfg, List<Value> schemaRegKey);
}
| maheshika/wso2-synapse | modules/core/src/main/java/org/apache/synapse/util/resolver/UserDefinedXmlSchemaURIResolver.java | Java | apache-2.0 | 2,403 |
package com.fasterxml.jackson.annotation;
/**
 * Enumeration used to define kinds of elements (called "property accessors")
 * that annotations like {@link JsonAutoDetect} apply to.
 *<p>
 * Besides the concrete accessor kinds -- method types (GETTER/IS_GETTER,
 * SETTER, CREATOR) and the field type (FIELD) -- two pseudo-values are
 * defined for convenience: {@code ALL} matches every accessor kind and
 * {@code NONE} matches none, for use by annotations that take a
 * <code>JsonMethod</code> argument.
 */
public enum PropertyAccessor
{
    /**
     * Getters are methods used to get a POJO field value for serialization,
     * or, under certain conditions also for de-serialization. Latter
     * can be used for effectively setting Collection or Map values
     * in absence of setters, iff returned value is not a copy but
     * actual value of the logical property.
     *<p>
     * Does <b>NOT</b> cover "is getters" (boolean-returning methods named
     * 'isXxx' for property 'xxx'); those are matched by {@link #IS_GETTER}.
     */
    GETTER,

    /**
     * Setters are methods used to set a POJO value for deserialization.
     */
    SETTER,

    /**
     * Creators are constructors and (static) factory methods used to
     * construct POJO instances for deserialization.
     */
    CREATOR,

    /**
     * Field refers to fields of regular Java objects; covered here so that
     * optional field auto-detection can be enabled or disabled like the
     * method-based accessors.
     */
    FIELD,

    /**
     * "Is getters" are getter-like methods named "isXxx" (instead of
     * "getXxx") that return a boolean value (primitive or
     * {@link java.lang.Boolean}).
     */
    IS_GETTER,

    /**
     * Pseudo-value indicating that no accessor kind is affected.
     */
    NONE,

    /**
     * Pseudo-value indicating that every accessor kind is affected.
     */
    ALL
    ;

    /** True if this value covers creator (constructor/factory) access. */
    public boolean creatorEnabled() {
        return this == ALL || this == CREATOR;
    }

    /** True if this value covers regular "getXxx" access. */
    public boolean getterEnabled() {
        return this == ALL || this == GETTER;
    }

    /** True if this value covers boolean "isXxx" access. */
    public boolean isGetterEnabled() {
        return this == ALL || this == IS_GETTER;
    }

    /** True if this value covers "setXxx" access. */
    public boolean setterEnabled() {
        return this == ALL || this == SETTER;
    }

    /** True if this value covers direct field access. */
    public boolean fieldEnabled() {
        return this == ALL || this == FIELD;
    }
}
| FasterXML/jackson-annotations | src/main/java/com/fasterxml/jackson/annotation/PropertyAccessor.java | Java | apache-2.0 | 2,611 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.processors;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.hive.conf.HiveVariableSource;
import org.apache.hadoop.hive.conf.VariableSubstitution;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Schema;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
/**
 * DfsProcessor runs Hadoop "dfs" shell commands ({@link FsShell}) on behalf
 * of Hive's command processor framework, after variable substitution and
 * authorization checks.
 */
public class DfsProcessor implements CommandProcessor {
  private static final Logger LOG = LoggerFactory.getLogger(DfsProcessor.class.getName());
  private static final LogHelper console = new LogHelper(LOG);
  // Column header used for the single-column result schema.
  public static final String DFS_RESULT_HEADER = "DFS Output";

  // Shell that actually executes the dfs command.
  private final FsShell dfs;
  // One-column ("DFS Output": string) schema attached to successful responses.
  private final Schema dfsSchema;

  public DfsProcessor(Configuration conf) {
    this(conf, false);
  }

  // NOTE(review): the addSchema flag is never read; the schema is always
  // built unconditionally — presumably a leftover parameter, confirm before removing.
  public DfsProcessor(Configuration conf, boolean addSchema) {
    dfs = new FsShell(conf);
    dfsSchema = new Schema();
    dfsSchema.addToFieldSchemas(new FieldSchema(DFS_RESULT_HEADER, "string", ""));
  }

  /**
   * Executes the given dfs command line.
   *
   * @param command the dfs command (without the leading "dfs" keyword)
   * @return a response carrying the one-column result schema on success
   * @throws CommandProcessorException if the shell returns non-zero or any error occurs
   */
  @Override
  public CommandProcessorResponse run(String command) throws CommandProcessorException {
    try {
      SessionState ss = SessionState.get();
      // Expand Hive variables (e.g. ${hivevar:...}) before tokenizing.
      command = new VariableSubstitution(new HiveVariableSource() {
        @Override
        public Map<String, String> getHiveVariable() {
          return SessionState.get().getHiveVariables();
        }
      }).substitute(ss.getConf(), command);

      String[] tokens = splitCmd(command);
      CommandProcessorResponse authErrResp =
          CommandUtil.authorizeCommand(ss, HiveOperationType.DFS, Arrays.asList(tokens));
      if(authErrResp != null){
        // there was an authorization issue
        return authErrResp;
      }

      // Temporarily redirect stdout so FsShell output reaches the session stream.
      PrintStream oldOut = System.out;
      if (ss != null && ss.out != null) {
        System.setOut(ss.out);
      }
      int ret = dfs.run(tokens);
      System.setOut(oldOut);

      if (ret != 0) {
        console.printError("Command " + command + " failed with exit code = " + ret);
        throw new CommandProcessorException(ret);
      }
      return new CommandProcessorResponse(dfsSchema, null);
    } catch (CommandProcessorException e) {
      // Re-throw our own failures untouched; only wrap unexpected exceptions below.
      throw e;
    } catch (Exception e) {
      console.printError("Exception raised from DFSShell.run "
          + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils
          .stringifyException(e));
      throw new CommandProcessorException(1);
    }
  }

  /**
   * Splits a command line into argument tokens, honoring single- and
   * double-quoted segments (quotes are stripped from the tokens).
   *
   * @throws HiveException if a quote is left unterminated
   */
  private String[] splitCmd(String command) throws HiveException {
    ArrayList<String> paras = new ArrayList<String>();
    int cmdLng = command.length();
    // y holds the currently-open quote character, or 0 when outside quotes.
    char y = 0;
    // start marks the beginning of the token being accumulated.
    int start = 0;

    for (int i = 0; i < cmdLng; i++) {
      char x = command.charAt(i);

      switch(x) {
        case ' ':
          // Space ends a token only when not inside quotes.
          if (y == 0) {
            String str = command.substring(start, i).trim();
            if (!str.equals("")) {
              paras.add(str);
              start = i + 1;
            }
          }
          break;
        case '"':
          if (y == 0) {
            // Opening double quote: token starts after it.
            y = x;
            start = i + 1;
          } else if ('"' == y) {
            // Matching closing double quote: emit the quoted token.
            paras.add(command.substring(start, i).trim());
            y = 0;
            start = i + 1;
          }
          break;
        case '\'':
          if (y == 0) {
            // Opening single quote: token starts after it.
            y = x;
            start = i + 1;
          } else if ('\'' == y) {
            // Matching closing single quote: emit the quoted token.
            paras.add(command.substring(start, i).trim());
            y = 0;
            start = i + 1;
          }
          break;
        default:
          // Flush the trailing token when the last character is reached.
          if (i == cmdLng-1 && start < cmdLng) {
            paras.add(command.substring(start, cmdLng).trim());
          }
          break;
      }
    }

    // A non-zero y means a quote was opened but never closed.
    if (y != 0) {
      String message = "Syntax error on hadoop options: dfs " + command;
      console.printError(message);
      throw new HiveException(message);
    }
    return paras.toArray(new String[paras.size()]);
  }

  @Override
  public void close() throws Exception {
  }
}
| vineetgarg02/hive | ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java | Java | apache-2.0 | 5,151 |
/*
* Copyright 2000-2011 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.smartPointers;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Segment;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.PsiAnchor;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Smart-pointer element info for compiled (class-file) elements, backed by a
 * {@link PsiAnchor.StubIndexReference}. All operations delegate to the stub
 * reference; compiled elements have no document ranges, so range queries
 * return null.
 */
public class ClsElementInfo extends SmartPointerElementInfo {
  // Stable reference into the stub index; the sole piece of state.
  private final PsiAnchor.StubIndexReference myStubIndexReference;

  public ClsElementInfo(@NotNull PsiAnchor.StubIndexReference stubReference) {
    myStubIndexReference = stubReference;
  }

  /** Re-resolves the element from the stub index; may return null if gone. */
  @Override
  public PsiElement restoreElement() {
    return myStubIndexReference.retrieve();
  }

  @Override
  public int elementHashCode() {
    return myStubIndexReference.hashCode();
  }

  /** Two infos point to the same element iff their stub references are equal. */
  @Override
  public boolean pointsToTheSameElementAs(@NotNull SmartPointerElementInfo other) {
    return other instanceof ClsElementInfo && myStubIndexReference.equals(((ClsElementInfo)other).myStubIndexReference);
  }

  @Override
  public VirtualFile getVirtualFile() {
    return myStubIndexReference.getVirtualFile();
  }

  // Compiled elements are not backed by an editable document: no range.
  @Override
  public Segment getRange() {
    return null;
  }

  @NotNull
  @Override
  public Project getProject() {
    return myStubIndexReference.getProject();
  }

  // See getRange(): PSI ranges are likewise unavailable for compiled code.
  @Nullable
  @Override
  public Segment getPsiRange() {
    return null;
  }

  @Override
  public PsiFile restoreFile() {
    return myStubIndexReference.getFile();
  }

  @Override
  public String toString() {
    return myStubIndexReference.toString();
  }
}
| asedunov/intellij-community | platform/core-impl/src/com/intellij/psi/impl/smartPointers/ClsElementInfo.java | Java | apache-2.0 | 2,181 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.parse;
import java.util.HashMap;
import java.util.Map;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.StringUtil;
import com.google.common.collect.ImmutableMap;
/**
* Node representing optimizer hints in SQL
*/
public class HintNode {
public static final HintNode EMPTY_HINT_NODE = new HintNode();
public static final char SEPARATOR = ' ';
public static final String PREFIX = "(";
public static final String SUFFIX = ")";
// Split on whitespace and parenthesis, keeping the parenthesis in the token array
private static final String SPLIT_REGEXP = "\\s+|((?<=\\" + PREFIX + ")|(?=\\" + PREFIX + "))|((?<=\\" + SUFFIX + ")|(?=\\" + SUFFIX + "))";
public enum Hint {
/**
* Forces a range scan to be used to process the query.
*/
RANGE_SCAN,
/**
* Forces a skip scan to be used to process the query.
*/
SKIP_SCAN,
/**
* Prevents the usage of child-parent-join optimization.
*/
NO_CHILD_PARENT_JOIN_OPTIMIZATION,
/**
* Prevents the usage of indexes, forcing usage
* of the data table for a query.
*/
NO_INDEX,
/**
* Hint of the form INDEX(<table_name> <index_name>...)
* to suggest usage of the index if possible. The first
* usable index in the list of indexes will be choosen.
* Table and index names may be surrounded by double quotes
* if they are case sensitive.
*/
INDEX,
/**
* All things being equal, use the data table instead of
* the index table when optimizing.
*/
USE_DATA_OVER_INDEX_TABLE,
/**
* All things being equal, use the index table instead of
* the data table when optimizing.
*/
USE_INDEX_OVER_DATA_TABLE,
/**
* Avoid caching any HBase blocks loaded by this query.
*/
NO_CACHE,
/**
* Use sort-merge join algorithm instead of broadcast join (hash join) algorithm.
*/
USE_SORT_MERGE_JOIN,
/**
* Avoid using star-join optimization. Used for broadcast join (hash join) only.
*/
NO_STAR_JOIN,
/**
* Avoid using the no seek optimization. When there are many columns which are not selected coming in between 2
* selected columns and/or versions of columns, this should be used.
*/
SEEK_TO_COLUMN,
/**
* Avoid seeks to select specified columns. When there are very less number of columns which are not selected in
* between 2 selected columns this will be give better performance.
*/
NO_SEEK_TO_COLUMN,
/**
* Saves an RPC call on the scan. See Scan.setSmall(true) in HBase documentation.
*/
SMALL,
/**
* Enforces a serial scan.
*/
SERIAL,
};
private final Map<Hint,String> hints;
/**
 * Returns a copy of {@code hintNode} with {@code hint} added and no
 * argument value recorded for it.
 */
public static HintNode create(HintNode hintNode, Hint hint) {
    return create(hintNode, hint, "");
}
/**
 * Returns a copy of {@code hintNode} with {@code hint} mapped to
 * {@code value}, replacing any previous value for that hint.
 */
public static HintNode create(HintNode hintNode, Hint hint, String value) {
    Map<Hint,String> updated = new HashMap<Hint,String>(hintNode.hints);
    updated.put(hint, value);
    return new HintNode(updated);
}
/**
 * Merges two hint sets; entries in {@code override} win over entries
 * in {@code hintNode} for the same hint.
 */
public static HintNode combine(HintNode hintNode, HintNode override) {
    Map<Hint,String> merged = new HashMap<Hint,String>(hintNode.hints);
    merged.putAll(override.hints);
    return new HintNode(merged);
}
/**
 * Returns a copy of {@code hintNode} with every hint in {@code remove}
 * dropped; hints not present are silently ignored.
 */
public static HintNode subtract(HintNode hintNode, Hint[] remove) {
    Map<Hint,String> remaining = new HashMap<Hint,String>(hintNode.hints);
    for (Hint unwanted : remove) {
        remaining.remove(unwanted);
    }
    return new HintNode(remaining);
}
// Creates an empty hint set.
private HintNode() {
    this.hints = new HashMap<Hint,String>();
}

// Snapshots the given map; the instance is immutable from here on.
private HintNode(Map<Hint,String> hints) {
    this.hints = ImmutableMap.copyOf(hints);
}
/**
 * Parses the body of a query hint comment (the text between the hint
 * delimiters) into a hint-to-argument map. Unknown hint words are ignored.
 * Repeated hints have their argument strings concatenated.
 */
public HintNode(String hint) {
Map<Hint,String> hints = new HashMap<Hint,String>();
// Split on whitespace or parenthesis. We do not need to handle escaped or
// embedded whitespace/parenthesis, since we are parsing what will be HBase
// table names which are not allowed to contain whitespace or parenthesis.
String[] hintWords = hint.split(SPLIT_REGEXP);
for (int i = 0; i < hintWords.length; i++) {
String hintWord = hintWords[i];
if (hintWord.isEmpty()) {
continue;
}
try {
// Hint words are matched case-insensitively against the Hint enum.
Hint key = Hint.valueOf(hintWord.toUpperCase());
String hintValue = "";
// A "(" immediately after the hint word starts its argument list.
if (i+1 < hintWords.length && PREFIX.equals(hintWords[i+1])) {
StringBuffer hintValueBuf = new StringBuffer(hint.length());
hintValueBuf.append(PREFIX);
// Skip past the hint word and the opening parenthesis.
i+=2;
while (i < hintWords.length && !SUFFIX.equals(hintWords[i])) {
hintValueBuf.append(SchemaUtil.normalizeIdentifier(hintWords[i++]));
hintValueBuf.append(SEPARATOR);
}
// Replace trailing separator with suffix
// NOTE(review): for an empty argument list like "INDEX()" this replaces
// the opening parenthesis itself, yielding just ")" — presumably callers
// never emit empty parens; worth confirming.
hintValueBuf.replace(hintValueBuf.length()-1, hintValueBuf.length(), SUFFIX);
hintValue = hintValueBuf.toString();
}
String oldValue = hints.put(key, hintValue);
// Concatenate together any old value with the new value
if (oldValue != null) {
hints.put(key, oldValue + hintValue);
}
} catch (IllegalArgumentException e) { // Ignore unknown/invalid hints
}
}
this.hints = ImmutableMap.copyOf(hints);
}
/**
 * @return true when no hints were specified in the query
 */
public boolean isEmpty() {
    return this.hints.isEmpty();
}
/**
 * Looks up the argument string recorded for a hint.
 *
 * @param hint the hint
 * @return the value specified in parenthesis following the hint
 *         ("" when the hint had no arguments), or null if the hint
 *         is not present
 */
public String getHint(Hint hint) {
    return this.hints.get(hint);
}
/**
 * Tests whether the given hint was specified in the query.
 *
 * @param hint the hint to test for
 * @return true if the hint is present and false otherwise
 */
public boolean hasHint(Hint hint) {
    return this.hints.containsKey(hint);
}
@Override
public String toString() {
    // Render back into hint-comment form, e.g. "/*+ NO_INDEX INDEX(t idx) */ ".
    if (hints.isEmpty()) {
        return StringUtil.EMPTY_STRING;
    }
    StringBuilder sb = new StringBuilder("/*+ ");
    for (Map.Entry<Hint, String> hint : hints.entrySet()) {
        sb.append(hint.getKey()).append(hint.getValue()).append(' ');
    }
    return sb.append("*/ ").toString();
}
@Override
public int hashCode() {
    // Equivalent to the usual prime * 1 + fieldHash expansion.
    return 31 + (hints == null ? 0 : hints.hashCode());
}
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    HintNode other = (HintNode) obj;
    // Two hint nodes are equal iff their hint maps are equal.
    return hints == null ? other.hints == null : hints.equals(other.hints);
}
}
| shehzaadn/phoenix | phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java | Java | apache-2.0 | 8,329 |
/*
* Copyright 2012-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.autoconfigure.management;
import org.junit.Test;
import org.springframework.boot.actuate.management.ThreadDumpEndpoint;
import org.springframework.boot.autoconfigure.AutoConfigurations;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link ThreadDumpEndpointAutoConfiguration}.
*
* @author Phillip Webb
*/
public class ThreadDumpEndpointAutoConfigurationTests {

	// Context runner preloaded with only the auto-configuration under test.
	private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
			.withConfiguration(
					AutoConfigurations.of(ThreadDumpEndpointAutoConfiguration.class));

	@Test
	public void runShouldHaveEndpointBean() {
		// By default the endpoint bean is registered.
		this.contextRunner.run((context) -> assertThat(context)
				.hasSingleBean(ThreadDumpEndpoint.class));
	}

	@Test
	public void runWhenEnabledPropertyIsFalseShouldNotHaveEndpointBean() {
		// Disabling via the enabled property suppresses bean creation.
		this.contextRunner
				.withPropertyValues("management.endpoint.threaddump.enabled:false")
				.run((context) -> assertThat(context)
						.doesNotHaveBean(ThreadDumpEndpoint.class));
	}

}
| bclozel/spring-boot | spring-boot-project/spring-boot-actuator-autoconfigure/src/test/java/org/springframework/boot/actuate/autoconfigure/management/ThreadDumpEndpointAutoConfigurationTests.java | Java | apache-2.0 | 1,752 |
/*
* Copyright 2003-2018 Dave Griffith, Bas Leijdekkers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.siyeh.ig.migration;
import com.intellij.codeInspection.ProblemDescriptor;
import com.intellij.codeInspection.ui.SingleCheckboxOptionsPanel;
import com.intellij.openapi.project.Project;
import com.intellij.psi.*;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.psi.util.PsiUtil;
import com.siyeh.InspectionGadgetsBundle;
import com.siyeh.ig.BaseInspection;
import com.siyeh.ig.BaseInspectionVisitor;
import com.siyeh.ig.InspectionGadgetsFix;
import com.siyeh.ig.PsiReplacementUtil;
import com.siyeh.ig.psiutils.*;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.util.HashMap;
import java.util.Map;
public class UnnecessaryUnboxingInspection extends BaseInspection {
// Inspection option: when true, only report unboxing calls whose result is
// immediately re-boxed (i.e. the expected type is an object type). Public
// field is required for the settings serializer.
@SuppressWarnings("PublicField")
public boolean onlyReportSuperfluouslyUnboxed = false;
// Maps each wrapper class name to its explicit unboxing method name.
@NonNls static final Map<String, String> s_unboxingMethods = new HashMap<>(8);
static {
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_INTEGER, "intValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_SHORT, "shortValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_BOOLEAN, "booleanValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_LONG, "longValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_BYTE, "byteValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_FLOAT, "floatValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_DOUBLE, "doubleValue");
s_unboxingMethods.put(CommonClassNames.JAVA_LANG_CHARACTER, "charValue");
}
// Display name shown in the inspection settings UI.
@Override
@NotNull
public String getDisplayName() {
return InspectionGadgetsBundle.message(
"unnecessary.unboxing.display.name");
}
// Problem message shown on each highlighted unboxing call.
@Override
@NotNull
protected String buildErrorString(Object... infos) {
return InspectionGadgetsBundle.message(
"unnecessary.unboxing.problem.descriptor");
}
@Override
public boolean isEnabledByDefault() {
return true;
}
// Settings panel with the single "only superfluously unboxed" checkbox,
// bound to the onlyReportSuperfluouslyUnboxed field.
@Nullable
@Override
public JComponent createOptionsPanel() {
return new SingleCheckboxOptionsPanel(InspectionGadgetsBundle.message("unnecessary.unboxing.superfluous.option"),
this, "onlyReportSuperfluouslyUnboxed");
}
@Override
public InspectionGadgetsFix buildFix(Object... infos) {
return new UnnecessaryUnboxingFix();
}
// Quick fix that removes the redundant unboxing call, replacing e.g.
// "x.intValue()" with "x" (and "Boolean.TRUE.booleanValue()" with "true").
private static class UnnecessaryUnboxingFix extends InspectionGadgetsFix {
@Override
@NotNull
public String getFamilyName() {
return InspectionGadgetsBundle.message(
"unnecessary.unboxing.remove.quickfix");
}
@Override
public void doFix(Project project, ProblemDescriptor descriptor) {
final PsiMethodCallExpression methodCall = (PsiMethodCallExpression)descriptor.getPsiElement();
final PsiReferenceExpression methodExpression = methodCall.getMethodExpression();
final PsiExpression qualifier = methodExpression.getQualifierExpression();
// The qualifier is the boxed value; parentheses around it are irrelevant.
final PsiExpression strippedQualifier = ParenthesesUtils.stripParentheses(qualifier);
if (strippedQualifier == null) {
return;
}
CommentTracker commentTracker = new CommentTracker();
if (strippedQualifier instanceof PsiReferenceExpression) {
final PsiReferenceExpression referenceExpression = (PsiReferenceExpression)strippedQualifier;
final PsiElement element = referenceExpression.resolve();
if (element instanceof PsiField) {
final PsiField field = (PsiField)element;
final PsiClass containingClass = field.getContainingClass();
if (containingClass == null) {
return;
}
// Special-case Boolean.TRUE/Boolean.FALSE: replace the whole call
// with the boolean literal rather than the field reference.
final String classname = containingClass.getQualifiedName();
if (CommonClassNames.JAVA_LANG_BOOLEAN.equals(classname)) {
@NonNls final String name = field.getName();
if ("TRUE".equals(name)) {
PsiReplacementUtil.replaceExpression(methodCall, "true", commentTracker);
return;
}
else if ("FALSE".equals(name)) {
PsiReplacementUtil.replaceExpression(methodCall, "false", commentTracker);
return;
}
}
}
}
// General case: replace the call with its (comment-preserving) qualifier text.
final String strippedQualifierText = commentTracker.text(strippedQualifier);
PsiReplacementUtil.replaceExpression(methodCall, strippedQualifierText, commentTracker);
}
}
// Autoboxing/unboxing only exists since Java 5, so skip older language levels.
@Override
public boolean shouldInspect(PsiFile file) {
return PsiUtil.isLanguageLevel5OrHigher(file);
}
@Override
public BaseInspectionVisitor buildVisitor() {
return new UnnecessaryUnboxingVisitor();
}
// Visitor that flags explicit unboxing calls (e.g. intValue()) whose result
// would also be produced by the compiler's automatic unboxing.
private class UnnecessaryUnboxingVisitor extends BaseInspectionVisitor {
@Override
public void visitMethodCallExpression(@NotNull PsiMethodCallExpression expression) {
super.visitMethodCallExpression(expression);
if (!isUnboxingExpression(expression)) {
return;
}
final PsiReferenceExpression methodExpression = expression.getMethodExpression();
final PsiExpression qualifier = methodExpression.getQualifierExpression();
if (qualifier == null || isUnboxingNecessary(expression, qualifier)) {
return;
}
registerError(expression);
}
// Returns true when removing the explicit unboxing call could change
// semantics or resolution, i.e. the call must be kept.
private boolean isUnboxingNecessary(@NotNull PsiExpression expression, @NotNull PsiExpression unboxedExpression) {
PsiElement parent = expression.getParent();
// Look through any wrapping parentheses to the semantically relevant parent.
while (parent instanceof PsiParenthesizedExpression) {
expression = (PsiExpression)parent;
parent = parent.getParent();
}
if (parent instanceof PsiPolyadicExpression) {
// In == / != chains, removing the unboxing could turn a primitive
// comparison into a reference comparison.
final PsiPolyadicExpression polyadicExpression = (PsiPolyadicExpression)parent;
if (isPossibleObjectComparison(expression, polyadicExpression)) {
return true;
}
}
if (parent instanceof PsiTypeCastExpression) {
final PsiTypeCastExpression typeCastExpression = (PsiTypeCastExpression)parent;
final PsiTypeElement typeElement = typeCastExpression.getCastType();
if (typeElement == null) {
return true;
}
// Keep the call when the cast would not accept the boxed type directly.
final PsiType castType = typeElement.getType();
final PsiType expressionType = expression.getType();
if (expressionType == null || !castType.isAssignableFrom(expressionType)) {
return true;
}
}
else if (parent instanceof PsiConditionalExpression) {
// In a ?: both branches influence the result type; if the sibling
// branch is not primitive, removing the unboxing changes the
// conditional's type.
final PsiConditionalExpression conditionalExpression = (PsiConditionalExpression)parent;
final PsiExpression thenExpression = conditionalExpression.getThenExpression();
if (thenExpression == null) {
return true;
}
final PsiExpression elseExpression = conditionalExpression.getElseExpression();
if (elseExpression == null) {
return true;
}
if (PsiTreeUtil.isAncestor(thenExpression, expression, false)) {
final PsiType type = elseExpression.getType();
if (!(type instanceof PsiPrimitiveType)) {
return true;
}
}
else if (PsiTreeUtil.isAncestor(elseExpression, expression, false)) {
final PsiType type = thenExpression.getType();
if (!(type instanceof PsiPrimitiveType)) {
return true;
}
}
}
else if (MethodCallUtils.isNecessaryForSurroundingMethodCall(expression, unboxedExpression)) {
// Removing the call could change overload resolution of the
// surrounding method call.
return true;
}
// Removing the call must not change which functional interface a
// surrounding lambda is inferred to.
if (!LambdaUtil.isSafeLambdaReturnValueReplacement(expression, unboxedExpression)) return true;
if (onlyReportSuperfluouslyUnboxed) {
// Option: only report when the unboxed value is immediately re-boxed
// (the context expects an object type).
final PsiType expectedType = ExpectedTypeUtils.findExpectedType(expression, false, true);
if (!(expectedType instanceof PsiClassType)) {
return true;
}
}
return false;
}
// True when the polyadic expression is an == / != whose other operand is
// non-primitive or also explicitly unboxed, so removing this unboxing
// could produce a reference comparison.
private boolean isPossibleObjectComparison(PsiExpression expression, PsiPolyadicExpression polyadicExpression) {
if (!ComparisonUtils.isEqualityComparison(polyadicExpression)) {
return false;
}
for (PsiExpression operand : polyadicExpression.getOperands()) {
if (operand == expression) {
continue;
}
if (!(operand.getType() instanceof PsiPrimitiveType) || isUnboxingExpression(operand)) {
return true;
}
}
return false;
}
// True when the expression is a call of the dedicated unboxing method of
// one of the wrapper classes (Integer.intValue(), Boolean.booleanValue(), ...).
private boolean isUnboxingExpression(PsiExpression expression) {
if (!(expression instanceof PsiMethodCallExpression)) {
return false;
}
final PsiMethodCallExpression methodCallExpression = (PsiMethodCallExpression)expression;
final PsiReferenceExpression methodExpression = methodCallExpression.getMethodExpression();
final PsiExpression qualifier = methodExpression.getQualifierExpression();
if (qualifier == null) {
return false;
}
final PsiType qualifierType = qualifier.getType();
if (qualifierType == null) {
return false;
}
final String qualifierTypeName = qualifierType.getCanonicalText();
if (!s_unboxingMethods.containsKey(qualifierTypeName)) {
return false;
}
final String methodName = methodExpression.getReferenceName();
final String unboxingMethod = s_unboxingMethods.get(qualifierTypeName);
return unboxingMethod.equals(methodName);
}
}
} | paplorinc/intellij-community | plugins/InspectionGadgets/InspectionGadgetsAnalysis/src/com/siyeh/ig/migration/UnnecessaryUnboxingInspection.java | Java | apache-2.0 | 9,944 |
package de.lessvoid.nifty.render.batch.spi;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import javax.annotation.Nonnull;
/**
* @author Aaron Mahan <[email protected]>
*/
/**
 * Factory for NIO buffers whose byte order is the platform's native order.
 * Implementations typically wrap {@code ByteBuffer.allocateDirect(...)} or
 * similar so the buffers can be handed to native rendering APIs.
 */
public interface BufferFactory {
  /**
   * Creates a byte buffer of the given capacity in native byte order.
   *
   * @param numBytes capacity in bytes
   * @return a new native-ordered buffer
   */
  @Nonnull
  ByteBuffer createNativeOrderedByteBuffer(int numBytes);

  /**
   * Creates a float buffer of the given capacity in native byte order.
   *
   * @param numFloats capacity in floats
   * @return a new native-ordered buffer
   */
  @Nonnull
  FloatBuffer createNativeOrderedFloatBuffer(int numFloats);

  /**
   * Creates an int buffer of the given capacity in native byte order.
   *
   * @param numInts capacity in ints
   * @return a new native-ordered buffer
   */
  @Nonnull
  IntBuffer createNativeOrderedIntBuffer(int numInts);
}
| void256/nifty-gui | nifty-core/src/main/java/de/lessvoid/nifty/render/batch/spi/BufferFactory.java | Java | bsd-2-clause | 511 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.permissions;
import androidx.test.filters.MediumTest;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.chromium.base.test.util.CommandLineFlags;
import org.chromium.base.test.util.Feature;
import org.chromium.chrome.browser.flags.ChromeSwitches;
import org.chromium.chrome.browser.permissions.PermissionTestRule.PermissionUpdateWaiter;
import org.chromium.chrome.browser.tab.Tab;
import org.chromium.chrome.test.ChromeJUnit4ClassRunner;
import org.chromium.chrome.test.util.browser.LocationSettingsTestUtil;
import org.chromium.content_public.browser.test.util.TestThreadUtils;
import org.chromium.device.geolocation.LocationProviderOverrider;
import org.chromium.device.geolocation.MockLocationProvider;
/**
* Test suite for Geo-Location functionality.
*
* These tests rely on the device being specially setup (which the bots do automatically):
* - Global location is enabled.
* - Google location is enabled.
*/
@RunWith(ChromeJUnit4ClassRunner.class)
@CommandLineFlags.Add({ChromeSwitches.DISABLE_FIRST_RUN_EXPERIENCE})
public class GeolocationTest {
    @Rule
    public PermissionTestRule mPermissionRule = new PermissionTestRule();

    private static final String TEST_FILE = "/content/test/data/android/geolocation.html";
    // NOTE: currently unreferenced; kept for parity with related histogram tests.
    private static final String PERSIST_ACCEPT_HISTOGRAM =
            "Permissions.Prompt.Accepted.Persisted.Geolocation";

    public GeolocationTest() {}

    @Before
    public void setUp() throws Exception {
        mPermissionRule.setUpActivity();
        // Pretend the system location setting is on and feed mock fixes.
        LocationSettingsTestUtil.setSystemLocationSettingEnabled(true);
        LocationProviderOverrider.setLocationProviderImpl(new MockLocationProvider());
    }

    /**
     * Runs the given geolocation javascript in the test page, grants the
     * permission prompt, and waits for {@code nUpdates} position updates.
     */
    private void runTest(String javascript, int nUpdates, boolean withGesture, boolean isDialog)
            throws Exception {
        final Tab tab = mPermissionRule.getActivity().getActivityTab();
        final PermissionUpdateWaiter waiter =
                new PermissionUpdateWaiter("Count:", mPermissionRule.getActivity());
        TestThreadUtils.runOnUiThreadBlocking(() -> tab.addObserver(waiter));
        mPermissionRule.runAllowTest(waiter, TEST_FILE, javascript, nUpdates, withGesture, isDialog);
        TestThreadUtils.runOnUiThreadBlocking(() -> tab.removeObserver(waiter));
    }

    /**
     * Verify Geolocation creates a dialog and receives a mock location.
     * @throws Exception
     */
    @Test
    @MediumTest
    @Feature({"Location", "Main"})
    public void testGeolocationPlumbingAllowedDialog() throws Exception {
        runTest("initiate_getCurrentPosition()", 1, true, true);
    }

    /**
     * Verify Geolocation creates a dialog and receives a mock location when dialogs are
     * enabled and there is no user gesture.
     * @throws Exception
     */
    @Test
    @MediumTest
    @Feature({"Location", "Main"})
    public void testGeolocationPlumbingAllowedDialogNoGesture() throws Exception {
        runTest("initiate_getCurrentPosition()", 1, false, true);
    }

    /**
     * Verify Geolocation creates a dialog and receives multiple locations.
     * @throws Exception
     */
    @Test
    @MediumTest
    @Feature({"Location"})
    public void testGeolocationWatchDialog() throws Exception {
        runTest("initiate_watchPosition()", 2, true, true);
    }
}
| ric2b/Vivaldi-browser | chromium/chrome/android/javatests/src/org/chromium/chrome/browser/permissions/GeolocationTest.java | Java | bsd-3-clause | 3,570 |
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.effect.potion;
import org.spongepowered.api.CatalogType;
import org.spongepowered.api.text.translation.Translatable;
import org.spongepowered.api.text.translation.Translation;
import org.spongepowered.api.util.annotation.CatalogedBy;
/**
* Represents a possible type of {@link PotionEffect}.
*/
@CatalogedBy(PotionEffectTypes.class)
public interface PotionEffectType extends CatalogType, Translatable {
/**
* Gets whether this potion effect is applied instantly or over time.
*
* @return If applied instantly.
*/
boolean isInstant();
/**
* Gets the {@link Translation} for this potion effect type as a potion
* name.
*
* @return The translation representing this effect as potion
*/
Translation getPotionTranslation();
}
| kashike/SpongeAPI | src/main/java/org/spongepowered/api/effect/potion/PotionEffectType.java | Java | mit | 2,070 |
/*
* Copyright (c) 1998-2012 Caucho Technology -- all rights reserved
*
* This file is part of Resin(R) Open Source
*
* Each copy or derived work must preserve the copyright notice and this
* notice unmodified.
*
* Resin Open Source is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Resin Open Source is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or any warranty
* of NON-INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with Resin Open Source; if not, write to the
* Free SoftwareFoundation, Inc.
* 59 Temple Place, Suite 330
* Boston, MA 02111-1307 USA
*
* @author Scott Ferguson
*/
package com.caucho.xml;
import com.caucho.util.CharBuffer;
import org.xml.sax.Attributes;
/**
 * Reusable SAX {@link Attributes} implementation backed by parallel
 * name/value arrays. Cleared and refilled for each element during parsing;
 * not thread-safe. All attributes report type "CDATA".
 */
class QAttributes implements Attributes {
  QName []names = new QName[32];
  String []values = new String[32];
  int size;

  /** Resets the attribute list without releasing the backing arrays. */
  void clear()
  {
    size = 0;
  }

  /** Appends a name/value pair, doubling the backing arrays when full. */
  void add(QName name, String value)
  {
    if (size == names.length) {
      QName []newNames = new QName[2 * names.length];
      String []newValues = new String[2 * names.length];
      System.arraycopy(names, 0, newNames, 0, names.length);
      System.arraycopy(values, 0, newValues, 0, names.length);
      names = newNames;
      values = newValues;
    }

    names[size] = name;
    values[size] = value;
    size++;
  }

  public int getLength()
  {
    return size;
  }

  /** Returns the structured name at the given index (Caucho extension). */
  public QName getName(int i)
  {
    return names[i];
  }

  public String getQName(int i)
  {
    return names[i].getName();
  }

  // SAX requires "" (not null) for attributes without a namespace.
  public String getURI(int i)
  {
    String uri = names[i].getNamespaceURI();

    if (uri != null)
      return uri;
    else
      return "";
  }

  public String getLocalName(int i)
  {
    String name = names[i].getLocalName();

    if (name != null)
      return name;
    else
      return "";
  }

  public String getValue(int i)
  {
    return values[i];
  }

  public String getValue(String qName)
  {
    for (int i = 0; i < size; i++) {
      if (qName.equals(names[i].getName()))
        return values[i];
    }

    return null;
  }

  public String getValue(String uri, String localName)
  {
    for (int i = 0; i < size; i++) {
      String testURI = names[i].getNamespaceURI();

      if (testURI == null)
        testURI = "";

      if (uri.equals(testURI) && localName.equals(names[i].getLocalName()))
        return values[i];
    }

    return null;
  }

  public int getIndex(String qName)
  {
    for (int i = 0; i < size; i++) {
      if (qName.equals(names[i].getName()))
        return i;
    }

    return -1;
  }

  public int getIndex(String uri, String localName)
  {
    for (int i = 0; i < size; i++) {
      // Normalize a null namespace URI to "" so that getIndex("", local)
      // agrees with getValue(uri, localName) and getURI(i), which already
      // treat a missing namespace as the empty string.
      String testURI = names[i].getNamespaceURI();

      if (testURI == null)
        testURI = "";

      if (uri.equals(testURI) && localName.equals(names[i].getLocalName()))
        return i;
    }

    return -1;
  }

  // SAX attribute type queries: this parser does not track DTD attribute
  // types, so everything is reported as CDATA per the SAX recommendation.
  public String getType(int i)
  {
    return "CDATA";
  }

  public String getType(String uri, String localName)
  {
    return "CDATA";
  }

  public String getType(String qName)
  {
    return "CDATA";
  }

  public String toString()
  {
    CharBuffer cb = CharBuffer.allocate();
    cb.append("[QAttributes");
    for (int i = 0; i < size; i++) {
      cb.append(" ");
      cb.append(names[i]);
      cb.append("=\"");
      cb.append(values[i]);
      cb.append("\"");
    }
    cb.append("]");

    return cb.close();
  }
}
| dwango/quercus | src/main/java/com/caucho/xml/QAttributes.java | Java | gpl-2.0 | 3,777 |
package org.zarroboogs.weibo.hot.bean.huatidetail;
import org.json.*;
/**
 * Simple value holder for a topic title/url pair, optionally populated
 * from a JSON object with "topic_title" and "topic_url" keys.
 */
public class TopicStruct {

    private String topicTitle;
    private String topicUrl;

    public TopicStruct () {
    }

    public TopicStruct (JSONObject json) {
        topicTitle = json.optString("topic_title");
        topicUrl = json.optString("topic_url");
    }

    public String getTopicTitle() {
        return topicTitle;
    }

    public void setTopicTitle(String topicTitle) {
        this.topicTitle = topicTitle;
    }

    public String getTopicUrl() {
        return topicUrl;
    }

    public void setTopicUrl(String topicUrl) {
        this.topicUrl = topicUrl;
    }
}
| bestwpw/iBeebo | app/src/main/java/org/zarroboogs/weibo/hot/bean/huatidetail/TopicStruct.java | Java | gpl-3.0 | 721 |
/*
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
*/
package org.zeromq.czmq;
import org.junit.Assert;
import org.junit.Test;
public class ZstrTest {
    static {
        try {
            System.loadLibrary ("czmqjni");
        }
        catch (Throwable e) {
            // System.loadLibrary reports failure as UnsatisfiedLinkError (an
            // Error, not an Exception), so catching Exception alone would
            // never trigger this fallback and the JVM would die with a raw
            // stack trace instead of exiting cleanly.
            System.exit (-1);
        }
    }

    /** Runs the native Zstr self-test (non-verbose). */
    @Test
    public void test () {
        Zstr.test (false);
    }
}
| eburkitt/czmq | bindings/jni/src/test/java/org/zeromq/czmq/ZstrTest.java | Java | mpl-2.0 | 657 |
/*
* SonarQube, open source software quality management tool.
* Copyright (C) 2008-2014 SonarSource
* mailto:contact AT sonarsource DOT com
*
* SonarQube is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* SonarQube is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.api.issue;
import com.google.common.collect.ImmutableList;
import java.io.Serializable;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.annotation.CheckForNull;
import org.sonar.api.batch.BatchSide;
import org.sonar.api.rule.RuleKey;
import org.sonar.api.utils.Duration;
/**
* @since 3.6
*/
public interface Issue extends Serializable {

/**
 * Maximum number of characters in the message.
 */
int MESSAGE_MAX_SIZE = 4000;

/**
 * Default status when creating an issue.
 */
String STATUS_OPEN = "OPEN";
// Status set when a user confirms the issue is real.
String STATUS_CONFIRMED = "CONFIRMED";
// Status set when a closed/resolved issue is opened again.
String STATUS_REOPENED = "REOPENED";
// Status set when a resolution has been applied (see RESOLUTION_* constants).
String STATUS_RESOLVED = "RESOLVED";
// Terminal status; closeDate() is set when this status is reached.
String STATUS_CLOSED = "CLOSED";

/**
 * Resolution when the issue no longer exists in the analyzed code.
 */
String RESOLUTION_FIXED = "FIXED";

/**
 * Resolution when issue is flagged as false positive.
 */
String RESOLUTION_FALSE_POSITIVE = "FALSE-POSITIVE";

/**
 * Resolution when rule has been uninstalled or disabled in the Quality profile.
 */
String RESOLUTION_REMOVED = "REMOVED";

/**
 * Issue is irrelevant in the context and was muted by user.
 * @since 5.1
 */
String RESOLUTION_WONT_FIX = "WONTFIX";

// All possible resolutions.
List<String> RESOLUTIONS = ImmutableList.of(RESOLUTION_FALSE_POSITIVE, RESOLUTION_WONT_FIX, RESOLUTION_FIXED, RESOLUTION_REMOVED);

/**
 * Return all available statuses
 *
 * @since 4.4
 */
List<String> STATUSES = ImmutableList.of(STATUS_OPEN, STATUS_CONFIRMED, STATUS_REOPENED, STATUS_RESOLVED, STATUS_CLOSED);

/**
 * Unique generated key. It looks like "d2de809c-1512-4ae2-9f34-f5345c9f1a13".
 */
String key();

/**
 * Components are modules ("my_project"), directories ("my_project:my/dir") or files ("my_project:my/file.c").
 * Keys of Java packages and classes are currently in a special format: "my_project:com.company" and "my_project:com.company.Foo".
 */
String componentKey();

RuleKey ruleKey();

String language();

/**
 * See constants in {@link org.sonar.api.rule.Severity}.
 */
String severity();

@CheckForNull
String message();

/**
 * Optional line number. If set, then it's greater than or equal 1.
 */
@CheckForNull
Integer line();

/**
 * Arbitrary distance to threshold for resolving the issue.
 * <p/>
 * For examples:
 * <ul>
 *   <li>for the rule "Avoid too complex methods" : current complexity - max allowed complexity</li>
 *   <li>for the rule "Avoid Duplications" : number of duplicated blocks</li>
 *   <li>for the rule "Insufficient Line Coverage" : number of lines to cover to reach the accepted threshold</li>
 * </ul>
 */
@CheckForNull
Double effortToFix();

/**
 * See constant values in {@link Issue}.
 */
String status();

/**
 * The type of resolution, or null if the issue is not resolved. See constant values in {@link Issue}.
 */
@CheckForNull
String resolution();

/**
 * Login of the user who reported this issue. Null if the issue is reported by a rule engine.
 */
@CheckForNull
String reporter();

/**
 * Login of the user who is assigned to this issue. Null if the issue is not assigned.
 */
@CheckForNull
String assignee();

Date creationDate();

Date updateDate();

/**
 * Date when status was set to {@link Issue#STATUS_CLOSED}, else null.
 */
@CheckForNull
Date closeDate();

// Free-form key/value attributes attached to the issue.
@CheckForNull
String attribute(String key);

Map<String, String> attributes();

/**
 * Login of the SCM account that introduced this issue. Requires the
 * <a href="http://www.sonarsource.com/products/plugins/developer-tools/developer-cockpit/">Developer Cockpit Plugin</a> to be installed.
 */
@CheckForNull
String authorLogin();

@CheckForNull
String actionPlanKey();

/**
 * Non-null list of comments, ordered by chronological order.
 * <p/>
 * IMPORTANT: existing comments are not loaded when this method is called when analyzing project
 * (from {@link BatchSide}).
 */
List<IssueComment> comments();

/**
 * During a scan return if the current issue is a new one.
 * @return always false on server side
 * @since 4.0
 */
boolean isNew();

/**
 * @since 5.0
 */
@CheckForNull
Duration debt();

/**
 * @since 5.0
 */
String projectKey();

/**
 * @since 5.0
 */
String projectUuid();

/**
 * @since 5.0
 */
String componentUuid();

/**
 * @since 5.1
 */
Collection<String> tags();
}
| sulabh-msft/sonarqube | sonar-plugin-api/src/main/java/org/sonar/api/issue/Issue.java | Java | lgpl-3.0 | 5,315 |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ui.mac.touchbar;
import com.intellij.openapi.actionSystem.AnAction;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.keymap.Keymap;
import com.intellij.openapi.keymap.KeymapManager;
import com.intellij.openapi.project.DumbAwareAction;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.event.KeyEvent;
import java.util.HashMap;
import java.util.Map;
/**
 * Touch Bar action representing a function key (F1..F12). When the active
 * keymap binds the FN key (with the current modifiers) to an IDE action,
 * that action is executed; otherwise a plain FN key press is emulated.
 */
class FNKeyAction extends DumbAwareAction {
  private static final boolean SHOW_ACTION_TEMPLATE_TEXT = Boolean.getBoolean("touchbar.fn.mode.show.template");

  private final int myFN;
  // Cache of modifiers -> action ids bound to this FN key in the active keymap.
  private final Map<Integer, String[]> myCache = new HashMap<>();

  private AnAction myAction; // particular action (from keymap for given modifiers) calculated in last update
  private boolean myIsActionDisabled;

  private String @Nullable[] getActionsIds(int modifiers) {
    final KeymapManager manager = KeymapManager.getInstance();
    if (manager == null)
      return null;

    final @NotNull Keymap keymap = manager.getActiveKeymap();
    String[] result = myCache.get(modifiers);
    if (result != null) {
      return result;
    }
    KeyStroke keyStroke = KeyStroke.getKeyStroke(KeyEvent.VK_F1 + myFN - 1, modifiers);
    result = keymap.getActionIds(keyStroke);
    myCache.put(modifiers, result);
    return result;
  }

  FNKeyAction(int FN) {
    // Clamp to the valid F1..F12 range.
    myFN = Math.max(1, Math.min(FN, 12));
    // TODO: clear cache when keymap changes (or FN-shortcut changes)
    // KeymapManagerEx.getInstanceEx().addWeakListener(new MyKeymapManagerListener);
  }

  int getFN() { return myFN; }
  boolean isActionDisabled() { return myIsActionDisabled; }

  @Override
  public void actionPerformed(@NotNull AnActionEvent e) {
    // Fall back to a raw FN key press when no (enabled) keymap action exists.
    if (myAction == null || myIsActionDisabled) {
      Helpers.emulateKeyPress(KeyEvent.VK_F1 + myFN - 1);
      return;
    }
    myAction.actionPerformed(e);
  }

  @Override
  public void update(@NotNull AnActionEvent e) {
    e.getPresentation().setEnabledAndVisible(true); // FN-keys are always enabled and visible
    e.getPresentation().setText("");
    myIsActionDisabled = false;
    myAction = null;

    final String[] ids = getActionsIds(TouchBarsManager.getLastModifiersEx());
    if (ids == null || ids.length < 1) {
      return;
    }

    // Resolve the first id that maps to an existing action.
    int c = 0;
    myAction = e.getActionManager().getAction(ids[c]);
    while (myAction == null && c + 1 < ids.length) {
      ++c;
      // BUG FIX: the looked-up action was previously discarded, so only
      // ids[0] could ever be resolved; assign it so later ids are tried.
      myAction = e.getActionManager().getAction(ids[c]);
    }
    if (myAction == null) {
      return;
    }

    myAction.update(e);
    myIsActionDisabled = !e.getPresentation().isEnabled();
    e.getPresentation().setEnabledAndVisible(true); // FN-keys are always enabled and visible

    final String text = e.getPresentation().getText();
    if (SHOW_ACTION_TEMPLATE_TEXT || text == null || text.isEmpty()) {
      // replace with template presentation text
      e.getPresentation().setText(myAction.getTemplateText());
    }
  }
}
| siosio/intellij-community | platform/platform-impl/src/com/intellij/ui/mac/touchbar/FNKeyAction.java | Java | apache-2.0 | 3,152 |
/*
* Copyright 2012-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.autoconfigure.data.cassandra;
import java.util.UUID;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.CqlSessionBuilder;
import org.junit.jupiter.api.Test;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.data.redis.ExampleService;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.boot.testsupport.testcontainers.CassandraContainer;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.data.cassandra.core.CassandraTemplate;
import org.springframework.test.context.DynamicPropertyRegistry;
import org.springframework.test.context.DynamicPropertySource;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
/**
* Integration test for {@link DataCassandraTest @DataCassandraTest}.
*
* @author Artsiom Yudovin
*/
@DataCassandraTest(properties = { "spring.data.cassandra.local-datacenter=datacenter1",
		"spring.data.cassandra.schema-action=create-if-not-exists",
		"spring.data.cassandra.connection.connect-timeout=20s",
		"spring.data.cassandra.connection.init-query-timeout=10s", "spring.data.cassandra.request.timeout=10s" })
@Testcontainers(disabledWithoutDocker = true)
class DataCassandraTestIntegrationTests {

	@Container
	static final CassandraContainer cassandra = new CassandraContainer();

	@DynamicPropertySource
	static void cassandraProperties(DynamicPropertyRegistry registry) {
		// Point the auto-configured driver at the container's mapped host and port.
		registry.add("spring.data.cassandra.contact-points",
				() -> cassandra.getHost() + ":" + cassandra.getFirstMappedPort());
	}

	@Autowired
	private CassandraTemplate cassandraTemplate;

	@Autowired
	private ExampleRepository exampleRepository;

	@Autowired
	private ApplicationContext applicationContext;

	@Test
	void didNotInjectExampleService() {
		// The data slice must not pull in unrelated beans such as ExampleService.
		assertThatExceptionOfType(NoSuchBeanDefinitionException.class)
				.isThrownBy(() -> this.applicationContext.getBean(ExampleService.class));
	}

	@Test
	void testRepository() {
		// Persist through the repository, then read back through the template.
		String entityId = UUID.randomUUID().toString();
		ExampleEntity toSave = new ExampleEntity();
		toSave.setDescription("Look, new @DataCassandraTest!");
		toSave.setId(entityId);
		ExampleEntity persisted = this.exampleRepository.save(toSave);
		ExampleEntity reloaded = this.cassandraTemplate.selectOneById(entityId, ExampleEntity.class);
		assertThat(reloaded).isNotNull();
		assertThat(reloaded.getId()).isNotNull();
		assertThat(reloaded.getId()).isEqualTo(persisted.getId());
		this.exampleRepository.deleteAll();
	}

	@TestConfiguration(proxyBeanMethods = false)
	static class KeyspaceTestConfiguration {

		@Bean
		CqlSession cqlSession(CqlSessionBuilder cqlSessionBuilder) {
			// Create the keyspace with a throwaway session, then return one bound to it.
			try (CqlSession session = cqlSessionBuilder.build()) {
				session.execute("CREATE KEYSPACE IF NOT EXISTS boot_test"
						+ " WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };");
			}
			return cqlSessionBuilder.withKeyspace("boot_test").build();
		}

	}

}
| mdeinum/spring-boot | spring-boot-project/spring-boot-test-autoconfigure/src/test/java/org/springframework/boot/test/autoconfigure/data/cassandra/DataCassandraTestIntegrationTests.java | Java | apache-2.0 | 3,946 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.plan;
/**
 * Descriptor for a DROP INDEX DDL operation: which index to drop, on which
 * table, and whether a missing index should raise an error.
 */
public class DropIndexDesc {
  private static final long serialVersionUID = 1L;

  // Name of the index to drop.
  private String indexName;
  // Table the index belongs to.
  private String tableName;
  // When true, a missing index is an error; when false it is silently ignored.
  private boolean throwException;

  /**
   * @param indexName name of the index to drop
   * @param tableName name of the table owning the index
   * @param throwException whether to fail when the index does not exist
   */
  public DropIndexDesc(String indexName, String tableName, boolean throwException) {
    this.indexName = indexName;
    this.tableName = tableName;
    this.throwException = throwException;
  }

  /**
   * @return index name
   */
  public String getIndexName() {
    return this.indexName;
  }

  /**
   * @param indexName index name
   */
  public void setIndexName(String indexName) {
    this.indexName = indexName;
  }

  /**
   * @return table name
   */
  public String getTableName() {
    return this.tableName;
  }

  /**
   * @param tableName table name
   */
  public void setTableName(String tableName) {
    this.tableName = tableName;
  }

  /**
   * @return whether a missing index raises an exception
   */
  public boolean isThrowException() {
    return this.throwException;
  }

  /**
   * @param throwException whether a missing index raises an exception
   */
  public void setThrowException(boolean throwException) {
    this.throwException = throwException;
  }
}
| WANdisco/amplab-hive | ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java | Java | apache-2.0 | 1,909 |
/*
* Copyright (c) 2008, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.carbon.registry.extensions.handlers.utils;
import com.ibm.wsdl.util.xml.DOM2Writer;
import com.ibm.wsdl.xml.WSDLReaderImpl;
import org.apache.axiom.om.OMAbstractFactory;
import org.apache.axiom.om.OMElement;
import org.apache.axiom.om.OMFactory;
import org.apache.axiom.om.OMNamespace;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.wso2.carbon.registry.core.*;
import org.wso2.carbon.registry.core.config.RegistryContext;
import org.wso2.carbon.registry.core.exceptions.RegistryException;
import org.wso2.carbon.registry.core.jdbc.handlers.RequestContext;
import org.wso2.carbon.registry.core.session.CurrentSession;
import org.wso2.carbon.registry.core.utils.RegistryUtils;
import org.wso2.carbon.registry.extensions.services.Utils;
import org.wso2.carbon.registry.extensions.utils.CommonConstants;
import org.wso2.carbon.registry.extensions.utils.CommonUtil;
import org.wso2.carbon.registry.extensions.utils.WSDLUtil;
import org.wso2.carbon.registry.extensions.utils.WSDLValidationInfo;
import org.xml.sax.InputSource;
import javax.wsdl.*;
import javax.wsdl.extensions.schema.Schema;
import javax.wsdl.factory.WSDLFactory;
import javax.wsdl.xml.WSDLReader;
import javax.wsdl.xml.WSDLWriter;
import javax.xml.namespace.QName;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.net.ConnectException;
import java.net.UnknownHostException;
import java.util.*;
import java.util.concurrent.ConcurrentSkipListSet;
/**
 * Evaluates a WSDL (and everything it transitively imports) and stores the
 * resulting documents in the registry, rewriting import/schema locations to
 * registry paths and recording dependency associations.
 */
public class WSDLProcessor {
    private Registry registry;
    // Unchrooted system registry used for creating the common WSDL/schema collections; may be null.
    private Registry systemRegistry;
    // The definition of the WSDL that triggered processing (the "master" WSDL).
    private Definition originalDefinition;
    // Associations collected during processing, persisted by saveAssociations().
    private List<Association> associations;
    private SchemaProcessor schemaProcessor;
    // Counter used to disambiguate colliding generated file names (see extractResourceFromURL usage).
    private int i;
    private String resourceName;
    public static final String IMPORT_TAG = "import";
    public static final String INCLUDE_TAG = "include";
    // Placeholder base URI for WSDLs read from the file system (which have none).
    private static final String SAMPLE_BASE_URL = "http://this.wsdl.needs/a/valid/url/to/proceed.wsdl";
    private static String commonWSDLLocation;
    private static String commonSchemaLocation;
    private static String commonPolicyLocation;
    // Base URIs already visited during recursive import traversal (cycle guard).
    private List<String> visitedWSDLs;
    // File names already assigned, to avoid name collisions in the registry.
    private List<String> processedWSDLs;
    // Base URI -> evaluated WSDL info, in insertion order.
    private Map<String, WSDLInfo> wsdls;
    private WSDLValidationInfo wsdlValidationInfo = null;
    private WSDLValidationInfo wsiValidationInfo = null;
    private boolean hasWSDLImports = false;
    private static Log log = LogFactory.getLog(WSDLProcessor.class);
    // Thread-scoped (inheritable) set of imported WSDL identifiers; managed via
    // loadImportedWSDLMap()/clearImportedWSDLMap().
    private static InheritableThreadLocal<Set<String>> importedWSDLs =
            new InheritableThreadLocal<Set<String>>() {
                protected Set<String> initialValue() {
                    return new ConcurrentSkipListSet<String>();
                }
            };
    // Forces initialization of the thread-local imported-WSDL set for the current thread.
    public static void loadImportedWSDLMap() {
        importedWSDLs.get();
    }
    // Clears the thread-local imported-WSDL set to avoid leaking state across requests.
    public static void clearImportedWSDLMap() {
        importedWSDLs.remove();
    }
    // Whether a service artifact should be created for the master WSDL (default: true).
    public boolean getCreateService() {
        return createService;
    }
    public void setCreateService(boolean createService) {
        this.createService = createService;
    }
    private boolean createService = true;
    private boolean createSOAPService = true;
    // Whether the created service should be a SOAP service (default: true).
    public boolean isCreateSOAPService() {
        return createSOAPService;
    }
    public void setCreateSOAPService(boolean createSOAPService) {
        this.createSOAPService = createSOAPService;
    }
    /**
     * Creates a processor bound to the request's registry and ensures the common
     * schema and WSDL collections exist in the (unchrooted) system registry.
     * If the system registry cannot be obtained it is left {@code null} and
     * collection creation is skipped.
     */
    public WSDLProcessor(RequestContext requestContext) {
        this.registry = requestContext.getRegistry();
        try {
            this.systemRegistry = CommonUtil.getUnchrootedSystemRegistry(requestContext);
            if (!systemRegistry.resourceExists(
                    getChrootedSchemaLocation(requestContext.getRegistryContext()))) {
                systemRegistry.put(getChrootedSchemaLocation(requestContext.getRegistryContext()),
                        systemRegistry.newCollection());
            }
            if (!systemRegistry.resourceExists(
                    getChrootedWSDLLocation(requestContext.getRegistryContext()))) {
                systemRegistry.put(getChrootedWSDLLocation(requestContext.getRegistryContext()),
                        systemRegistry.newCollection());
            }
        } catch (RegistryException ignore) {
            // Best-effort: proceed without a system registry.
            this.systemRegistry = null;
        }
        i = 0;
        associations = new ArrayList<Association>();
        visitedWSDLs = new ArrayList<String>();
        processedWSDLs = new ArrayList<String>();
        schemaProcessor = buildSchemaProcessor(requestContext, null);
        wsdls = new LinkedHashMap<String, WSDLInfo>();
        resourceName = "";
    }
    /**
     * Same as {@link #WSDLProcessor(RequestContext)}, but the schema processor is
     * built with the given useOriginalSchema flag (replacing the default one).
     */
    public WSDLProcessor(RequestContext requestContext, boolean useOriginalSchema) {
        this(requestContext);
        schemaProcessor = buildSchemaProcessor(requestContext, null, useOriginalSchema);
    }
    // Configures the common (relative) registry location for WSDLs.
    public static void setCommonWSDLLocation(String commonWSDLLocation) {
        WSDLProcessor.commonWSDLLocation = commonWSDLLocation;
    }
    // Configures the common (relative) registry location for schemas.
    public static void setCommonSchemaLocation(String commonSchemaLocation) {
        WSDLProcessor.commonSchemaLocation = commonSchemaLocation;
    }
    // Configures the common (relative) registry location for policies.
    public static void setCommonPolicyLocation(String commonPolicyLocation) {
        WSDLProcessor.commonPolicyLocation = commonPolicyLocation;
    }
    // Absolute (chrooted) path of the common WSDL collection under the governance base path.
    public static String getChrootedWSDLLocation(RegistryContext registryContext) {
        return RegistryUtils.getAbsolutePath(registryContext,
                RegistryConstants.GOVERNANCE_REGISTRY_BASE_PATH + WSDLProcessor.commonWSDLLocation);
    }
    // Absolute (chrooted) path of the common schema collection.
    private String getChrootedSchemaLocation(RegistryContext registryContext) {
        return RegistryUtils.getAbsolutePath(registryContext,
                RegistryConstants.GOVERNANCE_REGISTRY_BASE_PATH + commonSchemaLocation);
    }
    // Absolute (chrooted) path of the common policy collection.
    private String getChrootedPolicyLocation(RegistryContext registryContext) {
        return RegistryUtils.getAbsolutePath(registryContext,
                RegistryConstants.GOVERNANCE_REGISTRY_BASE_PATH + commonPolicyLocation);
    }
    // Absolute (chrooted) service path; NOTE(review): appears unused within this file's visible portion.
    private String getChrootedServiceLocation(Registry registry, RegistryContext registryContext) {
        return RegistryUtils.getAbsolutePath(registryContext,
                registry.getRegistryContext().getServicePath()); // service path contains the base
    }
    /**
     * Evaluates the WSDL at wsdlURL (or the resource content when isPut is true),
     * stores it plus all imported WSDLs/schemas in the registry, rewrites import
     * locations, records associations, and optionally creates a service artifact.
     *
     * @param context the request context carrying the resource and paths
     * @param wsdlURL source URL of the WSDL (ignored when isPut is true)
     * @param metadata resource whose metadata is applied to stored WSDLs
     * @param isPut whether content comes from the request resource rather than the URL
     * @param addService whether to create a service for the master WSDL
     * @param skipValidation whether to skip WSDL/WS-I validation
     * @param disableSymLinkCreation whether symlink creation is suppressed
     * @return registry path of the master WSDL
     * @throws RegistryException on evaluation or storage failure
     */
    public String addWSDLToRegistry(
            RequestContext context,
            String wsdlURL,
            Resource metadata, boolean isPut, boolean addService, boolean skipValidation,boolean disableSymLinkCreation)
            throws RegistryException {
        boolean evaluateExports = true;
        boolean isDefaultEnvironment =true;
        String currentWsdlLocation = null;
        String currentSchemaLocation = null;
        String currentPolicyLocation = null ;
        String currentEndpointLocation = null;
        String currentEnvironment = null;
        String masterVersion= null;
        List<String> listOfDependencies = new ArrayList<String>();
        // Default the version property when the caller did not set one.
        String version = context.getResource().getProperty("version");
        if(version == null){
            version = CommonConstants.WSDL_VERSION_DEFAULT_VALUE;
            context.getResource().setProperty("version", version);
        }
        String resourcePath = context.getResourcePath().getPath();
        resourceName = resourcePath.substring(resourcePath.lastIndexOf(RegistryConstants.PATH_SEPARATOR) + 1);
        RegistryContext registryContext = context.getRegistryContext();
        // 3rd parameter is false, for importing WSDLs.
        evaluateWSDLsToDefinitions(wsdlURL, context, evaluateExports, false, isPut, skipValidation);
        String wsdlPath = "";
        // Decide a registry path for every evaluated WSDL and collect its schema dependencies.
        for (WSDLInfo wsdlInfo : wsdls.values()) {
            Definition wsdlDefinition = wsdlInfo.getWSDLDefinition();
            if (wsdlDefinition != null) {
                Types types = wsdlDefinition.getTypes();
                schemaProcessor.evaluateSchemas(types,
                        wsdlDefinition.getDocumentBaseURI(),
                        evaluateExports,
                        wsdlInfo.getSchemaDependencies());
                String wsdlName = wsdlInfo.getProposedRegistryURL();
                int index = wsdlName.lastIndexOf("/");
                String wsdlResourceName = wsdlName.substring(index +1);
                wsdlPath = getWsdlLocation(context, wsdlDefinition, wsdlResourceName, version);
                /*wsdlPath = (getChrootedWSDLLocation(registryContext) + CommonUtil.
                        derivePathFragmentFromNamespace(wsdlDefinition.getTargetNamespace())).
                        replace("//", "/");
                wsdlPath += version + "/" + wsdlResourceName;*/
                if(!resourcePath.contains(wsdlResourceName)){
                    wsdlInfo.setProposedRegistryURL(wsdlPath);
                    continue;
                }
                // The resource already lives outside the common WSDL location: treat this as a
                // custom (non-default) environment and derive the environment-specific locations.
                if (!resourcePath.equals(RegistryConstants.PATH_SEPARATOR+wsdlName)
                    & !resourcePath.contains(commonWSDLLocation) & (registry.resourceExists(resourcePath))) {
                    if(currentWsdlLocation == null){
                        currentEnvironment = resourcePath.substring(0,resourcePath.indexOf(CommonUtil.
                                derivePathFragmentFromNamespace(wsdlDefinition.getTargetNamespace()).replace("//", "/")));
                        String[] pathFragments = commonWSDLLocation.split(RegistryConstants.PATH_SEPARATOR);
                        String wsdlLocation = commonWSDLLocation;
                        String schemaLocation = commonSchemaLocation;
                        String policyLocation = commonPolicyLocation;
                        String endpointLocation = EndpointUtils.getEndpointLocation();
                        // Strip every fragment of the common WSDL location from the other
                        // common locations to obtain environment-relative suffixes.
                        for (String pathFragment : pathFragments) {
                            wsdlLocation = wsdlLocation.replace(pathFragment,"");
                            if(wsdlLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                                wsdlLocation = wsdlLocation.replaceFirst(RegistryConstants.PATH_SEPARATOR,"");
                            }
                            schemaLocation = schemaLocation.replace(pathFragment,"");
                            if(schemaLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                                schemaLocation = schemaLocation.replaceFirst(RegistryConstants.PATH_SEPARATOR,"");
                            }
                            policyLocation = policyLocation.replace(pathFragment,"");
                            if(policyLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                                policyLocation = policyLocation.replaceFirst(RegistryConstants.PATH_SEPARATOR,"");
                            }
                            endpointLocation = endpointLocation.replace(pathFragment,"");
                            if(endpointLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                                endpointLocation = endpointLocation.replaceFirst(RegistryConstants.PATH_SEPARATOR,"");
                            }
                            currentEnvironment = currentEnvironment.replace(pathFragment,"");
                        }
                        // NOTE(review): the policy/endpoint checks below use startsWith while the
                        // wsdl/schema checks use endsWith — looks inconsistent; confirm intent.
                        if(wsdlLocation.endsWith(RegistryConstants.PATH_SEPARATOR)){
                            wsdlLocation = wsdlLocation.substring(0, wsdlLocation.length() - 1);
                        }
                        if(schemaLocation.endsWith(RegistryConstants.PATH_SEPARATOR)){
                            schemaLocation = schemaLocation.substring(0, schemaLocation.length() - 1);
                        }
                        if(policyLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                            policyLocation = policyLocation.substring(0, policyLocation.length() - 1);
                        }
                        if(endpointLocation.startsWith(RegistryConstants.PATH_SEPARATOR)){
                            endpointLocation = endpointLocation.substring(0, endpointLocation.length() - 1);
                        }
                        currentWsdlLocation = currentEnvironment + wsdlLocation;
                        currentSchemaLocation = currentEnvironment + schemaLocation;
                        currentPolicyLocation = currentEnvironment + policyLocation;
                        currentEndpointLocation = currentEnvironment + endpointLocation;
                    }
                    // Derive the existing version segment from the resource path (segment
                    // immediately after the namespace-derived fragment).
                    if(masterVersion == null){
                        String namespaceSegment = CommonUtil.
                                derivePathFragmentFromNamespace(wsdlDefinition.getTargetNamespace()).replace("//", "/");
                        String suffix = resourcePath.substring(resourcePath.indexOf(namespaceSegment)
                                + namespaceSegment.length());
                        masterVersion = suffix.substring(0,suffix.indexOf(RegistryConstants.PATH_SEPARATOR));
                    }
                    wsdlPath = resourcePath;
                    isDefaultEnvironment = false;
                    // Preserve the existing outbound dependencies of the resource being updated.
                    Association[] associations = registry.getAssociations(wsdlPath,CommonConstants.DEPENDS);
                    for (Association association : associations) {
                        if(association.getSourcePath().equals(wsdlPath)){
                            listOfDependencies.add(association.getDestinationPath());
                        }
                    }
                }
                wsdlInfo.setProposedRegistryURL(wsdlPath);
            }
        }
        String symlinkLocation = RegistryUtils.getAbsolutePath(context.getRegistryContext(),
                context.getResource().getProperty(RegistryConstants.SYMLINK_PROPERTY_NAME));
        if (symlinkLocation == null && !(resourcePath.equals("/") || resourcePath.equals(wsdlPath)
                                         || resourcePath.equals(""))) {
            symlinkLocation = RegistryUtils.getParentPath(resourcePath);
        }
        String masterWSDLPath;
        if (!isDefaultEnvironment) {
            // Custom environment: store under the environment-specific locations.
            schemaProcessor.saveSchemasToRegistry(context, currentSchemaLocation,
                    null, null,masterVersion,listOfDependencies,disableSymLinkCreation);
            updateWSDLSchemaLocations();
            masterWSDLPath = saveWSDLsToRepositoryNew(context, symlinkLocation, metadata,currentEndpointLocation
                    ,listOfDependencies,masterVersion,disableSymLinkCreation);// 3rd parameter is false, for importing WSDLs.
            addPolicyImportys(context, version);
            saveAssociations();
        } else {
            // Default environment: store under the common chrooted locations and
            // optionally create the service artifact.
            schemaProcessor.saveSchemasToRegistry(context, getChrootedSchemaLocation(registryContext),
                    null, null, version, listOfDependencies, disableSymLinkCreation);
            updateWSDLSchemaLocations();
            masterWSDLPath = saveWSDLsToRepositoryNew(context, symlinkLocation, metadata,disableSymLinkCreation);// 3rd parameter is false, for importing WSDLs.
            addPolicyImportys(context, version);
            saveAssociations();
            if (addService && getCreateService()) {
                List<OMElement> serviceContentBeans = createServiceContent(masterWSDLPath, metadata);
                for (OMElement serviceContentBean : serviceContentBeans) {
                    if (isCreateSOAPService()) {
                        CommonUtil.addSoapService(serviceContentBean, context);
                    } else {
                        CommonUtil.addService(serviceContentBean, context);
                    }
                }
            }
        }
        return masterWSDLPath;
    }
    /**
     * Computes the registry path a WSDL should be stored at. When an RXT service
     * is available the RXT storage-path expression is expanded (name, namespace,
     * resource properties, version); otherwise the legacy
     * common-WSDL-location + namespace + version layout is used.
     */
    private String getWsdlLocation(RequestContext context, Definition wsdlDefinition, String wsdlResourceName,
                                   String version) {
        if (Utils.getRxtService() != null) {
            String pathExpression = Utils.getRxtService().getStoragePath(RegistryConstants.WSDL_MEDIA_TYPE);
            pathExpression = CommonUtil.replaceExpressionOfPath(pathExpression, "name", wsdlResourceName);
            // Namespace is converted to a path fragment (dots become slashes).
            String namespace = CommonUtil.derivePathFragmentFromNamespace(
                    wsdlDefinition.getTargetNamespace()).replace("//", "/");
            namespace = namespace.replace(".", "/");
            pathExpression = CommonUtil.replaceExpressionOfPath(pathExpression, "namespace", namespace);
            pathExpression = CommonUtil.getPathFromPathExpression(pathExpression,
                    context.getResource().getProperties(), null);
            pathExpression = pathExpression.replace("//", "/");
            pathExpression = CommonUtil.replaceExpressionOfPath(pathExpression, "version", version);
            String wsdlPath = RegistryUtils.getAbsolutePath(context.getRegistryContext(), pathExpression.replace("//", "/"));
            /**
             * Fix for the REGISTRY-3052 : validation is to check the whether this invoked by ZIPWSDLMediaTypeHandler
             * Setting the registry and absolute paths to current session to avoid incorrect resource path entry in REG_LOG table
             */
            if (CurrentSession.getLocalPathMap() != null && !Boolean.valueOf(CurrentSession.getLocalPathMap().get(CommonConstants.ARCHIEVE_UPLOAD))) {
                wsdlPath = CommonUtil.getRegistryPath(context.getRegistry().getRegistryContext(), wsdlPath);
                CurrentSession.getLocalPathMap().remove(context.getResourcePath().getCompletePath());
                if (log.isDebugEnabled()) {
                    log.debug("Saving current session local paths, key: " + wsdlPath + " | value: " + pathExpression);
                }
                CurrentSession.getLocalPathMap().put(wsdlPath, pathExpression);
            }
            return wsdlPath;
        } else {
            // Legacy layout: <common-wsdl-location>/<namespace-fragment>/<version>/<name>.
            String wsdlPath = (getChrootedWSDLLocation(context.getRegistryContext()) +
                               CommonUtil.derivePathFragmentFromNamespace(
                                       wsdlDefinition.getTargetNamespace())).replace("//", "/");
            wsdlPath += version + "/" + wsdlResourceName;
            return wsdlPath;
        }
    }
    /**
     * Imports the policies referenced by each evaluated WSDL's bindings into the
     * registry and records DEPENDS/USED_BY associations between the WSDL and the
     * imported policy. The registry update lock is temporarily released around the
     * import and re-acquired afterwards only if it had been held.
     */
    private void addPolicyImportys(RequestContext context, String version) throws RegistryException {
        /* storing policyReferences in to Registry if available in the WSDL */
        for (WSDLInfo wsdlInfo : wsdls.values()) {
            if(wsdlInfo.isExistPolicyReferences()){
                Iterator iter = wsdlInfo.getPolicyDependencies().iterator();
                while(iter.hasNext()){
                    String policyURL = (String)iter.next();
                    // Remember whether the update lock was held so we can restore it in finally.
                    boolean lockAlreadyAcquired = !CommonUtil.isUpdateLockAvailable();
                    CommonUtil.releaseUpdateLock();
                    try{
                        Resource policyResource = registry.newResource();
                        policyResource.setMediaType("application/policy+xml");
                        String path = policyURL.substring(policyURL.lastIndexOf(RegistryConstants.PATH_SEPARATOR) + 1);
                        // Only import URLs that actually contain a path separator beyond position 0.
                        if(policyURL.lastIndexOf(RegistryConstants.PATH_SEPARATOR) > 0){
                            policyResource.setProperty("version", version);
                            policyResource.setProperties(copyProperties(context));
                            String policyPath = registry.importResource(path ,policyURL,policyResource);
                            registry.addAssociation(policyPath, wsdlInfo.getProposedRegistryURL(), CommonConstants.USED_BY);
                            registry.addAssociation(wsdlInfo.getProposedRegistryURL(), policyPath, CommonConstants.DEPENDS);
                        }
                    }finally {
                        if (lockAlreadyAcquired) {
                            CommonUtil.acquireUpdateLock();
                        }
                    }
                }
            }
        }
    }
/**
* Get Master WSDL
* @return WSDLinfo object of the Master WSDL
*/
public WSDLInfo getMasterWSDLInfo() {
for (WSDLInfo wsdlInfo : wsdls.values()) {
if (wsdlInfo.isMasterWSDL()) {
return wsdlInfo;
}
}
return null;
}
    /**
     * Save associations to the registry if they do not exist.
     * Execution time could be improved if registry provides a better way to check existing associations.
     *
     * @throws RegistryException Thrown in case a association cannot be saved
     */
    private void saveAssociations() throws RegistryException {
        // until registry provides a functionality to check existing associations, this method will consume a LOT of time
        for (Association association : associations) {
            boolean isAssociationExist = false;
            // Fetch everything already associated with the source and compare
            // destination + type to detect duplicates.
            Association[] existingAssociations = registry.getAllAssociations(association.getSourcePath());
            if (existingAssociations != null) {
                for (Association currentAssociation : existingAssociations) {
                    if (currentAssociation.getDestinationPath().equals(association.getDestinationPath()) &&
                        currentAssociation.getAssociationType().equals(association.getAssociationType())) {
                        isAssociationExist = true;
                        break;
                    }
                }
            }
            if (!isAssociationExist) {
                registry.addAssociation(association.getSourcePath(),
                        association.getDestinationPath(),
                        association.getAssociationType());
            }
        }
    }
/**
* Extract an appropriate name for the resource from the given URL
*
* @param wsdlURL, the URL
* @param suffix, the suffix introduced
* @return resource name
*/
private String extractResourceFromURL(String wsdlURL, String suffix) {
String resourceName = wsdlURL;
if (wsdlURL.indexOf("?") > 0) {
resourceName = wsdlURL.substring(0, wsdlURL.indexOf("?")) + suffix;
} else if (wsdlURL.indexOf(".") > 0) {
resourceName = wsdlURL.substring(0, wsdlURL.lastIndexOf(".")) + suffix;
} else if (!wsdlURL.endsWith(".wsdl")) {
resourceName = wsdlURL + suffix;
}
return resourceName;
}
    /**
     * Reads the WSDL (from the request resource when isPut is true, otherwise from
     * wsdlLocation), optionally runs WSDL/WS-I validation (only when the document
     * has no imports), and then recursively evaluates the document plus all of its
     * imports into the {@code wsdls} map.
     *
     * @throws RegistryException if the reader cannot be created or no valid
     *                           definition could be obtained
     */
    private void evaluateWSDLsToDefinitions(String wsdlLocation,
                                            RequestContext context,
                                            boolean evaluateImports,
                                            boolean isServiceImport,
                                            boolean isPut,
                                            boolean skipValidation)
            throws RegistryException {
        WSDLReader wsdlReader;
        Definition wsdlDefinition = null;
        originalDefinition = null;
        try {
            wsdlReader = new ExWSDLReaderImpl(
                    (WSDLReaderImpl) WSDLFactory.newInstance().newWSDLReader());
        } catch (WSDLException e) {
            String msg = "Could not initiate the wsdl reader. Caused by: " + e.getMessage();
            throw new RegistryException(msg);
        }
        // Resolve imported documents eagerly; verbose output only when debugging.
        wsdlReader.setFeature("javax.wsdl.importDocuments", true);
        wsdlReader.setFeature("javax.wsdl.verbose", log.isDebugEnabled());
        try {
            if (isPut) {
                // Content comes from the resource being PUT rather than from a URL.
                ByteArrayInputStream byteArrayInputStream =
                        new ByteArrayInputStream((byte[]) context.getResource().getContent());
                InputSource inputSource = new InputSource(byteArrayInputStream);
                wsdlDefinition = wsdlReader.readWSDL(null, inputSource);
            } else {
                wsdlDefinition = wsdlReader.readWSDL(wsdlLocation);
            }
            originalDefinition = wsdlDefinition;
        } catch (WSDLException e) {
            // Read failures are logged (with cause-specific detail) but not rethrown
            // here; the null-definition check below raises the RegistryException.
            String msg = "Could not evaluate WSDL Definition.";
            if (e.getCause() instanceof ConnectException ||
                e.getCause() instanceof UnknownHostException) {
                msg += " Unable to resolve imported document at '" + wsdlLocation +
                        "'. Connection refused.";
                log.error(msg, e);
            } else if (e.getCause() instanceof IOException) {
                msg += " This WSDL file or one of its imports was not found.";
                log.error(msg, e);
            }
        }
        if (!isServiceImport) {
            processedWSDLs.add(resourceName);
        }
        Map map = null;
        if (wsdlDefinition != null) {
            map = wsdlDefinition.getImports();
        }
        // We perform validation only if there are no wsdl imports
        if (!skipValidation) {
            if (map != null && map.size() == 0) {
                log.trace("Starting WSDL Validation");
                wsdlValidationInfo = WSDLUtils.validateWSDL(context);
                log.trace("Ending WSDL Validation");
                log.trace("Starting WSI Validation");
                wsiValidationInfo = WSDLUtils.validateWSI(context);
                log.trace("Ending WSI Validation");
            } else {
                hasWSDLImports = true;
            }
        }
        if(wsdlDefinition == null) {
            log.trace("Invalid WSDL definition found.");
            throw new RegistryException("Invalid WSDL definition found.");
        }
        evaluateWSDLsToDefinitionsRecursively(wsdlDefinition, evaluateImports, isServiceImport, null, true);
    }
    /**
     * Walks the WSDL import graph depth-first, building a {@code WSDLInfo} per
     * definition: records WSDL/policy dependencies, assigns a unique proposed file
     * name, and registers the info in the {@code wsdls} map keyed by base URI.
     * {@code visitedWSDLs} guards against import cycles.
     */
    private void evaluateWSDLsToDefinitionsRecursively(Definition wsdlDefinition,
                                                       boolean evaluateImports,
                                                       boolean isServiceImport, WSDLInfo parent, boolean masterWSDL)
            throws RegistryException {
        WSDLInfo wsdlInfo = new WSDLInfo();
        wsdlInfo.setMasterWSDL(masterWSDL);
        wsdlInfo.setParent(parent);
        if (evaluateImports) {
            Iterator iter = wsdlDefinition.getImports().values().iterator();
            Vector values;
            Import wsdlImport;
            visitedWSDLs.add(wsdlDefinition.getDocumentBaseURI());
            CommonUtil.addImportedArtifact(new File(wsdlDefinition.getDocumentBaseURI()).toString());
            for (; iter.hasNext();) {
                values = (Vector) iter.next();
                for (Object value : values) {
                    wsdlImport = (Import) value;
                    Definition innerDefinition = wsdlImport.getDefinition();
                    if (innerDefinition == null) {
                        continue;
                    }
                    if (innerDefinition.getTargetNamespace() == null) {
                        // if this import was a schema, WSDL4J will not extract the target
                        // namespace.
                        continue;
                    }
                    wsdlInfo.getWSDLDependencies().add(innerDefinition.getDocumentBaseURI());
                    // Recurse only into WSDLs we have not seen yet (cycle guard).
                    if (!visitedWSDLs.contains(innerDefinition.getDocumentBaseURI())) {
                        evaluateWSDLsToDefinitionsRecursively(
                                innerDefinition,
                                evaluateImports,
                                isServiceImport, wsdlInfo, false);
                    }
                }
            }
        }
        // Collect policy references attached to bindings.
        Iterator iter = wsdlDefinition.getBindings().values().iterator();
        while(iter.hasNext()){
            Binding binding = (Binding)iter.next();
            if(binding.getBindingPolicyReference() != null){
                wsdlInfo.setExistPolicyReferences(true);
                wsdlInfo.getPolicyDependencies().add(binding.getBindingPolicyReference().getURI());
            }
        }
        // Choose a unique file name: the master WSDL keeps the request's resource
        // name; imported WSDLs get a counter-suffixed name on collision.
        String baseURI = wsdlDefinition.getDocumentBaseURI();
        String fileNameToSave;
        if (baseURI != null) {
            String wsdlFileName = baseURI.substring(baseURI.lastIndexOf(RegistryConstants.PATH_SEPARATOR) + 1);
            if ((baseURI.equals(originalDefinition.getDocumentBaseURI()))
                && (!isServiceImport)) {
                fileNameToSave = extractResourceFromURL(resourceName, ".wsdl");
            }
            else {
                fileNameToSave = extractResourceFromURL(wsdlFileName, ".wsdl");
                while (processedWSDLs.contains(fileNameToSave)) {
                    fileNameToSave = extractResourceFromURL(wsdlFileName, (++i) + ".wsdl");
                }
            }
            wsdlInfo.setOriginalURL(baseURI);
        }
        else {
            // This is taken from the file system. So, no base URI is available for the wsdl.
            fileNameToSave = extractResourceFromURL(resourceName, ".wsdl");
            wsdlInfo.setOriginalURL(SAMPLE_BASE_URL);
            wsdlDefinition.setDocumentBaseURI(SAMPLE_BASE_URL);
        }
        wsdlInfo.setWSDLDefinition(wsdlDefinition);
        wsdlInfo.setProposedRegistryURL(fileNameToSave);
        wsdls.put(baseURI, wsdlInfo);
        processedWSDLs.add(fileNameToSave);
    }
    /**
     * Change all schema path locations to registry locations in each WSDL definitions
     * (also rewrites wsdl:import locations first, via updateWSDLocations()).
     * @throws org.wso2.carbon.registry.core.exceptions.RegistryException Thrown in case the schema
     * locations cannot be updated
     */
    private void updateWSDLSchemaLocations() throws RegistryException {
        updateWSDLocations();
        for (WSDLInfo wsdlInfo : wsdls.values()) {
            Definition definition = wsdlInfo.getWSDLDefinition();
            Types types = definition.getTypes();
            if (types != null) {
                List extensibleElements = types.getExtensibilityElements();
                Schema schemaExtension;
                Object extensionObject;
                for (Object extensibleElement : extensibleElements) {
                    extensionObject = extensibleElement;
                    if (extensionObject instanceof Schema) {
                        // first get the schema object
                        schemaExtension = (Schema) extensionObject;
                        // Walk the inline schema's DOM children looking for
                        // xsd:import / xsd:include elements.
                        NodeList nodeList = schemaExtension.getElement().getChildNodes();
                        String tagName;
                        for (int i = 0; i < nodeList.getLength(); i++) {
                            tagName = nodeList.item(i).getLocalName();
                            if (IMPORT_TAG.equals(tagName) || INCLUDE_TAG.equals(tagName)) {
                                NamedNodeMap nodeMap = nodeList.item(i).getAttributes();
                                Node attribute;
                                String attributeValue;
                                for (int j = 0; j < nodeMap.getLength(); j++) {
                                    attribute = nodeMap.item(j);
                                    if (attribute.getNodeName().equals("schemaLocation")) {
                                        attributeValue = attribute.getNodeValue();
                                        // Swap the original location for the schema's registry path, when known.
                                        String schemaPath = schemaProcessor.getSchemaRegistryPath(wsdlInfo.getProposedRegistryURL(), attributeValue);
                                        if (schemaPath != null) {
                                            attribute.setNodeValue(schemaPath);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
private void updateWSDLocations() {
for (WSDLInfo wsdlInfo : wsdls.values()) {
Definition definition = wsdlInfo.getWSDLDefinition();
ArrayList<String> wsdlDependancies = wsdlInfo.getWSDLDependencies();
Vector[] importVector = (Vector[])definition.getImports().values().toArray(new Vector[definition.getImports().values().size()]);
int count = 0;
for (String wsdlDependancy : wsdlDependancies) {
Vector values = importVector[count];
WSDLInfo dependantWSDLInfo = wsdls.get(wsdlDependancy);
dependantWSDLInfo.getProposedRegistryURL();
for (Object value : values) {
Import importedWSDL = (Import) value;
String relativeSchemaPath = WSDLUtil.computeRelativePathWithVersion(wsdlInfo.getProposedRegistryURL(),
dependantWSDLInfo.getProposedRegistryURL(), registry);
importedWSDL.setLocationURI(relativeSchemaPath);
}
count++;
}
}
}
private void identifyAssociationsNew(WSDLInfo wsdlInfo) {
String wsdlPath = wsdlInfo.getProposedRegistryURL();
for (String association : wsdlInfo.getSchemaDependencies()) {
String associatedTo = schemaProcessor.getSchemaAssociationPath(association);
if (associatedTo != null) {
associations.add(new Association(wsdlPath,
associatedTo,
CommonConstants.DEPENDS));
associations.add(new Association(associatedTo,
wsdlPath,
CommonConstants.USED_BY));
}
}
for (String association : wsdlInfo.getWSDLDependencies()) {
WSDLInfo info = wsdls.get(association);
if (info != null) {
String associatedTo = info.getProposedRegistryURL();
if (associatedTo != null) {
associations.add(new Association(wsdlPath,
associatedTo,
CommonConstants.DEPENDS));
associations.add(new Association(associatedTo,
wsdlPath,
CommonConstants.USED_BY));
}
}
}
}
@SuppressWarnings("unchecked")
private String saveWSDLsToRepositoryNew(RequestContext context, String symlinkLocation,
Resource metaDataResource,boolean disableSymLinkCreation)
throws RegistryException {
String masterWSDLPath = null;
try {
for (WSDLInfo wsdlInfo : wsdls.values()) {
Definition wsdlDefinition = wsdlInfo.getWSDLDefinition();
WSDLWriter wsdlWriter = WSDLFactory.newInstance().newWSDLWriter();
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
wsdlWriter.writeWSDL(wsdlDefinition, byteArrayOutputStream);
byte[] wsdlResourceContent = byteArrayOutputStream.toByteArray();
// create a resource this wsdlResourceContent and put it to the registry with the name
// importedResourceName (in some path)
String wsdlPath = wsdlInfo.getProposedRegistryURL();
Resource wsdlResource;
if (metaDataResource != null && registry.resourceExists(wsdlPath)) {
wsdlResource = registry.get(wsdlPath);
} else {
wsdlResource = new ResourceImpl();
if (metaDataResource != null) {
Properties properties = metaDataResource.getProperties();
if (properties != null) {
List<String> linkProperties =
Arrays.asList(RegistryConstants.REGISTRY_LINK,
RegistryConstants.REGISTRY_USER,
RegistryConstants.REGISTRY_MOUNT,
RegistryConstants.REGISTRY_AUTHOR,
RegistryConstants.REGISTRY_MOUNT_POINT,
RegistryConstants.REGISTRY_TARGET_POINT,
RegistryConstants.REGISTRY_ACTUAL_PATH,
RegistryConstants.REGISTRY_REAL_PATH);
for (Map.Entry<Object, Object> e : properties.entrySet()) {
String key = (String) e.getKey();
if (!linkProperties.contains(key)) {
wsdlResource.setProperty(key, (List<String>) e.getValue());
}
}
}
}
}
copyAllParameters(wsdlPath, wsdlResource);
// getting the parameters
masterWSDLPath = addProperties(masterWSDLPath, wsdlInfo, wsdlDefinition, wsdlResourceContent, wsdlPath, wsdlResource);
if (metaDataResource != null) {
wsdlResource.setDescription(metaDataResource.getDescription());
}
boolean newWSDLUpload = !registry.resourceExists(wsdlPath);
if (metaDataResource != null && metaDataResource.getProperty(CommonConstants.SOURCE_PROPERTY) != null) {
wsdlResource.setProperty(CommonConstants.SOURCE_PROPERTY, metaDataResource.getProperty(CommonConstants.SOURCE_PROPERTY));
} else {
if (context.getResource().getProperty(CommonConstants.SOURCE_PROPERTY) != null) {
if (context.getResource().getMediaType() != null &&
context.getResource().getMediaType().equals(wsdlResource.getMediaType())) {
wsdlResource.setProperty(CommonConstants.SOURCE_PROPERTY,
context.getResource().getProperty(CommonConstants.SOURCE_PROPERTY));
} else {
wsdlResource.setProperty(CommonConstants.SOURCE_PROPERTY, CommonConstants.SOURCE_AUTO);
}
}
}
deleteOldResource(context, metaDataResource, wsdlInfo, wsdlPath, wsdlResource);
saveResource(context, wsdlInfo.getOriginalURL(), wsdlPath, wsdlResource, true);
if (systemRegistry != null) {
EndpointUtils.saveEndpointsFromWSDL(context,wsdlPath, wsdlResource, registry, systemRegistry);
}
// TODO symlink
if(!disableSymLinkCreation) {
symlinkLocation = createLinks(symlinkLocation, wsdlInfo, wsdlPath, newWSDLUpload);
}
identifyAssociationsNew(wsdlInfo);
}
}
catch (WSDLException e) {
throw new RegistryException("Invalid WSDL file");
}
return masterWSDLPath;
}
    // TODO symlink
    /**
     * Creates or repairs a symbolic link from the user-chosen upload location to the
     * actual (namespace-derived) registry path of the master WSDL. Non-master WSDLs and
     * null symlink locations are left untouched. Returns the (possibly rewritten)
     * symlink location so callers can reuse it for subsequent WSDLs.
     */
    private String createLinks(String symlinkLocation, WSDLInfo wsdlInfo, String wsdlPath, boolean newWSDLUpload) throws RegistryException {
        if (wsdlInfo.isMasterWSDL() && symlinkLocation != null) {
            if (!symlinkLocation.endsWith(RegistryConstants.PATH_SEPARATOR)) {
                symlinkLocation = symlinkLocation + RegistryConstants.PATH_SEPARATOR;
            }
            if (registry.resourceExists(symlinkLocation)) {
                Resource resource = registry.get(symlinkLocation);
                if (resource != null) {
                    String isLink = resource.getProperty("registry.link");
                    String mountPoint = resource.getProperty("registry.mountpoint");
                    String targetPoint = resource.getProperty("registry.targetpoint");
                    String actualPath = resource.getProperty("registry.actualpath");
                    // If the location itself is a mounted link, resolve it to the actual path
                    // so the symlink is created on the real target registry.
                    if (isLink != null && mountPoint != null && targetPoint != null) {
                        // symlinkLocation = symlinkLocation.replace(mountPoint, targetPoint);
                        symlinkLocation = actualPath + RegistryConstants.PATH_SEPARATOR;
                    }
                }
            }
            // Cases handled below (resourcePath = upload path, wsdlPath = namespace path):
            // 1. New resource: resourcePath = /foo, wsdlPath = /ns/name.wsdl, symlinkPath = /foo, resourceExist = false, resourceIsSymLink = false, createSymlink = true. DoWork = true
            // 2. New resource, existing symlink: resourcePath = /foo, wsdlPath = /ns/name.wsdl, symlinkPath = /foo, resourceExist = false, resourceIsSymLink = true, createSymlink = false
            // 3. Edit from symlink: resourcePath = /foo, wsdlPath = /ns/name.wsdl, symlinkPath = /foo, resourceExist = true, resourceIsSymLink = true, createSymlink = false,
            // 4. Edit from resource: resourcePath = /ns/name.wsdl, wsdlPath = /ns/name.wsdl, symlinkPath = /ns/name.wsdl, resourceExist = true, resourceIsSymLink = false, createSymlink = false,
            // 5. Edit from resource, change ns: resourcePath = /ns/name.wsdl, wsdlPath = /ns2/name.wsdl, symlinkPath = /ns/name.wsdl, resourceExist = true, resourceIsSymLink = false, createSymlink = false, deleteResource = true. DoWork = true
            // 6. Edit from symlink, change ns: resourcePath = /ns/name.wsdl, wsdlPath = /ns2/name.wsdl, symlinkPath = /ns/name.wsdl, resourceExist = true, resourceIsSymLink = true, createSymlink = delete and add, deleteResource = true. DoWork = true
            if (!symlinkLocation.endsWith(RegistryConstants.PATH_SEPARATOR)) {
                symlinkLocation = symlinkLocation + RegistryConstants.PATH_SEPARATOR;
            }
            String symlinkPath = symlinkLocation + resourceName;
            if (!registry.resourceExists(symlinkPath)) {
                // No resource at the symlink path yet: create a fresh link (batched via the
                // symbolic-link map when one is active for this request).
                if (CommonUtil.isSymbolicLinkMapExisting()) {
                    CommonUtil.addToSymbolicLinkMap(symlinkPath, wsdlPath);
                } else {
                    systemRegistry.createLink(symlinkPath, wsdlPath);
                }
            } else if (newWSDLUpload) {
                if (registry.get(symlinkPath).getProperty(RegistryConstants.REGISTRY_LINK) != null) {
                    // Existing symlink pointing at an old path: drop the stale target and re-link.
                    String actualPath = registry.get(symlinkPath).getProperty(RegistryConstants.REGISTRY_ACTUAL_PATH);
                    if (!wsdlPath.equals(actualPath)) {
                        if (actualPath != null) {
                            registry.delete(actualPath);
                        }
                        if (CommonUtil.isSymbolicLinkMapExisting()) {
                            CommonUtil.addToSymbolicLinkMap(symlinkPath, wsdlPath);
                        } else {
                            systemRegistry.removeLink(symlinkPath);
                            systemRegistry.createLink(symlinkPath, wsdlPath);
                        }
                    }
                } else {
                    // A plain (non-link) resource occupies the symlink path of a new upload:
                    // remove it so the namespace-path copy remains the single source of truth.
                    registry.delete(symlinkPath);
                }
            }
        }
        return symlinkLocation;
    }
private String addProperties(String masterWSDLPath, WSDLInfo wsdlInfo, Definition wsdlDefinition,
byte[] wsdlResourceContent, String wsdlPath, Resource wsdlResource)
throws RegistryException {
//Commented to fix REGISTRY-2329
/*if (wsdlDefinition.getQName() != null) {
String name = wsdlDefinition.getQName().getLocalPart();
if (name != null) {
wsdlResource.addProperty("registry.wsdl.Name", name);
}
}
if (wsdlDefinition.getDocumentationElement() != null) {
String document = wsdlDefinition.getDocumentationElement().getTextContent();
if (document != null) {
wsdlResource.addProperty("registry.wsdl.documentation", document);
}
}*/
//Commented to fix REGISTRY-2329
//String targetNamespace = wsdlDefinition.getTargetNamespace();
//wsdlResource.addProperty("registry.wsdl.TargetNamespace", targetNamespace);
wsdlResource.setMediaType(RegistryConstants.WSDL_MEDIA_TYPE);
wsdlResource.setContent(wsdlResourceContent);
if (wsdlInfo.isMasterWSDL()) {
masterWSDLPath = wsdlPath;
log.trace("Setting WSDL Validation properties");
if (wsdlValidationInfo != null) {
wsdlResource.setProperty(WSDLUtils.WSDL_STATUS, wsdlValidationInfo.getStatus());
ArrayList<String> validationMessages = wsdlValidationInfo.getValidationMessages();
if (validationMessages.size() > 0) {
int i = 1;
for (String message : validationMessages) {
if (message == null) {
continue;
}
if (message.length() > 1000) {
message = message.substring(0, 997) + "...";
}
wsdlResource.setProperty(WSDLUtils.WSDL_VALIDATION_MESSAGE + i,
message);
i++;
}
}
} else if (hasWSDLImports) {
wsdlResource.setProperty(WSDLUtils.WSDL_STATUS,
"Validation is not supported for WSDLs containing WSDL imports.");
}
log.trace("Finished setting WSDL Validation properties");
log.trace("Setting WSI Validation properties");
if (wsiValidationInfo != null) {
wsdlResource.setProperty(WSDLUtils.WSI_STATUS,
wsiValidationInfo.getStatus());
ArrayList<String> validationMessages =
wsiValidationInfo.getValidationMessages();
if (validationMessages.size() > 0) {
int i = 1;
for (String message : validationMessages) {
if (message == null) {
continue;
}
if (message.length() > 1000) {
message = message.substring(0, 997) + "...";
}
wsdlResource.setProperty(WSDLUtils.WSI_VALIDATION_MESSAGE + i,
message);
i++;
}
}
} else if (hasWSDLImports) {
wsdlResource.setProperty(WSDLUtils.WSI_STATUS,
"Validation is not supported for WSDLs containing WSDL imports.");
}
log.trace("Finished setting WSI Validation properties");
}
return masterWSDLPath;
}
private void copyAllParameters(String wsdlPath, Resource wsdlResource) throws RegistryException {
if(registry.resourceExists(wsdlPath)){
// we are copying all the properties, rather than using the exisint pointer
Resource oldWsdlResource = registry.get(wsdlPath);
Properties properties = oldWsdlResource.getProperties();
for (Map.Entry<Object, Object> e : properties.entrySet()) {
if (e.getValue() instanceof String) {
wsdlResource.setProperty((String) e.getKey(), (String) e.getValue());
} else {
wsdlResource.setProperty((String) e.getKey(),
(List<String>) e.getValue());
}
}
}}
@SuppressWarnings("unchecked")
private String saveWSDLsToRepositoryNew(RequestContext context, String symlinkLocation, Resource metaDataResource
,String endpointEnvironment,List<String> dependenciesList,String version,boolean disableSymLinkCreation)
throws RegistryException {
String masterWSDLPath = null;
try {
for (WSDLInfo wsdlInfo : wsdls.values()) {
Definition wsdlDefinition = wsdlInfo.getWSDLDefinition();
WSDLWriter wsdlWriter = WSDLFactory.newInstance().newWSDLWriter();
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
wsdlWriter.writeWSDL(wsdlDefinition, byteArrayOutputStream);
byte[] wsdlResourceContent = byteArrayOutputStream.toByteArray();
// create a resource this wsdlResourceContent and put it to the registry with the name
// importedResourceName (in some path)
String wsdlPath = wsdlInfo.getProposedRegistryURL();
Resource wsdlResource;
if (metaDataResource != null && registry.resourceExists(wsdlPath)) {
wsdlResource = registry.get(wsdlPath);
} else {
wsdlResource = new ResourceImpl();
if (metaDataResource != null) {
Properties properties = metaDataResource.getProperties();
if (properties != null) {
List<String> linkProperties =
Arrays.asList(RegistryConstants.REGISTRY_LINK,
RegistryConstants.REGISTRY_USER,
RegistryConstants.REGISTRY_MOUNT,
RegistryConstants.REGISTRY_AUTHOR,
RegistryConstants.REGISTRY_MOUNT_POINT,
RegistryConstants.REGISTRY_TARGET_POINT,
RegistryConstants.REGISTRY_ACTUAL_PATH,
RegistryConstants.REGISTRY_REAL_PATH);
for (Map.Entry<Object, Object> e : properties.entrySet()) {
String key = (String) e.getKey();
if (!linkProperties.contains(key)) {
wsdlResource.setProperty(key, (List<String>) e.getValue());
}
}
}
}
}
if (context.getResource().getProperty(CommonConstants.SOURCE_PROPERTY) != null){
wsdlResource.setProperty(CommonConstants.SOURCE_PROPERTY, CommonConstants.SOURCE_AUTO);
} else {
wsdlResource.setProperty(CommonConstants.SOURCE_PROPERTY, "undefined");
}
if(registry.resourceExists(wsdlPath)){
// we are copying all the properties, rather than using the existing pointer
Resource oldWsdlResource = registry.get(wsdlPath);
Properties properties = oldWsdlResource.getProperties();
for (Map.Entry<Object, Object> e : properties.entrySet()) {
if (e.getValue() instanceof String) {
wsdlResource.setProperty((String) e.getKey(), (String) e.getValue());
} else {
wsdlResource.setProperty((String) e.getKey(),
(List<String>) e.getValue());
}
}
}
// getting the parameters
masterWSDLPath = addProperties(masterWSDLPath, wsdlInfo, wsdlDefinition, wsdlResourceContent, wsdlPath, wsdlResource);
if (metaDataResource != null) {
wsdlResource.setDescription(metaDataResource.getDescription());
}
boolean newWSDLUpload = !registry.resourceExists(wsdlPath);
deleteOldResource(context, metaDataResource, wsdlInfo, wsdlPath, wsdlResource);
saveResource(context, wsdlInfo.getOriginalURL(), wsdlPath, wsdlResource, true);
if (systemRegistry != null) {
EndpointUtils.saveEndpointsFromWSDL(context ,wsdlPath, wsdlResource, registry,
systemRegistry,endpointEnvironment,dependenciesList,version);
}
if(!disableSymLinkCreation) {
symlinkLocation = createLinks(symlinkLocation, wsdlInfo, wsdlPath, newWSDLUpload);
}
identifyAssociationsNew(wsdlInfo);
}
}
catch (WSDLException e) {
throw new RegistryException("Invalid WSDL file");
}
return masterWSDLPath;
}
private void deleteOldResource(RequestContext context, Resource metaDataResource, WSDLInfo wsdlInfo, String wsdlPath, Resource wsdlResource) throws RegistryException {
if(wsdlInfo.isMasterWSDL()){
if (metaDataResource != null) {
wsdlResource.setUUID(metaDataResource.getUUID());
}
if(!wsdlPath.equals(context.getResourcePath().getPath())
&& registry.resourceExists(context.getResourcePath().getPath())){
registry.delete(context.getResourcePath().getPath());
}
}
}
private List<OMElement> createServiceContent(String wsdlURL, Resource metadata)
throws RegistryException {
List<OMElement> serviceContentomelements = new ArrayList<OMElement>();
if (originalDefinition != null) {
Map servicesMap = originalDefinition.getServices();
for (Object serviceObject : servicesMap.values()) {
Service service = (Service) serviceObject;
QName qname = service.getQName();
OMFactory fac = OMAbstractFactory.getOMFactory();
OMNamespace namespace = fac.createOMNamespace(CommonConstants.SERVICE_ELEMENT_NAMESPACE, "");
OMElement data = fac.createOMElement(CommonConstants.SERVICE_ELEMENT_ROOT, namespace);
OMElement definitionURL = fac.createOMElement("wsdlURL", namespace);
OMElement overview = fac.createOMElement("overview", namespace);
OMElement interfaceelement = fac.createOMElement("interface", namespace);
OMElement name = fac.createOMElement("name", namespace);
OMElement version = fac.createOMElement("version",namespace);
name.setText(qname.getLocalPart());
version.setText(metadata.getProperty("version"));
definitionURL.setText(RegistryUtils.getRelativePath(registry.getRegistryContext(), wsdlURL));
OMElement namespaceElement = fac.createOMElement("namespace", namespace);
OMElement descriptionelement = fac.createOMElement("description", namespace);
namespaceElement.setText(qname.getNamespaceURI());
String description = metadata.getDescription();
if (description == null) {
Element documentationElement = originalDefinition.getDocumentationElement();
if ((documentationElement != null) && (documentationElement.getFirstChild() != null)) {
Node firstChild = documentationElement.getFirstChild();
description = DOM2Writer.nodeToString(firstChild);
}
}
descriptionelement.setText(description);
// the endpoints will not be added right here, as it will be added by the
// service.
// OMElement endpointlist = fac.createOMElement("endpoints", namespace);
// OMElement count = fac.createOMElement("count", namespace);
// Map ports = service.getPorts();
// count.setText(Integer.toString(ports.size()));
// endpointlist.addChild(count);
// for (Object portObject : ports.values()) {
// Port port = (Port) portObject;
// List elements = port.getExtensibilityElements();
// for (Object element : elements) {
// eprcount++;
// OMElement endpoint = fac.createOMElement("text_EndPoint" + eprcount,null);
// OMElement endpointoption = fac.createOMElement("EndPoint" + eprcount,null);
// if (element instanceof SOAP12Address) {
// SOAP12Address soapAddress = (SOAP12Address) element;
// endpoint.setText(soapAddress.getLocationURI());
// } else if (element instanceof SOAPAddress) {
// SOAPAddress soapAddress = (SOAPAddress) element;
// endpoint.setText(soapAddress.getLocationURI());
// } else if (element instanceof HTTPAddress) {
// HTTPAddress httpAddress = (HTTPAddress) element;
// endpoint.setText(httpAddress.getLocationURI());
// }
// endpointoption.setText("0");
// endpointlist.addChild(endpointoption);
// endpointlist.addChild(endpoint);
// }
// }
overview.addChild(name);
overview.addChild(version);
overview.addChild(namespaceElement);
overview.addChild(descriptionelement);
interfaceelement.addChild(definitionURL);
data.addChild(overview);
data.addChild(interfaceelement);
serviceContentomelements.add(data);
}
}
return serviceContentomelements;
}
    /**
     * Persists a resource in the registry, assigning a fresh UUID when none is present
     * and marking the addition as originating from a WSDL import so downstream media
     * type handlers can recognize it. After storing, the resource's path is rewritten
     * to be relative to the governance base path.
     *
     * @param context  the request context of the current operation.
     * @param path     the absolute registry path to store the resource at.
     * @param url      the URL the resource was originally imported from.
     * @param resource the resource to store.
     * @param isWSDL   true to route the write through {@link #addWSDLToRegistry}.
     * @throws RegistryException if storing fails.
     */
    private void saveResource(RequestContext context, String url, String path, Resource resource,
                              boolean isWSDL)
            throws RegistryException {
        log.trace("Started saving resource");
        String artifactId = resource.getUUID();
        if (artifactId == null) {
            // generate a service id
            artifactId = UUID.randomUUID().toString();
            resource.setUUID(artifactId);
        }
//        if (systemRegistry != null) {
//            CommonUtil.addGovernanceArtifactEntryWithAbsoluteValues(systemRegistry, artifactId, path);
//        }
        String relativeArtifactPath = RegistryUtils.getRelativePath(registry.getRegistryContext(), path);
        // and then get the relative path to the GOVERNANCE_BASE_PATH
        relativeArtifactPath = RegistryUtils.getRelativePathToOriginal(relativeArtifactPath,
                RegistryConstants.GOVERNANCE_REGISTRY_BASE_PATH);
        /* This property will be used in ServiceMediatype handler to recognize that particular service addition is
        initialized due to wsdl addition
         */
        resource.setProperty("registry.DefinitionImport","true");
        if (!isWSDL) {
            registry.put(path, resource);
        } else {
            // WSDLs go through the overridable hook so subclasses can customize storage.
            addWSDLToRegistry(context, path, url, resource, registry);
        }
//        if (!(resource instanceof Collection) &&
//            ((ResourceImpl) resource).isVersionableChange()) {
//            registry.createVersion(path);
//        }
        ((ResourceImpl)resource).setPath(relativeArtifactPath);
        log.trace("Finished saving resource");
    }
/**
* Method that gets called instructing a WSDL to be added the registry.
*
* @param context the request context for this request.
* @param path the path to add the resource to.
* @param url the path from which the resource was imported from.
* @param resource the resource to be added.
* @param registry the registry instance to use.
*
* @throws RegistryException if the operation failed.
*/
protected void addWSDLToRegistry(RequestContext context, String path, String url,
Resource resource, Registry registry) throws RegistryException {
registry.put(path, resource);
}
/**
* Method to customize the Schema Processor.
* @param requestContext the request context for the import/put operation.
* @param validationInfo the WSDL validation information.
* @return the Schema Processor instance.
*/
@SuppressWarnings("unused")
protected SchemaProcessor buildSchemaProcessor(RequestContext requestContext,
WSDLValidationInfo validationInfo) {
return new SchemaProcessor(requestContext, validationInfo);
}
/**
* Method to customize the Schema Processor.
* @param requestContext the request context for the import/put operation.
* @param validationInfo the WSDL validation information.
* @param useOriginalSchema whether the schema to be original
* @return the Schema Processor instance.
*/
@SuppressWarnings("unused")
protected SchemaProcessor buildSchemaProcessor(RequestContext requestContext,
WSDLValidationInfo validationInfo, boolean useOriginalSchema) {
return new SchemaProcessor(requestContext, validationInfo, useOriginalSchema);
}
private static Properties copyProperties(RequestContext requestContext){
Properties properties = requestContext.getResource().getProperties();
Properties copiedProperties = new Properties();
if (properties != null) {
List<String> linkProperties = Arrays.asList(
RegistryConstants.REGISTRY_LINK,
RegistryConstants.REGISTRY_USER,
RegistryConstants.REGISTRY_MOUNT,
RegistryConstants.REGISTRY_AUTHOR,
RegistryConstants.REGISTRY_MOUNT_POINT,
RegistryConstants.REGISTRY_TARGET_POINT,
RegistryConstants.REGISTRY_ACTUAL_PATH,
RegistryConstants.REGISTRY_REAL_PATH);
for (Map.Entry<Object, Object> e : properties.entrySet()) {
String key = (String) e.getKey();
if (!linkProperties.contains(key) && !(key.startsWith("resource") || key.startsWith("registry"))) {
copiedProperties.put(key, (List<String>) e.getValue());
}
}
}
return copiedProperties;
}
}
| laki88/carbon-registry | components/registry/org.wso2.carbon.registry.extensions/src/main/java/org/wso2/carbon/registry/extensions/handlers/utils/WSDLProcessor.java | Java | apache-2.0 | 64,435 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.svn.dialogs;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.util.Disposer;
import org.tmatesoft.svn.core.SVNURL;
import javax.swing.tree.TreeNode;
import java.text.Collator;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
public class RepositoryTreeRootNode implements TreeNode, Disposable {
private final List<TreeNode> myChildren;
private final RepositoryTreeModel myModel;
public RepositoryTreeRootNode(RepositoryTreeModel model, SVNURL[] urls) {
myChildren = new ArrayList<>();
myModel = model;
for (SVNURL url : urls) {
RepositoryTreeNode rootNode = new RepositoryTreeNode(model, this, url, url);
Disposer.register(this, rootNode);
myChildren.add(rootNode);
}
Collections.sort(myChildren, (o1, o2) -> Collator.getInstance().compare(o1.toString(), o2.toString()));
}
public void addRoot(SVNURL url) {
RepositoryTreeNode rootNode = new RepositoryTreeNode(myModel, this, url, url);
Disposer.register(this, rootNode);
myChildren.add(rootNode);
Collections.sort(myChildren, (o1, o2) -> Collator.getInstance().compare(o1.toString(), o2.toString()));
myModel.nodesWereInserted(this, new int[]{myChildren.indexOf(rootNode)});
}
public void remove(TreeNode node) {
int index = getIndex(node);
myChildren.remove(node);
myModel.nodesWereRemoved(this, new int[]{index}, new Object[]{node});
}
public Enumeration children() {
return Collections.enumeration(myChildren);
}
public boolean getAllowsChildren() {
return true;
}
public TreeNode getChildAt(int childIndex) {
return myChildren.get(childIndex);
}
public int getChildCount() {
return myChildren.size();
}
public int getIndex(TreeNode node) {
return myChildren.indexOf(node);
}
public TreeNode getParent() {
return null;
}
public boolean isLeaf() {
return false;
}
public void dispose() {
}
}
| apixandru/intellij-community | plugins/svn4idea/src/org/jetbrains/idea/svn/dialogs/RepositoryTreeRootNode.java | Java | apache-2.0 | 2,615 |
package org.jetbrains.yaml.psi;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiDirectory;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiFileSystemItem;
import com.intellij.psi.impl.PsiTreeChangeEventImpl;
import com.intellij.psi.impl.PsiTreeChangePreprocessorBase;
import org.jetbrains.annotations.NotNull;
/**
 * PSI tree change preprocessor for YAML files. Restricts change processing to YAML
 * files and decides whether a changed element lies "inside a code block" (i.e. below
 * the file/document/key-value/compound-value levels), in which case the change is
 * treated as local.
 *
 * @author oleg
 */
final class YAMLPsiManager extends PsiTreeChangePreprocessorBase {
  public YAMLPsiManager(@NotNull Project project) {
    super(project);
  }

  @Override
  protected boolean isInsideCodeBlock(PsiElement element) {
    // Files and directories are never inside a code block.
    if (element instanceof PsiFileSystemItem) {
      return false;
    }
    // Detached or top-level-less elements are conservatively treated as inside.
    if (element == null || element.getParent() == null) {
      return true;
    }
    // Walk up the tree: stop as soon as we either reach the YAML file (outside a
    // block) or leave the YAML structural elements (inside a block).
    while (true) {
      if (element instanceof YAMLFile) {
        return false;
      }
      if (element instanceof PsiFile || element instanceof PsiDirectory) {
        return true;
      }
      PsiElement parent = element.getParent();
      if (!(parent instanceof YAMLFile ||
            parent instanceof YAMLKeyValue ||
            parent instanceof YAMLCompoundValue ||
            parent instanceof YAMLDocument)) {
        return true;
      }
      element = parent;
    }
  }

  @Override
  public void treeChanged(@NotNull PsiTreeChangeEventImpl event) {
    // Only YAML files are of interest to this preprocessor.
    if (!(event.getFile() instanceof YAMLFile)) return;
    super.treeChanged(event);
  }
}
| idea4bsd/idea4bsd | plugins/yaml/src/org/jetbrains/yaml/psi/YAMLPsiManager.java | Java | apache-2.0 | 1,445 |
package org.jolokia.backend.executor;
/*
* Copyright 2009-2013 Roland Huss
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.*;
import javax.management.*;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.testng.Assert.*;
/**
* @author roland
* @since 23.01.13
*/
public class AbstractMBeanServerExecutorTest {
TestExecutor executor;
    @BeforeMethod
    public void setup() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException, InterruptedException, InstanceNotFoundException, IOException {
        // Fresh executor (with its own pair of MBean servers) for every test method.
        executor = new TestExecutor();
    }
@Test
public void jolokiaServer() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException {
assertNotNull(executor.getJolokiaMBeanServer());
AbstractMBeanServerExecutor executorNull = new AbstractMBeanServerExecutor() {
@Override
protected Set<MBeanServerConnection> getMBeanServers() {
return null;
}
};
assertNull(executorNull.getJolokiaMBeanServer());
}
    @Test
    public void eachNull() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanException, IOException, ReflectionException {
        // A null object name means "iterate over every MBean of every server".
        executor.each(null, new MBeanServerExecutor.MBeanEachCallback() {
            public void callback(MBeanServerConnection pConn, ObjectName pName) throws ReflectionException, InstanceNotFoundException, IOException, MBeanException {
                // Verifies that Jolokia-server MBeans shadow same-named MBeans of other servers.
                checkHiddenMBeans(pConn, pName);
            }
        });
    }
@Test
public void eachObjectName() throws MalformedObjectNameException, MBeanException, IOException, ReflectionException, NotCompliantMBeanException, InstanceAlreadyExistsException {
for (final ObjectName name : new ObjectName[] { new ObjectName("test:type=one"), new ObjectName("test:type=two") }) {
executor.each(name,new MBeanServerExecutor.MBeanEachCallback() {
public void callback(MBeanServerConnection pConn, ObjectName pName) throws ReflectionException, InstanceNotFoundException, IOException, MBeanException {
assertEquals(pName,name);
checkHiddenMBeans(pConn,pName);
}
});
}
}
    @Test
    public void updateChangeTest() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException, InstanceNotFoundException, InterruptedException, IOException {
        // Track MBean (de-)registrations so hasMBeansListChangedSince() can answer.
        executor.registerForMBeanNotifications();
        try {
            assertTrue(executor.hasMBeansListChangedSince(0),"updatedSince: When 0 is given, always return true");
            long time = currentTime();
            assertFalse(executor.hasMBeansListChangedSince(time), "No update yet");
            for (int id = 1; id <=2; id++) {
                time = currentTime();
                executor.addMBean(id);
                try {
                    assertTrue(executor.hasMBeansListChangedSince(0),"updatedSince: For 0, always return true");
                    assertTrue(executor.hasMBeansListChangedSince(time),"MBean has been added in the same second, hence it has been updated");
                    // A timestamp one second in the future must not report any change.
                    time = currentTime() + 1;
                    assertFalse(executor.hasMBeansListChangedSince(time),"No updated since the last call");
                } finally {
                    executor.rmMBean(id);
                }
            }
        } finally {
            executor.unregisterFromMBeanNotifications();
        }
    }
    @Test
    public void destroyWithoutPriorRegistration() throws NoSuchFieldException, IllegalAccessException {
        // Must always succeed, even without a prior registration: non-existing
        // listeners are simply ignored during deregistration.
        executor.unregisterFromMBeanNotifications();
    }
private long currentTime() {
return System.currentTimeMillis() / 1000;
}
@Test
public void call() throws MalformedObjectNameException, MBeanException, InstanceAlreadyExistsException, NotCompliantMBeanException, IOException, ReflectionException, AttributeNotFoundException, InstanceNotFoundException {
String name = getAttribute(executor,"test:type=one","Name");
assertEquals(name,"jolokia");
}
    /**
     * Reads the given attribute of the named MBean through the executor's call()
     * mechanism (the attribute name travels via the extraArgs varargs).
     */
    private String getAttribute(AbstractMBeanServerExecutor pExecutor, String name, String attribute) throws IOException, ReflectionException, MBeanException, MalformedObjectNameException, AttributeNotFoundException, InstanceNotFoundException {
        return (String) pExecutor.call(new ObjectName(name),new MBeanServerExecutor.MBeanAction<Object>() {
            public Object execute(MBeanServerConnection pConn, ObjectName pName, Object... extraArgs) throws ReflectionException, InstanceNotFoundException, IOException, MBeanException, AttributeNotFoundException {
                return pConn.getAttribute(pName, (String) extraArgs[0]);
            }
        },attribute);
    }
    @Test(expectedExceptions = InstanceNotFoundException.class,expectedExceptionsMessageRegExp = ".*test:type=bla.*")
    public void callWithInvalidObjectName() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanException, IOException, ReflectionException, AttributeNotFoundException, InstanceNotFoundException {
        // An unknown MBean name must surface as InstanceNotFoundException naming the MBean.
        getAttribute(executor,"test:type=bla","Name");
    }
    @Test(expectedExceptions = AttributeNotFoundException.class,expectedExceptionsMessageRegExp = ".*Bla.*")
    public void callWithInvalidAttributeName() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanException, IOException, ReflectionException, AttributeNotFoundException, InstanceNotFoundException {
        // An unknown attribute must surface as AttributeNotFoundException naming the attribute.
        getAttribute(executor,"test:type=one","Bla");
    }
@Test
public void queryNames() throws IOException, MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException {
Set<ObjectName> names = executor.queryNames(null);
assertTrue(names.contains(new ObjectName("test:type=one")));
assertTrue(names.contains(new ObjectName("test:type=two")));
assertEquals(names.size(), 3);
}
private void checkHiddenMBeans(MBeanServerConnection pConn, ObjectName pName) throws MBeanException, InstanceNotFoundException, ReflectionException, IOException {
try {
if (!pName.equals(new ObjectName("JMImplementation:type=MBeanServerDelegate"))) {
assertEquals(pConn.getAttribute(pName,"Name"),"jolokia");
}
} catch (AttributeNotFoundException e) {
fail("Name should be accessible on all MBeans");
} catch (MalformedObjectNameException e) {
// wont happen
}
try {
pConn.getAttribute(pName, "Age");
fail("No access to hidden MBean allowed");
} catch (AttributeNotFoundException exp) {
// Expected
}
}
class TestExecutor extends AbstractMBeanServerExecutor {
private MBeanServer jolokiaMBeanServer;
private final Set<MBeanServerConnection> servers;
private final MBeanServer otherMBeanServer;
private Testing jOne = new Testing(), oTwo = new Testing();
private Hidden hidden = new Hidden();
TestExecutor() throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException, InterruptedException, InstanceNotFoundException, IOException {
jolokiaMBeanServer = MBeanServerFactory.newMBeanServer();
otherMBeanServer = MBeanServerFactory.newMBeanServer();
servers = new LinkedHashSet<MBeanServerConnection>(Arrays.asList(jolokiaMBeanServer, otherMBeanServer));
jolokiaMBeanServer.registerMBean(jOne, new ObjectName("test:type=one"));
otherMBeanServer.registerMBean(hidden, new ObjectName("test:type=one"));
otherMBeanServer.registerMBean(oTwo, new ObjectName("test:type=two"));
}
@Override
protected Set<MBeanServerConnection> getMBeanServers() {
return servers;
}
@Override
protected MBeanServerConnection getJolokiaMBeanServer() {
return jolokiaMBeanServer;
}
void addMBean(int id) throws MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException {
MBeanServer server = getMBeanServerShuffled(id);
server.registerMBean(new Testing(),new ObjectName("test:type=update,id=" + id));
}
void rmMBean(int id) throws MalformedObjectNameException, MBeanRegistrationException, InstanceNotFoundException {
MBeanServer server = getMBeanServerShuffled(id);
server.unregisterMBean(new ObjectName("test:type=update,id=" + id));
}
private MBeanServer getMBeanServerShuffled(int pId) {
if (pId % 2 == 0) {
return jolokiaMBeanServer;
} else {
return otherMBeanServer;
}
}
}
public interface TestingMBean {
String getName();
}
    // Visible test MBean: its "Name" attribute always resolves to "jolokia".
    public static class Testing implements TestingMBean {
        public String getName() {
            return "jolokia";
        }
    }
public interface HiddenMBean {
int getAge();
}
    // MBean whose "Age" attribute must not be reachable through the executor:
    // checkHiddenMBeans() expects getAttribute(..., "Age") to fail with an
    // AttributeNotFoundException.
    public static class Hidden implements HiddenMBean {
        public int getAge() {
            return 1;
        }
    }
}
| cinhtau/jolokia | agent/core/src/test/java/org/jolokia/backend/executor/AbstractMBeanServerExecutorTest.java | Java | apache-2.0 | 10,401 |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.registry.eventing.internal;
import org.apache.axis2.context.ConfigurationContext;
import org.wso2.carbon.email.verification.util.EmailVerifcationSubscriber;
import org.wso2.carbon.email.verification.util.EmailVerifierConfig;
import org.wso2.carbon.registry.event.core.EventBroker;
import org.wso2.carbon.registry.core.service.RegistryService;
import org.wso2.carbon.registry.eventing.services.EventingService;
/**
 * Singleton holder for the OSGi service references and configuration used by
 * the registry eventing component. All accessors are plain getters/setters;
 * the declarative-service component is expected to populate them at bind time.
 */
public class EventingDataHolder {

    // Fix: the singleton instance is created once and never replaced, so it is
    // declared static final.
    private static final EventingDataHolder holder = new EventingDataHolder();

    private RegistryService registryService;
    private String defaultEventingServiceURL;
    private EventingService registryEventingService;
    private EventBroker registryEventBrokerService;
    private ConfigurationContext configurationContext;
    private EmailVerifcationSubscriber emailVerificationSubscriber;
    private EmailVerifierConfig emailVerifierConfig = null;
    private JMXEventsBean eventsBean;
    private NotificationConfig notificationConfig;

    // Private constructor enforces the singleton pattern.
    private EventingDataHolder(){
    }

    /** Returns the process-wide singleton instance. */
    public static EventingDataHolder getInstance(){
        return holder;
    }

    public RegistryService getRegistryService() {
        return registryService;
    }

    public void setRegistryService(RegistryService registryService) {
        this.registryService = registryService;
    }

    public String getDefaultEventingServiceURL() {
        return defaultEventingServiceURL;
    }

    public void setDefaultEventingServiceURL(String defaultEventingServiceURL) {
        this.defaultEventingServiceURL = defaultEventingServiceURL;
    }

    public EventingService getRegistryEventingService() {
        return registryEventingService;
    }

    public void setRegistryEventingService(EventingService registryEventingService) {
        this.registryEventingService = registryEventingService;
    }

    public EventBroker getRegistryEventBrokerService() {
        return registryEventBrokerService;
    }

    public void setRegistryEventBrokerService(EventBroker registryEventBrokerService) {
        this.registryEventBrokerService = registryEventBrokerService;
    }

    public ConfigurationContext getConfigurationContext() {
        return configurationContext;
    }

    public void setConfigurationContext(ConfigurationContext configurationContext) {
        this.configurationContext = configurationContext;
    }

    public EmailVerifcationSubscriber getEmailVerificationSubscriber() {
        return emailVerificationSubscriber;
    }

    public void setEmailVerificationSubscriber(EmailVerifcationSubscriber emailVerificationSubscriber) {
        this.emailVerificationSubscriber = emailVerificationSubscriber;
    }

    public EmailVerifierConfig getEmailVerifierConfig() {
        return emailVerifierConfig;
    }

    public void setEmailVerifierConfig(EmailVerifierConfig emailVerifierConfig) {
        this.emailVerifierConfig = emailVerifierConfig;
    }

    public JMXEventsBean getEventsBean() {
        return eventsBean;
    }

    public void setEventsBean(JMXEventsBean eventsBean) {
        this.eventsBean = eventsBean;
    }

    public NotificationConfig getNotificationConfig() {
        return notificationConfig;
    }

    public void setNotificationConfig(NotificationConfig notificationConfig) {
        this.notificationConfig = notificationConfig;
    }
}
| laki88/carbon-registry | components/registry/org.wso2.carbon.registry.eventing/src/main/java/org/wso2/carbon/registry/eventing/internal/EventingDataHolder.java | Java | apache-2.0 | 4,035 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.util.Locale;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ConfigFormat {

  JSON("json"),
  PROPERTIES("properties"),
  XML("xml"),
  HADOOP_XML("hadoop_xml"),
  ENV("env"),
  TEMPLATE("template"),
  YAML("yaml"),
  ;

  /** Canonical lower-case name of the format, also used as a file suffix. */
  private final String suffix;

  ConfigFormat(String suffix) {
    this.suffix = suffix;
  }

  public String getSuffix() {
    return suffix;
  }

  @Override
  public String toString() {
    return suffix;
  }

  /**
   * Get a matching format or null.
   *
   * @param type case-insensitive format name (e.g. "json", "XML"); may be null
   * @return the matching format, or null when {@code type} is null or unknown
   */
  public static ConfigFormat resolve(String type) {
    if (type == null) {
      // Fix: previously this threw a NullPointerException; the documented
      // contract is "a matching format or null", so null yields no match.
      return null;
    }
    // Fix: hoist the locale-independent lower-casing out of the loop; it was
    // recomputed on every iteration although it is loop-invariant.
    String normalized = type.toLowerCase(Locale.ENGLISH);
    for (ConfigFormat format : values()) {
      if (format.getSuffix().equals(normalized)) {
        return format;
      }
    }
    return null;
  }
}
| dennishuo/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFormat.java | Java | apache-2.0 | 1,763 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.exec.mr;
import java.io.IOException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.mapred.Counters;
/**
 * Callback hook invoked while executing a Hadoop MapReduce job on behalf of a
 * Hive query.
 */
@SuppressWarnings("deprecation")
public interface HadoopJobExecHook {

  /**
   * Inspects the given MR job counters for fatal conditions.
   *
   * @param ctrs counters of the running/finished Hadoop job
   * @param errMsg buffer the implementation may append an error description to
   * @return presumably true when a fatal error was detected — confirm against
   *         implementations/callers, as the contract is not visible here
   */
  public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg);

  /**
   * Records the progress of the query plan for the given session.
   *
   * @param ss current Hive session state
   * @throws IOException if emitting the progress information fails
   */
  public void logPlanProgress(SessionState ss) throws IOException;
}
| nishantmonu51/hive | ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java | Java | apache-2.0 | 1,195 |
/*
* Copyright 2002-2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.http.converter;
import java.io.IOException;
import java.io.StringReader;
import java.nio.charset.Charset;
import java.util.List;
import javax.xml.transform.Source;
import javax.xml.transform.stream.StreamSource;
import org.springframework.core.io.AssetResource;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.MockHttpInputMessage;
import org.springframework.http.MockHttpOutputMessage;
import org.springframework.http.converter.support.AllEncompassingFormHttpMessageConverter;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import android.os.Build;
import android.test.AndroidTestCase;
import android.test.suitebuilder.annotation.SmallTest;
/**
* @author Arjen Poutsma
* @author Roy Clarkson
*/
public class FormHttpMessageConverterTests extends AndroidTestCase {

	// javax.xml.transform is only available on Android since Froyo (API 8).
	private static final boolean javaxXmlTransformPresent =
			(Build.VERSION.SDK_INT >= Build.VERSION_CODES.FROYO);

	// Converter under test; re-created for every test in setUp(), cleared in tearDown().
	private FormHttpMessageConverter converter;

	@Override
	public void setUp() throws Exception {
		super.setUp();
		if (javaxXmlTransformPresent) {
			converter = new AllEncompassingFormHttpMessageConverter();
		} else {
			// javax.xml.transform not available on this version of Android
			converter = new FormHttpMessageConverter();
		}
	}

	@Override
	public void tearDown() {
		converter = null;
	}

	@SmallTest
	public void testCanRead() {
		// Reading is supported for URL-encoded forms only, not for multipart.
		assertTrue(converter.canRead(MultiValueMap.class, new MediaType("application", "x-www-form-urlencoded")));
		assertFalse(converter.canRead(MultiValueMap.class, new MediaType("multipart", "form-data")));
	}

	@SmallTest
	public void testCanWrite() {
		// Writing supports both URL-encoded forms and multipart, any charset.
		assertTrue(converter.canWrite(MultiValueMap.class, new MediaType("application", "x-www-form-urlencoded")));
		assertTrue(converter.canWrite(MultiValueMap.class, new MediaType("multipart", "form-data")));
		assertTrue(converter.canWrite(MultiValueMap.class, MediaType.valueOf("multipart/form-data; charset=utf-8")));
		assertTrue(converter.canWrite(MultiValueMap.class, MediaType.ALL));
	}

	@SmallTest
	public void testReadForm() throws Exception {
		// "+" decodes to a space in names and values; "%2B" decodes to a literal "+".
		String body = "name+1=value+1&name+2=value+2%2B1&name+2=value+2%2B2&name+3";
		Charset iso88591 = Charset.forName("ISO-8859-1");
		MockHttpInputMessage inputMessage = new MockHttpInputMessage(body.getBytes(iso88591.displayName()));
		inputMessage.getHeaders().setContentType(new MediaType("application", "x-www-form-urlencoded", iso88591));
		MultiValueMap<String, String> result = converter.read(null, inputMessage);
		assertEquals("Invalid result", 3, result.size());
		assertEquals("Invalid result", "value 1", result.getFirst("name 1"));
		List<String> values = result.get("name 2");
		assertEquals("Invalid result", 2, values.size());
		assertEquals("Invalid result", "value 2+1", values.get(0));
		assertEquals("Invalid result", "value 2+2", values.get(1));
		// A parameter without "=" yields a null value.
		assertNull("Invalid result", result.getFirst("name 3"));
	}

	@SmallTest
	public void testWriteForm() throws IOException {
		MultiValueMap<String, String> body = new LinkedMultiValueMap<String, String>();
		body.set("name 1", "value 1");
		body.add("name 2", "value 2+1");
		body.add("name 2", "value 2+2");
		// A null value must be serialized as a name with no "=value" part.
		body.add("name 3", null);
		MockHttpOutputMessage outputMessage = new MockHttpOutputMessage();
		converter.write(body, MediaType.APPLICATION_FORM_URLENCODED, outputMessage);
		assertEquals("Invalid result", "name+1=value+1&name+2=value+2%2B1&name+2=value+2%2B2&name+3",
				outputMessage.getBodyAsString(Charset.forName("UTF-8")));
		assertEquals("Invalid content-type", new MediaType("application", "x-www-form-urlencoded"),
				outputMessage.getHeaders().getContentType());
		// Content-Length must match the serialized body exactly.
		assertEquals("Invalid content-length", outputMessage.getBodyAsBytes().length,
				outputMessage.getHeaders().getContentLength());
	}

	@SmallTest
	public void testWriteMultipart() throws Exception {
		// Mix of String parts, a null part, a binary Resource part, and an
		// HttpEntity part carrying its own headers.
		MultiValueMap<String, Object> parts = new LinkedMultiValueMap<String, Object>();
		parts.add("name 1", "value 1");
		parts.add("name 2", "value 2+1");
		parts.add("name 2", "value 2+2");
		parts.add("name 3", null);
		Resource logo = new AssetResource(getContext().getAssets(), "logo.jpg");
		parts.add("logo", logo);
		Source xml = new StreamSource(new StringReader("<root><child/></root>"));
		HttpHeaders entityHeaders = new HttpHeaders();
		entityHeaders.setContentType(MediaType.TEXT_XML);
		HttpEntity<Source> entity = new HttpEntity<Source>(xml, entityHeaders);
		parts.add("xml", entity);
		MockHttpOutputMessage outputMessage = new MockHttpOutputMessage();
		converter.write(parts, new MediaType("multipart", "form-data", Charset.forName("UTF-8")), outputMessage);
		// The converter must generate a multipart boundary parameter itself.
		final MediaType contentType = outputMessage.getHeaders().getContentType();
		assertNotNull("No boundary found", contentType.getParameter("boundary"));
		// // see if Commons FileUpload can read what we wrote
		// FileItemFactory fileItemFactory = new DiskFileItemFactory();
		// FileUpload fileUpload = new FileUpload(fileItemFactory);
		// List<FileItem> items = fileUpload.parseRequest(new MockHttpOutputMessageRequestContext(outputMessage));
		// assertEquals(5, items.size());
		// FileItem item = items.get(0);
		// assertTrue(item.isFormField());
		// assertEquals("name 1", item.getFieldName());
		// assertEquals("value 1", item.getString());
		//
		// item = items.get(1);
		// assertTrue(item.isFormField());
		// assertEquals("name 2", item.getFieldName());
		// assertEquals("value 2+1", item.getString());
		//
		// item = items.get(2);
		// assertTrue(item.isFormField());
		// assertEquals("name 2", item.getFieldName());
		// assertEquals("value 2+2", item.getString());
		//
		// item = items.get(3);
		// assertFalse(item.isFormField());
		// assertEquals("logo", item.getFieldName());
		// assertEquals("logo.jpg", item.getName());
		// assertEquals("image/jpeg", item.getContentType());
		// assertEquals(logo.getFile().length(), item.getSize());
		//
		// item = items.get(4);
		// assertEquals("xml", item.getFieldName());
		// assertEquals("text/xml", item.getContentType());
		// verify(outputMessage.getBody(), never()).close();
	}

	// private static class MockHttpOutputMessageRequestContext implements RequestContext {
	//
	// private final MockHttpOutputMessage outputMessage;
	//
	// private MockHttpOutputMessageRequestContext(MockHttpOutputMessage outputMessage) {
	// this.outputMessage = outputMessage;
	// }
	//
	// @Override
	// public String getCharacterEncoding() {
	// MediaType contentType = outputMessage.getHeaders().getContentType();
	// return contentType != null && contentType.getCharSet() != null ? contentType.getCharSet().name() : null;
	// }
	//
	// @Override
	// public String getContentType() {
	// MediaType contentType = outputMessage.getHeaders().getContentType();
	// return contentType != null ? contentType.toString() : null;
	// }
	//
	// @Override
	// public int getContentLength() {
	// return outputMessage.getBodyAsBytes().length;
	// }
	//
	// @Override
	// public InputStream getInputStream() throws IOException {
	// return new ByteArrayInputStream(outputMessage.getBodyAsBytes());
	// }
	// }

}
| spring-projects/spring-android | test/spring-android-rest-template-test/src/main/java/org/springframework/http/converter/FormHttpMessageConverterTests.java | Java | apache-2.0 | 7,868 |
/*
* Copyright 2012 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.core.impl.heuristic.selector.move.decorator;
import java.util.Iterator;
import java.util.List;
import org.optaplanner.core.impl.heuristic.move.Move;
import org.optaplanner.core.impl.heuristic.selector.common.decorator.SelectionFilter;
import org.optaplanner.core.impl.heuristic.selector.common.iterator.UpcomingSelectionIterator;
import org.optaplanner.core.impl.heuristic.selector.move.AbstractMoveSelector;
import org.optaplanner.core.impl.heuristic.selector.move.MoveSelector;
import org.optaplanner.core.impl.phase.scope.AbstractPhaseScope;
import org.optaplanner.core.impl.score.director.ScoreDirector;
/**
 * Decorates a {@link MoveSelector} so that only moves accepted by every
 * configured {@link SelectionFilter} are handed out. Filtering happens lazily
 * ("just in time") while iterating.
 */
public class FilteringMoveSelector extends AbstractMoveSelector {

    protected final MoveSelector childMoveSelector;
    // Every candidate move must be accepted by all of these filters, in order.
    protected final List<SelectionFilter> filterList;
    // True when the child selector is never-ending; iteration must then bail
    // out after a bounded number of consecutively rejected attempts.
    protected final boolean bailOutEnabled;

    // Only valid while a phase is active: set in phaseStarted(), cleared in phaseEnded().
    protected ScoreDirector scoreDirector = null;

    public FilteringMoveSelector(MoveSelector childMoveSelector, List<SelectionFilter> filterList) {
        this.childMoveSelector = childMoveSelector;
        this.filterList = filterList;
        bailOutEnabled = childMoveSelector.isNeverEnding();
        phaseLifecycleSupport.addEventListener(childMoveSelector);
    }

    // ************************************************************************
    // Worker methods
    // ************************************************************************

    @Override
    public void phaseStarted(AbstractPhaseScope phaseScope) {
        super.phaseStarted(phaseScope);
        scoreDirector = phaseScope.getScoreDirector();
    }

    @Override
    public void phaseEnded(AbstractPhaseScope phaseScope) {
        super.phaseEnded(phaseScope);
        scoreDirector = null;
    }

    public boolean isCountable() {
        return childMoveSelector.isCountable();
    }

    public boolean isNeverEnding() {
        return childMoveSelector.isNeverEnding();
    }

    // Note: this is the child's size; filtering may yield fewer moves.
    public long getSize() {
        return childMoveSelector.getSize();
    }

    public Iterator<Move> iterator() {
        return new JustInTimeFilteringMoveIterator(childMoveSelector.iterator());
    }

    // Pulls moves from the child iterator until one passes all filters, the
    // child is exhausted, or (for never-ending children) the bail-out budget
    // is spent.
    private class JustInTimeFilteringMoveIterator extends UpcomingSelectionIterator<Move> {

        private final Iterator<Move> childMoveIterator;

        public JustInTimeFilteringMoveIterator(Iterator<Move> childMoveIterator) {
            this.childMoveIterator = childMoveIterator;
        }

        @Override
        protected Move createUpcomingSelection() {
            Move next;
            long attemptsBeforeBailOut = bailOutEnabled ? determineBailOutSize() : 0L;
            do {
                if (!childMoveIterator.hasNext()) {
                    return noUpcomingSelection();
                }
                if (bailOutEnabled) {
                    // if childMoveIterator is neverEnding and nothing is accepted, bail out of the infinite loop
                    if (attemptsBeforeBailOut <= 0L) {
                        logger.warn("Bailing out of neverEnding selector ({}) to avoid infinite loop.",
                                FilteringMoveSelector.this);
                        return noUpcomingSelection();
                    }
                    attemptsBeforeBailOut--;
                }
                next = childMoveIterator.next();
            } while (!accept(scoreDirector, next));
            return next;
        }

    }

    // Bail-out budget: ten times the child's reported size worth of attempts.
    protected long determineBailOutSize() {
        return childMoveSelector.getSize() * 10L;
    }

    // Returns true only if the move passes every configured filter.
    private boolean accept(ScoreDirector scoreDirector, Move move) {
        for (SelectionFilter filter : filterList) {
            if (!filter.accept(scoreDirector, move)) {
                return false;
            }
        }
        return true;
    }

    @Override
    public String toString() {
        return "Filtering(" + childMoveSelector + ")";
    }

}
| codeaudit/optaplanner | optaplanner-core/src/main/java/org/optaplanner/core/impl/heuristic/selector/move/decorator/FilteringMoveSelector.java | Java | apache-2.0 | 4,464 |
/**
* Copyright 2013, Big Switch Networks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
**/
package net.floodlightcontroller.core.module;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.floodlightcontroller.core.module.FloodlightModuleLoader;
import net.floodlightcontroller.core.module.IFloodlightModule;
import net.floodlightcontroller.core.test.MockFloodlightProvider;
import net.floodlightcontroller.core.test.MockThreadPoolService;
import net.floodlightcontroller.counter.NullCounterStore;
import net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier;
import net.floodlightcontroller.devicemanager.test.MockDeviceManager;
import net.floodlightcontroller.perfmon.NullPktInProcessingTime;
import net.floodlightcontroller.storage.memory.MemoryStorageSource;
import net.floodlightcontroller.topology.TopologyManager;
/**
 * Module loader for tests: merges a user-supplied module list with a set of
 * default (mostly mock) modules, dropping any default whose services are
 * already provided by an explicitly specified module.
 */
public class FloodlightTestModuleLoader extends FloodlightModuleLoader {
    protected static Logger log = LoggerFactory.getLogger(FloodlightTestModuleLoader.class);

    // List of default modules to use unless specified otherwise
    public static final Class<? extends IFloodlightModule> DEFAULT_STORAGE_SOURCE =
            MemoryStorageSource.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_FLOODLIGHT_PRPOVIDER =
            MockFloodlightProvider.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_TOPOLOGY_PROVIDER =
            TopologyManager.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_DEVICE_SERVICE =
            MockDeviceManager.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_COUNTER_STORE =
            NullCounterStore.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_THREADPOOL =
            MockThreadPoolService.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_ENTITY_CLASSIFIER =
            DefaultEntityClassifier.class;
    public static final Class<? extends IFloodlightModule> DEFAULT_PERFMON =
            NullPktInProcessingTime.class;

    protected static final Collection<Class<? extends IFloodlightModule>> DEFAULT_MODULE_LIST;

    static {
        DEFAULT_MODULE_LIST = new ArrayList<Class<? extends IFloodlightModule>>();
        DEFAULT_MODULE_LIST.add(DEFAULT_DEVICE_SERVICE);
        DEFAULT_MODULE_LIST.add(DEFAULT_FLOODLIGHT_PRPOVIDER);
        DEFAULT_MODULE_LIST.add(DEFAULT_STORAGE_SOURCE);
        DEFAULT_MODULE_LIST.add(DEFAULT_TOPOLOGY_PROVIDER);
        DEFAULT_MODULE_LIST.add(DEFAULT_COUNTER_STORE);
        DEFAULT_MODULE_LIST.add(DEFAULT_THREADPOOL);
        DEFAULT_MODULE_LIST.add(DEFAULT_ENTITY_CLASSIFIER);
        DEFAULT_MODULE_LIST.add(DEFAULT_PERFMON);
    }

    protected IFloodlightModuleContext fmc;

    /**
     * Adds default modules to the list of modules to load. This is done
     * in order to avoid the module loader throwing errors about duplicate
     * modules and neither one is specified by the user.
     * @param userModules The list of user specified modules to add to.
     */
    protected void addDefaultModules(Collection<Class<? extends IFloodlightModule>> userModules) {
        Collection<Class<? extends IFloodlightModule>> defaultModules =
                new ArrayList<Class<? extends IFloodlightModule>>(DEFAULT_MODULE_LIST.size());
        defaultModules.addAll(DEFAULT_MODULE_LIST);

        Iterator<Class<? extends IFloodlightModule>> modIter = userModules.iterator();
        while (modIter.hasNext()) {
            Class<? extends IFloodlightModule> userMod = modIter.next();
            Iterator<Class<? extends IFloodlightModule>> dmIter = defaultModules.iterator();
            while (dmIter.hasNext()) {
                Class<? extends IFloodlightModule> dmMod = dmIter.next();
                Collection<Class<? extends IFloodlightService>> userModServs;
                Collection<Class<? extends IFloodlightService>> dmModServs;
                try {
                    dmModServs = dmMod.newInstance().getModuleServices();
                    userModServs = userMod.newInstance().getModuleServices();
                } catch (InstantiationException e) {
                    // Fix: pass the exception so the stack trace is not lost.
                    log.error(e.getMessage(), e);
                    break;
                } catch (IllegalAccessException e) {
                    // Fix: pass the exception so the stack trace is not lost.
                    log.error(e.getMessage(), e);
                    break;
                }
                // If either of these are null continue as they have no services
                if (dmModServs == null || userModServs == null) continue;
                // If the user supplied modules has a service
                // that is in the default module list we remove
                // the default module from the list.
                boolean shouldBreak = false;
                Iterator<Class<? extends IFloodlightService>> userModServsIter
                    = userModServs.iterator();
                while (userModServsIter.hasNext()) {
                    Class<? extends IFloodlightService> userModServIntf = userModServsIter.next();
                    Iterator<Class<? extends IFloodlightService>> dmModsServsIter
                        = dmModServs.iterator();
                    while (dmModsServsIter.hasNext()) {
                        Class<? extends IFloodlightService> dmModServIntf
                            = dmModsServsIter.next();

                        if (dmModServIntf.getCanonicalName().equals(
                                userModServIntf.getCanonicalName())) {
                            // Fix: use the logger declared in this class ("log");
                            // the original referenced "logger" here, inconsistent
                            // with every other log statement in this class.
                            log.debug("Removing default module {} because it was " +
                                    "overriden by an explicitly specified module",
                                    dmModServIntf.getCanonicalName());
                            dmIter.remove();
                            shouldBreak = true;
                            break;
                        }
                    }
                    if (shouldBreak) break;
                }
                if (shouldBreak) break;
            }
        }

        // Append the remaining default modules to the user specified ones.
        // This avoids the module loader throwing duplicate module errors.
        userModules.addAll(defaultModules);
        log.debug("Using module set " + userModules.toString());
    }

    /**
     * Sets up all modules and their dependencies.
     * @param modules The list of modules that the user wants to load.
     * @param mockedServices The list of services that will be mocked. Any
     * module that provides this service will not be loaded.
     */
    public void setupModules(Collection<Class<? extends IFloodlightModule>> modules,
            Collection<IFloodlightService> mockedServices) {
        addDefaultModules(modules);
        Collection<String> modulesAsString = new ArrayList<String>();
        for (Class<? extends IFloodlightModule> m : modules) {
            modulesAsString.add(m.getCanonicalName());
        }

        try {
            fmc = loadModulesFromList(modulesAsString, null, mockedServices);
        } catch (FloodlightModuleException e) {
            // Fix: pass the exception so the stack trace is not lost.
            log.error(e.getMessage(), e);
        }
    }

    /**
     * Gets the inited/started instance of a module from the context.
     * @param ifl The name if the module to get, i.e. "LearningSwitch.class".
     * @return The inited/started instance of the module.
     */
    public IFloodlightModule getModuleByName(Class<? extends IFloodlightModule> ifl) {
        Collection<IFloodlightModule> modules = fmc.getAllModules();
        for (IFloodlightModule m : modules) {
            if (ifl.getCanonicalName().equals(m.getClass().getCanonicalName())) {
                return m;
            }
        }
        return null;
    }

    /**
     * Gets an inited/started instance of a service from the context.
     * @param ifs The name of the service to get, i.e. "ITopologyService.class".
     * @return The inited/started instance of the service from teh context.
     */
    public IFloodlightService getModuleByService(Class<? extends IFloodlightService> ifs) {
        Collection<IFloodlightModule> modules = fmc.getAllModules();
        for (IFloodlightModule m : modules) {
            Collection<Class<? extends IFloodlightService>> mServs = m.getModuleServices();
            if (mServs == null) continue;
            for (Class<? extends IFloodlightService> mServClass : mServs) {
                if (mServClass.getCanonicalName().equals(ifs.getCanonicalName())) {
                    assert(m instanceof IFloodlightService);
                    return (IFloodlightService)m;
                }
            }
        }
        return null;
    }
}
| wallnerryan/FL_HAND | src/test/java/net/floodlightcontroller/core/module/FloodlightTestModuleLoader.java | Java | apache-2.0 | 8,105 |
/*
* Licensed to DuraSpace under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* DuraSpace licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fcrepo.auth.common;
import static org.slf4j.LoggerFactory.getLogger;
import java.io.IOException;
import java.security.Principal;
import java.util.HashSet;
import java.util.Set;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.shiro.SecurityUtils;
import org.apache.shiro.subject.Subject;
import org.slf4j.Logger;
/**
* @author peichman
*/
public class ServletContainerAuthFilter implements Filter {

    private static final Logger log = getLogger(ServletContainerAuthFilter.class);

    /**
     * User role for Fedora's admin users
     */
    public static final String FEDORA_ADMIN_ROLE = "fedoraAdmin";

    /**
     * User role for Fedora's ordinary users
     */
    public static final String FEDORA_USER_ROLE = "fedoraUser";

    // TODO: configurable set of role names: https://jira.duraspace.org/browse/FCREPO-2770
    private static final String[] ROLE_NAMES = { FEDORA_ADMIN_ROLE, FEDORA_USER_ROLE };

    @Override
    public void init(final FilterConfig filterConfig) {
        // this method intentionally left empty
    }

    @Override
    public void doFilter(final ServletRequest request, final ServletResponse response, final FilterChain chain)
            throws IOException, ServletException {
        final HttpServletRequest httpRequest = (HttpServletRequest) request;
        final Principal principal = httpRequest.getUserPrincipal();
        final Subject subject = SecurityUtils.getSubject();
        if (principal == null) {
            // No container-authenticated principal on this request.
            log.debug("Anonymous request");
            // ensure the user is actually logged out
            subject.logout();
        } else {
            log.debug("There is a servlet user: {}", principal.getName());
            // Collect the container-managed roles held by this principal.
            final Set<String> grantedRoles = new HashSet<>();
            for (final String role : ROLE_NAMES) {
                log.debug("Testing role {}", role);
                if (httpRequest.isUserInRole(role)) {
                    log.debug("Servlet user {} has servlet role: {}", principal.getName(), role);
                    grantedRoles.add(role);
                }
            }
            // Hand the principal and its roles to Shiro as an auth token.
            final ContainerAuthToken token = new ContainerAuthToken(principal.getName(), grantedRoles);
            log.debug("Credentials for servletUser = {}", token.getCredentials());
            subject.login(token);
        }
        chain.doFilter(request, response);
    }

    @Override
    public void destroy() {
        // this method intentionally left empty
    }
}
| dbernstein/fcrepo4 | fcrepo-auth-common/src/main/java/org/fcrepo/auth/common/ServletContainerAuthFilter.java | Java | apache-2.0 | 3,489 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.operator.aggregation.minmaxby;
/**
 * Accumulator state pairing a {@code long} first value with a {@code boolean}
 * second value, used by the min/max-by aggregation implementations.
 */
public interface LongBooleanState
        extends TwoNullableValueState
{
    /** Returns the stored {@code long} component. */
    long getFirst();
    /** Stores the {@code long} component. */
    void setFirst(long first);
    /** Returns the stored {@code boolean} component. */
    boolean getSecond();
    /** Stores the {@code boolean} component. */
    void setSecond(boolean second);
}
| miniway/presto | presto-main/src/main/java/io/prestosql/operator/aggregation/minmaxby/LongBooleanState.java | Java | apache-2.0 | 807 |
package com.thoughtworks.go.plugin.access.configrepo.contract;
import com.thoughtworks.go.plugin.access.configrepo.ErrorCollection;
import org.junit.Test;
import java.util.Map;
import static junit.framework.TestCase.assertTrue;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
public class CRMingleTest extends CRBaseTest<CRMingle> {

    private final CRMingle goodMingle;
    private final CRMingle mingleWithoutUrl;
    private final CRMingle mingleWithoutProject;

    public CRMingleTest() {
        goodMingle = new CRMingle("http://mingle.example.com", "my_project");
        mingleWithoutUrl = new CRMingle(null, "my_project");
        mingleWithoutProject = new CRMingle("http://mingle.example.com", null);
    }

    @Override
    public void addGoodExamples(Map<String, CRMingle> examples) {
        examples.put("mingle", goodMingle);
    }

    @Override
    public void addBadExamples(Map<String, CRMingle> examples) {
        examples.put("invalidNoUrl", mingleWithoutUrl);
        examples.put("invalidNoId", mingleWithoutProject);
    }

    /**
     * Verifies that a raw API-style JSON payload maps onto CRMingle and
     * passes validation without errors.
     */
    @Test
    public void shouldDeserializeFromAPILikeObject() {
        final String json = "{\n" +
                "  \"base_url\": \"https://mingle.example.com\",\n" +
                "  \"project_identifier\": \"foobar_widgets\",\n" +
                "  \"mql_grouping_conditions\": \"status > 'In Dev'\"\n" +
                "  }";
        final CRMingle parsed = gson.fromJson(json, CRMingle.class);
        assertThat(parsed.getBaseUrl(), is("https://mingle.example.com"));
        assertThat(parsed.getProjectIdentifier(), is("foobar_widgets"));
        assertThat(parsed.getMqlGroupingConditions(), is("status > 'In Dev'"));
        assertTrue(parsed.getErrors().isEmpty());
    }
}
| kyleolivo/gocd | plugin-infra/go-plugin-access/test/com/thoughtworks/go/plugin/access/configrepo/contract/CRMingleTest.java | Java | apache-2.0 | 1,839 |
package org.motechproject.config.core.filestore;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang.StringUtils;
import org.motechproject.config.core.MotechConfigurationException;
import org.motechproject.config.core.domain.ConfigLocation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import static java.util.Arrays.asList;
/**
* <p>Used to read default platform config location(s) from <code>config-location.properties</code> and also to save in the file in the default location.</p>
* <p><code>config-location.properties</code> file will be loaded according to the behaviour of {@link org.apache.commons.configuration.PropertiesConfiguration}
* as specified <a href="http://commons.apache.org/proper/commons-configuration/userguide/howto_filebased.html#Specifying_the_file">here</a>.</p>
*/
@Component
public class ConfigLocationFileStore {

    public static final String CONFIG_LOCATION_PROPERTY_KEY = "config.location";

    private PropertiesConfiguration propertiesConfiguration;

    @Autowired
    public ConfigLocationFileStore(PropertiesConfiguration propertiesConfiguration) {
        this.propertiesConfiguration = propertiesConfiguration;
    }

    /**
     * Returns all the configuration locations stored by this object.
     *
     * @return the list of configuration locations
     */
    public Iterable<ConfigLocation> getAll() {
        List<ConfigLocation> locations = new ArrayList<>();
        for (String path : currentPaths()) {
            locations.add(new ConfigLocation(path));
        }
        return locations;
    }

    /**
     * Adds the given location to the store.
     *
     * @param location the location to be stored
     */
    public void add(String location) {
        // asList() is fixed-size, so copy into a growable list before appending
        List<String> paths = new ArrayList<>(currentPaths());
        paths.add(location);
        persist(paths);
    }

    // Reads the currently configured locations from the backing properties file.
    private List<String> currentPaths() {
        return asList(propertiesConfiguration.getStringArray(CONFIG_LOCATION_PROPERTY_KEY));
    }

    // Writes the locations back as a single comma-separated property value.
    private void persist(List<String> paths) {
        try {
            propertiesConfiguration.setProperty(CONFIG_LOCATION_PROPERTY_KEY, StringUtils.join(paths, ","));
            propertiesConfiguration.save();
        } catch (ConfigurationException e) {
            String errorMessage = String.format("Could not save %s in this location %s.", propertiesConfiguration.getFileName(), propertiesConfiguration.getBasePath());
            throw new MotechConfigurationException(errorMessage, e);
        }
    }
}
| frankhuster/motech | platform/config-core/src/main/java/org/motechproject/config/core/filestore/ConfigLocationFileStore.java | Java | bsd-3-clause | 2,957 |
//=== File Prolog =============================================================
// This code was developed by NASA, Goddard Space Flight Center, Code 588
// for the Scientist's Expert Assistant (SEA) project.
//
//--- Contents ----------------------------------------------------------------
// IllegalNodePositionException
//
//--- Description -------------------------------------------------------------
// An exception that gets thrown when a time line node is set to an illegal position
//
//--- Notes -------------------------------------------------------------------
//
//--- Development History -----------------------------------------------------
//
// 07/12/99 M. Fishman
//
// Original implementation.
//
//
//--- DISCLAIMER---------------------------------------------------------------
//
// This software is provided "as is" without any warranty of any kind, either
// express, implied, or statutory, including, but not limited to, any
// warranty that the software will conform to specification, any implied
// warranties of merchantability, fitness for a particular purpose, and
// freedom from infringement, and any warranty that the documentation will
// conform to the program, or any warranty that the software will be error
// free.
//
// In no event shall NASA be liable for any damages, including, but not
// limited to direct, indirect, special or consequential damages, arising out
// of, resulting from, or in any way connected with this software, whether or
// not based upon warranty, contract, tort or otherwise, whether or not
// injury was sustained by persons or property or otherwise, and whether or
// not loss was sustained from or arose out of the results of, or use of,
// their software or services provided hereunder.
//
//=== End File Prolog =========================================================
//package gov.nasa.gsfc.util.gui;
package jsky.timeline;
/**
*
* An exception that gets thrown when a time line node is set to an illegal position.
*
* <P>This code was developed by NASA, Goddard Space Flight Center, Code 588
* for the Scientist's Expert Assistant (SEA) project.
*
* @version 06/23/99
* @author M. Fishman / 588
**/
public class IllegalNodePositionException extends Exception {

    // Exception is Serializable; pin the serialized form explicitly.
    private static final long serialVersionUID = 1L;

    /**
     *
     * Constructs an Exception with no specified detail message
     *
     **/
    public IllegalNodePositionException() {
        super();
    }

    /**
     *
     * Constructs an Exception with the specified detail message
     *
     * @param message the detail message
     *
     **/
    public IllegalNodePositionException(String message) {
        super(message);
    }

    /**
     *
     * Constructs an Exception with the specified detail message and cause,
     * preserving the underlying failure for diagnosis instead of forcing
     * callers to discard it.
     *
     * @param message the detail message
     * @param cause the underlying cause of this exception
     *
     **/
    public IllegalNodePositionException(String message, Throwable cause) {
        super(message, cause);
    }
}
| arturog8m/ocs | bundle/jsky.app.ot/src/main/java/jsky/timeline/IllegalNodePositionException.java | Java | bsd-3-clause | 2,651 |
package com.tinkerpop.frames;
import com.tinkerpop.blueprints.GraphQuery;
import com.tinkerpop.blueprints.Predicate;
import com.tinkerpop.blueprints.Query.Compare;
/**
* GraphQuery that allows framing of results.
*
* @author Bryn Cooke
*
*/
public interface FramedGraphQuery extends GraphQuery {
    // The overrides below only narrow the return type to FramedGraphQuery so
    // calls can be chained fluently on the framed query.
    @Override
    public FramedGraphQuery has(String key);
    @Override
    public FramedGraphQuery hasNot(String key);
    @Override
    public FramedGraphQuery has(String key, Object value);
    @Override
    public FramedGraphQuery hasNot(String key, Object value);
    @Override
    public FramedGraphQuery has(String key, Predicate predicate, Object value);
    @Override
    @Deprecated
    public <T extends Comparable<T>> FramedGraphQuery has(String key, T value, Compare compare);
    @Override
    public <T extends Comparable<?>> FramedGraphQuery interval(String key, T startValue, T endValue);
    @Override
    public FramedGraphQuery limit(int limit);
    /**
     * Execute the query and return the matching edges.
     *
     * @param <T> the frame type
     * @param kind the default annotated interface to frame the edge as
     * @return the unfiltered incident edges
     */
    public <T> Iterable<T> edges(Class<T> kind);
    /**
     * Execute the query and return the vertices on the other end of the matching edges.
     *
     * @param <T> the frame type
     * @param kind the default annotated interface to frame the vertex as
     * @return the unfiltered adjacent vertices
     */
    public <T> Iterable<T> vertices(Class<T> kind);
}
| tinkerpop/frames | src/main/java/com/tinkerpop/frames/FramedGraphQuery.java | Java | bsd-3-clause | 1,503 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.metrics.geocentroid;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.GeoPointValuesSourceParser;
import org.elasticsearch.search.aggregations.support.XContentParseContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import java.io.IOException;
import java.util.Map;
/**
* Parser class for {@link org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregator}
*/
public class GeoCentroidParser extends GeoPointValuesSourceParser {
    public GeoCentroidParser() {
        // The two flags configure the base geo-point values-source parser;
        // NOTE(review): their exact meaning is not visible in this file —
        // confirm against the GeoPointValuesSourceParser constructor.
        super(true, false);
    }
    @Override
    protected boolean token(String aggregationName, String currentFieldName, Token token,
                            XContentParseContext context, Map<ParseField, Object> otherOptions) throws IOException {
        // geo_centroid declares no aggregation-specific fields, so no token is
        // ever consumed here; returning false signals the token was not handled.
        return false;
    }
    @Override
    protected GeoCentroidAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
            ValueType targetValueType, Map<ParseField, Object> otherOptions) {
        // Nothing beyond the name is needed: token() never collects any
        // otherOptions, and the builder takes no extra configuration here.
        return new GeoCentroidAggregationBuilder(aggregationName);
    }
}
| strahanjen/strahanjen.github.io | elasticsearch-master/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidParser.java | Java | bsd-3-clause | 2,169 |
package ic2.api.energy.prefab;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.tileentity.TileEntity;
import cpw.mods.fml.common.FMLCommonHandler;
import net.minecraftforge.common.MinecraftForge;
import net.minecraftforge.common.util.ForgeDirection;
import ic2.api.energy.EnergyNet;
import ic2.api.energy.event.EnergyTileLoadEvent;
import ic2.api.energy.event.EnergyTileUnloadEvent;
import ic2.api.energy.tile.IEnergySource;
import ic2.api.info.Info;
import ic2.api.item.ElectricItem;
/**
* BasicSource is a simple adapter to provide an ic2 energy source.
*
* It's designed to be attached to a tile entity as a delegate. Functionally BasicSource acts as a
* one-time configurable output energy buffer, thus providing a common use case for generators.
*
* Sub-classing BasicSource instead of using it as a delegate works as well, but isn't recommended.
* The delegate can be extended with additional functionality through a sub class though.
*
* The constraints set by BasicSource like the strict tank-like energy buffering should provide a
* more easy to use and stable interface than using IEnergySource directly while aiming for
* optimal performance.
*
* Using BasicSource involves the following steps:
* - create a BasicSource instance in your TileEntity, typically in a field
* - forward invalidate, onChunkUnload, readFromNBT, writeToNBT and updateEntity to the BasicSource
* instance.
* If you have other means of determining when the tile entity is fully loaded, notify onLoaded
* that way instead of forwarding updateEntity.
* - call addEnergy whenever appropriate, using getFreeCapacity may determine if e.g. the generator
* should run
* - optionally use getEnergyStored to display the output buffer charge level
* - optionally use setEnergyStored to sync the stored energy to the client (e.g. in the Container)
*
* Example implementation code:
* @code{.java}
* public class SomeTileEntity extends TileEntity {
* // new basic energy source, 1000 EU buffer, tier 1 (32 EU/t, LV)
* private BasicSource ic2EnergySource = new BasicSource(this, 1000, 1);
*
* @Override
* public void invalidate() {
* ic2EnergySource.invalidate(); // notify the energy source
* ...
* super.invalidate(); // this is important for mc!
* }
*
* @Override
* public void onChunkUnload() {
* ic2EnergySource.onChunkUnload(); // notify the energy source
* ...
* }
*
* @Override
* public void readFromNBT(NBTTagCompound tag) {
* super.readFromNBT(tag);
*
* ic2EnergySource.readFromNBT(tag);
* ...
* }
*
* @Override
* public void writeToNBT(NBTTagCompound tag) {
* super.writeToNBT(tag);
*
* ic2EnergySource.writeToNBT(tag);
* ...
* }
*
* @Override
* public void updateEntity() {
* ic2EnergySource.updateEntity(); // notify the energy source
* ...
* ic2EnergySource.addEnergy(5); // add 5 eu to the source's buffer this tick
* ...
* }
*
* ...
* }
* @endcode
*/
public class BasicSource extends TileEntity implements IEnergySource {
	// **********************************
	// *** Methods for use by the mod ***
	// **********************************
	/**
	 * Constructor for a new BasicSource delegate.
	 *
	 * @param parent1 Base TileEntity represented by this energy source.
	 * @param capacity1 Maximum amount of eu to store.
	 * @param tier1 IC2 tier, 1 = LV, 2 = MV, ...
	 */
	public BasicSource(TileEntity parent1, double capacity1, int tier1) {
		double power = EnergyNet.instance.getPowerFromTier(tier1);
		this.parent = parent1;
		// the buffer must be able to hold at least one full packet for the tier
		this.capacity = capacity1 < power ? power : capacity1;
		this.tier = tier1;
		this.power = power;
	}
	// in-world te forwards >>
	/**
	 * Forward for the base TileEntity's updateEntity(), used for creating the energy net link.
	 * Either updateEntity or onLoaded have to be used.
	 */
	@Override
	public void updateEntity() {
		// lazy one-time registration; cheap no-op on every later tick
		if (!addedToEnet) onLoaded();
	}
	/**
	 * Notification that the base TileEntity finished loading, for advanced uses.
	 * Either updateEntity or onLoaded have to be used.
	 */
	public void onLoaded() {
		// only register server-side and only when the IC2 API is present
		if (!addedToEnet &&
				!FMLCommonHandler.instance().getEffectiveSide().isClient() &&
				Info.isIc2Available()) {
			// mirror the parent's location so the energy net sees this delegate
			// at the position of the real tile entity
			worldObj = parent.getWorldObj();
			xCoord = parent.xCoord;
			yCoord = parent.yCoord;
			zCoord = parent.zCoord;
			MinecraftForge.EVENT_BUS.post(new EnergyTileLoadEvent(this));
			addedToEnet = true;
		}
	}
	/**
	 * Forward for the base TileEntity's invalidate(), used for destroying the energy net link.
	 * Both invalidate and onChunkUnload have to be used.
	 */
	@Override
	public void invalidate() {
		super.invalidate();
		onChunkUnload();
	}
	/**
	 * Forward for the base TileEntity's onChunkUnload(), used for destroying the energy net link.
	 * Both invalidate and onChunkUnload have to be used.
	 */
	@Override
	public void onChunkUnload() {
		if (addedToEnet &&
				Info.isIc2Available()) {
			MinecraftForge.EVENT_BUS.post(new EnergyTileUnloadEvent(this));
			addedToEnet = false;
		}
	}
	/**
	 * Forward for the base TileEntity's readFromNBT(), used for loading the state.
	 *
	 * @param tag Compound tag as supplied by TileEntity.readFromNBT()
	 */
	@Override
	public void readFromNBT(NBTTagCompound tag) {
		super.readFromNBT(tag);
		// state is namespaced under its own compound to avoid key clashes
		// with the parent tile entity's data
		NBTTagCompound data = tag.getCompoundTag("IC2BasicSource");
		energyStored = data.getDouble("energy");
	}
	/**
	 * Forward for the base TileEntity's writeToNBT(), used for saving the state.
	 *
	 * @param tag Compound tag as supplied by TileEntity.writeToNBT()
	 */
	@Override
	public void writeToNBT(NBTTagCompound tag) {
		try {
			super.writeToNBT(tag);
		} catch (RuntimeException e) {
			// happens if this is a delegate, ignore
		}
		NBTTagCompound data = new NBTTagCompound();
		data.setDouble("energy", energyStored);
		tag.setTag("IC2BasicSource", data);
	}
	// << in-world te forwards
	// methods for using this adapter >>
	/**
	 * Get the maximum amount of energy this source can hold in its buffer.
	 *
	 * @return Capacity in EU.
	 */
	public double getCapacity() {
		return capacity;
	}
	/**
	 * Set the maximum amount of energy this source can hold in its buffer.
	 *
	 * @param capacity1 Capacity in EU.
	 */
	public void setCapacity(double capacity1) {
		// never allow a buffer smaller than one packet for the current tier
		if (capacity1 < power) capacity1 = power;
		this.capacity = capacity1;
	}
	/**
	 * Get the IC2 energy tier for this source.
	 *
	 * @return IC2 Tier.
	 */
	public int getTier() {
		return tier;
	}
	/**
	 * Set the IC2 energy tier for this source.
	 *
	 * @param tier1 IC2 Tier.
	 */
	public void setTier(int tier1) {
		double power = EnergyNet.instance.getPowerFromTier(tier1);
		// grow the buffer if the new tier's packet no longer fits
		if (capacity < power) capacity = power;
		this.tier = tier1;
		this.power = power;
	}
	/**
	 * Determine the energy stored in the source's output buffer.
	 *
	 * @return amount in EU
	 */
	public double getEnergyStored() {
		return energyStored;
	}
	/**
	 * Set the stored energy to the specified amount.
	 *
	 * This is intended for server -> client synchronization, e.g. to display the stored energy in
	 * a GUI through getEnergyStored().
	 *
	 * @param amount
	 */
	public void setEnergyStored(double amount) {
		energyStored = amount;
	}
	/**
	 * Determine the free capacity in the source's output buffer.
	 *
	 * @return amount in EU
	 */
	public double getFreeCapacity() {
		return capacity - energyStored;
	}
	/**
	 * Add some energy to the output buffer.
	 *
	 * @param amount maximum amount of energy to add
	 * @return amount added/used, NOT remaining
	 */
	public double addEnergy(double amount) {
		// clients never own the authoritative buffer state
		if (FMLCommonHandler.instance().getEffectiveSide().isClient()) return 0;
		// clamp to the remaining free capacity
		if (amount > capacity - energyStored) amount = capacity - energyStored;
		energyStored += amount;
		return amount;
	}
	/**
	 * Charge the supplied ItemStack from this source's energy buffer.
	 *
	 * @param stack ItemStack to charge (null is ignored)
	 * @return true if energy was transferred
	 */
	public boolean charge(ItemStack stack) {
		if (stack == null || !Info.isIc2Available()) return false;
		// offer the whole buffer; the item manager returns what it accepted
		double amount = ElectricItem.manager.charge(stack, energyStored, tier, false, false);
		energyStored -= amount;
		return amount > 0;
	}
	// << methods for using this adapter
	// backwards compatibility (ignore these) >>
	@Deprecated
	public void onUpdateEntity() {
		updateEntity();
	}
	@Deprecated
	public void onInvalidate() {
		invalidate();
	}
	@Deprecated
	public void onOnChunkUnload() {
		onChunkUnload();
	}
	@Deprecated
	public void onReadFromNbt(NBTTagCompound tag) {
		readFromNBT(tag);
	}
	@Deprecated
	public void onWriteToNbt(NBTTagCompound tag) {
		writeToNBT(tag);
	}
	// << backwards compatibility
	// ******************************
	// *** Methods for use by ic2 ***
	// ******************************
	// energy net interface >>
	@Override
	public boolean emitsEnergyTo(TileEntity receiver, ForgeDirection direction) {
		// emit in every direction to every receiver
		return true;
	}
	@Override
	public double getOfferedEnergy() {
		// never offer more than one packet's worth (power) at a time
		return Math.min(energyStored, power);
	}
	@Override
	public void drawEnergy(double amount) {
		// the energy net reports how much was actually transferred
		energyStored -= amount;
	}
	@Override
	public int getSourceTier() {
		return tier;
	}
	// << energy net interface
	// TileEntity this delegate acts on behalf of.
	public final TileEntity parent;
	// Maximum EU the output buffer can hold (always >= power).
	protected double capacity;
	// IC2 tier (1 = LV, 2 = MV, ...).
	protected int tier;
	// EU per packet for the tier; caps getOfferedEnergy().
	protected double power;
	// Current EU in the output buffer.
	protected double energyStored;
	// Whether this source is currently registered with the energy net.
	protected boolean addedToEnet;
}
| ZanyLeonic/Balloons | src/main/java/ic2/api/energy/prefab/BasicSource.java | Java | mit | 9,550 |
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sun.org.apache.xml.internal.security.c14n;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer11_OmitComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer11_WithComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer20010315ExclOmitComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer20010315ExclWithComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer20010315OmitComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer20010315WithComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.CanonicalizerPhysical;
import com.sun.org.apache.xml.internal.security.exceptions.AlgorithmAlreadyRegisteredException;
import com.sun.org.apache.xml.internal.security.utils.JavaUtils;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
/**
*
* @author Christian Geuer-Pollmann
*/
public class Canonicalizer {
/** The output encoding of canonicalized data */
public static final String ENCODING = "UTF8";
/**
* XPath Expression for selecting every node and continuous comments joined
* in only one node
*/
public static final String XPATH_C14N_WITH_COMMENTS_SINGLE_NODE =
"(.//. | .//@* | .//namespace::*)";
/**
* The URL defined in XML-SEC Rec for inclusive c14n <b>without</b> comments.
*/
public static final String ALGO_ID_C14N_OMIT_COMMENTS =
"http://www.w3.org/TR/2001/REC-xml-c14n-20010315";
/**
* The URL defined in XML-SEC Rec for inclusive c14n <b>with</b> comments.
*/
public static final String ALGO_ID_C14N_WITH_COMMENTS =
ALGO_ID_C14N_OMIT_COMMENTS + "#WithComments";
/**
* The URL defined in XML-SEC Rec for exclusive c14n <b>without</b> comments.
*/
public static final String ALGO_ID_C14N_EXCL_OMIT_COMMENTS =
"http://www.w3.org/2001/10/xml-exc-c14n#";
/**
* The URL defined in XML-SEC Rec for exclusive c14n <b>with</b> comments.
*/
public static final String ALGO_ID_C14N_EXCL_WITH_COMMENTS =
ALGO_ID_C14N_EXCL_OMIT_COMMENTS + "WithComments";
/**
* The URI for inclusive c14n 1.1 <b>without</b> comments.
*/
public static final String ALGO_ID_C14N11_OMIT_COMMENTS =
"http://www.w3.org/2006/12/xml-c14n11";
/**
* The URI for inclusive c14n 1.1 <b>with</b> comments.
*/
public static final String ALGO_ID_C14N11_WITH_COMMENTS =
ALGO_ID_C14N11_OMIT_COMMENTS + "#WithComments";
/**
* Non-standard algorithm to serialize the physical representation for XML Encryption
*/
public static final String ALGO_ID_C14N_PHYSICAL =
"http://santuario.apache.org/c14n/physical";
private static Map<String, Class<? extends CanonicalizerSpi>> canonicalizerHash =
new ConcurrentHashMap<String, Class<? extends CanonicalizerSpi>>();
private final CanonicalizerSpi canonicalizerSpi;
/**
* Constructor Canonicalizer
*
* @param algorithmURI
* @throws InvalidCanonicalizerException
*/
private Canonicalizer(String algorithmURI) throws InvalidCanonicalizerException {
try {
Class<? extends CanonicalizerSpi> implementingClass =
canonicalizerHash.get(algorithmURI);
@SuppressWarnings("deprecation")
CanonicalizerSpi tmp = implementingClass.newInstance();
canonicalizerSpi = tmp;
canonicalizerSpi.reset = true;
} catch (Exception e) {
Object exArgs[] = { algorithmURI };
throw new InvalidCanonicalizerException(
"signature.Canonicalizer.UnknownCanonicalizer", exArgs, e
);
}
}
/**
* Method getInstance
*
* @param algorithmURI
* @return a Canonicalizer instance ready for the job
* @throws InvalidCanonicalizerException
*/
public static final Canonicalizer getInstance(String algorithmURI)
throws InvalidCanonicalizerException {
return new Canonicalizer(algorithmURI);
}
/**
* Method register
*
* @param algorithmURI
* @param implementingClass
* @throws AlgorithmAlreadyRegisteredException
* @throws SecurityException if a security manager is installed and the
* caller does not have permission to register the canonicalizer
*/
@SuppressWarnings("unchecked")
public static void register(String algorithmURI, String implementingClass)
throws AlgorithmAlreadyRegisteredException, ClassNotFoundException {
JavaUtils.checkRegisterPermission();
// check whether URI is already registered
Class<? extends CanonicalizerSpi> registeredClass =
canonicalizerHash.get(algorithmURI);
if (registeredClass != null) {
Object exArgs[] = { algorithmURI, registeredClass };
throw new AlgorithmAlreadyRegisteredException("algorithm.alreadyRegistered", exArgs);
}
canonicalizerHash.put(
algorithmURI, (Class<? extends CanonicalizerSpi>)Class.forName(implementingClass)
);
}
/**
* Method register
*
* @param algorithmURI
* @param implementingClass
* @throws AlgorithmAlreadyRegisteredException
* @throws SecurityException if a security manager is installed and the
* caller does not have permission to register the canonicalizer
*/
public static void register(String algorithmURI, Class<? extends CanonicalizerSpi> implementingClass)
throws AlgorithmAlreadyRegisteredException, ClassNotFoundException {
JavaUtils.checkRegisterPermission();
// check whether URI is already registered
Class<? extends CanonicalizerSpi> registeredClass = canonicalizerHash.get(algorithmURI);
if (registeredClass != null) {
Object exArgs[] = { algorithmURI, registeredClass };
throw new AlgorithmAlreadyRegisteredException("algorithm.alreadyRegistered", exArgs);
}
canonicalizerHash.put(algorithmURI, implementingClass);
}
/**
* This method registers the default algorithms.
*/
public static void registerDefaultAlgorithms() {
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N_OMIT_COMMENTS,
Canonicalizer20010315OmitComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N_WITH_COMMENTS,
Canonicalizer20010315WithComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N_EXCL_OMIT_COMMENTS,
Canonicalizer20010315ExclOmitComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N_EXCL_WITH_COMMENTS,
Canonicalizer20010315ExclWithComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N11_OMIT_COMMENTS,
Canonicalizer11_OmitComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N11_WITH_COMMENTS,
Canonicalizer11_WithComments.class
);
canonicalizerHash.put(
Canonicalizer.ALGO_ID_C14N_PHYSICAL,
CanonicalizerPhysical.class
);
}
/**
* Method getURI
*
* @return the URI defined for this c14n instance.
*/
public final String getURI() {
return canonicalizerSpi.engineGetURI();
}
/**
* Method getIncludeComments
*
* @return true if the c14n respect the comments.
*/
public boolean getIncludeComments() {
return canonicalizerSpi.engineGetIncludeComments();
}
/**
* This method tries to canonicalize the given bytes. It's possible to even
* canonicalize non-wellformed sequences if they are well-formed after being
* wrapped with a <CODE>>a<...>/a<</CODE>.
*
* @param inputBytes
* @return the result of the canonicalization.
* @throws CanonicalizationException
* @throws java.io.IOException
* @throws javax.xml.parsers.ParserConfigurationException
* @throws org.xml.sax.SAXException
*/
public byte[] canonicalize(byte[] inputBytes)
throws javax.xml.parsers.ParserConfigurationException,
java.io.IOException, org.xml.sax.SAXException, CanonicalizationException {
InputStream bais = new ByteArrayInputStream(inputBytes);
InputSource in = new InputSource(bais);
DocumentBuilderFactory dfactory = DocumentBuilderFactory.newInstance();
dfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, Boolean.TRUE);
dfactory.setNamespaceAware(true);
// needs to validate for ID attribute normalization
dfactory.setValidating(true);
DocumentBuilder db = dfactory.newDocumentBuilder();
/*
* for some of the test vectors from the specification,
* there has to be a validating parser for ID attributes, default
* attribute values, NMTOKENS, etc.
* Unfortunately, the test vectors do use different DTDs or
* even no DTD. So Xerces 1.3.1 fires many warnings about using
* ErrorHandlers.
*
* Text from the spec:
*
* The input octet stream MUST contain a well-formed XML document,
* but the input need not be validated. However, the attribute
* value normalization and entity reference resolution MUST be
* performed in accordance with the behaviors of a validating
* XML processor. As well, nodes for default attributes (declared
* in the ATTLIST with an AttValue but not specified) are created
* in each element. Thus, the declarations in the document type
* declaration are used to help create the canonical form, even
* though the document type declaration is not retained in the
* canonical form.
*/
db.setErrorHandler(new com.sun.org.apache.xml.internal.security.utils.IgnoreAllErrorHandler());
Document document = db.parse(in);
return this.canonicalizeSubtree(document);
}
/**
* Canonicalizes the subtree rooted by <CODE>node</CODE>.
*
* @param node The node to canonicalize
* @return the result of the c14n.
*
* @throws CanonicalizationException
*/
public byte[] canonicalizeSubtree(Node node) throws CanonicalizationException {
return canonicalizerSpi.engineCanonicalizeSubTree(node);
}
/**
* Canonicalizes the subtree rooted by <CODE>node</CODE>.
*
* @param node
* @param inclusiveNamespaces
* @return the result of the c14n.
* @throws CanonicalizationException
*/
public byte[] canonicalizeSubtree(Node node, String inclusiveNamespaces)
throws CanonicalizationException {
return canonicalizerSpi.engineCanonicalizeSubTree(node, inclusiveNamespaces);
}
/**
 * Canonicalizes an XPath node set. The <CODE>xpathNodeSet</CODE> is treated
 * as a list of XPath nodes, not as a list of subtrees.
 *
 * @param xpathNodeSet the XPath nodes to canonicalize
 * @return the result of the c14n.
 * @throws CanonicalizationException if canonicalization fails
 */
public byte[] canonicalizeXPathNodeSet(NodeList xpathNodeSet)
    throws CanonicalizationException {
    return canonicalizerSpi.engineCanonicalizeXPathNodeSet(xpathNodeSet);
}
/**
 * Canonicalizes an XPath node set. The <CODE>xpathNodeSet</CODE> is treated
 * as a list of XPath nodes, not as a list of subtrees.
 *
 * @param xpathNodeSet the XPath nodes to canonicalize
 * @param inclusiveNamespaces namespace prefix list forwarded to the SPI
 *        (presumably for exclusive c14n — confirm against the selected algorithm)
 * @return the result of the c14n.
 * @throws CanonicalizationException if canonicalization fails
 */
public byte[] canonicalizeXPathNodeSet(
    NodeList xpathNodeSet, String inclusiveNamespaces
) throws CanonicalizationException {
    return
        canonicalizerSpi.engineCanonicalizeXPathNodeSet(xpathNodeSet, inclusiveNamespaces);
}
/**
 * Canonicalizes an XPath node set.
 *
 * @param xpathNodeSet the XPath nodes to canonicalize
 * @return the result of the c14n.
 * @throws CanonicalizationException if canonicalization fails
 */
public byte[] canonicalizeXPathNodeSet(Set<Node> xpathNodeSet)
    throws CanonicalizationException {
    return canonicalizerSpi.engineCanonicalizeXPathNodeSet(xpathNodeSet);
}
/**
 * Canonicalizes an XPath node set.
 *
 * @param xpathNodeSet the XPath nodes to canonicalize
 * @param inclusiveNamespaces namespace prefix list forwarded to the SPI
 *        (presumably for exclusive c14n — confirm against the selected algorithm)
 * @return the result of the c14n.
 * @throws CanonicalizationException if canonicalization fails
 */
public byte[] canonicalizeXPathNodeSet(
    Set<Node> xpathNodeSet, String inclusiveNamespaces
) throws CanonicalizationException {
    return
        canonicalizerSpi.engineCanonicalizeXPathNodeSet(xpathNodeSet, inclusiveNamespaces);
}
/**
 * Sets the writer that receives the canonicalization output. If none is set,
 * a ByteArrayOutputStream is used.
 *
 * @param os the output stream the canonical form is written to
 */
public void setWriter(OutputStream os) {
    canonicalizerSpi.setWriter(os);
}
/**
 * Returns the name of the implementing {@link CanonicalizerSpi} class.
 *
 * @return the fully qualified name of the implementing {@link CanonicalizerSpi} class
 */
public String getImplementingCanonicalizerClass() {
    return canonicalizerSpi.getClass().getName();
}
/**
 * Set the canonicalizer behaviour to not reset.
 * Clears the SPI's {@code reset} flag; the exact effect depends on how the SPI uses it.
 */
public void notReset() {
    canonicalizerSpi.reset = false;
}
}
| FauxFaux/jdk9-jdk | src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/c14n/Canonicalizer.java | Java | gpl-2.0 | 14,839 |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2012, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.jboss.as.domain.management._private;
import org.jboss.as.controller.descriptions.DeprecatedResourceDescriptionResolver;
import org.jboss.as.controller.descriptions.ResourceDescriptionResolver;
import org.jboss.as.controller.descriptions.StandardResourceDescriptionResolver;
/**
 * Factory methods producing {@link ResourceDescriptionResolver} instances backed by the
 * domain management {@code LocalDescriptions} resource bundle.
 *
 * @author <a href="[email protected]">Kabir Khan</a>
 */
public final class DomainManagementResolver {

    /** Base name of the resource bundle containing the descriptions. */
    public static final String RESOURCE_NAME = DomainManagementResolver.class.getPackage().getName() + ".LocalDescriptions";

    /**
     * Creates a resolver that does not use unprefixed child types.
     *
     * @param keyPrefix description key segments, joined with '.'
     * @return the resolver
     */
    public static ResourceDescriptionResolver getResolver(final String... keyPrefix) {
        return getResolver(false, keyPrefix);
    }

    /**
     * Creates a resolver for a deprecated resource.
     *
     * @param deprecatedParent key of the parent resource this one is deprecated under
     * @param keyPrefix description key segments, joined with '.'
     * @return the resolver
     */
    public static ResourceDescriptionResolver getDeprecatedResolver(final String deprecatedParent, final String... keyPrefix) {
        final String prefix = getPrefix(keyPrefix);
        return new DeprecatedResourceDescriptionResolver(deprecatedParent, prefix, RESOURCE_NAME, DomainManagementResolver.class.getClassLoader(), true, false);
    }

    /**
     * Creates a resolver.
     *
     * @param useUnprefixedChildTypes whether child types are resolved without the prefix
     * @param keyPrefix description key segments, joined with '.'
     * @return the resolver
     */
    public static ResourceDescriptionResolver getResolver(boolean useUnprefixedChildTypes, final String... keyPrefix) {
        final String prefix = getPrefix(keyPrefix);
        return new StandardResourceDescriptionResolver(prefix, RESOURCE_NAME, DomainManagementResolver.class.getClassLoader(), true, useUnprefixedChildTypes);
    }

    /**
     * Joins the given segments with '.' separators. Note: a separator is only added once
     * the accumulated prefix is non-empty (so leading empty segments add no dot).
     */
    private static String getPrefix(final String... keyPrefix) {
        final StringBuilder joined = new StringBuilder();
        for (final String segment : keyPrefix) {
            if (joined.length() == 0) {
                joined.append(segment);
            } else {
                joined.append('.').append(segment);
            }
        }
        return joined.toString();
    }
}
| jamezp/wildfly-core | domain-management/src/main/java/org/jboss/as/domain/management/_private/DomainManagementResolver.java | Java | lgpl-2.1 | 2,730 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010, Red Hat Inc., and individual contributors as indicated
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.jboss.as.test.integration.weld.servlet;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
 * A CDI-incompatible {@link javax.servlet.AsyncListener} may be bundled with a deployment.
 * This is OK as long as the application does not pass the listener class to
 * {@link javax.servlet.AsyncContext#createListener(Class)}. This test verifies that the deployment
 * of an application with the listener does not fail.
 *
 * @author Jozef Hartinger
 *
 * @see <a href="https://issues.jboss.org/browse/WFLY-2165">WFLY-2165</a>
 *
 */
@RunWith(Arquillian.class)
public class NonCdiCompliantAsyncListenerTestCase {

    /** Builds a WAR containing a marker beans.xml and the non-CDI-compliant listener. */
    @Deployment
    public static Archive<?> getDeployment() {
        return ShrinkWrap.create(WebArchive.class)
            .addAsWebInfResource(EmptyAsset.INSTANCE, "beans.xml")
            .addClass(NonCdiCompliantAsyncListener.class);
    }

    @Test
    public void test() {
        // noop, just test that the app deploys
    }
}
| xasx/wildfly | testsuite/integration/basic/src/test/java/org/jboss/as/test/integration/weld/servlet/NonCdiCompliantAsyncListenerTestCase.java | Java | lgpl-2.1 | 2,255 |
/*
* Copyright 2004-2006 Stefan Reuter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.asteriskjava.manager.event;
import java.net.InetAddress;
/**
* An RtcpSentEvent is triggered when Asterisk sends an RTCP message.<p>
* Available since Asterisk 1.6<p>
* It is implemented in <code>main/rtp.c</code>
*
* @author srt
* @version $Id$
* @since 1.0.0
*/
public class RtcpSentEvent extends AbstractRtcpEvent
{
    private static final long serialVersionUID = 1L;

    // Destination address of the RTCP message.
    private InetAddress toAddress;
    // Destination port of the RTCP message.
    private Integer toPort;
    // Our synchronization source identifier.
    private Long ourSsrc;
    // NTP timestamp reported in the message — presumably from the RTCP sender report; confirm against rtp.c.
    private Double sentNtp;
    // RTP timestamp reported in the message — presumably from the RTCP sender report; confirm against rtp.c.
    private Long sentRtp;
    // Number of packets sent.
    private Long sentPackets;
    // Number of octets (bytes) sent.
    private Long sentOctets;
    // Cumulative loss counter as reported by Asterisk.
    private Long cumulativeLoss;
    // "Their last SR" value as reported by Asterisk.
    private Long theirLastSr;

    /**
     * @param source the source of this event, passed through to the superclass
     */
    public RtcpSentEvent(Object source)
    {
        super(source);
    }

    /**
     * Returns the IP address the RTCP message has been sent to.
     *
     * @return the IP address the RTCP message has been sent to.
     */
    public InetAddress getToAddress()
    {
        return toAddress;
    }

    /**
     * Returns the port the RTCP message has been sent to.
     *
     * @return the port the RTCP message has been sent to.
     */
    public Integer getToPort()
    {
        return toPort;
    }

    /**
     * Sets destination address and port from the combined "address:port" property.
     *
     * @param to destination in "address:port" form
     */
    public void setTo(String to)
    {
        // Format is "%s:%d"
        this.toAddress = stringToAddress(to);
        this.toPort = stringToPort(to);
    }

    /**
     * Returns our synchronization source identifier that uniquely identifies the source of a stream.
     * @return our synchronization source identifier.
     */
    public Long getOurSsrc()
    {
        return ourSsrc;
    }

    /**
     * @param ourSsrc our synchronization source identifier.
     */
    public void setOurSsrc(Long ourSsrc)
    {
        this.ourSsrc = ourSsrc;
    }

    /**
     * @return the NTP timestamp reported in the message.
     */
    public Double getSentNtp()
    {
        return sentNtp;
    }

    public void setSentNtp(Double sentNtp)
    {
        this.sentNtp = sentNtp;
    }

    /**
     * @return the RTP timestamp reported in the message.
     */
    public Long getSentRtp()
    {
        return sentRtp;
    }

    public void setSentRtp(Long sentRtp)
    {
        this.sentRtp = sentRtp;
    }

    /**
     * Returns the number of packets sent.
     *
     * @return the number of packets sent.
     */
    public Long getSentPackets()
    {
        return sentPackets;
    }

    public void setSentPackets(Long sentPackets)
    {
        this.sentPackets = sentPackets;
    }

    /**
     * Returns the number of octets (bytes) sent.
     *
     * @return the number of octets (bytes) sent.
     */
    public Long getSentOctets()
    {
        return sentOctets;
    }

    public void setSentOctets(Long sentOctets)
    {
        this.sentOctets = sentOctets;
    }

    /**
     * @return the cumulative loss counter as reported by Asterisk.
     */
    public Long getCumulativeLoss()
    {
        return cumulativeLoss;
    }

    public void setCumulativeLoss(Long cumulativeLoss)
    {
        this.cumulativeLoss = cumulativeLoss;
    }

    /**
     * @return the "their last SR" value as reported by Asterisk.
     */
    public Long getTheirLastSr()
    {
        return theirLastSr;
    }

    public void setTheirLastSr(Long theirLastSr)
    {
        this.theirLastSr = theirLastSr;
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.api.dto.search;
import com.wordnik.swagger.annotations.ApiModelProperty;
import java.util.List;
import javax.xml.bind.annotation.XmlType;
/**
 * The components that match a search performed on this NiFi.
 */
@XmlType(name = "componentSearchResult")
public class ComponentSearchResultDTO {

    // Identifier of the matching component.
    private String id;
    // Identifier of the group containing the matching component.
    private String groupId;
    // Display name of the matching component.
    private String name;
    // Descriptions of what matched within the component.
    private List<String> matches;

    /**
     * @return id of the component that matched
     */
    @ApiModelProperty(
        value = "The id of the component that matched the search."
    )
    public String getId() {
        return id;
    }

    /**
     * @param id id of the component that matched
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * @return group id of the component that matched
     */
    @ApiModelProperty(
        value = "The group id of the component that matched the search."
    )
    public String getGroupId() {
        return groupId;
    }

    /**
     * @param groupId group id of the component that matched
     */
    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }

    /**
     * @return name of the component that matched
     */
    @ApiModelProperty(
        value = "The name of the component that matched the search."
    )
    public String getName() {
        return name;
    }

    /**
     * @param name name of the component that matched
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * @return What matched the search string for this component
     */
    @ApiModelProperty(
        value = "What matched the search from the component."
    )
    public List<String> getMatches() {
        return matches;
    }

    /**
     * @param matches what matched the search string for this component
     */
    public void setMatches(List<String> matches) {
        this.matches = matches;
    }
}
| WilliamNouet/nifi | nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/search/ComponentSearchResultDTO.java | Java | apache-2.0 | 2,475 |
package io.oasp.module.security.common.api.accesscontrol;
import java.security.Principal;
import java.util.Collection;
/**
 * This is the interface for a provider that allows to {@link #getAccessControlIds(Principal) get the permission
 * groups} for a {@link Principal}.
 *
 * @param <P> is the generic type of the {@link Principal} representing the user or subject.
 *
 * @author hohwille
 */
public interface PrincipalAccessControlProvider<P extends Principal> {

    /**
     * @param principal is the {@link Principal} (user).
     * @return the {@link Collection} of {@link AccessControl#getId() IDs} with the groups of the given {@link Principal}.
     */
    Collection<String> getAccessControlIds(P principal);
}
| sesslinger/oasp4j | oasp4j-modules/oasp4j-security/src/main/java/io/oasp/module/security/common/api/accesscontrol/PrincipalAccessControlProvider.java | Java | apache-2.0 | 714 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
/**
 * Exception thrown from non-transactional cache in case when update succeeded only partially.
 * One can get list of keys for which update failed with method {@link #failedKeys()}.
 */
public class CachePartialUpdateCheckedException extends IgniteCheckedException {
    /** */
    private static final long serialVersionUID = 0L;

    /** Failed keys. Guarded by {@code this} (all accessors are synchronized). */
    private final Collection<Object> failedKeys = new ArrayList<>();

    /** Greatest topology version recorded for a failed update, or {@code null} if none. */
    private transient AffinityTopologyVersion topVer;

    /**
     * @param msg Error message.
     */
    public CachePartialUpdateCheckedException(String msg) {
        super(msg);
    }

    /**
     * @param msg Error message.
     * @param cause Cause of the failure.
     */
    public CachePartialUpdateCheckedException(String msg, Throwable cause) {
        super(msg, cause);
    }

    /**
     * Gets collection of failed keys.
     *
     * @param <K> Key type expected by the caller.
     * @return Copy of the collection of failed keys (insertion-ordered).
     */
    @SuppressWarnings("unchecked") // Keys are stored as Object; the cast reflects the caller's expected key type.
    public synchronized <K> Collection<K> failedKeys() {
        return new LinkedHashSet<>((Collection<K>)failedKeys);
    }

    /**
     * Records failed keys together with the error and the topology version of the failed update.
     * The greatest topology version seen so far is retained.
     *
     * @param failedKeys Failed keys.
     * @param err Error (attached as a suppressed exception).
     * @param topVer Topology version for failed update, or {@code null} if not applicable.
     */
    public synchronized void add(Collection<?> failedKeys, Throwable err, AffinityTopologyVersion topVer) {
        if (topVer != null) {
            AffinityTopologyVersion topVer0 = this.topVer;

            if (topVer0 == null || topVer.compareTo(topVer0) > 0)
                this.topVer = topVer;
        }

        this.failedKeys.addAll(failedKeys);

        addSuppressed(err);
    }

    /**
     * @return Greatest topology version recorded for a failed update, or {@code null}.
     */
    public synchronized AffinityTopologyVersion topologyVersion() {
        return topVer;
    }

    /**
     * Records failed keys together with the error, without a topology version.
     *
     * @param failedKeys Failed keys.
     * @param err Error (attached as a suppressed exception).
     */
    public synchronized void add(Collection<?> failedKeys, Throwable err) {
        add(failedKeys, err, null);
    }

    /** {@inheritDoc} */
    @Override public String getMessage() {
        return super.getMessage() + ": " + failedKeys;
    }
}
| samaitra/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachePartialUpdateCheckedException.java | Java | apache-2.0 | 3,097 |
/*
* Copyright (c) 2005-2009, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.wso2.carbon.registry.ws.client.internal;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.osgi.framework.ServiceRegistration;
import org.osgi.service.component.ComponentContext;
import org.wso2.carbon.registry.core.Registry;
import org.wso2.carbon.registry.core.exceptions.RegistryException;
import org.wso2.carbon.registry.core.service.RegistryProvider;
import org.wso2.carbon.registry.core.utils.RegistryUtils;
import org.wso2.carbon.registry.ws.client.registry.WSRegistryServiceClient;
import org.wso2.carbon.utils.ConfigurationContextService;
import java.util.Hashtable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* Service Component for Client to WS API.
*
* @scr.component name="registry.ws.client.component" immediate="true"
* @scr.reference name="config.context.service"
* interface="org.wso2.carbon.utils.ConfigurationContextService"
* cardinality="1..1" policy="dynamic" bind="setConfigurationContextService"
* unbind="unsetConfigurationContextService"
*/
public class WSClientServiceComponent {

    private WSClientDataHolder dataHolder = WSClientDataHolder.getInstance();

    private static Log log = LogFactory.getLog(WSClientServiceComponent.class);

    private ServiceRegistration serviceRegistration = null;

    /**
     * Registers a WS-backed {@link RegistryProvider} as an OSGi service when this
     * component is activated.
     */
    protected void activate(ComponentContext context) {
        RegistryProvider provider = new RegistryProvider() {

            // Cached client; rebuilt lazily after the scheduled task clears it.
            private WSRegistryServiceClient client = null;

            private ScheduledExecutorService scheduledExecutor;

            public Registry getRegistry(String registryURL, String username, String password)
                    throws RegistryException {
                if (client != null) {
                    return client;
                }
                if (registryURL != null && username != null && password != null) {
                    if (registryURL.endsWith("/")) {
                        registryURL = registryURL.substring(0, registryURL.length() - 1);
                    }
                    String serverURL = registryURL.substring(0, registryURL.indexOf("/registry"))
                            + "/services/";
                    RegistryUtils.setTrustStoreSystemProperties();
                    client = new WSRegistryServiceClient(serverURL, username, password,
                            dataHolder.getConfigurationContext());
                    // Invalidate the cached client every 100 seconds so a fresh one is created.
                    startExecutor(100000);
                    return client;
                }
                throw new RegistryException("Unable to create an instance of a WS Registry");
            }

            // Starts (once) the task that periodically drops the cached client.
            private void startExecutor(int timePeriod) {
                if (scheduledExecutor == null) {
                    scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
                    scheduledExecutor.scheduleWithFixedDelay(new Runnable() {
                        @Override
                        public void run() {
                            client = null;
                        }
                    }, timePeriod, timePeriod, TimeUnit.MILLISECONDS);
                }
            }
        };
        Hashtable<String, String> ht = new Hashtable<String, String>();
        ht.put("type", "ws");
        serviceRegistration =
                context.getBundleContext().registerService(RegistryProvider.class.getName(),
                        provider, ht);
        if (log.isDebugEnabled()) {
            // Fixed: was log.info(...) inside an isDebugEnabled() guard — log at debug level.
            log.debug("Registry WS Client bundle is activated");
        }
    }

    /**
     * Unregisters the {@link RegistryProvider} service on deactivation.
     */
    protected void deactivate(ComponentContext context) {
        if (serviceRegistration != null) {
            serviceRegistration.unregister();
            serviceRegistration = null;
        }
        if (log.isDebugEnabled()) {
            // Fixed: was log.info(...) inside an isDebugEnabled() guard — log at debug level.
            log.debug("Registry WS Client bundle is deactivated");
        }
    }

    protected void setConfigurationContextService(ConfigurationContextService contextService) {
        dataHolder.setConfigurationContext(contextService.getServerConfigContext());
    }

    protected void unsetConfigurationContextService(ConfigurationContextService contextService) {
        dataHolder.setConfigurationContext(null);
    }
}
| sameerak/carbon-registry | components/registry/org.wso2.carbon.registry.ws.client/src/main/java/org/wso2/carbon/registry/ws/client/internal/WSClientServiceComponent.java | Java | apache-2.0 | 4,946 |
package org.cloudfoundry.samples.music.web;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping("/errors")
public class ErrorController {

    private static final Logger logger = LoggerFactory.getLogger(ErrorController.class);

    /**
     * Terminates the JVM with exit code 1 — deliberately crashes the application
     * (e.g. to observe the platform's restart behaviour).
     */
    @RequestMapping(value = "/kill")
    public void kill() {
        logger.info("Forcing application exit");
        System.exit(1);
    }

    /**
     * Always throws a {@link NullPointerException} to exercise error handling.
     */
    @RequestMapping(value = "/throw")
    public void throwException() {
        logger.info("Forcing an exception to be thrown");
        throw new NullPointerException("Forcing an exception to be thrown");
    }
}
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search.aggregations.bucket.terms;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<TermsAggregationBuilder> {
/** Name of this aggregation in the search DSL. */
public static final String NAME = "terms";

/** Registry key under which {@link TermsAggregatorSupplier} implementations are registered. */
public static final ValuesSourceRegistry.RegistryKey<TermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
    NAME,
    TermsAggregatorSupplier.class
);

// Field names accepted in the XContent representation of a terms aggregation.
public static final ParseField EXECUTION_HINT_FIELD_NAME = new ParseField("execution_hint");
public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size");
public static final ParseField MIN_DOC_COUNT_FIELD_NAME = new ParseField("min_doc_count");
public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");

// Default thresholds; argument meanings follow the BucketCountThresholds constructor
// (presumably minDocCount=1, shardMinDocCount=0, requiredSize=10, shardSize=-1 — confirm there;
// the size() javadoc below confirms the default size of 10).
static final TermsAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new TermsAggregator.BucketCountThresholds(
    1,
    0,
    10,
    -1
);

public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
public static final ParseField ORDER_FIELD = new ParseField("order");

/** Parser that builds a {@link TermsAggregationBuilder} from its XContent form. */
public static final ObjectParser<TermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(NAME, TermsAggregationBuilder::new);
static {
    // Common values-source fields (field/script/missing, ...); see declareFields for the flags' meaning.
    ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);

    // Scalar options mapped straight onto the corresponding builder setters.
    PARSER.declareBoolean(TermsAggregationBuilder::showTermDocCountError, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR);

    PARSER.declareInt(TermsAggregationBuilder::shardSize, SHARD_SIZE_FIELD_NAME);

    PARSER.declareLong(TermsAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD_NAME);

    PARSER.declareLong(TermsAggregationBuilder::shardMinDocCount, SHARD_MIN_DOC_COUNT_FIELD_NAME);

    PARSER.declareInt(TermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME);

    PARSER.declareString(TermsAggregationBuilder::executionHint, EXECUTION_HINT_FIELD_NAME);

    // Collect mode arrives as a string and is parsed into the enum.
    PARSER.declareField(
        TermsAggregationBuilder::collectMode,
        (p, c) -> SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE),
        SubAggCollectionMode.KEY,
        ObjectParser.ValueType.STRING
    );

    // "order" accepts an array of order objects.
    PARSER.declareObjectArray(
        TermsAggregationBuilder::order,
        (p, c) -> InternalOrder.Parser.parseOrderParam(p),
        TermsAggregationBuilder.ORDER_FIELD
    );

    // include/exclude may each be declared independently, so new values are merged
    // with whatever the builder already holds.
    PARSER.declareField(
        (b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),
        IncludeExclude::parseInclude,
        IncludeExclude.INCLUDE_FIELD,
        ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING
    );

    PARSER.declareField(
        (b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),
        IncludeExclude::parseExclude,
        IncludeExclude.EXCLUDE_FIELD,
        ObjectParser.ValueType.STRING_ARRAY
    );
}
/** Registers the terms aggregator implementations with the values-source registry. */
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
    TermsAggregatorFactory.registerAggregators(builder);
}
// Bucket ordering; see order(BucketOrder) for how a key-ascending tie-breaker is added.
private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order
// Optional filter restricting which terms may become buckets; null if not set.
private IncludeExclude includeExclude = null;
// Expert execution hint forwarded to the aggregator factory; null if not set.
private String executionHint = null;
// Sub-aggregation collection mode; null if not explicitly set.
private SubAggCollectionMode collectMode = null;
// Mutable copy of the defaults so the setters can adjust individual thresholds.
private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(
    DEFAULT_BUCKET_COUNT_THRESHOLDS
);
// Whether per-term doc count error bounds are reported; defaults to false.
private boolean showTermDocCountError = false;
/**
 * Creates a terms aggregation builder with default settings.
 *
 * @param name the name of this aggregation
 */
public TermsAggregationBuilder(String name) {
    super(name);
}
/**
 * Copy constructor used by {@link #shallowCopy}; thresholds are defensively copied,
 * everything else is shared with the clone source.
 */
protected TermsAggregationBuilder(
    TermsAggregationBuilder clone,
    AggregatorFactories.Builder factoriesBuilder,
    Map<String, Object> metadata
) {
    super(clone, factoriesBuilder, metadata);
    this.order = clone.order;
    this.executionHint = clone.executionHint;
    this.includeExclude = clone.includeExclude;
    this.collectMode = clone.collectMode;
    this.bucketCountThresholds = new BucketCountThresholds(clone.bucketCountThresholds);
    this.showTermDocCountError = clone.showTermDocCountError;
}
/** Terms default to the keyword values-source type when none can be inferred. */
@Override
protected ValuesSourceType defaultValueSourceType() {
    return CoreValuesSourceType.KEYWORD;
}
/** Creates a shallow copy of this builder via the copy constructor. */
@Override
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
    return new TermsAggregationBuilder(this, factoriesBuilder, metadata);
}
/**
 * Read from a stream. Field order must stay in sync with {@link #innerWriteTo}.
 */
public TermsAggregationBuilder(StreamInput in) throws IOException {
    super(in);
    bucketCountThresholds = new BucketCountThresholds(in);
    collectMode = in.readOptionalWriteable(SubAggCollectionMode::readFromStream);
    executionHint = in.readOptionalString();
    includeExclude = in.readOptionalWriteable(IncludeExclude::new);
    order = InternalOrder.Streams.readOrder(in);
    showTermDocCountError = in.readBoolean();
}
/** The target value type is always serialized for terms aggregations. */
@Override
protected boolean serializeTargetValueType(Version version) {
    return true;
}
/** Writes this builder's state; field order must stay in sync with the stream constructor. */
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
    bucketCountThresholds.writeTo(out);
    out.writeOptionalWriteable(collectMode);
    out.writeOptionalString(executionHint);
    out.writeOptionalWriteable(includeExclude);
    order.writeTo(out);
    out.writeBoolean(showTermDocCountError);
}
/**
 * Sets the size - indicating how many term buckets should be returned
 * (defaults to 10). The value must be strictly positive.
 */
public TermsAggregationBuilder size(int size) {
    if (size > 0) {
        bucketCountThresholds.setRequiredSize(size);
        return this;
    }
    throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
}
/**
 * Returns the number of term buckets currently configured
 */
public int size() {
    return bucketCountThresholds.getRequiredSize();
}
/**
 * Sets the shard_size - indicating the number of term buckets each shard
 * will return to the coordinating node (the node that coordinates the
 * search execution). The higher the shard size is, the more accurate the
 * results are. The value must be strictly positive.
 */
public TermsAggregationBuilder shardSize(int shardSize) {
    if (shardSize > 0) {
        bucketCountThresholds.setShardSize(shardSize);
        return this;
    }
    throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
}
/**
 * Returns the number of term buckets per shard that are currently configured
 */
public int shardSize() {
    return bucketCountThresholds.getShardSize();
}
/**
 * Set the minimum document count terms should have in order to appear in
 * the response. The value must be non-negative.
 */
public TermsAggregationBuilder minDocCount(long minDocCount) {
    if (minDocCount >= 0) {
        bucketCountThresholds.setMinDocCount(minDocCount);
        return this;
    }
    throw new IllegalArgumentException(
        "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
    );
}
/**
 * Returns the minimum document count required per term
 */
public long minDocCount() {
    return bucketCountThresholds.getMinDocCount();
}
/**
 * Set the minimum document count terms should have on the shard in order to
 * appear in the response. The value must be non-negative.
 */
public TermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
    if (shardMinDocCount >= 0) {
        bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
        return this;
    }
    throw new IllegalArgumentException(
        "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
    );
}
/**
 * Returns the minimum document count required per term, per shard
 */
public long shardMinDocCount() {
    return bucketCountThresholds.getShardMinDocCount();
}
/** Set a new order on this builder and return the builder so that calls
 * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */
public TermsAggregationBuilder order(BucketOrder order) {
    if (order == null) {
        throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
    }
    if (order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) {
        this.order = order; // if order already contains a tie-breaker we are good to go
    } else { // otherwise add a tie-breaker by using a compound order
        this.order = BucketOrder.compound(order);
    }
    return this;
}
/**
 * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic
 * ordering.
 *
 * @param orders the orders to apply; must be non-null and non-empty
 * @return this builder, for chaining
 */
public TermsAggregationBuilder order(List<BucketOrder> orders) {
    if (orders == null) {
        throw new IllegalArgumentException("[orders] must not be null: [" + name + "]");
    }
    if (orders.isEmpty()) {
        // Previously an empty list failed later with an unhelpful IndexOutOfBoundsException
        // from orders.get(0); fail fast with a clear message instead.
        throw new IllegalArgumentException("[orders] must not be empty: [" + name + "]");
    }
    // if the list only contains one order use that to avoid inconsistent xcontent
    order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0));
    return this;
}
/**
 * Gets the order in which the buckets will be returned.
 */
public BucketOrder order() {
    return order;
}
/**
 * Expert: sets an execution hint to the aggregation. No validation is performed here;
 * interpretation is left to the aggregator factory.
 */
public TermsAggregationBuilder executionHint(String executionHint) {
    this.executionHint = executionHint;
    return this;
}
/**
 * Expert: gets an execution hint to the aggregation, or {@code null} if none was set.
 */
public String executionHint() {
    return executionHint;
}
/**
 * Expert: set the collection mode. Must not be {@code null}.
 */
public TermsAggregationBuilder collectMode(SubAggCollectionMode collectMode) {
    if (collectMode == null) {
        throw new IllegalArgumentException("[collectMode] must not be null: [" + name + "]");
    }
    this.collectMode = collectMode;
    return this;
}
    /**
     * Expert: gets the sub-aggregation collection mode, or {@code null}
     * if none was set.
     */
    public SubAggCollectionMode collectMode() {
        return collectMode;
    }
    /**
     * Sets terms to include and exclude from the aggregation results.
     * May be {@code null} to clear any filtering.
     *
     * @param includeExclude the include/exclude filter, or {@code null}
     * @return this builder, to allow call chaining
     */
    public TermsAggregationBuilder includeExclude(IncludeExclude includeExclude) {
        this.includeExclude = includeExclude;
        return this;
    }
    /**
     * Gets the terms to include and exclude from the aggregation results,
     * or {@code null} if no filtering was configured.
     */
    public IncludeExclude includeExclude() {
        return includeExclude;
    }
    /**
     * Gets whether the doc count error will be returned for individual terms.
     */
    public boolean showTermDocCountError() {
        return showTermDocCountError;
    }
    /**
     * Sets whether the doc count error will be returned for individual terms.
     *
     * @param showTermDocCountError {@code true} to include the per-term doc count error
     * @return this builder, to allow call chaining
     */
    public TermsAggregationBuilder showTermDocCountError(boolean showTermDocCountError) {
        this.showTermDocCountError = showTermDocCountError;
        return this;
    }
    // A terms aggregation can produce many buckets (one per distinct term).
    @Override
    public BucketCardinality bucketCardinality() {
        return BucketCardinality.MANY;
    }
    /**
     * Builds the {@code TermsAggregatorFactory}, first resolving the aggregator
     * supplier registered for this values-source configuration and then passing
     * along every setting accumulated on this builder.
     */
    @Override
    protected ValuesSourceAggregatorFactory innerBuild(
        AggregationContext context,
        ValuesSourceConfig config,
        AggregatorFactory parent,
        AggregatorFactories.Builder subFactoriesBuilder
    ) throws IOException {
        TermsAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
        return new TermsAggregatorFactory(
            name,
            config,
            order,
            includeExclude,
            executionHint,
            collectMode,
            bucketCountThresholds,
            showTermDocCountError,
            context,
            parent,
            subFactoriesBuilder,
            metadata,
            aggregatorSupplier
        );
    }
    /**
     * Serializes the terms-specific settings of this builder. Optional settings
     * (execution hint, collect mode, include/exclude) are only emitted when set.
     */
    @Override
    protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
        bucketCountThresholds.toXContent(builder, params);
        builder.field(SHOW_TERM_DOC_COUNT_ERROR.getPreferredName(), showTermDocCountError);
        if (executionHint != null) {
            builder.field(TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint);
        }
        // the order is written as a field name followed by the order's own xcontent
        builder.field(ORDER_FIELD.getPreferredName());
        order.toXContent(builder, params);
        if (collectMode != null) {
            builder.field(SubAggCollectionMode.KEY.getPreferredName(), collectMode.parseField().getPreferredName());
        }
        if (includeExclude != null) {
            includeExclude.toXContent(builder, params);
        }
        return builder;
    }
    // Hash combines the superclass state with every terms-specific setting;
    // must stay in sync with equals() below.
    @Override
    public int hashCode() {
        return Objects.hash(
            super.hashCode(),
            bucketCountThresholds,
            collectMode,
            executionHint,
            includeExclude,
            order,
            showTermDocCountError
        );
    }
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null || getClass() != obj.getClass()) return false;
if (super.equals(obj) == false) return false;
TermsAggregationBuilder other = (TermsAggregationBuilder) obj;
return Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
&& Objects.equals(collectMode, other.collectMode)
&& Objects.equals(executionHint, other.executionHint)
&& Objects.equals(includeExclude, other.includeExclude)
&& Objects.equals(order, other.order)
&& Objects.equals(showTermDocCountError, other.showTermDocCountError);
}
    // The aggregation type name used in requests and responses.
    @Override
    public String getType() {
        return NAME;
    }
    // Key under which this aggregation's supplier is looked up in the
    // values-source registry (see innerBuild).
    @Override
    protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
        return REGISTRY_KEY;
    }
}
| jmluy/elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java | Java | apache-2.0 | 16,332 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.extractor;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.SearchHit;
import java.util.Set;
/**
 * An extracted field whose values are read from doc values on a search hit.
 * It is not a multi-field, can fall back to reading from {@code _source}
 * (see {@link #newFromSource()}), and by default has no doc value format.
 */
public class DocValueField extends AbstractField {
    public DocValueField(String name, Set<String> types) {
        super(name, types);
    }
    @Override
    public Method getMethod() {
        return Method.DOC_VALUE;
    }
    @Override
    public Object[] value(SearchHit hit) {
        // delegates to the inherited lookup of the field's values on the hit
        return getFieldValue(hit);
    }
    @Override
    public boolean supportsFromSource() {
        return true;
    }
    @Override
    public ExtractedField newFromSource() {
        // equivalent field backed by _source instead of doc values
        return new SourceField(getSearchField(), getTypes());
    }
    @Override
    public boolean isMultiField() {
        return false;
    }
    @Nullable
    public String getDocValueFormat() {
        // no custom format; subclasses may override to request one
        return null;
    }
}
| gingerwizard/elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/DocValueField.java | Java | apache-2.0 | 1,102 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jackrabbit.core.security.authorization;
import org.apache.jackrabbit.test.JUnitTest;
/**
* <code>PrivilegeBitsTest</code>...
*/
public class PrivilegeBitsTest extends JUnitTest {
    // bits instance built from the lowest-order bit (value 1); treated as the read privilege throughout
    private static final PrivilegeBits READ_PRIVILEGE_BITS = PrivilegeBits.getInstance(1);
    // sample long values covering small, mid-range and boundary bit patterns
    private static final long[] LONGS = new long[] {1, 2, 13, 199, 512, Long.MAX_VALUE/2, Long.MAX_VALUE-1, Long.MAX_VALUE};
    // No extra fixture needed: each test builds its own PrivilegeBits instances.
    @Override
    protected void setUp() throws Exception {
        super.setUp();
    }
//-----------------------------------------------------------< internal >---
    /**
     * longValue() must expose the raw bit pattern while it still fits into a
     * single long, and return 0 once nextBits() has moved past that range.
     * Modifiable copies must report the same long value as their source.
     */
    public void testLongValue() {
        // empty
        assertEquals(PrivilegeRegistry.NO_PRIVILEGE, PrivilegeBits.EMPTY.longValue());
        // long based privilege bits
        for (long l : LONGS) {
            assertEquals(l, PrivilegeBits.getInstance(l).longValue());
        }
        // long based privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        long l = pb.longValue();
        while (l < Long.MAX_VALUE/2) {
            l = l << 1;
            pb = pb.nextBits();
            assertEquals(l, pb.longValue());
        }
        // other privilege bits: long value not available.
        for (int i = 0; i < 10; i++) {
            pb = pb.nextBits();
            assertEquals(0, pb.longValue());
        }
        // modifiable privilege bits
        pb = READ_PRIVILEGE_BITS;
        for (int i = 0; i < 100; i++) {
            PrivilegeBits modifiable = PrivilegeBits.getInstance(pb);
            assertEquals(pb.longValue(), modifiable.longValue());
            pb = pb.nextBits();
        }
    }
    /**
     * nextBits() must double the long value while representable, produce
     * stable (equal, but distinct) successors beyond that, be a no-op on
     * EMPTY, and be unsupported on modifiable instances.
     */
    public void testNextBits() {
        // empty
        assertSame(PrivilegeBits.EMPTY, PrivilegeBits.EMPTY.nextBits());
        // long based privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        long l = pb.longValue();
        while (l < Long.MAX_VALUE/2) {
            l = l << 1;
            pb = pb.nextBits();
            assertEquals(l, pb.longValue());
        }
        // other privilege bits: long value not available.
        for (int i = 0; i < 10; i++) {
            PrivilegeBits nxt = pb.nextBits();
            assertEquals(nxt, pb.nextBits());
            assertFalse(pb.equals(nxt));
            pb = nxt;
        }
        // modifiable privilege bits
        pb = READ_PRIVILEGE_BITS;
        for (int i = 0; i < 100; i++) {
            PrivilegeBits modifiable = PrivilegeBits.getInstance(pb);
            try {
                modifiable.nextBits();
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            pb = pb.nextBits();
        }
    }
    /**
     * unmodifiable() must return the same instance for already-unmodifiable
     * bits, and an equal but distinct instance for modifiable ones.
     */
    public void testUnmodifiable() {
        assertSame(PrivilegeBits.EMPTY, PrivilegeBits.EMPTY.unmodifiable());
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance(pb);
        for (int i = 0; i < 100; i++) {
            PrivilegeBits nxt = pb.nextBits();
            assertSame(nxt, nxt.unmodifiable());
            assertEquals(nxt, nxt.unmodifiable());
            mod.add(nxt);
            assertNotSame(mod, mod.unmodifiable());
            assertEquals(mod, mod.unmodifiable());
            pb = nxt;
        }
    }
//---------------------------------------------------------------< test >---
    /**
     * includesRead() must be true exactly when the read bit is contained:
     * false for EMPTY and for every higher bit alone, true whenever the
     * read bits have been added.
     */
    public void testIncludesRead() {
        // empty
        assertFalse(PrivilegeBits.EMPTY.includesRead());
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        assertTrue(pb.includesRead());
        assertTrue(PrivilegeBits.getInstance(pb).includesRead());
        PrivilegeBits mod = PrivilegeBits.getInstance();
        for (int i = 0; i < 100; i++) {
            mod.add(pb);
            assertTrue(mod.includesRead());
            pb = pb.nextBits();
            assertFalse(pb.toString(), pb.includesRead());
            assertFalse(PrivilegeBits.getInstance(pb).includesRead());
            PrivilegeBits modifiable = PrivilegeBits.getInstance(pb);
            modifiable.add(READ_PRIVILEGE_BITS);
            assertTrue(modifiable.includesRead());
        }
    }
    /**
     * includes() must be reflexive, always true for EMPTY as the argument,
     * true after the bits were added, and false between disjoint bits.
     */
    public void testIncludes() {
        // empty
        assertTrue(PrivilegeBits.EMPTY.includes(PrivilegeBits.EMPTY));
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance();
        for (int i = 0; i < 100; i++) {
            assertFalse(PrivilegeBits.EMPTY.includes(pb));
            assertTrue(pb.includes(PrivilegeBits.EMPTY));
            mod.add(pb);
            assertTrue(mod.includes(pb));
            PrivilegeBits nxt = pb.nextBits();
            assertTrue(nxt.includes(nxt));
            assertTrue(nxt.includes(PrivilegeBits.getInstance(nxt)));
            assertFalse(pb + " should not include " + nxt, pb.includes(nxt));
            assertFalse(nxt + " should not include " + pb, nxt.includes(pb));
            assertFalse(mod.includes(nxt));
            assertFalse(nxt.includes(mod));
            pb = nxt;
        }
    }
    /**
     * isEmpty() must be true only for EMPTY; any real bits are non-empty, and
     * diffing an instance with itself must leave it empty again.
     */
    public void testIsEmpty() {
        // empty
        assertTrue(PrivilegeBits.EMPTY.isEmpty());
        // any other bits should not be empty
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance(pb);
        for (int i = 0; i < 100; i++) {
            assertFalse(pb.isEmpty());
            assertFalse(PrivilegeBits.getInstance(pb).isEmpty());
            pb = pb.nextBits();
            mod.add(pb);
            assertFalse(mod.isEmpty());
            PrivilegeBits tmp = PrivilegeBits.getInstance(pb);
            tmp.diff(pb);
            assertTrue(tmp.toString(), tmp.isEmpty());
        }
    }
//----------------------------------------------------------------< mod >---
    /**
     * add() must be unsupported on unmodifiable instances (including EMPTY),
     * and on modifiable instances must OR the bits together — verified via
     * longValue() where available and via includes()/includesRead() otherwise.
     */
    public void testAdd() {
        // empty
        try {
            PrivilegeBits.EMPTY.add(PrivilegeBits.EMPTY);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance(pb);
        for (int i = 0; i < 100; i++) {
            // unmodifiable pb rejects add() regardless of the argument
            try {
                pb.add(PrivilegeBits.EMPTY);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            try {
                pb.add(mod);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            PrivilegeBits nxt = pb.nextBits();
            try {
                pb.add(nxt);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            long before = mod.longValue();
            long nxtLong = nxt.longValue();
            mod.add(nxt);
            // longValue() is 0 once bits exceed the long range; only check the OR then
            if (nxt.longValue() != 0) {
                assertEquals(before | nxtLong, mod.longValue());
            }
            assertTrue(mod.includes(nxt));
            PrivilegeBits tmp = PrivilegeBits.getInstance(pb);
            assertTrue(tmp.includes(pb));
            assertFalse(tmp.includes(nxt));
            if (READ_PRIVILEGE_BITS.equals(pb)) {
                assertTrue(tmp.includesRead());
            } else {
                assertFalse(tmp.includesRead());
            }
            tmp.add(nxt);
            assertTrue(tmp.includes(pb) && tmp.includes(nxt));
            if (READ_PRIVILEGE_BITS.equals(pb)) {
                assertTrue(tmp.includesRead());
                assertTrue(tmp.includes(READ_PRIVILEGE_BITS));
            } else {
                assertFalse(tmp.toString(), tmp.includesRead());
                assertFalse(tmp.includes(READ_PRIVILEGE_BITS));
            }
            tmp.add(READ_PRIVILEGE_BITS);
            assertTrue(tmp.includesRead());
            assertTrue(tmp.includes(READ_PRIVILEGE_BITS));
            pb = nxt;
        }
    }
    /**
     * diff() must be unsupported on unmodifiable instances, remove only the
     * intersecting bits on modifiable ones, leave the instance empty when
     * diffed with itself, and never modify the argument.
     */
    public void testDiff() {
        // empty
        try {
            PrivilegeBits.EMPTY.diff(PrivilegeBits.EMPTY);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance(pb);
        for (int i = 0; i < 100; i++) {
            PrivilegeBits nxt = pb.nextBits();
            try {
                pb.diff(nxt);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            try {
                pb.diff(mod);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            PrivilegeBits before = PrivilegeBits.getInstance(mod);
            mod.diff(nxt);
            assertEquals(before, mod);
            mod.add(nxt);
            assertFalse(before.equals(mod));
            mod.diff(nxt);
            assertEquals(before, mod);
            mod.add(nxt);
            // diff with same pb must leave original bits empty
            PrivilegeBits tmp = PrivilegeBits.getInstance(pb);
            tmp.add(nxt);
            tmp.add(READ_PRIVILEGE_BITS);
            tmp.diff(tmp);
            assertEquals(nxt.toString(), PrivilegeBits.EMPTY, tmp);
            tmp = PrivilegeBits.getInstance(pb);
            tmp.add(nxt);
            tmp.add(READ_PRIVILEGE_BITS);
            tmp.diff(PrivilegeBits.getInstance(tmp));
            assertEquals(nxt.toString(), PrivilegeBits.EMPTY, tmp);
            // diff without intersection -> leave privilege unmodified.
            tmp = PrivilegeBits.getInstance(pb);
            tmp.diff(nxt);
            assertEquals(PrivilegeBits.getInstance(pb), tmp);
            // diff with intersection -> privilege must be modified accordingly.
            tmp = PrivilegeBits.getInstance(nxt);
            tmp.add(READ_PRIVILEGE_BITS);
            assertTrue(tmp.includes(READ_PRIVILEGE_BITS));
            assertTrue(tmp.includes(nxt));
            tmp.diff(nxt);
            assertEquals(READ_PRIVILEGE_BITS, tmp);
            assertTrue(tmp.includes(READ_PRIVILEGE_BITS));
            assertFalse(tmp.includes(nxt));
            tmp = PrivilegeBits.getInstance(pb);
            tmp.add(READ_PRIVILEGE_BITS);
            PrivilegeBits tmp2 = PrivilegeBits.getInstance(pb);
            tmp2.add(nxt);
            PrivilegeBits tmp3 = PrivilegeBits.getInstance(tmp2);
            assertEquals(tmp2, tmp3);
            tmp.diff(tmp2);
            if (READ_PRIVILEGE_BITS.equals(pb)) {
                assertEquals(PrivilegeBits.EMPTY, tmp);
            } else {
                assertEquals(READ_PRIVILEGE_BITS, tmp);
            }
            // but pb passed to the diff call must not be modified.
            assertEquals(tmp3, tmp2);
            pb = nxt;
        }
    }
    /**
     * addDifference(a, b) must be unsupported on unmodifiable instances, and
     * on modifiable ones must add only the bits of {@code a} that are not in
     * {@code b}, leaving both arguments untouched.
     */
    public void testAddDifference() {
        // empty
        try {
            PrivilegeBits.EMPTY.addDifference(PrivilegeBits.EMPTY, PrivilegeBits.EMPTY);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        // other privilege bits
        PrivilegeBits pb = READ_PRIVILEGE_BITS;
        PrivilegeBits mod = PrivilegeBits.getInstance(pb);
        for (int i = 0; i < 100; i++) {
            PrivilegeBits nxt = pb.nextBits();
            try {
                pb.addDifference(nxt, mod);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            try {
                pb.addDifference(nxt, READ_PRIVILEGE_BITS);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            PrivilegeBits tmp = PrivilegeBits.getInstance(mod);
            tmp.addDifference(nxt, READ_PRIVILEGE_BITS);
            mod.add(nxt);
            assertEquals(mod, tmp); // since there is diff(nxt, read) which results in nxt
            if (!pb.equals(READ_PRIVILEGE_BITS)) {
                tmp = PrivilegeBits.getInstance(nxt);
                PrivilegeBits mod2 = PrivilegeBits.getInstance(mod);
                tmp.addDifference(mod2, READ_PRIVILEGE_BITS);
                assertFalse(nxt.equals(tmp)); // tmp should be modified by addDifference call.
                assertEquals(mod2, mod); // mod2 should not be modified here
                assertTrue(tmp.includes(pb));
                assertFalse(tmp.includes(READ_PRIVILEGE_BITS));
                assertFalse(tmp.includes(mod));
            }
            tmp = PrivilegeBits.getInstance(nxt);
            PrivilegeBits mod2 = PrivilegeBits.getInstance(mod);
            tmp.addDifference(READ_PRIVILEGE_BITS, mod2);
            assertEquals(nxt, tmp); // tmp not modified by addDifference call.
            assertEquals(mod2, mod); // mod2 should not be modified here
            assertFalse(tmp.includes(pb));
            assertFalse(tmp.includes(READ_PRIVILEGE_BITS));
            assertFalse(tmp.includes(mod));
            tmp = PrivilegeBits.getInstance(nxt);
            tmp.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
            assertEquals(nxt, tmp); // tmp not modified by addDifference call.
            assertFalse(tmp.includes(READ_PRIVILEGE_BITS));
            tmp = PrivilegeBits.getInstance(mod);
            tmp.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
            assertEquals(mod, tmp); // tmp not modified by addDifference call.
            assertTrue(tmp.includes(READ_PRIVILEGE_BITS));
            pb = nxt;
        }
    }
//------------------------------------------------------------< general >---
    /**
     * getInstance() factory contract: the no-arg and copy variants return
     * modifiable instances; the long-based variants return cached/unmodifiable
     * ones (NO_PRIVILEGE maps to the EMPTY singleton); negative values are
     * rejected with IllegalArgumentException.
     */
    public void testGetInstance() {
        // no-arg factory: modifiable and equal (but not identical) to EMPTY
        PrivilegeBits pb = PrivilegeBits.getInstance();
        assertEquals(PrivilegeBits.EMPTY, pb);
        assertNotSame(PrivilegeBits.EMPTY, pb);
        assertNotSame(pb, pb.unmodifiable());
        pb.add(READ_PRIVILEGE_BITS);
        pb.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
        pb.diff(READ_PRIVILEGE_BITS);
        // copy of EMPTY: also modifiable
        pb = PrivilegeBits.getInstance(PrivilegeBits.EMPTY);
        assertEquals(PrivilegeBits.EMPTY, pb);
        assertNotSame(PrivilegeBits.EMPTY, pb);
        assertNotSame(pb, pb.unmodifiable());
        pb.add(READ_PRIVILEGE_BITS);
        pb.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
        pb.diff(READ_PRIVILEGE_BITS);
        // copy of a real bits instance: modifiable
        pb = PrivilegeBits.getInstance(READ_PRIVILEGE_BITS);
        assertEquals(READ_PRIVILEGE_BITS, pb);
        assertNotSame(READ_PRIVILEGE_BITS, pb);
        assertNotSame(pb, pb.unmodifiable());
        pb.add(READ_PRIVILEGE_BITS);
        pb.addDifference(READ_PRIVILEGE_BITS, PrivilegeBits.EMPTY);
        pb.diff(READ_PRIVILEGE_BITS);
        // NO_PRIVILEGE maps to the unmodifiable EMPTY singleton
        pb = PrivilegeBits.getInstance(PrivilegeRegistry.NO_PRIVILEGE);
        assertEquals(pb, PrivilegeBits.EMPTY);
        assertSame(pb, PrivilegeBits.EMPTY);
        assertSame(pb, pb.unmodifiable());
        try {
            pb.add(READ_PRIVILEGE_BITS);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        try {
            pb.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        try {
            pb.diff(READ_PRIVILEGE_BITS);
            fail("UnsupportedOperation expected");
        } catch (UnsupportedOperationException e) {
            // success
        }
        try {
            PrivilegeBits.getInstance(-1);
            fail();
        } catch (IllegalArgumentException e) {
            // success.
        }
        PrivilegeBits bts = PrivilegeBits.getInstance(PrivilegeRegistry.NO_PRIVILEGE);
        assertSame(PrivilegeBits.EMPTY, bts);
        // long-based instances are unmodifiable
        for (long l : LONGS) {
            pb = PrivilegeBits.getInstance(l);
            assertEquals(pb, PrivilegeBits.getInstance(l));
            assertSame(pb, pb.unmodifiable());
            assertEquals(pb, PrivilegeBits.getInstance(pb));
            assertEquals(PrivilegeBits.getInstance(pb), pb);
            assertNotSame(pb, PrivilegeBits.getInstance(pb));
            try {
                pb.add(READ_PRIVILEGE_BITS);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            try {
                pb.addDifference(READ_PRIVILEGE_BITS, READ_PRIVILEGE_BITS);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
            try {
                pb.diff(READ_PRIVILEGE_BITS);
                fail("UnsupportedOperation expected");
            } catch (UnsupportedOperationException e) {
                // success
            }
        }
    }
} | Kast0rTr0y/jackrabbit | jackrabbit-core/src/test/java/org/apache/jackrabbit/core/security/authorization/PrivilegeBitsTest.java | Java | apache-2.0 | 18,326 |
package org.zstack.core.cloudbus;
import org.springframework.beans.factory.annotation.Autowired;
import org.zstack.core.Platform;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.core.db.SimpleQuery;
import org.zstack.header.exception.CloudRuntimeException;
import org.zstack.header.managementnode.ManagementNodeChangeListener;
import org.zstack.header.managementnode.ManagementNodeVO;
import org.zstack.header.managementnode.ManagementNodeVO_;
import org.zstack.utils.hash.ApacheHash;
import org.zstack.utils.hash.ConsistentHash;
import java.util.ArrayList;
import java.util.List;
/**
* Created with IntelliJ IDEA.
* User: frank
* Time: 12:36 AM
* To change this template use File | Settings | File Templates.
*/
/**
 * Maps a resource UUID to the management node that owns it by placing all
 * known management node IDs on a consistent-hash ring. Node membership is
 * kept in sync via the {@link ManagementNodeChangeListener} callbacks.
 */
public class ResourceDestinationMakerImpl implements ManagementNodeChangeListener, ResourceDestinationMaker {
    private ConsistentHash<String> nodeHash = new ConsistentHash<String>(new ApacheHash(), 500, new ArrayList<String>()) ;
    @Autowired
    private DatabaseFacade dbf;
    @Override
    public void nodeJoin(String nodeId) {
        // a peer management node came online; place it on the ring
        nodeHash.add(nodeId);
    }
    @Override
    public void nodeLeft(String nodeId) {
        // a peer left gracefully; drop it from the ring
        nodeHash.remove(nodeId);
    }
    @Override
    public void iAmDead(String nodeId) {
        // this node is going down; remove it from the ring as well
        nodeHash.remove(nodeId);
    }
    @Override
    public void iJoin(String nodeId) {
        // on startup, seed the ring with every management node currently in the database
        SimpleQuery<ManagementNodeVO> query = dbf.createQuery(ManagementNodeVO.class);
        query.select(ManagementNodeVO_.uuid);
        List<String> uuids = query.listValue();
        for (String uuid : uuids) {
            nodeHash.add(uuid);
        }
    }
    @Override
    public String makeDestination(String resourceUuid) {
        String owner = nodeHash.get(resourceUuid);
        if (owner != null) {
            return owner;
        }
        throw new CloudRuntimeException("Cannot find any available management node to send message");
    }
    @Override
    public boolean isManagedByUs(String resourceUuid) {
        // the resource is ours iff the ring maps it to this node's server ID
        return makeDestination(resourceUuid).equals(Platform.getManagementServerId());
    }
    public boolean isNodeInCircle(String nodeId) {
        return nodeHash.hasNode(nodeId);
    }
}
| SoftwareKing/zstack | core/src/main/java/org/zstack/core/cloudbus/ResourceDestinationMakerImpl.java | Java | apache-2.0 | 2,285 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.hadoop.rdf.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * A reducer which takes text keys with a sequence of longs representing counts
 * as the values and sums the counts together into pairs consisting of a text
 * key and a total count value.
 */
public class TextCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    /**
     * Sums all counts received for {@code key} and emits a single
     * (key, total) pair.
     *
     * @param key the text key
     * @param values the partial counts for this key
     * @param context used to emit the aggregated pair
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException,
            InterruptedException {
        long count = 0;
        // enhanced for-loop instead of a manual Iterator: same traversal, clearer intent
        for (LongWritable value : values) {
            count += value.get();
        }
        context.write(key, new LongWritable(count));
    }
}
| CesarPantoja/jena | jena-elephas/jena-elephas-mapreduce/src/main/java/org/apache/jena/hadoop/rdf/mapreduce/TextCountReducer.java | Java | apache-2.0 | 1,727 |
package org.zstack.test.virtualrouter;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.CoreGlobalProperty;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.core.thread.SyncTask;
import org.zstack.core.thread.SyncThread;
import org.zstack.core.thread.ThreadFacade;
import org.zstack.core.workflow.SimpleFlowChain;
import org.zstack.core.workflow.WorkFlowStatistic;
import org.zstack.header.configuration.InstanceOfferingInventory;
import org.zstack.header.identity.SessionInventory;
import org.zstack.header.image.ImageInventory;
import org.zstack.header.image.ImageVO;
import org.zstack.header.network.l2.L2NetworkInventory;
import org.zstack.header.network.l3.L3NetworkInventory;
import org.zstack.header.rest.HttpCallStatistic;
import org.zstack.header.rest.RESTFacade;
import org.zstack.header.vm.VmInstanceInventory;
import org.zstack.network.service.portforwarding.PortForwardingProtocolType;
import org.zstack.network.service.portforwarding.PortForwardingRuleInventory;
import org.zstack.network.service.vip.VipInventory;
import org.zstack.simulator.SimulatorGlobalProperty;
import org.zstack.simulator.kvm.KVMSimulatorConfig;
import org.zstack.simulator.virtualrouter.VirtualRouterSimulatorConfig;
import org.zstack.test.*;
import org.zstack.test.deployer.Deployer;
import org.zstack.utils.DebugUtils;
import org.zstack.utils.DebugUtils.TimeStatistic;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.zstack.utils.StringDSL.ln;
/**
 * Stress test: creates {@code total} VMs concurrently, then creates and
 * attaches {@code ruleNum} port forwarding rules across them, and prints
 * timing statistics for VIP acquisition, rule creation and rule attachment,
 * followed by workflow and HTTP-call profiling output.
 */
public class TestVirtualRouterPortForwarding1000Vm {
    Deployer deployer;
    Api api;
    ComponentLoader loader;
    CloudBus bus;
    DatabaseFacade dbf;
    SessionInventory session;
    VirtualRouterSimulatorConfig vconfig;
    KVMSimulatorConfig kconfig;
    ThreadFacade thdf;
    RESTFacade restf;
    // number of VMs to create
    int total = 1000;
    // max concurrency of the VM-creation sync tasks
    int syncLevel = 150;
    // minutes to wait on each latch before giving up
    int timeout = 600;
    // total number of port forwarding rules, spread evenly across the VMs
    int ruleNum = 5000;
    CountDownLatch ruleLatch = new CountDownLatch(ruleNum);
    // per-rule timings in milliseconds: attach, VIP acquisition, rule creation
    List<Long> timeCost = new ArrayList<Long>(ruleNum);
    List<Long> vipCost = new ArrayList<Long>(ruleNum);
    List<Long> createCost = new ArrayList<Long>(ruleNum);
    final List<String> vmNicUuids = new ArrayList<String>(total);
    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        WebBeanConstructor con = new WebBeanConstructor();
        deployer = new Deployer("deployerXml/virtualRouter/TestPortForwarding1000Vm.xml", con);
        deployer.addSpringConfig("VirtualRouter.xml");
        deployer.addSpringConfig("VirtualRouterSimulator.xml");
        deployer.addSpringConfig("KVMRelated.xml");
        deployer.addSpringConfig("PortForwarding.xml");
        deployer.addSpringConfig("vip.xml");
        deployer.build();
        api = deployer.getApi();
        loader = deployer.getComponentLoader();
        vconfig = loader.getComponent(VirtualRouterSimulatorConfig.class);
        kconfig = loader.getComponent(KVMSimulatorConfig.class);
        bus = loader.getComponent(CloudBus.class);
        dbf = loader.getComponent(DatabaseFacade.class);
        thdf = loader.getComponent(ThreadFacade.class);
        restf = loader.getComponent(RESTFacade.class);
        session = api.loginAsAdmin();
        SimulatorGlobalProperty.NOT_CACHE_AGENT_COMMAND = true;
    }
    /**
     * Acquires a VIP, creates a TCP port forwarding rule on the given port and
     * attaches it to the VM nic, recording the elapsed time of each phase.
     * Always counts down {@link #ruleLatch}, even on failure.
     */
    @SyncThread(level = 1000)
    private void createRule(L3NetworkInventory pub, L3NetworkInventory guest, String vmNicUuid, int port) throws ApiSenderException {
        try {
            api.setTimeout(600);
            long s = System.currentTimeMillis();
            VipInventory vip = api.acquireIp(pub.getUuid());
            long s1 = System.currentTimeMillis();
            vipCost.add(s1 - s);
            PortForwardingRuleInventory rule = new PortForwardingRuleInventory();
            rule.setVipUuid(vip.getUuid());
            rule.setName("pf");
            rule.setVipPortStart(port);
            rule.setVipPortEnd(port);
            rule.setPrivatePortStart(port);
            rule.setPrivatePortEnd(port);
            rule.setProtocolType(PortForwardingProtocolType.TCP.toString());
            rule = api.createPortForwardingRuleByFullConfig(rule);
            long s2 = System.currentTimeMillis();
            createCost.add(s2 - s1);
            api.attachPortForwardingRule(rule.getUuid(), vmNicUuid);
            timeCost.add(System.currentTimeMillis() - s2);
        } finally {
            ruleLatch.countDown();
        }
    }
    /**
     * Prints min/max/avg statistics (in seconds) for the collected samples.
     * NOTE: the original inline code initialized {@code min} to 0, so
     * {@code Math.min(t, min)} always reported a minimum of 0 — fixed here.
     */
    private void printCostStatistics(String title, List<Long> samples, long elapsedMillis) {
        long min = Long.MAX_VALUE;
        long max = 0;
        long sum = 0;
        for (long t : samples) {
            min = Math.min(t, min);
            max = Math.max(t, max);
            sum += t;
        }
        if (samples.isEmpty()) {
            // avoid divide-by-zero and a nonsensical MAX_VALUE minimum
            min = 0;
        }
        long avg = samples.isEmpty() ? 0 : sum / samples.size();
        System.out.println(ln(
                title,
                "total time: {0}",
                "max: {1}",
                "min: {2}",
                "avg: {3}"
        ).format(
                TimeUnit.MILLISECONDS.toSeconds(elapsedMillis),
                TimeUnit.MILLISECONDS.toSeconds(max),
                TimeUnit.MILLISECONDS.toSeconds(min),
                TimeUnit.MILLISECONDS.toSeconds(avg)
        ));
    }
    @Test
    public void test() throws ApiSenderException, InterruptedException {
        CoreGlobalProperty.VM_TRACER_ON = false;
        final L3NetworkInventory guestL3 = deployer.l3Networks.get("TestL3Network1");
        L3NetworkInventory pubL3 = deployer.l3Networks.get("MgmtNetwork");
        final ImageInventory img = deployer.images.get("TestImage");
        ImageVO imgvo = dbf.findByUuid(img.getUuid(), ImageVO.class);
        imgvo.setSize(1);
        dbf.update(imgvo);
        final InstanceOfferingInventory ioinv = deployer.instanceOfferings.get("TestInstanceOffering");
        final CountDownLatch latch = new CountDownLatch(total);
        // create all VMs concurrently, bounded by syncLevel
        for (int i = 0; i < total; i++) {
            final int finalI = i;
            thdf.syncSubmit(new SyncTask<Object>() {
                @Override
                public String getSyncSignature() {
                    return "creating-vm";
                }
                @Override
                public int getSyncLevel() {
                    return syncLevel;
                }
                @Override
                public String getName() {
                    return getSyncSignature();
                }
                @Override
                public Object call() throws Exception {
                    try {
                        VmCreator creator = new VmCreator(api);
                        creator.addL3Network(guestL3.getUuid());
                        creator.instanceOfferingUuid = ioinv.getUuid();
                        creator.imageUuid = img.getUuid();
                        creator.name = "vm-" + finalI;
                        creator.timeout = (int) TimeUnit.MINUTES.toSeconds(10);
                        VmInstanceInventory vm = creator.create();
                        synchronized (vmNicUuids) {
                            vmNicUuids.add(vm.getVmNics().get(0).getUuid());
                        }
                    } finally {
                        latch.countDown();
                    }
                    return null;
                }
            });
        }
        latch.await(timeout, TimeUnit.MINUTES);
        CoreGlobalProperty.PROFILER_WORKFLOW = true;
        CoreGlobalProperty.PROFILER_HTTP_CALL = true;
        SimulatorGlobalProperty.NOT_CACHE_AGENT_COMMAND = true;
        long start = System.currentTimeMillis();
        long rulePerVm = ruleNum / total;
        System.out.println(String.format("start creating port forwarding rule, total: %s, rule per vm: %s", ruleNum, rulePerVm));
        for (int j = 0; j < total; j++) {
            String nicUuid = vmNicUuids.get(j);
            for (int i = 0; i < rulePerVm; i++) {
                createRule(pubL3, guestL3, nicUuid, i);
            }
        }
        ruleLatch.await(timeout, TimeUnit.MINUTES);
        long end = System.currentTimeMillis();
        long elapsed = end - start;
        printCostStatistics("create vip cost:", vipCost, elapsed);
        printCostStatistics("create pf cost:", createCost, elapsed);
        printCostStatistics("attach pf cost:", timeCost, elapsed);
        // workflow profiling, sorted by total time descending
        List<WorkFlowStatistic> stats = new ArrayList<WorkFlowStatistic>(SimpleFlowChain.getStatistics().values());
        Collections.sort(stats, new Comparator<WorkFlowStatistic>() {
            @Override
            public int compare(WorkFlowStatistic o1, WorkFlowStatistic o2) {
                // Long.compare avoids the overflow/truncation of casting a long difference to int
                return Long.compare(o2.getTotalTime(), o1.getTotalTime());
            }
        });
        for (WorkFlowStatistic stat : stats) {
            System.out.println(stat.toString());
        }
        System.out.println();
        // HTTP-call profiling, sorted by total time descending
        List<HttpCallStatistic> hstats = new ArrayList<HttpCallStatistic>(restf.getStatistics().values());
        Collections.sort(hstats, new Comparator<HttpCallStatistic>() {
            @Override
            public int compare(HttpCallStatistic o1, HttpCallStatistic o2) {
                return Long.compare(o2.getTotalTime(), o1.getTotalTime());
            }
        });
        for (HttpCallStatistic stat : hstats) {
            System.out.println(stat.toString());
        }
    }
}
| SoftwareKing/zstack | test/src/test/java/org/zstack/test/virtualrouter/TestVirtualRouterPortForwarding1000Vm.java | Java | apache-2.0 | 10,806 |
/*
* Copyright 2009-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ibatis.parsing;
import java.io.InputStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import javax.xml.namespace.QName;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.apache.ibatis.builder.BuilderException;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.EntityResolver;
import org.xml.sax.ErrorHandler;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
/**
* @author Clinton Begin
*/
/**
 * XPath parser. Built entirely on JDK classes and wrapped to make them
 * more convenient to use.
 */
public class XPathParser {
private Document document;
private boolean validation;
private EntityResolver entityResolver;
private Properties variables;
private XPath xpath;
//一些构造函数,全部调用commonConstructor以及createDocument
//1~4,默认不需要验证
public XPathParser(String xml) {
commonConstructor(false, null, null);
this.document = createDocument(new InputSource(new StringReader(xml)));
}
public XPathParser(Reader reader) {
commonConstructor(false, null, null);
this.document = createDocument(new InputSource(reader));
}
public XPathParser(InputStream inputStream) {
commonConstructor(false, null, null);
this.document = createDocument(new InputSource(inputStream));
}
public XPathParser(Document document) {
commonConstructor(false, null, null);
this.document = document;
}
//5~8,传入是否需要验证参数
public XPathParser(String xml, boolean validation) {
commonConstructor(validation, null, null);
this.document = createDocument(new InputSource(new StringReader(xml)));
}
public XPathParser(Reader reader, boolean validation) {
commonConstructor(validation, null, null);
this.document = createDocument(new InputSource(reader));
}
public XPathParser(InputStream inputStream, boolean validation) {
commonConstructor(validation, null, null);
this.document = createDocument(new InputSource(inputStream));
}
public XPathParser(Document document, boolean validation) {
commonConstructor(validation, null, null);
this.document = document;
}
//9~12,传入是否需要验证参数,Properties
public XPathParser(String xml, boolean validation, Properties variables) {
commonConstructor(validation, variables, null);
this.document = createDocument(new InputSource(new StringReader(xml)));
}
public XPathParser(Reader reader, boolean validation, Properties variables) {
commonConstructor(validation, variables, null);
this.document = createDocument(new InputSource(reader));
}
public XPathParser(InputStream inputStream, boolean validation, Properties variables) {
commonConstructor(validation, variables, null);
this.document = createDocument(new InputSource(inputStream));
}
public XPathParser(Document document, boolean validation, Properties variables) {
commonConstructor(validation, variables, null);
this.document = document;
}
//13~16,传入是否需要验证参数,Properties,EntityResolver
public XPathParser(String xml, boolean validation, Properties variables, EntityResolver entityResolver) {
commonConstructor(validation, variables, entityResolver);
this.document = createDocument(new InputSource(new StringReader(xml)));
}
public XPathParser(Reader reader, boolean validation, Properties variables, EntityResolver entityResolver) {
commonConstructor(validation, variables, entityResolver);
this.document = createDocument(new InputSource(reader));
}
public XPathParser(InputStream inputStream, boolean validation, Properties variables, EntityResolver entityResolver) {
commonConstructor(validation, variables, entityResolver);
this.document = createDocument(new InputSource(inputStream));
}
public XPathParser(Document document, boolean validation, Properties variables, EntityResolver entityResolver) {
commonConstructor(validation, variables, entityResolver);
this.document = document;
}
//17.设置Properties
public void setVariables(Properties variables) {
this.variables = variables;
}
public String evalString(String expression) {
return evalString(document, expression);
}
public String evalString(Object root, String expression) {
//1.先用xpath解析
String result = (String) evaluate(expression, root, XPathConstants.STRING);
//2.再调用PropertyParser去解析,也就是替换 ${} 这种格式的字符串
result = PropertyParser.parse(result, variables);
return result;
}
public Boolean evalBoolean(String expression) {
return evalBoolean(document, expression);
}
public Boolean evalBoolean(Object root, String expression) {
return (Boolean) evaluate(expression, root, XPathConstants.BOOLEAN);
}
public Short evalShort(String expression) {
return evalShort(document, expression);
}
public Short evalShort(Object root, String expression) {
return Short.valueOf(evalString(root, expression));
}
public Integer evalInteger(String expression) {
return evalInteger(document, expression);
}
public Integer evalInteger(Object root, String expression) {
return Integer.valueOf(evalString(root, expression));
}
public Long evalLong(String expression) {
return evalLong(document, expression);
}
public Long evalLong(Object root, String expression) {
return Long.valueOf(evalString(root, expression));
}
public Float evalFloat(String expression) {
return evalFloat(document, expression);
}
//??这里有点疑问,为何Float用evalString,Double用evaluate XPathConstants.NUMBER
public Float evalFloat(Object root, String expression) {
return Float.valueOf(evalString(root, expression));
}
public Double evalDouble(String expression) {
return evalDouble(document, expression);
}
public Double evalDouble(Object root, String expression) {
return (Double) evaluate(expression, root, XPathConstants.NUMBER);
}
public List<XNode> evalNodes(String expression) {
return evalNodes(document, expression);
}
//返回节点List
public List<XNode> evalNodes(Object root, String expression) {
List<XNode> xnodes = new ArrayList<XNode>();
NodeList nodes = (NodeList) evaluate(expression, root, XPathConstants.NODESET);
for (int i = 0; i < nodes.getLength(); i++) {
xnodes.add(new XNode(this, nodes.item(i), variables));
}
return xnodes;
}
public XNode evalNode(String expression) {
return evalNode(document, expression);
}
//返回节点
public XNode evalNode(Object root, String expression) {
Node node = (Node) evaluate(expression, root, XPathConstants.NODE);
if (node == null) {
return null;
}
return new XNode(this, node, variables);
}
private Object evaluate(String expression, Object root, QName returnType) {
try {
//最终合流到这儿,直接调用XPath.evaluate
return xpath.evaluate(expression, root, returnType);
} catch (Exception e) {
throw new BuilderException("Error evaluating XPath. Cause: " + e, e);
}
}
private Document createDocument(InputSource inputSource) {
// important: this must only be called AFTER common constructor
try {
//这个是DOM解析方式
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setValidating(validation);
//名称空间
factory.setNamespaceAware(false);
//忽略注释
factory.setIgnoringComments(true);
//忽略空白
factory.setIgnoringElementContentWhitespace(false);
//把 CDATA 节点转换为 Text 节点
factory.setCoalescing(false);
//扩展实体引用
factory.setExpandEntityReferences(true);
DocumentBuilder builder = factory.newDocumentBuilder();
//需要注意的就是定义了EntityResolver(XMLMapperEntityResolver),这样不用联网去获取DTD,
//将DTD放在org\apache\ibatis\builder\xml\mybatis-3-config.dtd,来达到验证xml合法性的目的
builder.setEntityResolver(entityResolver);
builder.setErrorHandler(new ErrorHandler() {
@Override
public void error(SAXParseException exception) throws SAXException {
throw exception;
}
@Override
public void fatalError(SAXParseException exception) throws SAXException {
throw exception;
}
@Override
public void warning(SAXParseException exception) throws SAXException {
}
});
return builder.parse(inputSource);
} catch (Exception e) {
throw new BuilderException("Error creating document instance. Cause: " + e, e);
}
}
private void commonConstructor(boolean validation, Properties variables, EntityResolver entityResolver) {
this.validation = validation;
this.entityResolver = entityResolver;
this.variables = variables;
//共通构造函数,除了把参数都设置到实例变量里面去以外,还初始化了XPath
XPathFactory factory = XPathFactory.newInstance();
this.xpath = factory.newXPath();
}
}
| Liyue1314/mybatis | src/main/java/org/apache/ibatis/parsing/XPathParser.java | Java | apache-2.0 | 10,032 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.ml.preprocessing.standardscaling;
import org.apache.ignite.ml.dataset.Dataset;
import org.apache.ignite.ml.dataset.DatasetBuilder;
import org.apache.ignite.ml.dataset.UpstreamEntry;
import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
import org.apache.ignite.ml.environment.LearningEnvironmentBuilder;
import org.apache.ignite.ml.math.primitives.vector.Vector;
import org.apache.ignite.ml.preprocessing.PreprocessingTrainer;
import org.apache.ignite.ml.preprocessing.Preprocessor;
/**
 * Trainer of the standard scaler preprocessor.
 *
 * <p>Computes per-feature mean and standard deviation over the dataset and
 * wraps the base preprocessor into a {@link StandardScalerPreprocessor}
 * that applies z-score normalization {@code (x - mean) / sigma}.
 *
 * @param <K> Type of a key in {@code upstream} data.
 * @param <V> Type of a value in {@code upstream} data.
 */
public class StandardScalerTrainer<K, V> implements PreprocessingTrainer<K, V> {
    /** {@inheritDoc} */
    @Override public StandardScalerPreprocessor<K, V> fit(LearningEnvironmentBuilder envBuilder,
        DatasetBuilder<K, V> datasetBuilder,
        Preprocessor<K, V> basePreprocessor) {
        StandardScalerData standardScalerData = computeSum(envBuilder, datasetBuilder, basePreprocessor);

        // Fail fast on an empty dataset: previously this fell through to an
        // uninformative NullPointerException (null sum array) or a division
        // by zero producing NaN statistics.
        if (standardScalerData == null || standardScalerData.sum == null || standardScalerData.cnt == 0)
            throw new IllegalStateException("Cannot fit standard scaler: dataset is empty");

        int n = standardScalerData.sum.length;
        long cnt = standardScalerData.cnt;

        double[] mean = new double[n];
        double[] sigma = new double[n];

        for (int i = 0; i < n; i++) {
            mean[i] = standardScalerData.sum[i] / cnt;
            // Variance via E[x^2] - (E[x])^2. Floating-point cancellation can
            // push this slightly below zero for (near-)constant features,
            // which would make Math.sqrt return NaN, so clamp at zero.
            double variance = (standardScalerData.squaredSum[i] - Math.pow(standardScalerData.sum[i], 2) / cnt) / cnt;
            sigma[i] = Math.sqrt(Math.max(variance, 0.0));
        }
        return new StandardScalerPreprocessor<>(mean, sigma, basePreprocessor);
    }

    /**
     * Computes per-feature sum, per-feature squared sum and total row count
     * over the dataset built from the given builder.
     *
     * @param envBuilder Learning environment builder.
     * @param datasetBuilder Dataset builder.
     * @param basePreprocessor Preprocessor extracting feature vectors from upstream entries.
     * @return Aggregated sums and row count; the arrays are {@code null} when no rows were seen.
     */
    private StandardScalerData computeSum(LearningEnvironmentBuilder envBuilder,
        DatasetBuilder<K, V> datasetBuilder,
        Preprocessor<K, V> basePreprocessor) {
        try (Dataset<EmptyContext, StandardScalerData> dataset = datasetBuilder.build(
            envBuilder,
            (env, upstream, upstreamSize) -> new EmptyContext(),
            (env, upstream, upstreamSize, ctx) -> {
                double[] sum = null;
                double[] squaredSum = null;
                long cnt = 0;

                while (upstream.hasNext()) {
                    UpstreamEntry<K, V> entity = upstream.next();
                    Vector row = basePreprocessor.apply(entity.getKey(), entity.getValue()).features();

                    // Lazily size the accumulators from the first row; every
                    // later row must expose the same number of features.
                    if (sum == null) {
                        sum = new double[row.size()];
                        squaredSum = new double[row.size()];
                    }
                    else {
                        assert sum.length == row.size() : "Base preprocessor must return exactly " + sum.length
                            + " features";
                    }

                    ++cnt;
                    for (int i = 0; i < row.size(); i++) {
                        double x = row.get(i);
                        sum[i] += x;
                        squaredSum[i] += x * x;
                    }
                }
                return new StandardScalerData(sum, squaredSum, cnt);
            }, learningEnvironment(basePreprocessor)
        )) {
            // Reduce per-partition accumulators; null partitions are skipped,
            // non-null ones are merged element-wise by StandardScalerData.merge.
            return dataset.compute(data -> data,
                (a, b) -> {
                    if (a == null)
                        return b;
                    if (b == null)
                        return a;
                    return a.merge(b);
                });
        }
        catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| samaitra/ignite | modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/standardscaling/StandardScalerTrainer.java | Java | apache-2.0 | 4,355 |
// Rev 1.121 copied from http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?view=markup
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166e;
import jsr166e.ForkJoinPool;
import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
/**
* A hash table supporting full concurrency of retrievals and
* high expected concurrency for updates. This class obeys the
* same functional specification as {@link java.util.Hashtable}, and
* includes versions of methods corresponding to each method of
* {@code Hashtable}. However, even though all operations are
* thread-safe, retrieval operations do <em>not</em> entail locking,
* and there is <em>not</em> any support for locking the entire table
* in a way that prevents all access. This class is fully
* interoperable with {@code Hashtable} in programs that rely on its
* thread safety but not on its synchronization details.
*
* <p>Retrieval operations (including {@code get}) generally do not
* block, so may overlap with update operations (including {@code put}
* and {@code remove}). Retrievals reflect the results of the most
* recently <em>completed</em> update operations holding upon their
* onset. (More formally, an update operation for a given key bears a
* <em>happens-before</em> relation with any (non-null) retrieval for
* that key reporting the updated value.) For aggregate operations
* such as {@code putAll} and {@code clear}, concurrent retrievals may
* reflect insertion or removal of only some entries. Similarly,
* Iterators and Enumerations return elements reflecting the state of
* the hash table at some point at or since the creation of the
* iterator/enumeration. They do <em>not</em> throw {@link
* ConcurrentModificationException}. However, iterators are designed
* to be used by only one thread at a time. Bear in mind that the
* results of aggregate status methods including {@code size}, {@code
* isEmpty}, and {@code containsValue} are typically useful only when
* a map is not undergoing concurrent updates in other threads.
* Otherwise the results of these methods reflect transient states
* that may be adequate for monitoring or estimation purposes, but not
* for program control.
*
* <p>The table is dynamically expanded when there are too many
* collisions (i.e., keys that have distinct hash codes but fall into
* the same slot modulo the table size), with the expected average
* effect of maintaining roughly two bins per mapping (corresponding
* to a 0.75 load factor threshold for resizing). There may be much
* variance around this average as mappings are added and removed, but
* overall, this maintains a commonly accepted time/space tradeoff for
* hash tables. However, resizing this or any other kind of hash
* table may be a relatively slow operation. When possible, it is a
* good idea to provide a size estimate as an optional {@code
* initialCapacity} constructor argument. An additional optional
* {@code loadFactor} constructor argument provides a further means of
* customizing initial table capacity by specifying the table density
* to be used in calculating the amount of space to allocate for the
* given number of elements. Also, for compatibility with previous
* versions of this class, constructors may optionally specify an
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
* {@code hashCode()} is a sure way to slow down performance of any
* hash table. To ameliorate impact, when keys are {@link Comparable},
* this class may use comparison order among keys to help break ties.
*
* <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
* (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
* (using {@link #keySet(Object)} when only keys are of interest, and the
* mapped values are (perhaps transiently) not used or all take the
* same mapping value.
*
* <p>This class and its views and iterators implement all of the
* <em>optional</em> methods of the {@link Map} and {@link Iterator}
* interfaces.
*
* <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
* does <em>not</em> allow {@code null} to be used as a key or value.
*
* <p>ConcurrentHashMapV8s support a set of sequential and parallel bulk
* operations that are designed
* to be safely, and often sensibly, applied even with maps that are
* being concurrently updated by other threads; for example, when
* computing a snapshot summary of the values in a shared registry.
* There are three kinds of operation, each with four forms, accepting
* functions with Keys, Values, Entries, and (Key, Value) arguments
* and/or return values. Because the elements of a ConcurrentHashMapV8
* are not ordered in any particular way, and may be processed in
* different orders in different parallel executions, the correctness
* of supplied functions should not depend on any ordering, or on any
* other objects or values that may transiently change while
* computation is in progress; and except for forEach actions, should
* ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
* objects do not support method {@code setValue}.
*
* <ul>
* <li> forEach: Perform a given action on each element.
* A variant form applies a given transformation on each element
* before performing the action.</li>
*
* <li> search: Return the first available non-null result of
* applying a given function on each element; skipping further
* search when a result is found.</li>
*
* <li> reduce: Accumulate each element. The supplied reduction
* function cannot rely on ordering (more formally, it should be
* both associative and commutative). There are five variants:
*
* <ul>
*
* <li> Plain reductions. (There is not a form of this method for
* (key, value) function arguments since there is no corresponding
* return type.)</li>
*
* <li> Mapped reductions that accumulate the results of a given
* function applied to each element.</li>
*
* <li> Reductions to scalar doubles, longs, and ints, using a
* given basis value.</li>
*
* </ul>
* </li>
* </ul>
*
* <p>These bulk operations accept a {@code parallelismThreshold}
* argument. Methods proceed sequentially if the current map size is
* estimated to be less than the given threshold. Using a value of
* {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
* of {@code 1} results in maximal parallelism by partitioning into
* enough subtasks to fully utilize the {@link
* ForkJoinPool#commonPool()} that is used for all parallel
* computations. Normally, you would initially choose one of these
* extreme values, and then measure performance of using in-between
* values that trade off overhead versus throughput.
*
* <p>The concurrency properties of bulk operations follow
* from those of ConcurrentHashMapV8: Any non-null result returned
* from {@code get(key)} and related access methods bears a
* happens-before relation with the associated insertion or
* update. The result of any bulk operation reflects the
* composition of these per-element relations (but is not
* necessarily atomic with respect to the map as a whole unless it
* is somehow known to be quiescent). Conversely, because keys
* and values in the map are never null, null serves as a reliable
* atomic indicator of the current lack of any result. To
* maintain this property, null serves as an implicit basis for
* all non-scalar reduction operations. For the double, long, and
* int versions, the basis should be one that, when combined with
* any other value, returns that other value (more formally, it
* should be the identity element for the reduction). Most common
* reductions have these properties; for example, computing a sum
* with basis 0 or a minimum with basis MAX_VALUE.
*
* <p>Search and transformation functions provided as arguments
* should similarly return null to indicate the lack of any result
* (in which case it is not used). In the case of mapped
* reductions, this also enables transformations to serve as
* filters, returning null (or, in the case of primitive
* specializations, the identity basis) if the element should not
* be combined. You can create compound transformations and
* filterings by composing them yourself under this "null means
* there is nothing there now" rule before using them in search or
* reduce operations.
*
* <p>Methods accepting and/or returning Entry arguments maintain
* key-value associations. They may be useful for example when
* finding the key for the greatest value. Note that "plain" Entry
* arguments can be supplied using {@code new
* AbstractMap.SimpleEntry(k,v)}.
*
* <p>Bulk operations may complete abruptly, throwing an
* exception encountered in the application of a supplied
* function. Bear in mind when handling such exceptions that other
* concurrently executing functions could also have thrown
* exceptions, or would have done so if the first exception had
* not occurred.
*
* <p>Speedups for parallel compared to sequential forms are common
* but not guaranteed. Parallel operations involving brief functions
* on small maps may execute more slowly than sequential forms if the
* underlying work to parallelize the computation is more expensive
* than the computation itself. Similarly, parallelization may not
* lead to much actual parallelism if all processors are busy
* performing unrelated tasks.
*
* <p>All arguments to all task methods must be non-null.
*
* <p><em>jsr166e note: During transition, this class
* uses nested functional interfaces with different names but the
* same forms as those expected for JDK8.</em>
*
* <p>This class is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
*
* @since 1.5
* @author Doug Lea
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
*/
public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V>
implements ConcurrentMap<K,V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;
    /**
     * An object for traversing and partitioning elements of a source.
     * This interface provides a subset of the functionality of JDK8
     * java.util.Spliterator.
     */
    public static interface ConcurrentHashMapSpliterator<T> {
        /**
         * If possible, returns a new spliterator covering
         * approximately one half of the elements, which will not be
         * covered by this spliterator. Returns null if cannot be
         * split.
         */
        ConcurrentHashMapSpliterator<T> trySplit();
        /**
         * Returns an estimate of the number of elements covered by
         * this Spliterator.
         */
        long estimateSize();
        /**
         * Applies the action to each untraversed element.
         *
         * @param action the action to apply to every remaining element
         */
        void forEachRemaining(Action<? super T> action);
        /**
         * If an element remains, applies the action and returns true.
         *
         * @param action the action to apply to the next element, if any
         * @return true if an element was present and consumed
         */
        boolean tryAdvance(Action<? super T> action);
    }
    // Single-abstract-method ("SAM") interfaces: pre-JDK8 stand-ins for the
    // java.util.function types (as noted in the class javadoc), consumed by
    // the bulk operations (forEach / search / reduce) and the spliterator.
    /** Interface describing a void action of one argument (JDK8 analogue: Consumer). */
    public interface Action<A> { void apply(A a); }
    /** Interface describing a void action of two arguments (JDK8 analogue: BiConsumer). */
    public interface BiAction<A,B> { void apply(A a, B b); }
    /** Interface describing a function of one argument (JDK8 analogue: Function). */
    public interface Fun<A,T> { T apply(A a); }
    /** Interface describing a function of two arguments (JDK8 analogue: BiFunction). */
    public interface BiFun<A,B,T> { T apply(A a, B b); }
    /** Interface describing a function mapping its argument to a double (JDK8 analogue: ToDoubleFunction). */
    public interface ObjectToDouble<A> { double apply(A a); }
    /** Interface describing a function mapping its argument to a long (JDK8 analogue: ToLongFunction). */
    public interface ObjectToLong<A> { long apply(A a); }
    /** Interface describing a function mapping its argument to an int (JDK8 analogue: ToIntFunction). */
    public interface ObjectToInt<A> {int apply(A a); }
    /** Interface describing a function mapping two arguments to a double (JDK8 analogue: ToDoubleBiFunction). */
    public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
    /** Interface describing a function mapping two arguments to a long (JDK8 analogue: ToLongBiFunction). */
    public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
    /** Interface describing a function mapping two arguments to an int (JDK8 analogue: ToIntBiFunction). */
    public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
    /** Interface describing a function mapping two doubles to a double (JDK8 analogue: DoubleBinaryOperator). */
    public interface DoubleByDoubleToDouble { double apply(double a, double b); }
    /** Interface describing a function mapping two longs to a long (JDK8 analogue: LongBinaryOperator). */
    public interface LongByLongToLong { long apply(long a, long b); }
    /** Interface describing a function mapping two ints to an int (JDK8 analogue: IntBinaryOperator). */
    public interface IntByIntToInt { int apply(int a, int b); }
/*
* Overview:
*
* The primary design goal of this hash table is to maintain
* concurrent readability (typically method get(), but also
* iterators and related methods) while minimizing update
* contention. Secondary goals are to keep space consumption about
* the same or better than java.util.HashMap, and to support high
* initial insertion rates on an empty table by many threads.
*
* This map usually acts as a binned (bucketed) hash table. Each
* key-value mapping is held in a Node. Most nodes are instances
* of the basic Node class with hash, key, value, and next
* fields. However, various subclasses exist: TreeNodes are
* arranged in balanced trees, not lists. TreeBins hold the roots
* of sets of TreeNodes. ForwardingNodes are placed at the heads
* of bins during resizing. ReservationNodes are used as
* placeholders while establishing values in computeIfAbsent and
* related methods. The types TreeBin, ForwardingNode, and
* ReservationNode do not hold normal user keys, values, or
* hashes, and are readily distinguishable during search etc
* because they have negative hash fields and null key and value
* fields. (These special nodes are either uncommon or transient,
* so the impact of carrying around some unused fields is
* insignificant.)
*
* The table is lazily initialized to a power-of-two size upon the
* first insertion. Each bin in the table normally contains a
* list of Nodes (most often, the list has only zero or one Node).
* Table accesses require volatile/atomic reads, writes, and
* CASes. Because there is no other way to arrange this without
* adding further indirections, we use intrinsics
* (sun.misc.Unsafe) operations.
*
* We use the top (sign) bit of Node hash fields for control
* purposes -- it is available anyway because of addressing
* constraints. Nodes with negative hash fields are specially
* handled or ignored in map methods.
*
* Insertion (via put or its variants) of the first node in an
* empty bin is performed by just CASing it to the bin. This is
* by far the most common case for put operations under most
* key/hash distributions. Other update operations (insert,
* delete, and replace) require locks. We do not want to waste
* the space required to associate a distinct lock object with
* each bin, so instead use the first node of a bin list itself as
* a lock. Locking support for these locks relies on builtin
* "synchronized" monitors.
*
* Using the first node of a list as a lock does not by itself
* suffice though: When a node is locked, any update must first
* validate that it is still the first node after locking it, and
* retry if not. Because new nodes are always appended to lists,
* once a node is first in a bin, it remains first until deleted
* or the bin becomes invalidated (upon resizing).
*
* The main disadvantage of per-bin locks is that other update
* operations on other nodes in a bin list protected by the same
* lock can stall, for example when user equals() or mapping
* functions take a long time. However, statistically, under
* random hash codes, this is not a common problem. Ideally, the
* frequency of nodes in bins follows a Poisson distribution
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a
* parameter of about 0.5 on average, given the resizing threshold
* of 0.75, although with a large variance because of resizing
* granularity. Ignoring variance, the expected occurrences of
* list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
* first values are:
*
* 0: 0.60653066
* 1: 0.30326533
* 2: 0.07581633
* 3: 0.01263606
* 4: 0.00157952
* 5: 0.00015795
* 6: 0.00001316
* 7: 0.00000094
* 8: 0.00000006
* more: less than 1 in ten million
*
* Lock contention probability for two threads accessing distinct
* elements is roughly 1 / (8 * #elements) under random hashes.
*
* Actual hash code distributions encountered in practice
* sometimes deviate significantly from uniform randomness. This
* includes the case when N > (1<<30), so some keys MUST collide.
* Similarly for dumb or hostile usages in which multiple keys are
* designed to have identical hash codes or ones that differs only
* in masked-out high bits. So we use a secondary strategy that
* applies when the number of nodes in a bin exceeds a
* threshold. These TreeBins use a balanced tree to hold nodes (a
* specialized form of red-black trees), bounding search time to
* O(log N). Each search step in a TreeBin is at least twice as
* slow as in a regular list, but given that N cannot exceed
* (1<<64) (before running out of addresses) this bounds search
* steps, lock hold times, etc, to reasonable constants (roughly
* 100 nodes inspected per operation worst case) so long as keys
* are Comparable (which is very common -- String, Long, etc).
* TreeBin nodes (TreeNodes) also maintain the same "next"
* traversal pointers as regular nodes, so can be traversed in
* iterators in the same way.
*
* The table is resized when occupancy exceeds a percentage
* threshold (nominally, 0.75, but see below). Any thread
* noticing an overfull bin may assist in resizing after the
* initiating thread allocates and sets up the replacement array.
* However, rather than stalling, these other threads may proceed
* with insertions etc. The use of TreeBins shields us from the
* worst case effects of overfilling while resizes are in
* progress. Resizing proceeds by transferring bins, one by one,
* from the table to the next table. However, threads claim small
* blocks of indices to transfer (via field transferIndex) before
* doing so, reducing contention. A generation stamp in field
* sizeCtl ensures that resizings do not overlap. Because we are
* using power-of-two expansion, the elements from each bin must
* either stay at same index, or move with a power of two
* offset. We eliminate unnecessary node creation by catching
* cases where old nodes can be reused because their next fields
* won't change. On average, only about one-sixth of them need
* cloning when a table doubles. The nodes they replace will be
* garbage collectable as soon as they are no longer referenced by
* any reader thread that may be in the midst of concurrently
* traversing table. Upon transfer, the old table bin contains
* only a special forwarding node (with hash field "MOVED") that
* contains the next table as its key. On encountering a
* forwarding node, access and update operations restart, using
* the new table.
*
* Each bin transfer requires its bin lock, which can stall
* waiting for locks while resizing. However, because other
* threads can join in and help resize rather than contend for
* locks, average aggregate waits become shorter as resizing
* progresses. The transfer operation must also ensure that all
* accessible bins in both the old and new table are usable by any
* traversal. This is arranged in part by proceeding from the
* last bin (table.length - 1) up towards the first. Upon seeing
* a forwarding node, traversals (see class Traverser) arrange to
* move to the new table without revisiting nodes. To ensure that
* no intervening nodes are skipped even when moved out of order,
* a stack (see class TableStack) is created on first encounter of
* a forwarding node during a traversal, to maintain its place if
* later processing the current table. The need for these
* save/restore mechanics is relatively rare, but when one
* forwarding node is encountered, typically many more will be.
* So Traversers use a simple caching scheme to avoid creating so
* many new TableStack nodes. (Thanks to Peter Levart for
* suggesting use of a stack here.)
*
* The traversal scheme also applies to partial traversals of
* ranges of bins (via an alternate Traverser constructor)
* to support partitioned aggregate operations. Also, read-only
* operations give up if ever forwarded to a null table, which
* provides support for shutdown-style clearing, which is also not
* currently implemented.
*
* Lazy table initialization minimizes footprint until first use,
* and also avoids resizings when the first operation is from a
* putAll, constructor with map argument, or deserialization.
* These cases attempt to override the initial capacity settings,
* but harmlessly fail to take effect in cases of races.
*
* The element count is maintained using a specialization of
* LongAdder. We need to incorporate a specialization rather than
* just use a LongAdder in order to access implicit
* contention-sensing that leads to creation of multiple
* CounterCells. The counter mechanics avoid contention on
* updates but can encounter cache thrashing if read too
* frequently during concurrent access. To avoid reading so often,
* resizing under contention is attempted only upon adding to a
* bin already holding two or more nodes. Under uniform hash
* distributions, the probability of this occurring at threshold
* is around 13%, meaning that only about 1 in 8 puts check
* threshold (and after resizing, many fewer do so).
*
* TreeBins use a special form of comparison for search and
* related operations (which is the main reason we cannot use
* existing collections such as TreeMaps). TreeBins contain
* Comparable elements, but may contain others, as well as
* elements that are Comparable but not necessarily Comparable for
* the same T, so we cannot invoke compareTo among them. To handle
* this, the tree is ordered primarily by hash value, then by
* Comparable.compareTo order if applicable. On lookup at a node,
* if elements are not comparable or compare as 0 then both left
* and right children may need to be searched in the case of tied
* hash values. (This corresponds to the full list search that
* would be necessary if all elements were non-Comparable and had
* tied hashes.) On insertion, to keep a total ordering (or as
* close as is required here) across rebalancings, we compare
* classes and identityHashCodes as tie-breakers. The red-black
* balancing code is updated from pre-jdk-collections
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
* based in turn on Cormen, Leiserson, and Rivest "Introduction to
* Algorithms" (CLR).
*
* TreeBins also require an additional locking mechanism. While
* list traversal is always possible by readers even during
* updates, tree traversal is not, mainly because of tree-rotations
* that may change the root node and/or its linkages. TreeBins
* include a simple read-write lock mechanism parasitic on the
* main bin-synchronization strategy: Structural adjustments
* associated with an insertion or removal are already bin-locked
* (and so cannot conflict with other writers) but must wait for
* ongoing readers to finish. Since there can be only one such
* waiter, we use a simple scheme using a single "waiter" field to
* block writers. However, readers need never block. If the root
* lock is held, they proceed along the slow traversal path (via
* next-pointers) until the lock becomes available or the list is
* exhausted, whichever comes first. These cases are not fast, but
* maximize aggregate expected throughput.
*
* Maintaining API and serialization compatibility with previous
* versions of this class introduces several oddities. Mainly: We
     * leave untouched but unused constructor arguments referring to
* concurrencyLevel. We accept a loadFactor constructor argument,
* but apply it only to initial table capacity (which is the only
* time that we can guarantee to honor it.) We also declare an
* unused "Segment" class that is instantiated in minimal form
* only when serializing.
*
* Also, solely for compatibility with previous versions of this
* class, it extends AbstractMap, even though all of its methods
* are overridden, so it is just useless baggage.
*
* This file is organized to make things a little easier to follow
* while reading than they might otherwise: First the main static
* declarations and utilities, then fields, then main public
* methods (with a few factorings of multiple public methods into
* internal ones), then sizing methods, trees, traversers, and
* bulk operations.
*/
    /* ---------------- Constants -------------- */
    /**
     * The largest possible table capacity. This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;
    /**
     * The default initial table capacity. Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;
    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; // -8: conventional slack for VM array header words
    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity. The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;
    /**
     * The bin count threshold for using a tree rather than list for a
     * bin. Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;
    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;
    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;
    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads. This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention. The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;
/**
* The number of bits used for generation stamp in sizeCtl.
* Must be at least 6 for 32bit arrays.
*/
private static int RESIZE_STAMP_BITS = 16;
    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
    /*
     * Encodings for Node hash fields. See above for explanation.
     * Normal nodes always have non-negative hashes (see spread()),
     * so a negative hash reliably marks a special node.
     */
    static final int MOVED = -1; // hash for forwarding nodes
    static final int TREEBIN = -2; // hash for roots of trees
    static final int RESERVED = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();
    /**
     * For serialization compatibility: declares the pre-JDK8 segmented
     * layout (segments/segmentMask/segmentShift) that writeObject emulates,
     * so older peers can deserialize our streams.
     */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
/* ---------------- Nodes -------------- */
/**
* Key-value entry. This class is never exported out as a
* user-mutable Map.Entry (i.e., one supporting setValue; see
* MapEntry below), but can be used for read-only traversals used
* in bulk tasks. Subclasses of Node with a negative hash field
* are special, and contain null keys and values (but are never
* exported). Otherwise, keys and vals are never null.
*/
static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
volatile V val;
volatile Node<K,V> next;
Node(int hash, K key, V val, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
public final String toString(){ return key + "=" + val; }
public final V setValue(V value) {
throw new UnsupportedOperationException();
}
public final boolean equals(Object o) {
Object k, v, u; Map.Entry<?,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
/**
* Virtualized support for map.get(); overridden in subclasses.
*/
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
/* ---------------- Static utilities -------------- */
/**
* Spreads (XORs) higher bits of hash to lower and also forces top
* bit to 0. Because the table uses power-of-two masking, sets of
* hashes that vary only in bits above the current mask will
* always collide. (Among known examples are sets of Float keys
* holding consecutive whole numbers in small tables.) So we
* apply a transform that spreads the impact of higher bits
* downward. There is a tradeoff between speed, utility, and
* quality of bit-spreading. Because many common sets of hashes
* are already reasonably distributed (so don't benefit from
* spreading), and because we use trees to handle large sets of
* collisions in bins, we just XOR some shifted bits in the
* cheapest possible way to reduce systematic lossage, as well as
* to incorporate impact of the highest bits that would otherwise
* never be used in index calculations because of table bounds.
*/
static final int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS;
}
/**
* Returns a power of two table size for the given desired capacity.
* See Hackers Delight, sec 3.2
*/
private static final int tableSizeFor(int c) {
int n = c - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
/**
* Returns x's Class if it is of the form "class C implements
* Comparable<C>", else null.
*/
static Class<?> comparableClassFor(Object x) {
if (x instanceof Comparable) {
Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
if ((c = x.getClass()) == String.class) // bypass checks
return c;
if ((ts = c.getGenericInterfaces()) != null) {
for (int i = 0; i < ts.length; ++i) {
if (((t = ts[i]) instanceof ParameterizedType) &&
((p = (ParameterizedType)t).getRawType() ==
Comparable.class) &&
(as = p.getActualTypeArguments()) != null &&
as.length == 1 && as[0] == c) // type arg is c
return c;
}
}
}
return null;
}
/**
* Returns k.compareTo(x) if x matches kc (k's screened comparable
* class), else 0.
*/
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
static int compareComparables(Class<?> kc, Object k, Object x) {
return (x == null || x.getClass() != kc ? 0 :
((Comparable)k).compareTo(x));
}
/* ---------------- Table element access -------------- */
/*
* Volatile access methods are used for table elements as well as
* elements of in-progress next table while resizing. All uses of
* the tab arguments must be null checked by callers. All callers
* also paranoically precheck that tab's length is not zero (or an
* equivalent check), thus ensuring that any index argument taking
* the form of a hash value anded with (length - 1) is a valid
* index. Note that, to be correct wrt arbitrary concurrency
* errors by users, these checks must operate on local variables,
* which accounts for some odd-looking inline assignments below.
* Note that calls to setTabAt always occur within locked regions,
* and so in principle require only release ordering, not
* full volatile semantics, but are currently coded as volatile
* writes to be conservative.
*/
    @SuppressWarnings("unchecked")
    // Volatile read of bin i; sees the latest setTabAt/casTabAt publication.
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
    // Atomically install v in bin i iff it currently holds c (lock-free publish).
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
    // Volatile write to bin i; callers hold the bin lock (see preamble above).
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
/* ---------------- Fields -------------- */
    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;
    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;
    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;
    /**
     * Table initialization and resizing control. When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads). Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;
    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;
    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;
    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;
    // views: lazily created caches (see keySet()/values()/entrySet())
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
/* ---------------- Public operations -------------- */
/**
* Creates a new, empty map with the default initial table size (16).
*/
    public ConcurrentHashMapV8() {
        // sizeCtl stays 0: table is lazily allocated at the default size on first insert.
    }
/**
* Creates a new, empty map with an initial table size
* accommodating the specified number of elements without the need
* to dynamically resize.
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
* @throws IllegalArgumentException if the initial capacity of
* elements is negative
*/
public ConcurrentHashMapV8(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
/**
* Creates a new map with the same mappings as the given map.
*
* @param m the map
*/
    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
        // Table not yet created; sizeCtl holds the initial size hint for it.
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }
/**
* Creates a new, empty map with an initial table size based on
* the given number of elements ({@code initialCapacity}) and
* initial table density ({@code loadFactor}).
*
* @param initialCapacity the initial capacity. The implementation
* performs internal sizing to accommodate this many elements,
* given the specified load factor.
* @param loadFactor the load factor (table density) for
* establishing the initial table size
* @throws IllegalArgumentException if the initial capacity of
* elements is negative or the load factor is nonpositive
*
* @since 1.6
*/
    public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1); // concurrencyLevel 1: no extra bin-count floor
    }
/**
* Creates a new, empty map with an initial table size based on
* the given number of elements ({@code initialCapacity}), table
* density ({@code loadFactor}), and number of concurrently
* updating threads ({@code concurrencyLevel}).
*
* @param initialCapacity the initial capacity. The implementation
* performs internal sizing to accommodate this many elements,
* given the specified load factor.
* @param loadFactor the load factor (table density) for
* establishing the initial table size
* @param concurrencyLevel the estimated number of concurrently
* updating threads. The implementation may use this value as
* a sizing hint.
* @throws IllegalArgumentException if the initial capacity is
* negative or the load factor or concurrencyLevel are
* nonpositive
*/
    public ConcurrentHashMapV8(int initialCapacity,
                               float loadFactor, int concurrencyLevel) {
        // Negated comparison (rather than loadFactor <= 0) also rejects NaN.
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }
// Original (since JDK1.2) Map methods
/**
* {@inheritDoc}
*/
public int size() {
long n = sumCount();
return ((n < 0L) ? 0 :
(n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
(int)n);
}
/**
* {@inheritDoc}
*/
    public boolean isEmpty() {
        // <= (not ==): the striped counter can be transiently negative mid-update.
        return sumCount() <= 0L; // ignore transient negative values
    }
/**
* Returns the value to which the specified key is mapped,
* or {@code null} if this map contains no mapping for the key.
*
* <p>More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code key.equals(k)},
* then this method returns {@code v}; otherwise it returns
* {@code null}. (There can be at most one such mapping.)
*
* @throws NullPointerException if the specified key is null
*/
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // First node in the bin: common case, check it directly.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash marks a special (forwarding/tree) node;
                // delegate to its polymorphic find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain list bin: scan the remaining nodes.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
/**
* Tests if the specified object is a key in this table.
*
* @param key possible key
* @return {@code true} if and only if the specified object
* is a key in this table, as determined by the
* {@code equals} method; {@code false} otherwise
* @throws NullPointerException if the specified key is null
*/
    public boolean containsKey(Object key) {
        // Null values are never stored (putVal throws), so a null get() means absent.
        return get(key) != null;
    }
/**
* Returns {@code true} if this map maps one or more keys to the
* specified value. Note: This method may require a full traversal
* of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
* @return {@code true} if this map maps one or more keys to the
* specified value
* @throws NullPointerException if the specified value is null
*/
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
Node<K,V>[] t;
if ((t = table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; ) {
V v;
if ((v = p.val) == value || (v != null && value.equals(v)))
return true;
}
}
return false;
}
/**
* Maps the specified key to the specified value in this table.
* Neither the key nor the value can be null.
*
* <p>The value can be retrieved by calling the {@code get} method
* with a key that is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key or value is null
*/
    public V put(K key, V value) {
        return putVal(key, value, false); // false: overwrite any existing mapping
    }
/** Implementation for put and putIfAbsent */
    // Implementation for put and putIfAbsent. Retries the CAS/lock protocol
    // until the insertion or replacement takes effect; returns the previous
    // value, or null if there was none.
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0; // nodes seen in the bin; 0 = nothing done yet
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable(); // lazy first-use allocation
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                // Empty bin: try to CAS in a new node without locking.
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, then retry
            else {
                V oldVal = null;
                synchronized (f) { // lock the bin's first node
                    if (tabAt(tab, i) == f) { // recheck: bin head may have changed
                        if (fh >= 0) { // plain list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Not found: append at tail.
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) { // red-black tree bin
                            Node<K,V> p;
                            binCount = 2; // non-zero, but never triggers treeify
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) { // the locked section actually ran
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal; // replaced: count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount); // new mapping added: bump count, maybe resize
        return null;
    }
/**
* Copies all of the mappings from the specified map to this one.
* These mappings replace any mappings that this map had for any of the
* keys currently in the specified map.
*
* @param m mappings to be stored in this map
*/
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size()); // pre-size once to avoid repeated resizing below
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }
/**
* Removes the key (and its corresponding value) from this map.
* This method does nothing if the key is not in the map.
*
* @param key the key that needs to be removed
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key is null
*/
    public V remove(Object key) {
        // null value => delete; null cv => no expected-value precondition
        return replaceNode(key, null, null);
    }
/**
* Implementation for the four public remove/replace methods:
* Replaces node value with v, conditional upon match of cv if
* non-null. If resulting value is null, delete.
*/
    // Implementation for the four public remove/replace methods: replaces the
    // node's value with {@code value}, conditional on a match with {@code cv}
    // if cv is non-null. A null {@code value} means delete the node.
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // no table or empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, then retry
            else {
                V oldVal = null;
                boolean validated = false; // true once we operated on a stable bin
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head under lock
                        if (fh >= 0) { // plain list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // Apply only if no expected value, or it matches.
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value; // replace in place
                                        else if (pred != null)
                                            pred.next = e.next; // unlink interior node
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) { // tree bin
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree shrank enough: convert back to a list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // deletion: decrement count
                        return oldVal;
                    }
                    break; // key absent (or cv mismatch): done
                }
                // not validated: bin changed under us; loop and retry
            }
        }
        return null;
    }
/**
* Removes all of the mappings from this map.
*/
    // Removes all mappings. Bins are cleared one at a time (not atomically),
    // so concurrent insertions may repopulate already-cleared bins.
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i; // already empty
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count the nodes being discarded (list or tree chain).
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1); // settle the element count in one adjustment
    }
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. The set supports element
* removal, which removes the corresponding mapping from this map,
* via the {@code Iterator.remove}, {@code Set.remove},
* {@code removeAll}, {@code retainAll}, and {@code clear}
* operations. It does not support the {@code add} or
* {@code addAll} operations.
*
* <p>The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*
* @return the set view
*/
public KeySetView<K,V> keySet() {
KeySetView<K,V> ks;
return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
}
/**
* Returns a {@link Collection} view of the values contained in this map.
* The collection is backed by the map, so changes to the map are
* reflected in the collection, and vice-versa. The collection
* supports element removal, which removes the corresponding
* mapping from this map, via the {@code Iterator.remove},
* {@code Collection.remove}, {@code removeAll},
* {@code retainAll}, and {@code clear} operations. It does not
* support the {@code add} or {@code addAll} operations.
*
* <p>The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*
* @return the collection view
*/
public Collection<V> values() {
ValuesView<K,V> vs;
return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
}
/**
* Returns a {@link Set} view of the mappings contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. The set supports element
* removal, which removes the corresponding mapping from the map,
* via the {@code Iterator.remove}, {@code Set.remove},
* {@code removeAll}, {@code retainAll}, and {@code clear}
* operations.
*
* <p>The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*
* @return the set view
*/
public Set<Map.Entry<K,V>> entrySet() {
EntrySetView<K,V> es;
return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
}
/**
* Returns the hash code value for this {@link Map}, i.e.,
* the sum of, for each key-value pair in the map,
* {@code key.hashCode() ^ value.hashCode()}.
*
* @return the hash code value for this map
*/
public int hashCode() {
int h = 0;
Node<K,V>[] t;
if ((t = table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; )
h += p.key.hashCode() ^ p.val.hashCode();
}
return h;
}
/**
* Returns a string representation of this map. The string
* representation consists of a list of key-value mappings (in no
* particular order) enclosed in braces ("{@code {}}"). Adjacent
* mappings are separated by the characters {@code ", "} (comma
* and space). Each key-value mapping is rendered as the key
* followed by an equals sign ("{@code =}") followed by the
* associated value.
*
* @return a string representation of this map
*/
public String toString() {
Node<K,V>[] t;
int f = (t = table) == null ? 0 : t.length;
Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
StringBuilder sb = new StringBuilder();
sb.append('{');
Node<K,V> p;
if ((p = it.advance()) != null) {
for (;;) {
K k = p.key;
V v = p.val;
sb.append(k == this ? "(this Map)" : k);
sb.append('=');
sb.append(v == this ? "(this Map)" : v);
if ((p = it.advance()) == null)
break;
sb.append(',').append(' ');
}
}
return sb.append('}').toString();
}
/**
* Compares the specified object with this map for equality.
* Returns {@code true} if the given object is a map with the same
* mappings as this map. This operation may return misleading
* results if either map is concurrently modified during execution
* of this method.
*
* @param o object to be compared for equality with this map
* @return {@code true} if the specified object is equal to this map
*/
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Direction 1: every mapping here must be present in m.
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Direction 2: every mapping in m must be present here
            // (null keys/values in m can never match this map).
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
/**
* Stripped-down version of helper class used in previous version,
* declared for the sake of serialization compatibility
*/
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        // Retained solely so writeObject can emit pre-JDK8-compatible streams.
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }
/**
* Saves the state of the {@code ConcurrentHashMapV8} instance to a
* stream (i.e., serializes it).
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData
* the key (Object) and value (Object)
* for each key-value mapping, followed by a null pair.
* The key-value mappings are emitted in no particular order.
*/
    // Serializes the map in the legacy (segmented) stream format declared by
    // serialPersistentFields: dummy segments, then key/value pairs, then a
    // null/null terminator pair.
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Emit each mapping as a key object followed by its value object.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // null/null pair terminates the mapping stream (see readObject).
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     * could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        // All deserialized mappings are chained into a reversed list headed
        // by p, then inserted into a correctly sized table below.
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked") K k = (K) s.readObject();
            @SuppressWarnings("unchecked") V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break; // null/null pair terminates the serialized mappings
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            int n;
            // Size the table for ~0.75 load factor, capped at MAXIMUM_CAPACITY.
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L; // counts distinct mappings actually inserted
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin has already been converted to a tree.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Linear scan to reject duplicate keys.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Overpopulated bin: treeify it immediately,
                            // including the node being inserted.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // 0.75 * n resize threshold
            baseCount = added;
        }
    }
// ConcurrentMap methods
/**
* {@inheritDoc}
*
* @return the previous value associated with the specified key,
* or {@code null} if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
public V putIfAbsent(K key, V value) {
return putVal(key, value, true);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if the specified key is null
*/
public boolean remove(Object key, Object value) {
if (key == null)
throw new NullPointerException();
return value != null && replaceNode(key, null, value) != null;
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if any of the arguments are null
*/
public boolean replace(K key, V oldValue, V newValue) {
if (key == null || oldValue == null || newValue == null)
throw new NullPointerException();
return replaceNode(key, newValue, oldValue) != null;
}
/**
* {@inheritDoc}
*
* @return the previous value associated with the specified key,
* or {@code null} if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
public V replace(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
return replaceNode(key, value, null);
}
// Overrides of JDK8+ Map extension method defaults
/**
* Returns the value to which the specified key is mapped, or the
* given default value if this map contains no mapping for the
* key.
*
* @param key the key whose associated value is to be returned
* @param defaultValue the value to return if this map contains
* no mapping for the given key
* @return the mapping for the key, if present; else the default value
* @throws NullPointerException if the specified key is null
*/
public V getOrDefault(Object key, V defaultValue) {
V v;
return (v = get(key)) == null ? defaultValue : v;
}
public void forEach(BiAction<? super K, ? super V> action) {
if (action == null) throw new NullPointerException();
Node<K,V>[] t;
if ((t = table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; ) {
action.apply(p.key, p.val);
}
}
}
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry's key and current value, retrying per entry
     * under concurrent updates.
     *
     * @param function the function computing each replacement value
     * @throws NullPointerException if the function or any value it
     * returns is null
     */
    public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // CAS-style retry: if a concurrent update changed the
                    // value, re-read it and recompute; stop once the
                    // replacement sticks or the mapping disappears.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}. The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key. Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: publish a ReservationNode placeholder so other
                // writers to this bin block while the function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val, null);
                        } finally {
                            // Install the real node, or clear the reservation
                            // if the function returned null or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, retry
            else {
                boolean added = false;
                synchronized (f) {
                    // Recheck bin head; retry from top if it changed.
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain linked bin: find existing key or append.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek; V ev;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: search, and insert only on miss.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val; // existing mapping: count unchanged
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);
        return val;
    }
    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0; // -1 if a mapping is removed, 0 otherwise
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break; // key absent: nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain linked bin: locate key; replace or unlink.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Function returned null: remove node.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    // Untreeify if removal shrank the tree
                                    // below the point where a root remains.
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0; // +1 on insert, -1 on removal, 0 on replace/no-op
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve the slot while the function runs so
                // other writers to this bin wait.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val, null);
                            }
                        } finally {
                            // Install the result, or clear the reservation on
                            // null result or thrown exception.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain linked bin: replace, remove, or append.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: compute with null and
                                    // append if non-null.
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        delta = 1;
                                        pred.next =
                                            new Node<K,V>(h, key, val, null);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0; // +1 on insert, -1 on removal, 0 on replace
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: the function is not consulted; install value
                // directly via CAS (retry loop handles CAS failure).
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, retry
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Plain linked bin: remap existing value, or
                            // append the given value on miss.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
// Hashtable legacy methods
    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    @Deprecated public boolean contains(Object value) {
        // Pure delegate kept only for Hashtable API compatibility.
        return containsValue(value);
    }
/**
* Returns an enumeration of the keys in this table.
*
* @return an enumeration of the keys in this table
* @see #keySet()
*/
public Enumeration<K> keys() {
Node<K,V>[] t;
int f = (t = table) == null ? 0 : t.length;
return new KeyIterator<K,V>(t, f, 0, f, this);
}
/**
* Returns an enumeration of the values in this table.
*
* @return an enumeration of the values in this table
* @see #values()
*/
public Enumeration<V> elements() {
Node<K,V>[] t;
int f = (t = table) == null ? 0 : t.length;
return new ValueIterator<K,V>(t, f, 0, f, this);
}
// ConcurrentHashMapV8-only methods
/**
* Returns the number of mappings. This method should be used
* instead of {@link #size} because a ConcurrentHashMapV8 may
* contain more mappings than can be represented as an int. The
* value returned is an estimate; the actual count may differ if
* there are concurrent insertions or removals.
*
* @return the number of mappings
* @since 1.8
*/
public long mappingCount() {
long n = sumCount();
return (n < 0L) ? 0L : n; // ignore transient negative values
}
/**
* Creates a new {@link Set} backed by a ConcurrentHashMapV8
* from the given type to {@code Boolean.TRUE}.
*
* @return the new set
* @since 1.8
*/
public static <K> KeySetView<K,Boolean> newKeySet() {
return new KeySetView<K,Boolean>
(new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
}
/**
* Creates a new {@link Set} backed by a ConcurrentHashMapV8
* from the given type to {@code Boolean.TRUE}.
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
* @return the new set
* @throws IllegalArgumentException if the initial capacity of
* elements is negative
* @since 1.8
*/
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
return new KeySetView<K,Boolean>
(new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
}
/**
* Returns a {@link Set} view of the keys in this map, using the
* given common mapped value for any additions (i.e., {@link
* Collection#add} and {@link Collection#addAll(Collection)}).
* This is of course only appropriate if it is acceptable to use
* the same value for all additions from this view.
*
* @param mappedValue the mapped value to use for any additions
* @return the set view
* @throws NullPointerException if the mappedValue is null
*/
public KeySetView<K,V> keySet(V mappedValue) {
if (mappedValue == null)
throw new NullPointerException();
return new KeySetView<K,V>(this, mappedValue);
}
/* ---------------- Special Nodes -------------- */
    /**
     * A node inserted at head of bins during transfer operations.
     * Its MOVED hash marks the bin as already migrated; lookups are
     * redirected to {@code nextTable}.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);
            this.nextTable = tab;
        }
        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {
                        // Negative hash: either another forwarding node
                        // (restart against its table) or a special node
                        // (TreeBin/Reservation) with its own find.
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
    /**
     * A place-holder node used in computeIfAbsent and compute.
     * Holds a bin slot (with hash RESERVED) while a user function
     * computes the value; never holds a key or value itself.
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            super(RESERVED, null, null, null);
        }
        Node<K,V> find(int h, Object k) {
            // A reservation never matches any key.
            return null;
        }
    }
/* ---------------- Table Initialization and Resizing -------------- */
/**
* Returns the stamp bits for resizing a table of size n.
* Must be negative when shifted left by RESIZE_STAMP_SHIFT.
*/
static final int resizeStamp(int n) {
return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
    /**
     * Initializes table, using the size recorded in sizeCtl.
     * Exactly one thread wins the CAS on sizeCtl (setting it to -1)
     * and allocates; losers spin until the table is published.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    // Recheck: another thread may have initialized between
                    // our read and the CAS.
                    if ((tab = table) == null || tab.length == 0) {
                        // Positive sizeCtl holds the requested capacity.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2); // 0.75 * n resize threshold
                    }
                } finally {
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: CAS the base count; fall back to striped counter
        // cells under contention.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterHashCode hc; CounterCell a; long v; int m;
            boolean uncontended = true;
            if ((hc = threadCounterHashCode.get()) == null ||
                as == null || (m = as.length - 1) < 0 ||
                (a = as[m & hc.code]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                // Slow path handles cell creation, rehashing, expansion.
                fullAddCount(x, hc, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int n, sc;
            // Trigger or help a resize while occupancy exceeds threshold.
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                if (sc < 0) {
                    // A resize is running: join it unless it is finishing,
                    // saturated, or not yet visible.
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null); // we are the initiating resizer
                s = sumCount();
            }
        }
    }
    /**
     * Helps transfer if a resize is in progress.
     * Called when a writer encounters a ForwardingNode; returns the
     * table to retry the operation against.
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length);
            // Join only while this particular resize is still active.
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
                    break;
                // Each CAS increment registers one more helper thread.
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: next power of two >= 1.5*size + 1, capped.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Not yet initialized: allocate directly, like initTable.
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break; // already big enough, or cannot grow further
            else if (tab == table) {
                int rs = resizeStamp(n);
                // NOTE(review): sc < 0 appears unreachable here given the
                // loop condition sc >= 0 — retained as written; confirm
                // against upstream jsr166e before changing.
                if (sc < 0) {
                    Node<K,V>[] nt;
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null); // initiate the resize ourselves
            }
        }
    }
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Each helper claims 'stride' consecutive bins at a time.
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) { // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) { // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferIndex = n;
        }
        int nextn = nextTab.length;
        // fwd marks a bin as migrated and redirects lookups to nextTab.
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next bin to process: walk down within our claimed
            // [bound, i] range, or CAS transferIndex to grab a new range.
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Final commit: publish the new table.
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1); // 0.75 * 2n
                    return;
                }
                // Deregister this helper; the last one out re-sweeps.
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Split the bin into "low" (stays at index i) and
                        // "high" (moves to i + n) lists, keyed by bit n.
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            // lastRun is the tail run whose nodes all land
                            // in the same half and can be reused as-is.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Copy nodes before lastRun into the two lists.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Split a tree bin the same way, tracking counts
                            // to decide whether each half stays a tree.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify small halves; reuse the old bin if
                            // one half got everything.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
/* ---------------- Conversion from/to TreeBins -------------- */
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            // Below MIN_TREEIFY_CAPACITY, growing the table is preferred
            // over converting the bin to a tree.
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    // Recheck head under lock before converting.
                    if (tabAt(tab, index) == b) {
                        // Build a doubly linked TreeNode list preserving
                        // bin order, then wrap it in a TreeBin.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
/**
* Returns a list on non-TreeNodes replacing those in given list.
*/
static <K,V> Node<K,V> untreeify(Node<K,V> b) {
Node<K,V> hd = null, tl = null;
for (Node<K,V> q = b; q != null; q = q.next) {
Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
if (tl == null)
hd = p;
else
tl.next = p;
tl = p;
}
return hd;
}
/* ---------------- TreeNodes -------------- */
    /**
     * Nodes for use in TreeBins
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }
        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }
        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    // Navigate by hash first; on equal hashes fall back to
                    // Comparable ordering, then exhaustive subtree search.
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    // Non-comparable tie: search right subtree recursively,
                    // then continue down the left.
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }
/* ---------------- TreeBins -------------- */
/**
* TreeNodes used at the heads of bins. TreeBins do not hold user
* keys or values, but instead point to list of TreeNodes and
* their root. They also maintain a parasitic read-write lock
* forcing writers (who hold bin lock) to wait for readers (who do
* not) to complete before tree restructuring operations.
*/
static final class TreeBin<K,V> extends Node<K,V> {
TreeNode<K,V> root;
volatile TreeNode<K,V> first;
volatile Thread waiter;
volatile int lockState;
// values for lockState
static final int WRITER = 1; // set while holding write lock
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock
/**
* Tie-breaking utility for ordering insertions when equal
* hashCodes and non-comparable. We don't require a total
* order, just a consistent insertion rule to maintain
* equivalence across rebalancings. Tie-breaking further than
* necessary simplifies testing a bit.
*/
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null ||
(d = a.getClass().getName().
compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
-1 : 1);
return d;
}
        /**
         * Creates bin with initial set of nodes headed by b.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null, null);
            this.first = b;
            TreeNode<K,V> r = null;
            // Insert each node of the linked list into a red-black tree
            // rooted at r, rebalancing as we go.
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (r == null) {
                    x.parent = null;
                    x.red = false; // root is always black
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        // Order by hash, then Comparable, then tie-break.
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }
        /**
         * Acquires write lock for tree restructuring.
         * Fast path is a single CAS from the unlocked state; contended
         * acquisition (readers or a waiter present) is offloaded.
         */
        private final void lockRoot() {
            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }
        /**
         * Releases write lock for tree restructuring.
         * A plain volatile write suffices: the writer holds the lock
         * exclusively, so no CAS is needed to clear it.
         */
        private final void unlockRoot() {
            lockState = 0;
        }
        /**
         * Possibly blocks awaiting root lock.
         * Spins, registers itself as the single waiter via the WAITER
         * bit, then parks until readers drain and the CAS to WRITER
         * succeeds.
         */
        private final void contendedLock() {
            boolean waiting = false;
            for (int s;;) {
                // Lock is free (ignoring our own WAITER bit): try to take it.
                if (((s = lockState) & ~WAITER) == 0) {
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiting)
                            waiter = null;
                        return;
                    }
                }
                // Lock held and no waiter registered yet: register ourselves.
                else if ((s & WAITER) == 0) {
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                }
                // Registered: park until a releasing reader unparks us.
                else if (waiting)
                    LockSupport.park(this);
            }
        }
        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; ) {
                    int s; K ek;
                    // A writer holds or awaits the lock: fall back to a
                    // lock-free linear scan over the 'next' links.
                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
                        if (e.hash == h &&
                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
                            return e;
                        e = e.next;
                    }
                    // Otherwise acquire a shared read lock (increment by
                    // READER) and search the tree.
                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
                                                 s + READER)) {
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            int ls;
                            // Release our read count; the last reader out
                            // unparks a waiting writer, if any.
                            do {} while (!U.compareAndSwapInt
                                         (this, LOCKSTATE,
                                          ls = lockState, ls - READER));
                            if (ls == (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }
        /**
         * Finds or adds a node.
         * NOTE(review): appears to be invoked while the caller holds the
         * bin lock, so only the tree write lock (for rotations) is taken
         * here — confirm at call sites outside this excerpt.
         * @return null if added, else the existing node with an equal key
         */
        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
            Class<?> kc = null;         // lazily computed comparable class of k
            boolean searched = false;   // whether the full-subtree scan was done
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if (p == null) {
                    // empty tree: new node is both list head and root
                    first = root = new TreeNode<K,V>(h, k, v, null, null);
                    break;
                }
                else if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p; // key already present
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // equal hashes and incomparable keys: scan both subtrees
                    // once for an existing match before tie-breaking
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }
                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // attach new node at leaf; link it at the head of the list
                    TreeNode<K,V> x, f = first;
                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
                    if (f != null)
                        f.prev = x;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    if (!xp.red)
                        x.red = true; // no violation possible; skip rebalance
                    else {
                        // rebalancing may rotate nodes: exclude readers
                        lockRoot();
                        try {
                            root = balanceInsertion(root, x);
                        } finally {
                            unlockRoot();
                        }
                    }
                    break;
                }
            }
            assert checkInvariants(root);
            return null;
        }
        /**
         * Removes the given node, that must be present before this
         * call. This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         *
         * @return true if now too small, so should be untreeified
         */
        final boolean removeTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev; // unlink traversal pointers
            TreeNode<K,V> r, rl;
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            if (first == null) {
                root = null;
                return true;
            }
            // cheap structural smallness check instead of an exact count
            if ((r = root) == null || r.right == null || // too small
                (rl = r.left) == null || rl.left == null)
                return true;
            lockRoot();
            try {
                TreeNode<K,V> replacement;
                TreeNode<K,V> pl = p.left;
                TreeNode<K,V> pr = p.right;
                if (pl != null && pr != null) {
                    // interior node: swap tree linkage (not contents) with
                    // the in-order successor
                    TreeNode<K,V> s = pr, sl;
                    while ((sl = s.left) != null) // find successor
                        s = sl;
                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                    TreeNode<K,V> sr = s.right;
                    TreeNode<K,V> pp = p.parent;
                    if (s == pr) { // p was s's direct parent
                        p.parent = s;
                        s.right = p;
                    }
                    else {
                        TreeNode<K,V> sp = s.parent;
                        if ((p.parent = sp) != null) {
                            if (s == sp.left)
                                sp.left = p;
                            else
                                sp.right = p;
                        }
                        if ((s.right = pr) != null)
                            pr.parent = s;
                    }
                    p.left = null;
                    if ((p.right = sr) != null)
                        sr.parent = p;
                    if ((s.left = pl) != null)
                        pl.parent = s;
                    if ((s.parent = pp) == null)
                        r = s;
                    else if (p == pp.left)
                        pp.left = s;
                    else
                        pp.right = s;
                    if (sr != null)
                        replacement = sr;
                    else
                        replacement = p;
                }
                else if (pl != null)
                    replacement = pl;
                else if (pr != null)
                    replacement = pr;
                else
                    replacement = p; // leaf: p is its own placeholder
                if (replacement != p) {
                    TreeNode<K,V> pp = replacement.parent = p.parent;
                    if (pp == null)
                        r = replacement;
                    else if (p == pp.left)
                        pp.left = replacement;
                    else
                        pp.right = replacement;
                    p.left = p.right = p.parent = null;
                }
                // removing a red node never violates RB invariants
                root = (p.red) ? r : balanceDeletion(r, replacement);
                if (p == replacement) { // detach pointers
                    TreeNode<K,V> pp;
                    if ((pp = p.parent) != null) {
                        if (p == pp.left)
                            pp.left = null;
                        else if (p == pp.right)
                            pp.right = null;
                        p.parent = null;
                    }
                }
            } finally {
                unlockRoot();
            }
            assert checkInvariants(root);
            return false;
        }
        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR
        /**
         * Left-rotates the subtree around p; returns the (possibly new)
         * tree root. No-op when p has no right child.
         */
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false; // r became root: must be black
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }
        /**
         * Right-rotates the subtree around p; returns the (possibly new)
         * tree root. Mirror image of rotateLeft.
         */
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false; // l became root: must be black
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }
        /**
         * Standard CLR red-black insertion fixup for the newly linked
         * node x; returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true; // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false; // root is always black
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root; // no red-red violation remains
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // red uncle: recolor and continue fixup from grandparent
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            // inner child: rotate into outer form first
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    // mirror image of the branch above
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }
        /**
         * Standard CLR red-black deletion fixup starting from the
         * replacement node x; returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false; // root is always black
                    return x;
                }
                else if (x.red) {
                    x.red = false; // absorb the extra black into a red node
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // red sibling: rotate to get a black sibling
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // sibling with two black children: recolor, move up
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root; // fixup complete
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root; // fixup complete
                        }
                    }
                }
            }
        }
/**
* Recursive invariant check
*/
static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
tb = t.prev, tn = (TreeNode<K,V>)t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
        private static final sun.misc.Unsafe U; // Unsafe mechanics for lockState CAS
        private static final long LOCKSTATE;    // memory offset of the lockState field
        static {
            try {
                U = getUnsafe();
                Class<?> k = TreeBin.class;
                // resolve the field offset once; used by all CAS calls above
                LOCKSTATE = U.objectFieldOffset
                    (k.getDeclaredField("lockState"));
            } catch (Exception e) {
                throw new Error(e); // reflective setup failure is unrecoverable
            }
        }
}
/* ----------------Table Traversal -------------- */
    /**
     * Records the table, its length, and current traversal index for a
     * traverser that must process a region of a forwarded table before
     * proceeding with current table.
     */
    static final class TableStack<K,V> {
        int length;           // length of the saved table
        int index;            // traversal index saved into that table
        Node<K,V>[] tab;      // the saved table itself
        TableStack<K,V> next; // next frame in the stack (or spare list)
    }
/**
* Encapsulates traversal for methods such as containsValue; also
* serves as a base class for other iterators and spliterators.
*
* Method advance visits once each still-valid node that was
* reachable upon iterator construction. It might miss some that
* were added to a bin after the bin was visited, which is OK wrt
* consistency guarantees. Maintaining this property in the face
* of possible ongoing resizes requires a fair amount of
* bookkeeping state that is difficult to optimize away amidst
* volatile accesses. Even so, traversal maintains reasonable
* throughput.
*
* Normally, iteration proceeds bin-by-bin traversing lists.
* However, if the table has been resized, then all future steps
* must traverse both the bin at the current index as well as at
* (index + baseSize); and so on for further resizings. To
* paranoically cope with potential sharing by users of iterators
* across threads, iteration terminates if a bounds checks fails
* for a table read.
*/
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size
        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }
        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null; // traversal exhausted (or stale bounds)
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // negative hash marks a special (non-data) node
                    if (e instanceof ForwardingNode) {
                        // bin moved during a resize: descend into the new
                        // table, remembering where we were in the old one
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // iterate tree via its list
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }
        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare; // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }
        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // pop frames whose region of the forwarded table is finished
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null; // allow the old table to be GC'd
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove.
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMapV8<K,V> map; // backing map, needed by remove()
        Node<K,V> lastReturned;             // node returned by the last next()
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMapV8<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance(); // eagerly position on the first element
        }
        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }
        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            map.replaceNode(p.key, null, null); // remove mapping if still present
        }
    }
    static final class KeyIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<K>, Enumeration<K> {
        // NOTE(review): the "index" and "size" parameter names here are
        // swapped relative to BaseIterator's (size, index) order; arguments
        // are forwarded positionally, so correctness depends on call sites
        // using BaseIterator's order. This matches upstream jsr166 — do not
        // "fix" by reordering without checking every caller.
        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
                    ConcurrentHashMapV8<K,V> map) {
            super(tab, index, size, limit, map);
        }
        /** Returns the next key and advances the traversal. */
        public final K next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            lastReturned = p; // enable remove()
            advance();
            return k;
        }
        public final K nextElement() { return next(); }
    }
    static final class ValueIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<V>, Enumeration<V> {
        // NOTE(review): "index"/"size" parameter names are swapped relative
        // to BaseIterator's order, as in KeyIterator; forwarded positionally.
        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMapV8<K,V> map) {
            super(tab, index, size, limit, map);
        }
        /** Returns the next value and advances the traversal. */
        public final V next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            V v = p.val;
            lastReturned = p; // enable remove()
            advance();
            return v;
        }
        public final V nextElement() { return next(); }
    }
    static final class EntryIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<Map.Entry<K,V>> {
        // NOTE(review): "index"/"size" parameter names are swapped relative
        // to BaseIterator's order, as in KeyIterator; forwarded positionally.
        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMapV8<K,V> map) {
            super(tab, index, size, limit, map);
        }
        /** Returns a write-through MapEntry snapshot and advances. */
        public final Map.Entry<K,V> next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            V v = p.val; // snapshot both before advancing
            lastReturned = p;
            advance();
            return new MapEntry<K,V>(k, v, map);
        }
    }
/**
* Exported Entry for EntryIterator
*/
static final class MapEntry<K,V> implements Map.Entry<K,V> {
final K key; // non-null
V val; // non-null
final ConcurrentHashMapV8<K,V> map;
MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) {
this.key = key;
this.val = val;
this.map = map;
}
public K getKey() { return key; }
public V getValue() { return val; }
public int hashCode() { return key.hashCode() ^ val.hashCode(); }
public String toString() { return key + "=" + val; }
public boolean equals(Object o) {
Object k, v; Map.Entry<?,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == val || v.equals(val)));
}
/**
* Sets our entry's value and writes through to the map. The
* value to return is somewhat arbitrary here. Since we do not
* necessarily track asynchronous changes, the most recent
* "previous" value could be different from what we return (or
* could even have been removed, in which case the put will
* re-establish). We do not and cannot guarantee more.
*/
public V setValue(V value) {
if (value == null) throw new NullPointerException();
V v = val;
val = value;
map.put(key, value);
return v;
}
}
static final class KeySpliterator<K,V> extends Traverser<K,V>
implements ConcurrentHashMapSpliterator<K> {
long est; // size estimate
KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
long est) {
super(tab, size, index, limit);
this.est = est;
}
public ConcurrentHashMapSpliterator<K> trySplit() {
int i, f, h;
return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
f, est >>>= 1);
}
public void forEachRemaining(Action<? super K> action) {
if (action == null) throw new NullPointerException();
for (Node<K,V> p; (p = advance()) != null;)
action.apply(p.key);
}
public boolean tryAdvance(Action<? super K> action) {
if (action == null) throw new NullPointerException();
Node<K,V> p;
if ((p = advance()) == null)
return false;
action.apply(p.key);
return true;
}
public long estimateSize() { return est; }
}
static final class ValueSpliterator<K,V> extends Traverser<K,V>
implements ConcurrentHashMapSpliterator<V> {
long est; // size estimate
ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
long est) {
super(tab, size, index, limit);
this.est = est;
}
public ConcurrentHashMapSpliterator<V> trySplit() {
int i, f, h;
return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
f, est >>>= 1);
}
public void forEachRemaining(Action<? super V> action) {
if (action == null) throw new NullPointerException();
for (Node<K,V> p; (p = advance()) != null;)
action.apply(p.val);
}
public boolean tryAdvance(Action<? super V> action) {
if (action == null) throw new NullPointerException();
Node<K,V> p;
if ((p = advance()) == null)
return false;
action.apply(p.val);
return true;
}
public long estimateSize() { return est; }
}
static final class EntrySpliterator<K,V> extends Traverser<K,V>
implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
final ConcurrentHashMapV8<K,V> map; // To export MapEntry
long est; // size estimate
EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
long est, ConcurrentHashMapV8<K,V> map) {
super(tab, size, index, limit);
this.map = map;
this.est = est;
}
public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
int i, f, h;
return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
f, est >>>= 1, map);
}
public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
if (action == null) throw new NullPointerException();
for (Node<K,V> p; (p = advance()) != null; )
action.apply(new MapEntry<K,V>(p.key, p.val, map));
}
public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
if (action == null) throw new NullPointerException();
Node<K,V> p;
if ((p = advance()) == null)
return false;
action.apply(new MapEntry<K,V>(p.key, p.val, map));
return true;
}
public long estimateSize() { return est; }
}
// Parallel bulk operations
    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     *
     * @param b the caller-supplied parallelism threshold
     * @return 0 to run sequentially, else a split guide capped at
     *         4x the common pool parallelism
     */
    final int batchFor(long b) {
        long n;
        // run sequentially when the threshold is "never" (MAX_VALUE),
        // the map is trivially small, or smaller than the threshold
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }
/**
* Performs the given action for each (key, value).
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param action the action
* @since 1.8
*/
public void forEach(long parallelismThreshold,
BiAction<? super K,? super V> action) {
if (action == null) throw new NullPointerException();
new ForEachMappingTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
action).invoke();
}
/**
* Performs the given action for each non-null transformation
* of each (key, value).
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case the action is not applied)
* @param action the action
* @since 1.8
*/
public <U> void forEach(long parallelismThreshold,
BiFun<? super K, ? super V, ? extends U> transformer,
Action<? super U> action) {
if (transformer == null || action == null)
throw new NullPointerException();
new ForEachTransformedMappingTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
transformer, action).invoke();
}
/**
* Returns a non-null result from applying the given search
* function on each (key, value), or null if none. Upon
* success, further element processing is suppressed and the
* results of any other parallel invocations of the search
* function are ignored.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param searchFunction a function returning a non-null
* result on success, else null
* @return a non-null result from applying the given search
* function on each (key, value), or null if none
* @since 1.8
*/
public <U> U search(long parallelismThreshold,
BiFun<? super K, ? super V, ? extends U> searchFunction) {
if (searchFunction == null) throw new NullPointerException();
return new SearchMappingsTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
searchFunction, new AtomicReference<U>()).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all (key, value) pairs using the given reducer to
* combine values, or null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case it is not combined)
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all (key, value) pairs
* @since 1.8
*/
public <U> U reduce(long parallelismThreshold,
BiFun<? super K, ? super V, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceMappingsTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all (key, value) pairs using the given reducer to
* combine values, and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all (key, value) pairs
* @since 1.8
*/
public double reduceToDouble(long parallelismThreshold,
ObjectByObjectToDouble<? super K, ? super V> transformer,
double basis,
DoubleByDoubleToDouble reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceMappingsToDoubleTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all (key, value) pairs using the given reducer to
* combine values, and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all (key, value) pairs
* @since 1.8
*/
public long reduceToLong(long parallelismThreshold,
ObjectByObjectToLong<? super K, ? super V> transformer,
long basis,
LongByLongToLong reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceMappingsToLongTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all (key, value) pairs using the given reducer to
* combine values, and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all (key, value) pairs
* @since 1.8
*/
public int reduceToInt(long parallelismThreshold,
ObjectByObjectToInt<? super K, ? super V> transformer,
int basis,
IntByIntToInt reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceMappingsToIntTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Performs the given action for each key.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param action the action
* @since 1.8
*/
public void forEachKey(long parallelismThreshold,
Action<? super K> action) {
if (action == null) throw new NullPointerException();
new ForEachKeyTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
action).invoke();
}
/**
* Performs the given action for each non-null transformation
* of each key.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case the action is not applied)
* @param action the action
* @since 1.8
*/
public <U> void forEachKey(long parallelismThreshold,
Fun<? super K, ? extends U> transformer,
Action<? super U> action) {
if (transformer == null || action == null)
throw new NullPointerException();
new ForEachTransformedKeyTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
transformer, action).invoke();
}
/**
* Returns a non-null result from applying the given search
* function on each key, or null if none. Upon success,
* further element processing is suppressed and the results of
* any other parallel invocations of the search function are
* ignored.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param searchFunction a function returning a non-null
* result on success, else null
* @return a non-null result from applying the given search
* function on each key, or null if none
* @since 1.8
*/
public <U> U searchKeys(long parallelismThreshold,
Fun<? super K, ? extends U> searchFunction) {
if (searchFunction == null) throw new NullPointerException();
return new SearchKeysTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
searchFunction, new AtomicReference<U>()).invoke();
}
/**
* Returns the result of accumulating all keys using the given
* reducer to combine values, or null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param reducer a commutative associative combining function
* @return the result of accumulating all keys using the given
* reducer to combine values, or null if none
* @since 1.8
*/
public K reduceKeys(long parallelismThreshold,
BiFun<? super K, ? super K, ? extends K> reducer) {
if (reducer == null) throw new NullPointerException();
return new ReduceKeysTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all keys using the given reducer to combine values, or
* null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case it is not combined)
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all keys
* @since 1.8
*/
public <U> U reduceKeys(long parallelismThreshold,
Fun<? super K, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceKeysTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all keys using the given reducer to combine values, and
* the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all keys
* @since 1.8
*/
public double reduceKeysToDouble(long parallelismThreshold,
ObjectToDouble<? super K> transformer,
double basis,
DoubleByDoubleToDouble reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceKeysToDoubleTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all keys using the given reducer to combine values, and
* the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all keys
* @since 1.8
*/
public long reduceKeysToLong(long parallelismThreshold,
ObjectToLong<? super K> transformer,
long basis,
LongByLongToLong reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceKeysToLongTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all keys using the given reducer to combine values, and
* the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all keys
* @since 1.8
*/
public int reduceKeysToInt(long parallelismThreshold,
ObjectToInt<? super K> transformer,
int basis,
IntByIntToInt reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceKeysToIntTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Performs the given action for each value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param action the action
* @since 1.8
*/
public void forEachValue(long parallelismThreshold,
Action<? super V> action) {
if (action == null)
throw new NullPointerException();
new ForEachValueTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
action).invoke();
}
/**
* Performs the given action for each non-null transformation
* of each value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case the action is not applied)
* @param action the action
* @since 1.8
*/
public <U> void forEachValue(long parallelismThreshold,
Fun<? super V, ? extends U> transformer,
Action<? super U> action) {
if (transformer == null || action == null)
throw new NullPointerException();
new ForEachTransformedValueTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
transformer, action).invoke();
}
/**
* Returns a non-null result from applying the given search
* function on each value, or null if none. Upon success,
* further element processing is suppressed and the results of
* any other parallel invocations of the search function are
* ignored.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param searchFunction a function returning a non-null
* result on success, else null
* @return a non-null result from applying the given search
* function on each value, or null if none
* @since 1.8
*/
public <U> U searchValues(long parallelismThreshold,
Fun<? super V, ? extends U> searchFunction) {
if (searchFunction == null) throw new NullPointerException();
return new SearchValuesTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
searchFunction, new AtomicReference<U>()).invoke();
}
/**
* Returns the result of accumulating all values using the
* given reducer to combine values, or null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param reducer a commutative associative combining function
* @return the result of accumulating all values
* @since 1.8
*/
public V reduceValues(long parallelismThreshold,
BiFun<? super V, ? super V, ? extends V> reducer) {
if (reducer == null) throw new NullPointerException();
return new ReduceValuesTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all values using the given reducer to combine values, or
* null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case it is not combined)
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all values
* @since 1.8
*/
public <U> U reduceValues(long parallelismThreshold,
Fun<? super V, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceValuesTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all values using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all values
* @since 1.8
*/
public double reduceValuesToDouble(long parallelismThreshold,
ObjectToDouble<? super V> transformer,
double basis,
DoubleByDoubleToDouble reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceValuesToDoubleTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all values using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all values
* @since 1.8
*/
public long reduceValuesToLong(long parallelismThreshold,
ObjectToLong<? super V> transformer,
long basis,
LongByLongToLong reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceValuesToLongTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all values using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all values
* @since 1.8
*/
public int reduceValuesToInt(long parallelismThreshold,
ObjectToInt<? super V> transformer,
int basis,
IntByIntToInt reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceValuesToIntTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Performs the given action for each entry.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param action the action
* @since 1.8
*/
public void forEachEntry(long parallelismThreshold,
Action<? super Map.Entry<K,V>> action) {
if (action == null) throw new NullPointerException();
new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
action).invoke();
}
/**
* Performs the given action for each non-null transformation
* of each entry.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case the action is not applied)
* @param action the action
* @since 1.8
*/
public <U> void forEachEntry(long parallelismThreshold,
Fun<Map.Entry<K,V>, ? extends U> transformer,
Action<? super U> action) {
if (transformer == null || action == null)
throw new NullPointerException();
new ForEachTransformedEntryTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
transformer, action).invoke();
}
/**
* Returns a non-null result from applying the given search
* function on each entry, or null if none. Upon success,
* further element processing is suppressed and the results of
* any other parallel invocations of the search function are
* ignored.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param searchFunction a function returning a non-null
* result on success, else null
* @return a non-null result from applying the given search
* function on each entry, or null if none
* @since 1.8
*/
public <U> U searchEntries(long parallelismThreshold,
Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
if (searchFunction == null) throw new NullPointerException();
return new SearchEntriesTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
searchFunction, new AtomicReference<U>()).invoke();
}
/**
* Returns the result of accumulating all entries using the
* given reducer to combine values, or null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param reducer a commutative associative combining function
* @return the result of accumulating all entries
* @since 1.8
*/
public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
if (reducer == null) throw new NullPointerException();
return new ReduceEntriesTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all entries using the given reducer to combine values,
* or null if none.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element, or null if there is no transformation (in
* which case it is not combined)
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all entries
* @since 1.8
*/
public <U> U reduceEntries(long parallelismThreshold,
Fun<Map.Entry<K,V>, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceEntriesTask<K,V,U>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all entries using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all entries
* @since 1.8
*/
public double reduceEntriesToDouble(long parallelismThreshold,
ObjectToDouble<Map.Entry<K,V>> transformer,
double basis,
DoubleByDoubleToDouble reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceEntriesToDoubleTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all entries using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all entries
* @since 1.8
*/
public long reduceEntriesToLong(long parallelismThreshold,
ObjectToLong<Map.Entry<K,V>> transformer,
long basis,
LongByLongToLong reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceEntriesToLongTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/**
* Returns the result of accumulating the given transformation
* of all entries using the given reducer to combine values,
* and the given basis as an identity value.
*
* @param parallelismThreshold the (estimated) number of elements
* needed for this operation to be executed in parallel
* @param transformer a function returning the transformation
* for an element
* @param basis the identity (initial default value) for the reduction
* @param reducer a commutative associative combining function
* @return the result of accumulating the given transformation
* of all entries
* @since 1.8
*/
public int reduceEntriesToInt(long parallelismThreshold,
ObjectToInt<Map.Entry<K,V>> transformer,
int basis,
IntByIntToInt reducer) {
if (transformer == null || reducer == null)
throw new NullPointerException();
return new MapReduceEntriesToIntTask<K,V>
(null, batchFor(parallelismThreshold), 0, 0, table,
null, transformer, basis, reducer).invoke();
}
/* ----------------Views -------------- */
    /**
     * Base class for views. A view wraps the backing map and presents one
     * projection of it (keys, values, or entries) as a {@link Collection};
     * mutations on the view write through to the map.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMapV8<K,V> map; // the backing map
        CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; }
        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMapV8<K,V> getMap() { return map; }
        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear() { map.clear(); }
        public final int size() { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }
        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);
        private static final String oomeMsg = "Required array size too large";
        public final Object[] toArray() {
            // mappingCount() is only an estimate: the map may grow while we
            // iterate, so the array may need to be expanded mid-copy.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Array filled before iteration finished; grow by ~50%
                    // (clamped to MAX_ARRAY_SIZE) and keep copying.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map shrank (or the estimate was high).
            return (i == n) ? r : Arrays.copyOf(r, i);
        }
        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            // Reuse the caller's array when it is large enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Same ~50% growth policy as toArray() above.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }
        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against a collection that contains itself.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }
        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                // null elements are treated as not contained.
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }
        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            // Iterator.remove writes through to the backing map.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }
        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }
    }
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value. This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        private final V value; // value used by add(); null disables additions
        KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public
            super(map);
            this.value = value;
        }
        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }
        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }
        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map. This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }
        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Iterate over the table as captured at construction time.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }
        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal returns null only when the key was newly inserted.
            return map.putVal(e, v, true) == null;
        }
        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }
        // Set contract: hash code is the sum of the element hash codes.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }
        // Set contract: equal iff the other object is a Set with the
        // same elements (mutual containsAll).
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }
        public ConcurrentHashMapSpliterator<K> spliteratorJSR166() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Size estimate from sumCount(); clamped to be non-negative.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }
        public void forEach(Action<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.apply(p.key);
            }
        }
    }
/**
* A view of a ConcurrentHashMapV8 as a {@link Collection} of
* values, in which additions are disabled. This class cannot be
* directly instantiated. See {@link #values()}.
*/
static final class ValuesView<K,V> extends CollectionView<K,V,V>
implements Collection<V>, java.io.Serializable {
private static final long serialVersionUID = 2249069246763182397L;
ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
public final boolean contains(Object o) {
return map.containsValue(o);
}
public final boolean remove(Object o) {
if (o != null) {
for (Iterator<V> it = iterator(); it.hasNext();) {
if (o.equals(it.next())) {
it.remove();
return true;
}
}
}
return false;
}
public final Iterator<V> iterator() {
ConcurrentHashMapV8<K,V> m = map;
Node<K,V>[] t;
int f = (t = m.table) == null ? 0 : t.length;
return new ValueIterator<K,V>(t, f, 0, f, m);
}
public final boolean add(V e) {
throw new UnsupportedOperationException();
}
public final boolean addAll(Collection<? extends V> c) {
throw new UnsupportedOperationException();
}
public ConcurrentHashMapSpliterator<V> spliteratorJS166() {
Node<K,V>[] t;
ConcurrentHashMapV8<K,V> m = map;
long n = m.sumCount();
int f = (t = m.table) == null ? 0 : t.length;
return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
}
public void forEach(Action<? super V> action) {
if (action == null) throw new NullPointerException();
Node<K,V>[] t;
if ((t = map.table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; )
action.apply(p.val);
}
}
}
    /**
     * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
     * entries. This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); }
        // An entry is contained iff its key maps to an equal value.
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }
        // Removes the mapping only if the key currently maps to the
        // entry's value (two-argument remove).
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }
        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMapV8<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }
        // putVal returns null only when the key was newly inserted;
        // NOTE(review): third arg false presumably means "not only-if-absent",
        // i.e. an existing mapping is overwritten — confirm against putVal.
        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }
        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }
        // Set contract: hash code is the sum of the entry hash codes.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }
        // Set contract: equal iff the other object is a Set with the
        // same elements (mutual containsAll).
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }
        public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliteratorJSR166() {
            Node<K,V>[] t;
            ConcurrentHashMapV8<K,V> m = map;
            // Size estimate from sumCount(); clamped to be non-negative.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }
        public void forEach(Action<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    // Wrap each node in a MapEntry so setValue can write through.
                    action.apply(new MapEntry<K,V>(p.key, p.val, map));
            }
        }
    }
// -------------------------------------------------------
    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to visit within the current bin
        int index;              // current table slot
        int baseIndex;          // first slot of this task's range
        int baseLimit;          // one past the last slot of this task's range
        final int baseSize;     // length of the table at construction
        int batch;              // split control
        /**
         * @param par parent task, or null for the root task
         * @param b   split budget (batch) for this task
         * @param i   starting slot index of this task's range
         * @param f   limit of this task's range (ignored for the root,
         *            which covers the whole table)
         * @param t   the table to traverse
         */
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }
        /**
         * Same as Traverser version.
         * Advances to the next node in this task's range, or returns null
         * when the range is exhausted.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; K ek; // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                // Negative hash marks a special node rather than a data node.
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: continue in the new table.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        // Traverse a tree bin via its linked node list.
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }
    }
/*
* Task classes. Coded in a regular but ugly format/style to
* simplify checks that each variant differs in the right way from
* others. The null screenings exist because compilers cannot tell
* that we've already null-checked task arguments, so we force
* simplest hoisted bypass to help avoid convoluted traps.
*/
    @SuppressWarnings("serial")
    /** Fork/join task applying an action to every key in its range. */
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super K> action;
            if ((action = this.action) != null) {
                // Repeatedly halve the remaining range, forking a subtask for
                // the upper half while split budget (batch) remains. Note the
                // in-place updates of batch and baseLimit in the ctor args.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the retained (lower) portion locally.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.key);
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /** Fork/join task applying an action to every value in its range. */
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super V> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the retained (lower) portion locally.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.apply(p.val);
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /** Fork/join task applying an action to every entry in its range. */
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Action<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Action<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the retained (lower) portion locally; the Node
                // itself is passed as the entry.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p);
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /** Fork/join task applying a (key, value) action to every mapping in its range. */
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiAction<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiAction<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the retained (lower) portion locally.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.apply(p.key, p.val);
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /**
     * Fork/join task applying an action to each non-null transformation
     * of every key in its range.
     */
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super K, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Process locally, skipping null transformations.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /**
     * Fork/join task applying an action to each non-null transformation
     * of every value in its range.
     */
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super V, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Process locally, skipping null transformations.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /**
     * Fork/join task applying an action to each non-null transformation
     * of every entry in its range.
     */
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Process locally, skipping null transformations.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /**
     * Fork/join task applying an action to each non-null transformation
     * of every (key, value) mapping in its range.
     */
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFun<? super K, ? super V, ? extends U> transformer;
            final Action<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Process locally, skipping null transformations.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.apply(u);
                }
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    /**
     * Fork/join task searching keys for the first non-null result of a
     * search function; the shared AtomicReference publishes the winner
     * across all subtasks.
     */
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Fun<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork subtasks for upper halves while split budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Another task already succeeded: stop splitting.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan locally until the range is exhausted or a result is
                // published (by this or any other task).
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First successful CAS wins and completes the root.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
// Task that applies searchFunction to each value until it yields a
// non-null result; the first such value is CAS-published into the shared
// result reference and the root computation is completed early.
@SuppressWarnings("serial")
static final class SearchValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Fun<? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;
    SearchValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Fun<? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call); stop splitting once a result exists.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchValuesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan this task's share until a result exists or the share
            // is exhausted.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.val)) != null) {
                    // Only the winning CAS completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
// Task that applies searchFunction to each entry (the Node is passed
// directly as the Map.Entry) until it yields a non-null value; the first
// such value is CAS-published into the shared result reference and the
// root computation is completed early.
//
// Review changes vs. original: spell the entry type consistently as
// Map.Entry (identical type to the bare "Entry" used before, matching
// the other entry-based tasks), and use "break" after a found result
// instead of "return" (identical behavior — the loop is exited and the
// method ends either way — matching the four sibling search tasks).
@SuppressWarnings("serial")
static final class SearchEntriesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Fun<Map.Entry<K,V>, ? extends U> searchFunction;
    final AtomicReference<U> result;
    SearchEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Fun<Map.Entry<K,V>, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Fun<Map.Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call); stop splitting once a result exists.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchEntriesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan this task's share until a result exists or the share
            // is exhausted.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p)) != null) {
                    // Only the winning CAS completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
// Task that applies the (key, value) searchFunction to each mapping
// until it yields a non-null value; the first such value is
// CAS-published into the shared result reference and the root
// computation is completed early.
@SuppressWarnings("serial")
static final class SearchMappingsTask<K,V,U>
    extends BulkTask<K,V,U> {
    final BiFun<? super K, ? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;
    SearchMappingsTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiFun<? super K, ? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call); stop splitting once a result exists.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchMappingsTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan this task's share until a result exists or the share
            // is exhausted.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.key, p.val)) != null) {
                    // Only the winning CAS completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
// Task that reduces all keys with the given reducer; result is null for
// an empty range. Forked subtasks link themselves into the parent's
// "rights" chain so their results can be merged on completion.
@SuppressWarnings("serial")
static final class ReduceKeysTask<K,V>
    extends BulkTask<K,V,K> {
    final BiFun<? super K, ? super K, ? extends K> reducer;
    K result;
    ReduceKeysTask<K,V> rights, nextRight;
    ReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceKeysTask<K,V> nextRight,
         BiFun<? super K, ? super K, ? extends K> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final K getRawResult() { return result; }
    public final void compute() {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        if ((reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half; batch >>>= 1 and baseLimit = h deliberately shrink
            // this task's own share inside the constructor call.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceKeysTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Sequentially reduce this task's own share; null keys are
            // defensively skipped.
            K r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                K u = p.key;
                r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
            }
            result = r;
            // Walk up the completer chain, folding each finished
            // right-hand subtask's result into its parent.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
                    t = (ReduceKeysTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    K tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that reduces all values with the given reducer; result is null
// for an empty range. Forked subtasks link into the parent's "rights"
// chain for merging on completion. Unlike the keys variant there is no
// null guard on v — values are presumably never null in this map (TODO
// confirm against the map's put path).
@SuppressWarnings("serial")
static final class ReduceValuesTask<K,V>
    extends BulkTask<K,V,V> {
    final BiFun<? super V, ? super V, ? extends V> reducer;
    V result;
    ReduceValuesTask<K,V> rights, nextRight;
    ReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceValuesTask<K,V> nextRight,
         BiFun<? super V, ? super V, ? extends V> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final V getRawResult() { return result; }
    public final void compute() {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        if ((reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceValuesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Sequentially reduce this task's own share.
            V r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                V v = p.val;
                r = (r == null) ? v : reducer.apply(r, v);
            }
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
                    t = (ReduceValuesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    V tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that reduces all entries with the given reducer (each Node is
// passed directly as a Map.Entry); result is null for an empty range.
// Forked subtasks link into the parent's "rights" chain for merging on
// completion.
//
// Review change vs. original: the constructor's reducer parameter spelled
// its first type argument as the bare "Entry" while the field uses
// "Map.Entry". They denote the same type; normalized to Map.Entry for
// consistency with the field and the other entry-based tasks.
@SuppressWarnings("serial")
static final class ReduceEntriesTask<K,V>
    extends BulkTask<K,V,Map.Entry<K,V>> {
    final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
    Map.Entry<K,V> result;
    ReduceEntriesTask<K,V> rights, nextRight;
    ReduceEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceEntriesTask<K,V> nextRight,
         BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final Map.Entry<K,V> getRawResult() { return result; }
    public final void compute() {
        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        if ((reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceEntriesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Sequentially reduce this task's own share.
            Map.Entry<K,V> r = null;
            for (Node<K,V> p; (p = advance()) != null; )
                r = (r == null) ? p : reducer.apply(r, p);
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
                    t = (ReduceEntriesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    Map.Entry<K,V> tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that transforms each key and reduces the non-null transformed
// values with the given reducer; result is null if nothing mapped.
// Forked subtasks link into the parent's "rights" chain for merging.
@SuppressWarnings("serial")
static final class MapReduceKeysTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Fun<? super K, ? extends U> transformer;
    final BiFun<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceKeysTask<K,V,U> rights, nextRight;
    MapReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysTask<K,V,U> nextRight,
         Fun<? super K, ? extends U> transformer,
         BiFun<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Sequentially map-and-reduce this task's own share; null
            // transformed values are skipped.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.key)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
                    t = (MapReduceKeysTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that transforms each value and reduces the non-null transformed
// values with the given reducer; result is null if nothing mapped.
// Forked subtasks link into the parent's "rights" chain for merging.
@SuppressWarnings("serial")
static final class MapReduceValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Fun<? super V, ? extends U> transformer;
    final BiFun<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceValuesTask<K,V,U> rights, nextRight;
    MapReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesTask<K,V,U> nextRight,
         Fun<? super V, ? extends U> transformer,
         BiFun<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Sequentially map-and-reduce this task's own share; null
            // transformed values are skipped.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.val)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
                    t = (MapReduceValuesTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that transforms each entry (the Node is passed directly as the
// Map.Entry) and reduces the non-null transformed values with the given
// reducer; result is null if nothing mapped. Forked subtasks link into
// the parent's "rights" chain for merging.
@SuppressWarnings("serial")
static final class MapReduceEntriesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Fun<Map.Entry<K,V>, ? extends U> transformer;
    final BiFun<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceEntriesTask<K,V,U> rights, nextRight;
    MapReduceEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceEntriesTask<K,V,U> nextRight,
         Fun<Map.Entry<K,V>, ? extends U> transformer,
         BiFun<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceEntriesTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Sequentially map-and-reduce this task's own share; null
            // transformed values are skipped.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
                    t = (MapReduceEntriesTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that transforms each (key, value) mapping and reduces the
// non-null transformed values with the given reducer; result is null if
// nothing mapped. Forked subtasks link into the parent's "rights" chain
// for merging.
@SuppressWarnings("serial")
static final class MapReduceMappingsTask<K,V,U>
    extends BulkTask<K,V,U> {
    final BiFun<? super K, ? super V, ? extends U> transformer;
    final BiFun<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceMappingsTask<K,V,U> rights, nextRight;
    MapReduceMappingsTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceMappingsTask<K,V,U> nextRight,
         BiFun<? super K, ? super V, ? extends U> transformer,
         BiFun<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call).
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceMappingsTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Sequentially map-and-reduce this task's own share; null
            // transformed values are skipped.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.key, p.val)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
                    t = (MapReduceMappingsTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each key to a double and reduces with the given
// reducer, starting from basis. Subtasks link into the "rights" chain
// for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceKeysToDoubleTask<K,V>
    extends BulkTask<K,V,Double> {
    final ObjectToDouble<? super K> transformer;
    final DoubleByDoubleToDouble reducer;
    final double basis;
    double result;
    MapReduceKeysToDoubleTask<K,V> rights, nextRight;
    MapReduceKeysToDoubleTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysToDoubleTask<K,V> nextRight,
         ObjectToDouble<? super K> transformer,
         double basis,
         DoubleByDoubleToDouble reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Double getRawResult() { return result; }
    public final void compute() {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            double r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysToDoubleTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
                    t = (MapReduceKeysToDoubleTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each value to a double and reduces with the given
// reducer, starting from basis. Subtasks link into the "rights" chain
// for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceValuesToDoubleTask<K,V>
    extends BulkTask<K,V,Double> {
    final ObjectToDouble<? super V> transformer;
    final DoubleByDoubleToDouble reducer;
    final double basis;
    double result;
    MapReduceValuesToDoubleTask<K,V> rights, nextRight;
    MapReduceValuesToDoubleTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesToDoubleTask<K,V> nextRight,
         ObjectToDouble<? super V> transformer,
         double basis,
         DoubleByDoubleToDouble reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Double getRawResult() { return result; }
    public final void compute() {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            double r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesToDoubleTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
                    t = (MapReduceValuesToDoubleTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each entry (Node passed directly as the Map.Entry) to a
// double and reduces with the given reducer, starting from basis.
// Subtasks link into the "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceEntriesToDoubleTask<K,V>
    extends BulkTask<K,V,Double> {
    final ObjectToDouble<Map.Entry<K,V>> transformer;
    final DoubleByDoubleToDouble reducer;
    final double basis;
    double result;
    MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
    MapReduceEntriesToDoubleTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceEntriesToDoubleTask<K,V> nextRight,
         ObjectToDouble<Map.Entry<K,V>> transformer,
         double basis,
         DoubleByDoubleToDouble reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Double getRawResult() { return result; }
    public final void compute() {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            double r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceEntriesToDoubleTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
                    t = (MapReduceEntriesToDoubleTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each (key, value) mapping to a double and reduces with
// the given reducer, starting from basis. Subtasks link into the
// "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceMappingsToDoubleTask<K,V>
    extends BulkTask<K,V,Double> {
    final ObjectByObjectToDouble<? super K, ? super V> transformer;
    final DoubleByDoubleToDouble reducer;
    final double basis;
    double result;
    MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
    MapReduceMappingsToDoubleTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceMappingsToDoubleTask<K,V> nextRight,
         ObjectByObjectToDouble<? super K, ? super V> transformer,
         double basis,
         DoubleByDoubleToDouble reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Double getRawResult() { return result; }
    public final void compute() {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            double r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceMappingsToDoubleTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key, p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
                    t = (MapReduceMappingsToDoubleTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each key to a long and reduces with the given reducer,
// starting from basis. Subtasks link into the "rights" chain for merging
// on completion.
@SuppressWarnings("serial")
static final class MapReduceKeysToLongTask<K,V>
    extends BulkTask<K,V,Long> {
    final ObjectToLong<? super K> transformer;
    final LongByLongToLong reducer;
    final long basis;
    long result;
    MapReduceKeysToLongTask<K,V> rights, nextRight;
    MapReduceKeysToLongTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysToLongTask<K,V> nextRight,
         ObjectToLong<? super K> transformer,
         long basis,
         LongByLongToLong reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Long getRawResult() { return result; }
    public final void compute() {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            long r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysToLongTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
                    t = (MapReduceKeysToLongTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each value to a long and reduces with the given
// reducer, starting from basis. Subtasks link into the "rights" chain
// for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceValuesToLongTask<K,V>
    extends BulkTask<K,V,Long> {
    final ObjectToLong<? super V> transformer;
    final LongByLongToLong reducer;
    final long basis;
    long result;
    MapReduceValuesToLongTask<K,V> rights, nextRight;
    MapReduceValuesToLongTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesToLongTask<K,V> nextRight,
         ObjectToLong<? super V> transformer,
         long basis,
         LongByLongToLong reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Long getRawResult() { return result; }
    public final void compute() {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            long r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesToLongTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
                    t = (MapReduceValuesToLongTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each entry (Node passed directly as the Map.Entry) to a
// long and reduces with the given reducer, starting from basis.
// Subtasks link into the "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceEntriesToLongTask<K,V>
    extends BulkTask<K,V,Long> {
    final ObjectToLong<Map.Entry<K,V>> transformer;
    final LongByLongToLong reducer;
    final long basis;
    long result;
    MapReduceEntriesToLongTask<K,V> rights, nextRight;
    MapReduceEntriesToLongTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceEntriesToLongTask<K,V> nextRight,
         ObjectToLong<Map.Entry<K,V>> transformer,
         long basis,
         LongByLongToLong reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Long getRawResult() { return result; }
    public final void compute() {
        final ObjectToLong<Map.Entry<K,V>> transformer;
        final LongByLongToLong reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            long r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceEntriesToLongTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
                    t = (MapReduceEntriesToLongTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each (key, value) mapping to a long and reduces with
// the given reducer, starting from basis. Subtasks link into the
// "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceMappingsToLongTask<K,V>
    extends BulkTask<K,V,Long> {
    final ObjectByObjectToLong<? super K, ? super V> transformer;
    final LongByLongToLong reducer;
    final long basis;
    long result;
    MapReduceMappingsToLongTask<K,V> rights, nextRight;
    MapReduceMappingsToLongTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceMappingsToLongTask<K,V> nextRight,
         ObjectByObjectToLong<? super K, ? super V> transformer,
         long basis,
         LongByLongToLong reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Long getRawResult() { return result; }
    public final void compute() {
        final ObjectByObjectToLong<? super K, ? super V> transformer;
        final LongByLongToLong reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            long r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceMappingsToLongTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key, p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
                    t = (MapReduceMappingsToLongTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each key to an int and reduces with the given reducer,
// starting from basis. Subtasks link into the "rights" chain for merging
// on completion.
@SuppressWarnings("serial")
static final class MapReduceKeysToIntTask<K,V>
    extends BulkTask<K,V,Integer> {
    final ObjectToInt<? super K> transformer;
    final IntByIntToInt reducer;
    final int basis;
    int result;
    MapReduceKeysToIntTask<K,V> rights, nextRight;
    MapReduceKeysToIntTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysToIntTask<K,V> nextRight,
         ObjectToInt<? super K> transformer,
         int basis,
         IntByIntToInt reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Integer getRawResult() { return result; }
    public final void compute() {
        final ObjectToInt<? super K> transformer;
        final IntByIntToInt reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            int r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysToIntTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
                    t = (MapReduceKeysToIntTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each value to an int and reduces with the given
// reducer, starting from basis. Subtasks link into the "rights" chain
// for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceValuesToIntTask<K,V>
    extends BulkTask<K,V,Integer> {
    final ObjectToInt<? super V> transformer;
    final IntByIntToInt reducer;
    final int basis;
    int result;
    MapReduceValuesToIntTask<K,V> rights, nextRight;
    MapReduceValuesToIntTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesToIntTask<K,V> nextRight,
         ObjectToInt<? super V> transformer,
         int basis,
         IntByIntToInt reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Integer getRawResult() { return result; }
    public final void compute() {
        final ObjectToInt<? super V> transformer;
        final IntByIntToInt reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            int r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesToIntTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
                    t = (MapReduceValuesToIntTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each entry (Node passed directly as the Map.Entry) to
// an int and reduces with the given reducer, starting from basis.
// Subtasks link into the "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceEntriesToIntTask<K,V>
    extends BulkTask<K,V,Integer> {
    final ObjectToInt<Map.Entry<K,V>> transformer;
    final IntByIntToInt reducer;
    final int basis;
    int result;
    MapReduceEntriesToIntTask<K,V> rights, nextRight;
    MapReduceEntriesToIntTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceEntriesToIntTask<K,V> nextRight,
         ObjectToInt<Map.Entry<K,V>> transformer,
         int basis,
         IntByIntToInt reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Integer getRawResult() { return result; }
    public final void compute() {
        final ObjectToInt<Map.Entry<K,V>> transformer;
        final IntByIntToInt reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            int r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceEntriesToIntTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
                    t = (MapReduceEntriesToIntTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
// Task that maps each (key, value) mapping to an int and reduces with
// the given reducer, starting from basis. Subtasks link into the
// "rights" chain for merging on completion.
@SuppressWarnings("serial")
static final class MapReduceMappingsToIntTask<K,V>
    extends BulkTask<K,V,Integer> {
    final ObjectByObjectToInt<? super K, ? super V> transformer;
    final IntByIntToInt reducer;
    final int basis;
    int result;
    MapReduceMappingsToIntTask<K,V> rights, nextRight;
    MapReduceMappingsToIntTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceMappingsToIntTask<K,V> nextRight,
         ObjectByObjectToInt<? super K, ? super V> transformer,
         int basis,
         IntByIntToInt reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.basis = basis; this.reducer = reducer;
    }
    public final Integer getRawResult() { return result; }
    public final void compute() {
        final ObjectByObjectToInt<? super K, ? super V> transformer;
        final IntByIntToInt reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            int r = this.basis;
            // Halve the remaining range while batch > 0, forking the upper
            // half (side effects batch >>>= 1, baseLimit = h inside the
            // constructor call). r still equals basis here, so subtasks
            // inherit the unmodified basis.
            for (int i = baseIndex, f, h; batch > 0 &&
                 (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceMappingsToIntTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, r, reducer)).fork();
            }
            // Fold this task's own share into r.
            for (Node<K,V> p; (p = advance()) != null; )
                r = reducer.apply(r, transformer.apply(p.key, p.val));
            result = r;
            // Merge completed right-hand subtasks' results up the
            // completer chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
                    t = (MapReduceMappingsToIntTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    t.result = reducer.apply(t.result, s.result);
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
/* ---------------- Counters -------------- */
// Adapted from LongAdder and Striped64.
// See their internal docs for explanation.
// A padded cell for distributing counts.  The p0..p6 / q0..q6 fields are
// never read: they pad 'value' so independently updated cells land on
// distinct cache lines, avoiding false sharing between CPUs.
static final class CounterCell {
volatile long p0, p1, p2, p3, p4, p5, p6;
volatile long value;
volatile long q0, q1, q2, q3, q4, q5, q6;
CounterCell(long x) { value = x; }
}
/**
 * Holder for the thread-local hash code determining which
 * CounterCell to use. The code is initialized via the
 * counterHashCodeGenerator, but may be moved upon collisions.
 */
static final class CounterHashCode {
int code; // nonzero once initialized; rehashed in fullAddCount on contention
}
/**
 * Generates initial value for per-thread CounterHashCodes.
 */
static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
/**
 * Increment for counterHashCodeGenerator. See class ThreadLocal
 * for explanation.
 */
static final int SEED_INCREMENT = 0x61c88647; // golden-ratio derived; spreads successive seeds evenly
/**
 * Per-thread counter hash codes. Shared across all instances.
 */
static final ThreadLocal<CounterHashCode> threadCounterHashCode =
new ThreadLocal<CounterHashCode>();
// Returns the current total: baseCount plus the values of all non-null
// CounterCells.  The result is only a moving snapshot, since cells may be
// updated concurrently while the sum is taken.
final long sumCount() {
CounterCell[] cells = counterCells;
long total = baseCount;
if (cells != null) {
for (CounterCell cell : cells) {
if (cell != null)
total += cell.value;
}
}
return total;
}
// See LongAdder version for explanation
// Slow path for counter updates, taken when the fast CAS in the caller
// failed or no cell table exists yet.  Loops attempting, in order:
// (1) CAS into this thread's cell, (2) attach a new cell under the
// cellsBusy spinlock, (3) double the cell table, (4) initialize the
// table, or (5) fall back to CASing baseCount.  'wasUncontended' is
// false when the caller's initial cell CAS already failed.
private final void fullAddCount(long x, CounterHashCode hc,
boolean wasUncontended) {
int h;
if (hc == null) {
// First counter access from this thread: install a thread-local hash.
hc = new CounterHashCode();
int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
h = hc.code = (s == 0) ? 1 : s; // Avoid zero
threadCounterHashCode.set(hc);
}
else
h = hc.code;
boolean collide = false; // True if last slot nonempty
for (;;) {
CounterCell[] as; CounterCell a; int n; long v;
if ((as = counterCells) != null && (n = as.length) > 0) {
if ((a = as[(n - 1) & h]) == null) {
if (cellsBusy == 0) { // Try to attach new Cell
CounterCell r = new CounterCell(x); // Optimistic create
if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try { // Recheck under lock
CounterCell[] rs; int m, j;
if ((rs = counterCells) != null &&
(m = rs.length) > 0 &&
rs[j = (m - 1) & h] == null) {
rs[j] = r;
created = true;
}
} finally {
cellsBusy = 0;
}
if (created)
break;
continue; // Slot is now non-empty
}
}
collide = false;
}
else if (!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
break;
else if (counterCells != as || n >= NCPU)
collide = false; // At max size or stale
else if (!collide)
collide = true;
else if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
try {
if (counterCells == as) {// Expand table unless stale
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
} finally {
cellsBusy = 0;
}
collide = false;
continue; // Retry with expanded table
}
// Contention: move this thread to a different cell (xorshift rehash).
h ^= h << 13; // Rehash
h ^= h >>> 17;
h ^= h << 5;
}
else if (cellsBusy == 0 && counterCells == as &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean init = false;
try { // Initialize table
if (counterCells == as) {
CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
init = true;
}
} finally {
cellsBusy = 0;
}
if (init)
break;
}
else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
break; // Fall back on using base
}
hc.code = h; // Record index for next time
}
// Unsafe mechanics
// Field offsets (resolved once in the static initializer below) used by
// the CAS operations on sizeCtl/transferIndex/baseCount/cellsBusy and on
// CounterCell.value, plus the Node[] element-addressing constants.
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE; // byte offset of the first Node[] element
private static final int ASHIFT; // log2 of the Node[] element size
static {
try {
U = getUnsafe();
Class<?> k = ConcurrentHashMapV8.class;
SIZECTL = U.objectFieldOffset
(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset
(k.getDeclaredField("transferIndex"));
BASECOUNT = U.objectFieldOffset
(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset
(k.getDeclaredField("cellsBusy"));
Class<?> ck = CounterCell.class;
CELLVALUE = U.objectFieldOffset
(ck.getDeclaredField("value"));
Class<?> ak = Node[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
// Power-of-two scale lets element offsets be computed with a shift.
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
// Reflection failure here is unrecoverable for this class.
throw new Error(e);
}
}
/**
 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
 * Replace with a simple call to Unsafe.getUnsafe when integrating
 * into a jdk.
 *
 * @return a sun.misc.Unsafe
 */
private static sun.misc.Unsafe getUnsafe() {
try {
// Direct access works only when this class is on the boot classpath.
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
// Otherwise scan Unsafe's static fields for an instance under a
// privileged action (covers "theUnsafe" and any renamed holder).
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| corochoone/elasticsearch | src/main/java/jsr166e/ConcurrentHashMapV8.java | Java | apache-2.0 | 263,930 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.browser;
import android.graphics.Bitmap;
import android.graphics.Rect;
import android.util.SparseArray;
import org.chromium.base.CalledByNative;
import org.chromium.base.JNINamespace;
import org.chromium.base.ThreadUtils;
import org.chromium.ui.base.WindowAndroid;
/**
 * A class for reading back content.
 *
 * <p>Each asynchronous readback request is tagged with a unique int id.  The
 * native side reports completion through {@link #notifyGetBitmapFinished},
 * which routes the resulting bitmap to the callback registered under that id.
 * The async entry points must be called on the UI thread.
 */
@JNINamespace("content")
public abstract class ContentReadbackHandler {
    /**
     * A callback interface for content readback into a bitmap.
     */
    public static interface GetBitmapCallback {
        /**
         * Called when the content readback finishes.
         * @param success Indicates whether the readback succeeded or not.
         * @param bitmap The {@link Bitmap} of the content.
         */
        public void onFinishGetBitmap(boolean success, Bitmap bitmap);
    }

    // Next request id to hand out; only touched on the UI thread.
    private int mNextReadbackId = 1;
    // Outstanding requests, keyed by readback id.
    private final SparseArray<GetBitmapCallback> mGetBitmapRequests;
    // Native peer pointer; 0 before init and after destroy().
    private long mNativeContentReadbackHandler;

    /**
     * Creates a {@link ContentReadbackHandler}.
     */
    public ContentReadbackHandler() {
        mGetBitmapRequests = new SparseArray<GetBitmapCallback>();
    }

    /**
     * Initialize the native object.
     */
    public void initNativeContentReadbackHandler() {
        mNativeContentReadbackHandler = nativeInit();
    }

    /**
     * Should be called when the ContentReadbackHandler is not needed anymore.
     */
    public void destroy() {
        if (mNativeContentReadbackHandler != 0) nativeDestroy(mNativeContentReadbackHandler);
        mNativeContentReadbackHandler = 0;
    }

    @CalledByNative
    private void notifyGetBitmapFinished(int readbackId, boolean success, Bitmap bitmap) {
        GetBitmapCallback callback = mGetBitmapRequests.get(readbackId);
        if (callback != null) {
            mGetBitmapRequests.delete(readbackId);
            callback.onFinishGetBitmap(success, bitmap);
        } else {
            // Readback id is unregistered: native either completed a request we
            // never issued or completed the same request twice.
            assert false : "Readback finished for unregistered Id: " + readbackId;
        }
    }

    /**
     * Asynchronously, generate and grab a bitmap representing what is currently on the screen
     * for {@code view}.
     *
     * @param scale The scale that should be applied to the content.
     * @param srcRect A subrect of the original content to capture. If this is empty, it will grab
     *                the whole surface.
     * @param view The {@link ContentViewCore} to grab the bitmap from.
     * @param callback The callback to be executed after readback completes.
     */
    public void getContentBitmapAsync(float scale, Rect srcRect, ContentViewCore view,
            GetBitmapCallback callback) {
        if (!readyForReadback()) {
            callback.onFinishGetBitmap(false, null);
            return;
        }
        ThreadUtils.assertOnUiThread();

        int readbackId = mNextReadbackId++;
        mGetBitmapRequests.put(readbackId, callback);
        // Fix: pass the rect origin in declared (x, y) order.  The previous
        // code passed (srcRect.top, srcRect.left), transposing the capture
        // origin for any non-symmetric source rect.
        nativeGetContentBitmap(mNativeContentReadbackHandler, readbackId, scale,
                Bitmap.Config.ARGB_8888, srcRect.left, srcRect.top, srcRect.width(),
                srcRect.height(), view);
    }

    /**
     * Asynchronously, grab a bitmap of the current browser compositor root layer.
     *
     * @param windowAndroid The window that hosts the compositor.
     * @param callback The callback to be executed after readback completes.
     */
    public void getCompositorBitmapAsync(WindowAndroid windowAndroid, GetBitmapCallback callback) {
        if (!readyForReadback()) {
            callback.onFinishGetBitmap(false, null);
            return;
        }
        ThreadUtils.assertOnUiThread();

        int readbackId = mNextReadbackId++;
        mGetBitmapRequests.put(readbackId, callback);
        nativeGetCompositorBitmap(mNativeContentReadbackHandler, readbackId,
                windowAndroid.getNativePointer());
    }

    /**
     * Implemented by the owner of this class to signal whether readback is possible or not.
     * @return Whether readback is possible or not.
     */
    protected abstract boolean readyForReadback();

    private native long nativeInit();
    private native void nativeDestroy(long nativeContentReadbackHandler);
    private native void nativeGetContentBitmap(long nativeContentReadbackHandler, int readback_id,
            float scale, Bitmap.Config config, float x, float y, float width, float height,
            Object contentViewCore);
    private native void nativeGetCompositorBitmap(long nativeContentReadbackHandler,
            int readback_id, long nativeWindowAndroid);
}
| CyanogenMod/android_external_chromium_org | content/public/android/java/src/org/chromium/content/browser/ContentReadbackHandler.java | Java | bsd-3-clause | 4,798 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.android_webview.test;
import android.app.Service;
import android.content.Intent;
import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.Process;
import org.chromium.android_webview.AwBrowserProcess;
import org.chromium.android_webview.AwResource;
import org.chromium.android_webview.shell.R;
import org.chromium.base.CommandLine;
import org.chromium.base.annotations.SuppressFBWarnings;
/**
 * This is a service for imitating a second browser process in the application.
 */
public class SecondBrowserProcess extends Service {
    public static final int CODE_START = IBinder.FIRST_CALL_TRANSACTION;

    /** Binder through which the test harness asks this process to start the browser. */
    private final IBinder mBinder = new Binder() {
        @Override
        protected boolean onTransact(int code, Parcel data, Parcel reply, int flags) {
            if (code != CODE_START) return false;
            reply.writeNoException();
            try {
                startBrowserProcess();
                reply.writeInt(Process.myPid());
            } catch (Exception e) {
                // Startup failed; report pid 0 so the caller can detect it.
                reply.writeInt(0);
            }
            return true;
        }
    };

    @Override
    public IBinder onBind(Intent intent) {
        return mBinder;
    }

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        // Keep the service running until explicitly stopped.
        return START_STICKY;
    }

    /** Boots the WebView browser process inside this service's own process. */
    @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
    private void startBrowserProcess() throws Exception {
        CommandLine.initFromFile("/data/local/tmp/android-webview-command-line");
        AwResource.setResources(this.getResources());
        AwResource.setConfigKeySystemUuidMapping(R.array.config_key_system_uuid_mapping);
        AwBrowserProcess.loadLibrary(this);
        AwBrowserProcess.start(this);
    }
}
| js0701/chromium-crosswalk | android_webview/test/shell/src/org/chromium/android_webview/test/SecondBrowserProcess.java | Java | bsd-3-clause | 2,062 |
// This file is part of OpenTSDB.
// Copyright (C) 2014 The OpenTSDB Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 2.1 of the License, or (at your
// option) any later version. This program is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License for more details. You should have received a copy
// of the GNU Lesser General Public License along with this program. If not,
// see <http://www.gnu.org/licenses/>.
package net.opentsdb.core;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.NoSuchElementException;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests {@link RateSpan}.
 */
public class TestRateSpan {

  /** Input data points shared by most of the tests. */
  private static final DataPoint[] DATA_POINTS = new DataPoint[] {
    MutableDataPoint.ofDoubleValue(1356998400000L, 40.0),
    MutableDataPoint.ofLongValue(1356998400000L + 2000000, 50),
    MutableDataPoint.ofLongValue(1357002000000L, 40),
    MutableDataPoint.ofDoubleValue(1357002000000L + 5000, 50.0),
    MutableDataPoint.ofLongValue(1357005600000L, 40),
    MutableDataPoint.ofDoubleValue(1357005600000L + 2000000, 50.0)
  };

  /**
   * Expected rates for DATA_POINTS under default options.  The first rate is
   * computed against time zero, not against a previous data point.
   */
  private static final DataPoint[] RATE_DATA_POINTS = new DataPoint[] {
    MutableDataPoint.ofDoubleValue(1356998400000L, 40.0 / 1356998400),
    MutableDataPoint.ofDoubleValue(1356998400000L + 2000000, 10.0 / 2000.0),
    MutableDataPoint.ofDoubleValue(1357002000000L,
                                   -10.0 / (1357002000L - 1356998400L - 2000)),
    MutableDataPoint.ofDoubleValue(1357002000000L + 5000, 10.0 / 5.0),
    MutableDataPoint.ofDoubleValue(1357005600000L,
                                   -10.0 / (1357005600L - 1357002005L)),
    MutableDataPoint.ofDoubleValue(1357005600000L + 2000000, 10.0 / 2000.0)
  };

  /** Expected rates after seeking to 1357002000000L. */
  private static final DataPoint[] RATES_AFTER_SEEK = new DataPoint[] {
    // The first rate is calculated against the time zero, not the previous
    // data point.
    MutableDataPoint.ofDoubleValue(1357002000000L, 40.0 / 1357002000),
    RATE_DATA_POINTS[3], RATE_DATA_POINTS[4], RATE_DATA_POINTS[5]
  };

  private static final long COUNTER_MAX = 70;

  /** Expected rates when DATA_POINTS is treated as a counter wrapping at COUNTER_MAX. */
  private static final DataPoint[] RATES_FOR_COUNTER = new DataPoint[] {
    MutableDataPoint.ofDoubleValue(1356998400000L, 40.0 / 1356998400),
    MutableDataPoint.ofDoubleValue(1356998400000L + 2000000, 10.0 / 2000.0),
    MutableDataPoint.ofDoubleValue(1357002000000L, (40.0 + 20) / 1600.0),
    MutableDataPoint.ofDoubleValue(1357002000000L + 5000, 10.0 / 5.0),
    MutableDataPoint.ofDoubleValue(1357005600000L, (40.0 + 20) / 3595),
    MutableDataPoint.ofDoubleValue(1357005600000L + 2000000, 10.0 / 2000.0)
  };

  private SeekableView source;
  private RateOptions options;

  @Before
  public void before() {
    source = SeekableViewsForTest.fromArray(DATA_POINTS);
    options = new RateOptions();
  }

  /**
   * Asserts that {@code rate_span} produces exactly {@code expected_rates},
   * in order, and is exhausted afterwards.  {@code hasNext()} is invoked
   * twice per step (and twice at the end) to verify it has no side effects.
   * Extracted to remove the verification loop duplicated across the tests.
   */
  private static void assertRates(final RateSpan rate_span,
                                  final DataPoint... expected_rates) {
    for (DataPoint rate : expected_rates) {
      assertTrue(rate_span.hasNext());
      assertTrue(rate_span.hasNext());
      DataPoint dp = rate_span.next();
      String msg = String.format("expected rate = '%s' ", rate);
      assertFalse(msg, dp.isInteger());
      assertEquals(msg, rate.timestamp(), dp.timestamp());
      assertEquals(msg, rate.doubleValue(), dp.doubleValue(), 0.0000001);
    }
    assertFalse(rate_span.hasNext());
    assertFalse(rate_span.hasNext());
  }

  @Test
  public void testRateSpan() {
    RateSpan rate_span = new RateSpan(source, options);
    // The first rate is between the time zero and the first data point.
    assertTrue(rate_span.hasNext());
    DataPoint dp = rate_span.next();
    assertFalse(dp.isInteger());
    assertEquals(1356998400000L, dp.timestamp());
    assertEquals(40.0 / 1356998400L, dp.doubleValue(), 0);
    // The second rate comes from the first two data points.
    assertTrue(rate_span.hasNext());
    DataPoint dp2 = rate_span.next();
    assertFalse(dp2.isInteger());
    assertEquals(1356998400000L + 2000000, dp2.timestamp());
    assertEquals(10.0 / 2000.0, dp2.doubleValue(), 0);
  }

  @Test
  public void testNext_iterateAll() {
    assertRates(new RateSpan(source, options), RATE_DATA_POINTS);
  }

  @Test(expected = UnsupportedOperationException.class)
  public void testRemove() {
    new RateSpan(source, options).remove();
  }

  @Test(expected = NoSuchElementException.class)
  public void testNext_noMoreData() {
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, 40)
    });
    RateSpan rate_span = new RateSpan(source, options);
    // The first rate exists.
    assertTrue(rate_span.hasNext());
    rate_span.next();
    // No second rate; next() must throw.
    assertFalse(rate_span.hasNext());
    rate_span.next();
  }

  @Test
  public void testSeek() {
    RateSpan rate_span = new RateSpan(source, options);
    rate_span.seek(1357002000000L);
    assertRates(rate_span, RATES_AFTER_SEEK);
  }

  @Test(expected = IllegalStateException.class)
  public void testNext_decreasingTimestamps() {
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1357002000000L + 5000, 50),
      MutableDataPoint.ofLongValue(1357002000000L + 4000, 50)
    });
    RateSpan rate_span = new RateSpan(source, options);
    rate_span.next();
  }

  @Test(expected = IllegalStateException.class)
  public void testMoveToNextRate_duplicatedTimestamps() {
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, 40),
      MutableDataPoint.ofLongValue(1356998400000L + 2000000, 50),
      MutableDataPoint.ofLongValue(1356998400000L + 2000000, 50)
    });
    RateSpan rate_span = new RateSpan(source, options);
    rate_span.next(); // Abandons the first rate to test next ones.
    assertTrue(rate_span.hasNext());
    rate_span.next();
  }

  @Test
  public void testCalculateDelta_bigLongValues() {
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, Long.MAX_VALUE - 100),
      MutableDataPoint.ofLongValue(1356998500000L, Long.MAX_VALUE - 20)
    });
    RateSpan rate_span = new RateSpan(source, options);
    rate_span.next(); // Abandons the first rate to test next ones.
    // The delta (80 over 100 seconds) must not overflow despite the huge
    // absolute values.
    assertTrue(rate_span.hasNext());
    DataPoint dp = rate_span.next();
    assertFalse(dp.isInteger());
    assertEquals(1356998500000L, dp.timestamp());
    assertEquals(0.8, dp.doubleValue(), 0);
    assertFalse(rate_span.hasNext());
  }

  @Test
  public void testNext_counter() {
    options = new RateOptions(true, COUNTER_MAX,
                              RateOptions.DEFAULT_RESET_VALUE);
    assertRates(new RateSpan(source, options), RATES_FOR_COUNTER);
  }

  @Test
  public void testNext_counterLongMax() {
    options = new RateOptions(true, Long.MAX_VALUE, 0);
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998430000L, Long.MAX_VALUE - 55),
      MutableDataPoint.ofLongValue(1356998460000L, Long.MAX_VALUE - 25),
      MutableDataPoint.ofLongValue(1356998490000L, 5),
    });
    DataPoint[] rates = new DataPoint[] {
      MutableDataPoint.ofDoubleValue(1356998430000L,
                                     (Long.MAX_VALUE - 55) / 1356998430.0),
      MutableDataPoint.ofDoubleValue(1356998460000L, 1),
      // Wrap past Long.MAX_VALUE still yields a rate of 1.
      MutableDataPoint.ofDoubleValue(1356998490000L, 1)
    };
    assertRates(new RateSpan(source, options), rates);
  }

  @Test
  public void testNext_counterWithResetValue() {
    final long RESET_VALUE = 1;
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, 40),
      MutableDataPoint.ofLongValue(1356998401000L, 50),
      MutableDataPoint.ofLongValue(1356998402000L, 40)
    });
    DataPoint[] rates = new DataPoint[] {
      MutableDataPoint.ofDoubleValue(1356998400000L, 40 / 1356998400.0),
      MutableDataPoint.ofDoubleValue(1356998401000L, 10),
      // Not 60 because the change is too big compared to the reset value.
      MutableDataPoint.ofDoubleValue(1356998402000L, 0)
    };
    options = new RateOptions(true, COUNTER_MAX, RESET_VALUE);
    assertRates(new RateSpan(source, options), rates);
  }

  // NOTE(review): "Dro" in the next two test names looks like a typo for
  // "Drop"; the names are kept unchanged to preserve the suite's surface.
  @Test
  public void testNext_counterDroResets() {
    final long RESET_VALUE = 1;
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, 40),
      MutableDataPoint.ofLongValue(1356998401000L, 50),
      MutableDataPoint.ofLongValue(1356998402000L, 40),
      MutableDataPoint.ofLongValue(1356998403000L, 50)
    });
    DataPoint[] rates = new DataPoint[] {
      MutableDataPoint.ofDoubleValue(1356998400000L, 40 / 1356998400.0),
      MutableDataPoint.ofDoubleValue(1356998401000L, 10),
      // The reset point is dropped entirely from the output.
      MutableDataPoint.ofDoubleValue(1356998403000L, 10)
    };
    options = new RateOptions(true, COUNTER_MAX, RESET_VALUE, true);
    assertRates(new RateSpan(source, options), rates);
  }

  @Test
  public void testNext_counterDroResetsNothingAfter() {
    final long RESET_VALUE = 1;
    source = SeekableViewsForTest.fromArray(new DataPoint[] {
      MutableDataPoint.ofLongValue(1356998400000L, 40),
      MutableDataPoint.ofLongValue(1356998401000L, 50),
      MutableDataPoint.ofLongValue(1356998402000L, 40)
    });
    DataPoint[] rates = new DataPoint[] {
      MutableDataPoint.ofDoubleValue(1356998400000L, 40 / 1356998400.0),
      MutableDataPoint.ofDoubleValue(1356998401000L, 10),
      // The trailing reset point is dropped, leaving nothing after it.
    };
    options = new RateOptions(true, COUNTER_MAX, RESET_VALUE, true);
    assertRates(new RateSpan(source, options), rates);
  }
}
| nickman/opentsdb | test/core/TestRateSpan.java | Java | gpl-3.0 | 12,756 |
/*
Copyright (c) 2005 Redstone Handelsbolag
This library is free software; you can redistribute it and/or modify it under the terms
of the GNU Lesser General Public License as published by the Free Software Foundation;
either version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this
library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
*/
package redstone.xmlrpc.serializers;
import java.io.IOException;
import java.io.Writer;
import redstone.xmlrpc.XmlRpcCustomSerializer;
import redstone.xmlrpc.XmlRpcException;
import redstone.xmlrpc.XmlRpcSerializer;
/**
* Serializes arrays of primitive longs. Note that unless
* setUseApacheExtension( true ) has been invoked, the longs are demoted to
* integers before being serialized into regular XML-RPC <i4>'s, possibly
* losing significant bits in the conversion.<p>
*
* @author Greger Olsson
*/
public class LongArraySerializer implements XmlRpcCustomSerializer
{
/* (Documentation inherited)
* @see redstone.xmlrpc.XmlRpcCustomSerializer#getSupportedClass()
*/
public Class getSupportedClass()
{
// Handles primitive long arrays (long[]) only, not boxed Long[].
return long[].class;
}
/**
 * Sets whether or not to use the <i8> Apache extensions when
 * serializing longs.
 *
 * <p>When disabled (the default), longs are demoted to 32-bit <i4>
 * values and may lose significant bits.  Affects all subsequent
 * {@link #serialize} calls on this instance.</p>
 *
 * @param useApacheExtension Flag for specifying the Apache extension to be used.
 */
public void setUseApacheExtension( boolean useApacheExtension )
{
this.useApacheExtension = useApacheExtension;
}
/* (Documentation inherited)
* @see redstone.xmlrpc.XmlRpcCustomSerializer#serialize(java.lang.Object, java.io.Writer, redstone.xmlrpc.XmlRpcSerializer)
*/
public void serialize(
    Object value,
    Writer writer,
    XmlRpcSerializer builtInSerializer )
    throws XmlRpcException, IOException
{
    writer.write( "<array><data>" );

    // One <value> element per array slot.  Without the Apache extension
    // each long is narrowed to int, which may truncate large values.
    for ( long element : ( long[] ) value )
    {
        if ( useApacheExtension )
        {
            writer.write( "<value><i8 xmlns=\"http://ws.apache.org/xmlrpc/namespaces/extensions\">" );
            writer.write( Long.toString( element ) );
            writer.write( "</i8></value>" );
        }
        else
        {
            writer.write( "<value><i4>" );
            writer.write( Integer.toString( ( int ) element ) );
            writer.write( "</i4></value>" );
        }
    }

    writer.write( "</data></array>" );
}
/** Flag indicating whether or not the Apache <i8> extension should be used.
 *  Defaults to false, meaning longs are demoted to <i4> integers. */
private boolean useApacheExtension;
} | klemens/redstone | source/redstone/xmlrpc/serializers/LongArraySerializer.java | Java | lgpl-2.1 | 3,204 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
// Round-trip XContent (de)serialization tests for StopDataFrameAnalyticsResponse,
// driven by the AbstractXContentTestCase harness.
public class StopDataFrameAnalyticsResponseTests extends AbstractXContentTestCase<StopDataFrameAnalyticsResponse> {

    @Override
    protected StopDataFrameAnalyticsResponse createTestInstance() {
        // A random "stopped" flag is the only state carried by the response.
        return new StopDataFrameAnalyticsResponse(randomBoolean());
    }

    @Override
    protected StopDataFrameAnalyticsResponse doParseInstance(XContentParser parser) throws IOException {
        return StopDataFrameAnalyticsResponse.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        // The parser must tolerate fields added by future server versions.
        return true;
    }
}
| robin13/elasticsearch | client/rest-high-level/src/test/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsResponseTests.java | Java | apache-2.0 | 1,094 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test clone snapshots from the client
*/
@Category(LargeTests.class)
public class TestCloneSnapshotFromClient {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] emptySnapshot; // snapshot taken before any data was loaded
private byte[] snapshotName0; // snapshot after the first 500-row load
private byte[] snapshotName1; // snapshot after the second 500-row load
private byte[] snapshotName2; // initialized in setup(); usage not shown in this excerpt
private int snapshot0Rows; // row count captured in snapshotName0
private int snapshot1Rows; // row count captured in snapshotName1
private byte[] tableName; // per-test source table name (timestamp-suffixed)
private HBaseAdmin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Snapshot support is off by default; every test here depends on it.
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
// Short intervals, pauses and retry counts keep the mini-cluster tests fast.
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
TEST_UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
TEST_UTIL.startMiniCluster(3); // 3-node mini cluster shared by all tests
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
// Tear down the shared mini cluster once the whole suite is done.
TEST_UTIL.shutdownMiniCluster();
}
/**
 * Initialize the tests with a table filled with some data
 * and two snapshots (snapshotName0, snapshotName1) of different states.
 * The tableName, snapshotNames and the number of rows in the snapshot are initialized.
 */
@Before
public void setup() throws Exception {
this.admin = TEST_UTIL.getHBaseAdmin();
long tid = System.currentTimeMillis();
// Timestamp-suffixed names keep each run isolated from leftover state.
tableName = Bytes.toBytes("testtb-" + tid);
emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
// create Table and disable it
createTable(tableName, FAMILY);
admin.disableTable(tableName);
// take an empty snapshot (table must be disabled for snapshotting here)
admin.snapshot(emptySnapshot, tableName);
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
try {
// enable table and insert data
admin.enableTable(tableName);
loadData(table, 500, FAMILY);
snapshot0Rows = TEST_UTIL.countRows(table); // rows present in snapshotName0
admin.disableTable(tableName);
// take a snapshot
admin.snapshot(snapshotName0, tableName);
// enable table and insert more data
admin.enableTable(tableName);
loadData(table, 500, FAMILY);
snapshot1Rows = TEST_UTIL.countRows(table); // rows present in snapshotName1
admin.disableTable(tableName);
// take a snapshot of the updated table
admin.snapshot(snapshotName1, tableName);
// re-enable table
admin.enableTable(tableName);
} finally {
table.close();
}
}
@After
public void tearDown() throws Exception {
if (admin.tableExists(tableName)) {
TEST_UTIL.deleteTable(tableName);
}
admin.deleteSnapshot(snapshotName0);
admin.deleteSnapshot(snapshotName1);
// Ensure the archiver to be empty
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
mfs.getFileSystem().delete(
new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
}
@Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis();
String tableName = "random-table-" + System.currentTimeMillis();
admin.cloneSnapshot(snapshotName, tableName);
}
@Test
public void testCloneSnapshot() throws IOException, InterruptedException {
byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
testCloneSnapshot(clonedTableName, emptySnapshot, 0);
}
private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName,
int snapshotRows) throws IOException, InterruptedException {
// create a new table from snapshot
admin.cloneSnapshot(snapshotName, tableName);
verifyRowCount(tableName, snapshotRows);
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
/**
* Verify that tables created from the snapshot are still alive after source table deletion.
*/
@Test
public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
// Clone a table from the first snapshot
byte[] clonedTableName = Bytes.toBytes("clonedtb1-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
verifyRowCount(clonedTableName, snapshot0Rows);
// Take a snapshot of this cloned table.
admin.disableTable(clonedTableName);
admin.snapshot(snapshotName2, clonedTableName);
// Clone the snapshot of the cloned table
byte[] clonedTableName2 = Bytes.toBytes("clonedtb2-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName2);
verifyRowCount(clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Remove the original table
admin.disableTable(tableName);
admin.deleteTable(tableName);
waitCleanerRun();
// Verify the first cloned table
admin.enableTable(clonedTableName);
verifyRowCount(clonedTableName, snapshot0Rows);
// Verify the second cloned table
admin.enableTable(clonedTableName2);
verifyRowCount(clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Delete the first cloned table
admin.disableTable(clonedTableName);
admin.deleteTable(clonedTableName);
waitCleanerRun();
// Verify the second cloned table
admin.enableTable(clonedTableName2);
verifyRowCount(clonedTableName2, snapshot0Rows);
// Clone a new table from cloned
byte[] clonedTableName3 = Bytes.toBytes("clonedtb3-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName3);
verifyRowCount(clonedTableName3, snapshot0Rows);
// Delete the cloned tables
admin.disableTable(clonedTableName2);
admin.deleteTable(clonedTableName2);
admin.disableTable(clonedTableName3);
admin.deleteTable(clonedTableName3);
admin.deleteSnapshot(snapshotName2);
}
// ==========================================================================
// Helpers
// ==========================================================================
private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
for (byte[] family: families) {
HColumnDescriptor hcd = new HColumnDescriptor(family);
htd.addFamily(hcd);
}
byte[][] splitKeys = new byte[16][];
byte[] hex = Bytes.toBytes("0123456789abcdef");
for (int i = 0; i < 16; ++i) {
splitKeys[i] = new byte[] { hex[i] };
}
admin.createTable(htd, splitKeys);
}
public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
byte[] qualifier = Bytes.toBytes("q");
table.setAutoFlush(false);
while (rows-- > 0) {
byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
Put put = new Put(key);
put.setWriteToWAL(false);
for (byte[] family: families) {
put.add(family, qualifier, value);
}
table.put(put);
}
table.flushCommits();
}
private void waitCleanerRun() throws InterruptedException {
TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
}
private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
assertEquals(expectedRows, TEST_UTIL.countRows(table));
table.close();
}
}
| zqxjjj/NobidaBase | target/hbase-0.94.9/hbase-0.94.9/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java | Java | apache-2.0 | 9,936 |
package org.tensorflow.demo;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.util.AttributeSet;
import android.util.TypedValue;
import android.view.View;
import org.tensorflow.demo.Classifier.Recognition;
import java.util.List;
/**
 * Overlay view that paints the current classification results as a list of
 * "title: confidence" lines on a translucent blue background.
 */
public class RecognitionScoreView extends View {
  /** Text height in density-independent pixels. */
  private static final float TEXT_SIZE_DIP = 24;

  private final float textSizePx;
  private final Paint fgPaint;
  private final Paint bgPaint;
  private List<Recognition> results;

  public RecognitionScoreView(final Context context, final AttributeSet set) {
    super(context, set);
    // Convert the dip constant into raw pixels for the current screen density.
    textSizePx =
        TypedValue.applyDimension(
            TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
    fgPaint = new Paint();
    fgPaint.setTextSize(textSizePx);
    bgPaint = new Paint();
    bgPaint.setColor(0xcc4285f4);
  }

  /** Stores the latest results and schedules a redraw. */
  public void setResults(final List<Recognition> results) {
    this.results = results;
    postInvalidate();
  }

  @Override
  public void onDraw(final Canvas canvas) {
    // Fill the whole view with the translucent background first.
    canvas.drawPaint(bgPaint);
    if (results == null) {
      return;
    }
    final float lineHeight = fgPaint.getTextSize() * 1.5f;
    final int x = 10;
    int y = (int) lineHeight;
    for (final Recognition recognition : results) {
      canvas.drawText(recognition.getTitle() + ": " + recognition.getConfidence(), x, y, fgPaint);
      y += lineHeight;
    }
  }
}
| liyu1990/tensorflow | tensorflow/examples/android/src/org/tensorflow/demo/RecognitionScoreView.java | Java | apache-2.0 | 1,410 |
/*
* Copyright 2015 Anton Tananaev ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.protocol;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder;
import org.traccar.BaseProtocol;
import org.traccar.TrackerServer;
import java.nio.ByteOrder;
import java.util.List;
public class NavisProtocol extends BaseProtocol {
    // Registers this tracker protocol under the name "navis".
    public NavisProtocol() {
        super("navis");
    }
    @Override
    public void initTrackerServers(List<TrackerServer> serverList) {
        TrackerServer server = new TrackerServer(new ServerBootstrap(), getName()) {
            @Override
            protected void addSpecificHandlers(ChannelPipeline pipeline) {
                // Split the byte stream on a length prefix: max frame 4 KiB, a
                // 2-byte length field at offset 12, a length adjustment of 2,
                // and no bytes stripped from the decoded frame (see the Netty
                // LengthFieldBasedFrameDecoder constructor for the exact
                // parameter semantics).
                pipeline.addLast("frameDecoder", new LengthFieldBasedFrameDecoder(4 * 1024, 12, 2, 2, 0));
                // Turn complete frames into positions via the protocol decoder.
                pipeline.addLast("objectDecoder", new NavisProtocolDecoder(NavisProtocol.this));
            }
        };
        // Multi-byte values in this protocol are read as little-endian.
        server.setEndianness(ByteOrder.LITTLE_ENDIAN);
        serverList.add(server);
    }
}
| vipien/traccar | src/org/traccar/protocol/NavisProtocol.java | Java | apache-2.0 | 1,640 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.pulsar.source.enumerator;
import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicPartition;
import org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit;
import java.util.Map;
import java.util.Set;
/**
 * The state class for pulsar source enumerator, used for storing the split state. This class is
 * managed and controlled by {@link SplitsAssignmentState}.
 *
 * <p>Instances are plain holders: the constructor stores the given collections without copying,
 * so ownership stays with the caller.
 */
public class PulsarSourceEnumState {

    /** The topic partitions that have been appended to this source. */
    private final Set<TopicPartition> appendedPartitions;

    /**
     * We convert the topic partition into a split and add to this pending list for assigning to a
     * reader. It is used for Key_Shared, Failover, Exclusive subscription.
     */
    private final Set<PulsarPartitionSplit> pendingPartitionSplits;

    /**
     * It is used for Shared subscription. When a reader is crashed in Shared subscription, its
     * splits would be put in here.
     */
    private final Map<Integer, Set<PulsarPartitionSplit>> sharedPendingPartitionSplits;

    /**
     * A {@link PulsarPartitionSplit} should be assigned for all flink readers. Using this map for
     * recording assign status.
     */
    private final Map<Integer, Set<String>> readerAssignedSplits;

    /**
     * Whether this enumerator state has completed its initialization. NOTE(review): the precise
     * trigger is defined by {@link SplitsAssignmentState}; presumably set after the first
     * partition discovery — verify against that class.
     */
    private final boolean initialized;

    public PulsarSourceEnumState(
            Set<TopicPartition> appendedPartitions,
            Set<PulsarPartitionSplit> pendingPartitionSplits,
            // Renamed from "pendingSharedPartitionSplits" to match the field it
            // initializes; parameter names are not part of the binary interface.
            Map<Integer, Set<PulsarPartitionSplit>> sharedPendingPartitionSplits,
            Map<Integer, Set<String>> readerAssignedSplits,
            boolean initialized) {
        this.appendedPartitions = appendedPartitions;
        this.pendingPartitionSplits = pendingPartitionSplits;
        this.sharedPendingPartitionSplits = sharedPendingPartitionSplits;
        this.readerAssignedSplits = readerAssignedSplits;
        this.initialized = initialized;
    }

    public Set<TopicPartition> getAppendedPartitions() {
        return appendedPartitions;
    }

    public Set<PulsarPartitionSplit> getPendingPartitionSplits() {
        return pendingPartitionSplits;
    }

    public Map<Integer, Set<PulsarPartitionSplit>> getSharedPendingPartitionSplits() {
        return sharedPendingPartitionSplits;
    }

    public Map<Integer, Set<String>> getReaderAssignedSplits() {
        return readerAssignedSplits;
    }

    public boolean isInitialized() {
        return initialized;
    }
}
| lincoln-lil/flink | flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumState.java | Java | apache-2.0 | 3,310 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.forms.client.fields.colorPicker;
import javax.enterprise.context.Dependent;
import javax.inject.Inject;
import com.google.gwt.event.dom.client.ClickEvent;
import com.google.gwt.event.logical.shared.ValueChangeEvent;
import com.google.gwt.event.logical.shared.ValueChangeHandler;
import com.google.gwt.event.shared.HandlerRegistration;
import com.google.gwt.user.client.ui.Composite;
import com.google.gwt.user.client.ui.HasValue;
import com.google.gwt.user.client.ui.UIObject;
import org.gwtbootstrap3.client.ui.Button;
import org.gwtbootstrap3.client.ui.TextBox;
import org.jboss.errai.ui.shared.api.annotations.DataField;
import org.jboss.errai.ui.shared.api.annotations.EventHandler;
import org.jboss.errai.ui.shared.api.annotations.Templated;
import org.uberfire.ext.widgets.common.client.colorpicker.ColorPickerDialog;
/**
 * Form widget that lets the user pick a color by clicking either the button or
 * the preview text box; the chosen value is stored as a "#rrggbb" string and
 * mirrored into the text box background.
 */
@Dependent
@Templated
public class ColorPickerWidget extends Composite implements HasValue<String> {

    @Inject
    @DataField
    private Button colorButton;

    @Inject
    @DataField
    private TextBox colorTextBox;

    // Current color in "#rrggbb" form; null until setValue() is first called.
    private String color;
    private boolean readOnly;

    @EventHandler("colorButton")
    public void onClickColorButton(final ClickEvent clickEvent) {
        showColorDialog(colorButton);
    }

    @EventHandler("colorTextBox")
    public void onClickColorTextBox(final ClickEvent clickEvent) {
        showColorDialog(colorTextBox);
    }

    /**
     * Opens the color picker dialog relative to the given widget and, unless
     * the dialog is canceled, fires a value change with the selected color.
     * No-op when the widget is read-only.
     */
    protected void showColorDialog(final UIObject owner) {
        if (readOnly) {
            return;
        }
        final ColorPickerDialog dlg = new ColorPickerDialog();
        // Keep the dialog above other popups/overlays.
        dlg.getElement().getStyle().setZIndex(9999);
        dlg.addDialogClosedHandler(event -> {
            if (!event.isCanceled()) {
                setValue("#" + dlg.getColor(),
                         true);
            }
        });
        // The dialog expects the color without the leading '#'. Guard against
        // a null value: getValue() is null before the first setValue() call,
        // and the previous code threw a NullPointerException here.
        String current = getValue();
        if (current == null) {
            current = "";
        } else if (current.startsWith("#")) {
            current = current.substring(1);
        }
        dlg.setColor(current);
        dlg.showRelativeTo(owner);
    }

    @Override
    public String getValue() {
        return color;
    }

    @Override
    public void setValue(final String value) {
        setValue(value,
                 false);
    }

    @Override
    public void setValue(final String value,
                         final boolean fireEvents) {
        String oldValue = color;
        color = value;
        initTextBox();
        if (fireEvents) {
            ValueChangeEvent.fireIfNotEqual(this,
                                            oldValue,
                                            color);
        }
    }

    // Reflect the current color in the text box background.
    protected void initTextBox() {
        colorTextBox.getElement().getStyle().setBackgroundColor(color);
    }

    @Override
    public HandlerRegistration addValueChangeHandler(ValueChangeHandler<String> handler) {
        return addHandler(handler,
                          ValueChangeEvent.getType());
    }

    /** Toggles read-only mode: clicks are ignored and the button is disabled. */
    public void setReadOnly(boolean readOnly) {
        this.readOnly = readOnly;
        colorButton.setEnabled(!readOnly);
    }
}
| jomarko/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-extensions/kie-wb-common-stunner-forms/kie-wb-common-stunner-forms-client/src/main/java/org/kie/workbench/common/stunner/forms/client/fields/colorPicker/ColorPickerWidget.java | Java | apache-2.0 | 3,755 |
#parse("Java Source Copyright.java")
#if (${PACKAGE_NAME} && ${PACKAGE_NAME} != "")package ${PACKAGE_NAME};#end
public interface ${NAME} {
}
| facebook/buck | .idea/fileTemplates/internal/Interface.java | Java | apache-2.0 | 141 |
/*
* Copyright 2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.spockframework.compiler;
import org.codehaus.groovy.ast.expr.Expression;
import org.codehaus.groovy.ast.expr.VariableExpression;
/**
 * Used to represent the argument to Specification.old() once it has been processed
 * by IRewriteResources.captureOldValue(). The original expression is
 * kept in case ConditionRewriter still needs it.
 *
 * @author Peter Niederwieser
 */
public class OldValueExpression extends VariableExpression {
  private final Expression originalExpression;

  public OldValueExpression(Expression originalExpression, String substitutedVariable) {
    super(substitutedVariable);
    this.originalExpression = originalExpression;
  }

  /**
   * Returns the original (pre-substitution) argument expression.
   */
  public Expression getOriginalExpression() {
    return originalExpression;
  }

  /**
   * @deprecated the name is misspelled; use {@link #getOriginalExpression()} instead.
   */
  @Deprecated
  public Expression getOrginalExpression() {
    return originalExpression;
  }
}
| siordache/spock | spock-core/src/main/java/org/spockframework/compiler/OldValueExpression.java | Java | apache-2.0 | 1,372 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.transport.netty4;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport;
import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
import org.elasticsearch.xpack.core.ssl.SSLService;
import org.elasticsearch.xpack.security.transport.filter.IPFilter;
/**
 * Netty4 server transport that layers security on top of
 * {@link SecurityNetty4Transport}: SSL channel initialization (inherited) plus
 * optional IP-address filtering via {@link IPFilter}.
 */
public class SecurityNetty4ServerTransport extends SecurityNetty4Transport {
    // IP-based connection filter; when null, no address filtering is installed.
    @Nullable private final IPFilter authenticator;
    public SecurityNetty4ServerTransport(
            final Settings settings,
            final ThreadPool threadPool,
            final NetworkService networkService,
            final BigArrays bigArrays,
            final NamedWriteableRegistry namedWriteableRegistry,
            final CircuitBreakerService circuitBreakerService,
            @Nullable final IPFilter authenticator,
            final SSLService sslService) {
        super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService, sslService);
        this.authenticator = authenticator;
    }
    @Override
    protected void doStart() {
        super.doStart();
        // Bound addresses only exist after the transport has started, so the
        // filter is wired up here rather than in the constructor.
        if (authenticator != null) {
            authenticator.setBoundTransportAddress(boundAddress(), profileBoundAddresses());
        }
    }
    @Override
    protected ChannelHandler getNoSslChannelInitializer(final String name) {
        return new IPFilterServerChannelInitializer(name);
    }
    @Override
    protected ServerChannelInitializer getSslChannelInitializer(final String name, final SSLConfiguration configuration) {
        return new SecurityServerChannelInitializer(name, configuration);
    }
    // Plain-text channel initializer that additionally installs the IP filter.
    public class IPFilterServerChannelInitializer extends ServerChannelInitializer {
        IPFilterServerChannelInitializer(final String name) {
            super(name);
        }
        @Override
        protected void initChannel(final Channel ch) throws Exception {
            super.initChannel(ch);
            maybeAddIPFilter(ch, name);
        }
    }
    // SSL channel initializer that additionally installs the IP filter.
    public class SecurityServerChannelInitializer extends SslChannelInitializer {
        SecurityServerChannelInitializer(final String name, final SSLConfiguration configuration) {
            super(name, configuration);
        }
        @Override
        protected void initChannel(final Channel ch) throws Exception {
            super.initChannel(ch);
            maybeAddIPFilter(ch, name);
        }
    }
    // Prepends the filter to the pipeline ("addFirst") so connections can be
    // rejected before any other handler sees bytes from the channel.
    private void maybeAddIPFilter(final Channel ch, final String name) {
        if (authenticator != null) {
            ch.pipeline().addFirst("ipfilter", new IpFilterRemoteAddressFilter(authenticator, name));
        }
    }
}
| gfyoung/elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java | Java | apache-2.0 | 3,357 |
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.settings.widget;
import android.content.res.Resources;
import android.text.SpannableStringBuilder;
/**
 * Utility to invert another {@link ChartAxis}: every point is mirrored across
 * the axis length, so value-to-point conversions run in the opposite direction
 * of the wrapped axis.
 */
public class InvertedChartAxis implements ChartAxis {
    private final ChartAxis mDelegate;
    private float mSize;

    public InvertedChartAxis(ChartAxis delegate) {
        mDelegate = delegate;
    }

    @Override
    public boolean setBounds(long min, long max) {
        return mDelegate.setBounds(min, max);
    }

    @Override
    public boolean setSize(float size) {
        // Remember the axis length; it is the mirror line for all conversions.
        mSize = size;
        return mDelegate.setSize(size);
    }

    @Override
    public float convertToPoint(long value) {
        return mSize - mDelegate.convertToPoint(value);
    }

    @Override
    public long convertToValue(float point) {
        return mDelegate.convertToValue(mSize - point);
    }

    @Override
    public long buildLabel(Resources res, SpannableStringBuilder builder, long value) {
        return mDelegate.buildLabel(res, builder, value);
    }

    @Override
    public float[] getTickPoints() {
        // Mirror every tick across the axis length, in place.
        final float[] ticks = mDelegate.getTickPoints();
        for (int i = ticks.length - 1; i >= 0; i--) {
            ticks[i] = mSize - ticks[i];
        }
        return ticks;
    }

    @Override
    public int shouldAdjustAxis(long value) {
        return mDelegate.shouldAdjustAxis(value);
    }
}
| Ant-Droid/android_packages_apps_Settings_OLD | src/com/android/settings/widget/InvertedChartAxis.java | Java | apache-2.0 | 2,009 |
package com.intellij.compiler;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.testFramework.PsiTestUtil;
import java.io.File;
import java.io.IOException;
import static com.intellij.util.io.TestFileSystemBuilder.fs;
/**
 * Verifies that changing a module's compiler output path causes the next build
 * to produce classes in the new directory and leave the old one empty.
 *
 * @author nik
 */
public class RecompileOnConfigurationChangeTest extends BaseCompilerTestCase {
  public void testChangeOutput() throws IOException {
    VirtualFile srcRoot = createFile("src/A.java", "class A{}").getParent();
    Module m = addModule("m", srcRoot);
    make(m);
    assertOutput(m, fs().file("A.class"));
    File oldOutput = getOutputDir(m);
    File newOutput = createTempDir("new-output");
    // Point the module at a fresh output directory ("false" = not for tests).
    PsiTestUtil.setCompilerOutputPath(m, VfsUtil.pathToUrl(FileUtil.toSystemIndependentName(newOutput.getAbsolutePath())), false);
    make(m);
    assertOutput(m, fs().file("A.class"));
    // The old output directory must be empty (or gone) after the rebuild.
    File[] files = oldOutput.listFiles();
    assertTrue(files == null || files.length == 0);
  }
}
| jk1/intellij-community | java/compiler/tests/com/intellij/compiler/RecompileOnConfigurationChangeTest.java | Java | apache-2.0 | 1,073 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.store.parquet;
import org.apache.drill.test.ClusterFixture;
import org.apache.drill.test.ClusterFixtureBuilder;
import org.apache.drill.test.ClusterTest;
import org.apache.drill.test.QueryBuilder;
import org.junit.BeforeClass;
import org.junit.Test;
import java.nio.file.Paths;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that a LIMIT clause is pushed down into the Parquet scan: the query
 * succeeds with the expected row count and the plan prunes the scan to a
 * single row group.
 */
public class TestParquetLimitPushDown extends ClusterTest {
  @BeforeClass
  public static void setup() throws Exception {
    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher);
    dirTestWatcher.copyResourceToRoot(Paths.get("parquet", "multirowgroup.parquet"));
    dirTestWatcher.copyResourceToRoot(Paths.get("parquet", "users"));
    startCluster(builder);
  }

  @Test
  public void testMultipleFiles() throws Exception {
    assertLimitPushDown("select * from dfs.`parquet/users` limit 1", 1);
  }

  @Test
  public void testMultipleRowGroups() throws Exception {
    assertLimitPushDown("select * from dfs.`parquet/multirowgroup.parquet` limit 1", 1);
  }

  @Test
  public void testLimitZero() throws Exception {
    assertLimitPushDown("select * from dfs.`parquet/users` limit 0", 0);
  }

  /**
   * Runs the query, asserts it succeeds with the expected row count, and
   * checks the text plan shows the scan was pruned to one row group.
   */
  private void assertLimitPushDown(String query, int expectedRows) throws Exception {
    QueryBuilder.QuerySummary summary = queryBuilder().sql(query).run();
    assertTrue(summary.succeeded());
    assertEquals(expectedRows, summary.recordCount());
    String plan = queryBuilder().sql(query).explainText();
    assertThat(plan, containsString("numRowGroups=1"));
  }
}
| johnnywale/drill | exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetLimitPushDown.java | Java | apache-2.0 | 2,917 |
/*=========================================================================
* Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* one or more patents listed at http://www.pivotal.io/patents.
*=========================================================================
*/
package com.gemstone.gemfire.internal.cache.partitioned.rebalance;
/**
 * A base class for rebalance directors that provides some
 * default implementations of methods on rebalance director.
 *
 * @author dsmith
 */
public abstract class RebalanceDirectorAdapter implements RebalanceDirector {

  // Default: always report that a rebalance is necessary, regardless of
  // redundancy impairment or persistence.
  @Override
  public boolean isRebalanceNecessary(boolean redundancyImpaired,
      boolean withPersistence) {
    return true;
  }

  // Default initialization treats the initial model as a membership change,
  // so subclasses only need to implement membershipChanged().
  @Override
  public void initialize(PartitionedRegionLoadModel model) {
    membershipChanged(model);
  }
}
| upthewaterspout/incubator-geode | gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/rebalance/RebalanceDirectorAdapter.java | Java | apache-2.0 | 966 |
/*
* Copyright (C) 2013 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.base;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import static java.lang.Character.MAX_SURROGATE;
import static java.lang.Character.MIN_SURROGATE;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
/**
* Low-level, high-performance utility methods related to the {@linkplain Charsets#UTF_8 UTF-8}
* character encoding. UTF-8 is defined in section D92 of <a
* href="http://www.unicode.org/versions/Unicode6.2.0/ch03.pdf">The Unicode Standard Core
* Specification, Chapter 3</a>.
*
* <p>The variant of UTF-8 implemented by this class is the restricted definition of UTF-8
* introduced in Unicode 3.1. One implication of this is that it rejects <a
* href="http://www.unicode.org/versions/corrigendum1.html">"non-shortest form"</a> byte sequences,
* even though the JDK decoder may accept them.
*
* @author Martin Buchholz
* @author Clément Roux
* @since 16.0
*/
@Beta
@GwtCompatible(emulated = true)
public final class Utf8 {
/**
* Returns the number of bytes in the UTF-8-encoded form of {@code sequence}. For a string, this
* method is equivalent to {@code string.getBytes(UTF_8).length}, but is more efficient in both
* time and space.
*
* @throws IllegalArgumentException if {@code sequence} contains ill-formed UTF-16 (unpaired
* surrogates)
*/
public static int encodedLength(CharSequence sequence) {
// Warning to maintainers: this implementation is highly optimized.
int utf16Length = sequence.length();
int utf8Length = utf16Length;
int i = 0;
// This loop optimizes for pure ASCII.
while (i < utf16Length && sequence.charAt(i) < 0x80) {
i++;
}
// This loop optimizes for chars less than 0x800.
for (; i < utf16Length; i++) {
char c = sequence.charAt(i);
if (c < 0x800) {
utf8Length += ((0x7f - c) >>> 31); // branch free!
} else {
utf8Length += encodedLengthGeneral(sequence, i);
break;
}
}
if (utf8Length < utf16Length) {
// Necessary and sufficient condition for overflow because of maximum 3x expansion
throw new IllegalArgumentException(
"UTF-8 length does not fit in int: " + (utf8Length + (1L << 32)));
}
return utf8Length;
}
private static int encodedLengthGeneral(CharSequence sequence, int start) {
int utf16Length = sequence.length();
int utf8Length = 0;
for (int i = start; i < utf16Length; i++) {
char c = sequence.charAt(i);
if (c < 0x800) {
utf8Length += (0x7f - c) >>> 31; // branch free!
} else {
utf8Length += 2;
// jdk7+: if (Character.isSurrogate(c)) {
if (MIN_SURROGATE <= c && c <= MAX_SURROGATE) {
// Check that we have a well-formed surrogate pair.
if (Character.codePointAt(sequence, i) == c) {
throw new IllegalArgumentException(unpairedSurrogateMsg(i));
}
i++;
}
}
}
return utf8Length;
}
/**
* Returns {@code true} if {@code bytes} is a <i>well-formed</i> UTF-8 byte sequence according to
* Unicode 6.0. Note that this is a stronger criterion than simply whether the bytes can be
* decoded. For example, some versions of the JDK decoder will accept "non-shortest form" byte
* sequences, but encoding never reproduces these. Such byte sequences are <i>not</i> considered
* well-formed.
*
* <p>This method returns {@code true} if and only if {@code Arrays.equals(bytes, new
* String(bytes, UTF_8).getBytes(UTF_8))} does, but is more efficient in both time and space.
*/
public static boolean isWellFormed(byte[] bytes) {
return isWellFormed(bytes, 0, bytes.length);
}
/**
* Returns whether the given byte array slice is a well-formed UTF-8 byte sequence, as defined by
* {@link #isWellFormed(byte[])}. Note that this can be false even when {@code
* isWellFormed(bytes)} is true.
*
* @param bytes the input buffer
* @param off the offset in the buffer of the first byte to read
* @param len the number of bytes to read from the buffer
*/
public static boolean isWellFormed(byte[] bytes, int off, int len) {
int end = off + len;
checkPositionIndexes(off, end, bytes.length);
// Look for the first non-ASCII character.
for (int i = off; i < end; i++) {
if (bytes[i] < 0) {
return isWellFormedSlowPath(bytes, i, end);
}
}
return true;
}
  /**
   * Slow path of {@code isWellFormed(byte[], int, int)}: validates {@code bytes[off..end)}
   * starting from the first non-ASCII byte. Relies throughout on signed {@code byte}
   * comparisons: lead and trailing UTF-8 bytes (0x80-0xFF) are negative as Java bytes, and a
   * valid trailing byte (0x80-0xBF) satisfies {@code b <= (byte) 0xBF}.
   */
  private static boolean isWellFormedSlowPath(byte[] bytes, int off, int end) {
    int index = off;
    while (true) {
      int byte1;
      // Optimize for interior runs of ASCII bytes.
      do {
        if (index >= end) {
          return true;
        }
      } while ((byte1 = bytes[index++]) >= 0);
      // From here byte1 is negative, i.e. a lead or (illegal) trailing byte 0x80-0xFF.
      if (byte1 < (byte) 0xE0) {
        // Two-byte form.
        if (index == end) {
          return false;
        }
        // Simultaneously check for illegal trailing-byte in leading position
        // and overlong 2-byte form.
        if (byte1 < (byte) 0xC2 || bytes[index++] > (byte) 0xBF) {
          return false;
        }
      } else if (byte1 < (byte) 0xF0) {
        // Three-byte form.
        if (index + 1 >= end) {
          return false;
        }
        int byte2 = bytes[index++];
        // Note: the trailing-byte checks below rely on short-circuit evaluation; each
        // `bytes[index++]` consumes exactly one byte only when the earlier tests pass.
        if (byte2 > (byte) 0xBF
            // Overlong? 5 most significant bits must not all be zero.
            || (byte1 == (byte) 0xE0 && byte2 < (byte) 0xA0)
            // Check for illegal surrogate codepoints.
            || (byte1 == (byte) 0xED && (byte) 0xA0 <= byte2)
            // Third byte trailing-byte test.
            || bytes[index++] > (byte) 0xBF) {
          return false;
        }
      } else {
        // Four-byte form.
        if (index + 2 >= end) {
          return false;
        }
        int byte2 = bytes[index++];
        if (byte2 > (byte) 0xBF
            // Check that 1 <= plane <= 16. Tricky optimized form of:
            // if (byte1 > (byte) 0xF4
            //     || byte1 == (byte) 0xF0 && byte2 < (byte) 0x90
            //     || byte1 == (byte) 0xF4 && byte2 > (byte) 0x8F)
            || (((byte1 << 28) + (byte2 - (byte) 0x90)) >> 30) != 0
            // Third byte trailing-byte test
            || bytes[index++] > (byte) 0xBF
            // Fourth byte trailing-byte test
            || bytes[index++] > (byte) 0xBF) {
          return false;
        }
      }
    }
  }
  /** Builds the exception message for an unpaired surrogate found at index {@code i}. */
  private static String unpairedSurrogateMsg(int i) {
    return "Unpaired surrogate at index " + i;
  }
  // Static utility class; not instantiable.
  private Utf8() {}
}
| rgoldberg/guava | guava/src/com/google/common/base/Utf8.java | Java | apache-2.0 | 7,169 |
/*
* Copyright 2013 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.util.concurrent;
import io.netty.util.internal.StringUtil;
import java.util.Locale;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A {@link ThreadFactory} implementation with a simple naming rule.
 *
 * <p>Threads are named {@code <poolName>-<poolId>-<threadId>}, where {@code poolId} is unique
 * per factory instance and {@code threadId} is unique per created thread.
 */
public class DefaultThreadFactory implements ThreadFactory {

    // Global counter: gives every factory instance a distinct pool id for its name prefix.
    private static final AtomicInteger poolId = new AtomicInteger();

    // Per-factory counter appended to the name of each thread this factory creates.
    private final AtomicInteger nextId = new AtomicInteger();
    private final String prefix;
    private final boolean daemon;
    private final int priority;
    protected final ThreadGroup threadGroup;

    public DefaultThreadFactory(Class<?> poolType) {
        this(poolType, false, Thread.NORM_PRIORITY);
    }

    public DefaultThreadFactory(String poolName) {
        this(poolName, false, Thread.NORM_PRIORITY);
    }

    public DefaultThreadFactory(Class<?> poolType, boolean daemon) {
        this(poolType, daemon, Thread.NORM_PRIORITY);
    }

    public DefaultThreadFactory(String poolName, boolean daemon) {
        this(poolName, daemon, Thread.NORM_PRIORITY);
    }

    public DefaultThreadFactory(Class<?> poolType, int priority) {
        this(poolType, false, priority);
    }

    public DefaultThreadFactory(String poolName, int priority) {
        this(poolName, false, priority);
    }

    public DefaultThreadFactory(Class<?> poolType, boolean daemon, int priority) {
        this(toPoolName(poolType), daemon, priority);
    }

    /**
     * Derives a pool name from a class: the simple class name with its first letter
     * lower-cased when it looks like a regular CamelCase name.
     */
    public static String toPoolName(Class<?> poolType) {
        if (poolType == null) {
            throw new NullPointerException("poolType");
        }

        String poolName = StringUtil.simpleClassName(poolType);
        if (poolName.isEmpty()) {
            return "unknown";
        }
        if (poolName.length() == 1) {
            return poolName.toLowerCase(Locale.US);
        }
        // De-capitalize only names of the usual "Xyz..." shape; leave e.g. "URL" untouched.
        if (Character.isUpperCase(poolName.charAt(0)) && Character.isLowerCase(poolName.charAt(1))) {
            return Character.toLowerCase(poolName.charAt(0)) + poolName.substring(1);
        }
        return poolName;
    }

    public DefaultThreadFactory(String poolName, boolean daemon, int priority, ThreadGroup threadGroup) {
        if (poolName == null) {
            throw new NullPointerException("poolName");
        }
        if (priority < Thread.MIN_PRIORITY || priority > Thread.MAX_PRIORITY) {
            throw new IllegalArgumentException(
                    "priority: " + priority + " (expected: Thread.MIN_PRIORITY <= priority <= Thread.MAX_PRIORITY)");
        }

        prefix = poolName + '-' + poolId.incrementAndGet() + '-';
        this.daemon = daemon;
        this.priority = priority;
        this.threadGroup = threadGroup;
    }

    public DefaultThreadFactory(String poolName, boolean daemon, int priority) {
        this(poolName, daemon, priority, System.getSecurityManager() == null
                ? Thread.currentThread().getThreadGroup()
                : System.getSecurityManager().getThreadGroup());
    }

    @Override
    public Thread newThread(Runnable r) {
        String name = prefix + nextId.incrementAndGet();
        Thread thread = newThread(new DefaultRunnableDecorator(r), name);
        try {
            if (thread.isDaemon() != daemon) {
                thread.setDaemon(daemon);
            }
            if (thread.getPriority() != priority) {
                thread.setPriority(priority);
            }
        } catch (Exception ignored) {
            // Best effort only: the thread is handed out even if the daemon flag
            // or priority could not be applied.
        }
        return thread;
    }

    protected Thread newThread(Runnable r, String name) {
        return new FastThreadLocalThread(threadGroup, r, name);
    }

    private static final class DefaultRunnableDecorator implements Runnable {

        private final Runnable task;

        DefaultRunnableDecorator(Runnable task) {
            this.task = task;
        }

        @Override
        public void run() {
            try {
                task.run();
            } finally {
                // Always scrub FastThreadLocal state so reused threads do not leak values.
                FastThreadLocal.removeAll();
            }
        }
    }
}
| wangcy6/storm_app | frame/java/netty-4.1/common/src/main/java/io/netty/util/concurrent/DefaultThreadFactory.java | Java | apache-2.0 | 4,727 |
/**
* Copyright (c) 2014-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.facebook.yoga;
import com.facebook.proguard.annotations.DoNotStrip;
@DoNotStrip
public enum YogaDirection {
  INHERIT(0),
  LTR(1),
  RTL(2);

  // Native-side integer value of this direction; immutable once constructed.
  private final int mIntValue;

  YogaDirection(int intValue) {
    mIntValue = intValue;
  }

  /** Returns the integer value used to represent this direction natively. */
  public int intValue() {
    return mIntValue;
  }

  /**
   * Maps a native integer value back to its {@link YogaDirection}.
   *
   * @throws IllegalArgumentException if {@code value} is not 0, 1 or 2
   */
  public static YogaDirection fromInt(int value) {
    switch (value) {
      case 0: return INHERIT;
      case 1: return LTR;
      case 2: return RTL;
      // Fixed typo in the message: "Unkown" -> "Unknown".
      default: throw new IllegalArgumentException("Unknown enum value: " + value);
    }
  }
}
| niukui/gitpro | node_modules/react-native/ReactAndroid/src/main/java/com/facebook/yoga/YogaDirection.java | Java | mit | 857 |
/*
* Copyright (C) 2015 Bilibili
* Copyright (C) 2015 Zhang Rui <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tv.danmaku.ijk.media.example.content;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import android.os.AsyncTask;
import android.support.v4.content.AsyncTaskLoader;
import android.text.TextUtils;
/**
 * Small SQLite-backed store of recently played media URLs, keyed by URL
 * (the table's URL column is UNIQUE, so re-saving a URL refreshes its row).
 */
public class RecentMediaStorage {
    private Context mAppContext;

    public RecentMediaStorage(Context context) {
        // Hold only the application context to avoid leaking an Activity.
        mAppContext = context.getApplicationContext();
    }

    /** Persists {@code url} on a background thread via {@link #saveUrl(String)}. */
    public void saveUrlAsync(String url) {
        new AsyncTask<String, Void, Void>() {
            @Override
            protected Void doInBackground(String... params) {
                saveUrl(params[0]);
                return null;
            }
        }.execute(url);
    }

    /** Stores {@code url} together with its derived display name and the current time. */
    public void saveUrl(String url) {
        ContentValues cv = new ContentValues();
        cv.putNull(Entry.COLUMN_NAME_ID);
        cv.put(Entry.COLUMN_NAME_URL, url);
        cv.put(Entry.COLUMN_NAME_LAST_ACCESS, System.currentTimeMillis());
        cv.put(Entry.COLUMN_NAME_NAME, getNameOfUrl(url));
        save(cv);
    }

    /**
     * Inserts or replaces one row in the recent-media table.
     *
     * <p>Fix: the helper (and with it the underlying database) is now closed in a
     * {@code finally} block; previously neither was ever closed, leaking a SQLite
     * connection per call.
     */
    public void save(ContentValues contentValue) {
        OpenHelper openHelper = new OpenHelper(mAppContext);
        try {
            SQLiteDatabase db = openHelper.getWritableDatabase();
            db.replace(Entry.TABLE_NAME, null, contentValue);
        } finally {
            openHelper.close();
        }
    }

    /** Returns the last path segment of {@code url}, or {@code ""} if there is none. */
    public static String getNameOfUrl(String url) {
        return getNameOfUrl(url, "");
    }

    /**
     * Returns the last path segment of {@code url}, or {@code defaultName} if the URL
     * has no '/' or ends with one.
     */
    public static String getNameOfUrl(String url, String defaultName) {
        String name = null;
        int pos = url.lastIndexOf('/');
        if (pos >= 0)
            name = url.substring(pos + 1);
        if (TextUtils.isEmpty(name))
            name = defaultName;
        return name;
    }

    /** Column/table name constants for the recent-media table. */
    public static class Entry {
        public static final String TABLE_NAME = "RecentMedia";
        public static final String COLUMN_NAME_ID = "id";
        public static final String COLUMN_NAME_URL = "url";
        public static final String COLUMN_NAME_NAME = "name";
        public static final String COLUMN_NAME_LAST_ACCESS = "last_access";
    }

    // Projection used by CursorLoader; duplicates the id as "_id" because Android
    // cursor adapters expect a column of that exact name.
    public static final String[] ALL_COLUMNS = new String[]{
            Entry.COLUMN_NAME_ID + " as _id",
            Entry.COLUMN_NAME_ID,
            Entry.COLUMN_NAME_URL,
            Entry.COLUMN_NAME_NAME,
            Entry.COLUMN_NAME_LAST_ACCESS};

    public static class OpenHelper extends SQLiteOpenHelper {
        private static final int DATABASE_VERSION = 1;
        private static final String DATABASE_NAME = "RecentMedia.db";
        private static final String SQL_CREATE_ENTRIES =
                " CREATE TABLE IF NOT EXISTS " + Entry.TABLE_NAME + " (" +
                        Entry.COLUMN_NAME_ID + " INTEGER PRIMARY KEY AUTOINCREMENT, " +
                        Entry.COLUMN_NAME_URL + " VARCHAR UNIQUE, " +
                        Entry.COLUMN_NAME_NAME + " VARCHAR, " +
                        Entry.COLUMN_NAME_LAST_ACCESS + " INTEGER) ";

        public OpenHelper(Context context) {
            super(context, DATABASE_NAME, null, DATABASE_VERSION);
        }

        @Override
        public void onCreate(SQLiteDatabase db) {
            db.execSQL(SQL_CREATE_ENTRIES);
        }

        @Override
        public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
            // Version 1 only; nothing to migrate yet.
        }
    }

    /** Loads the 100 most recently accessed entries, newest first. */
    public static class CursorLoader extends AsyncTaskLoader<Cursor> {
        public CursorLoader(Context context) {
            super(context);
        }

        @Override
        public Cursor loadInBackground() {
            Context context = getContext();
            OpenHelper openHelper = new OpenHelper(context);
            SQLiteDatabase db = openHelper.getReadableDatabase();
            // NOTE(review): the helper/db are intentionally not closed here because the
            // returned Cursor needs the open connection; the loader framework owns the
            // cursor's lifecycle. Verify the cursor is closed by its consumer.
            return db.query(Entry.TABLE_NAME, ALL_COLUMNS, null, null, null, null,
                    Entry.COLUMN_NAME_LAST_ACCESS + " DESC",
                    "100");
        }

        @Override
        protected void onStartLoading() {
            forceLoad();
        }
    }
}
| annidy/ijkplayer | android/ijkplayer/ijkplayer-example/src/main/java/tv/danmaku/ijk/media/example/content/RecentMediaStorage.java | Java | gpl-2.0 | 4,708 |
/*
* WorldEdit, a Minecraft world manipulation toolkit
* Copyright (C) sk89q <http://www.sk89q.com>
* Copyright (C) WorldEdit team and contributors
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.sk89q.worldedit.extent.world;
import com.sk89q.worldedit.Vector;
import com.sk89q.worldedit.WorldEditException;
import com.sk89q.worldedit.blocks.BaseBlock;
import com.sk89q.worldedit.blocks.BlockID;
import com.sk89q.worldedit.blocks.BlockType;
import com.sk89q.worldedit.extent.AbstractDelegateExtent;
import com.sk89q.worldedit.extent.Extent;
import com.sk89q.worldedit.world.World;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Handles various quirks when setting blocks, such as ice turning
 * into water or containers dropping their contents.
 */
public class BlockQuirkExtent extends AbstractDelegateExtent {

    private final World world;

    /**
     * Create a new instance.
     *
     * @param extent the extent
     * @param world the world
     */
    public BlockQuirkExtent(Extent extent, World world) {
        super(extent);
        checkNotNull(world);
        this.world = world;
    }

    @Override
    public boolean setBlock(Vector position, BaseBlock block) throws WorldEditException {
        final int existingType = getExtent().getLazyBlock(position).getType();

        if (BlockType.isContainerBlock(existingType)) {
            // Empty the container first so its contents are not dropped as items.
            world.clearContainerBlockContents(position);
        } else if (existingType == BlockID.ICE) {
            // Ice turns into water when replaced, so remove it outright first.
            world.setBlock(position, new BaseBlock(BlockID.AIR));
        }

        return super.setBlock(position, block);
    }

}
| UnlimitedFreedom/UF-WorldEdit | worldedit-core/src/main/java/com/sk89q/worldedit/extent/world/BlockQuirkExtent.java | Java | gpl-3.0 | 2,390 |
package edu.stanford.rsl.tutorial.fan.dynamicCollimation;
import ij.ImageJ;
import edu.stanford.rsl.conrad.data.numeric.Grid2D;
import edu.stanford.rsl.conrad.data.numeric.Grid3D;
import edu.stanford.rsl.conrad.data.numeric.InterpolationOperators;
import edu.stanford.rsl.conrad.data.numeric.NumericPointwiseOperators;
import edu.stanford.rsl.tutorial.fan.FanBeamProjector2D;
import edu.stanford.rsl.tutorial.fan.redundancy.BinaryWeights;
import edu.stanford.rsl.tutorial.phantoms.Phantom;
import edu.stanford.rsl.tutorial.phantoms.SheppLogan;
/**
 * Fills the redundant region of a one-sided (short-scan) fan-beam sinogram by copying the
 * conjugate rays measured at the beginning of the scan into the unmeasured rays at the end.
 * A ray at angle lambda and fan angle delta is conjugate to the ray at
 * (lambda - pi - 2*delta, -delta), which is what {@code createFullSinogram} mirrors.
 */
public class copyRedundantData extends Grid2D {
	// Source-to-isocenter distance used to convert detector position to fan angle delta.
	private final double focalLength;
	// Detector length (maxT), detector spacing (deltaT), pixel spacing, and angular step.
	private final double maxT;
	private final double deltaT, deltax, dLambda;
	// Grid dimensions: number of detector bins and number of projection angles.
	private final int maxTIndex, maxLambdaIndex;

	public copyRedundantData(final double focalLength, final double maxT,
			final double deltaT, double maxLambda, double dLambda) {
		// Call constructor from superclass
		super((int) Math.round(maxT / deltaT), (int)(Math.round(maxLambda / dLambda)) + 1);
		// Initialize parameters
		this.focalLength = focalLength;
		this.maxT = maxT;
		this.deltaT = deltaT;
		this.dLambda = dLambda;
		this.maxLambdaIndex = (int)(Math.round(maxLambda / dLambda)) + 1;
		this.maxTIndex = (int) Math.round(maxT / deltaT);
		// deltax is the overscan angle beyond a half rotation: maxLambda - pi.
		this.deltax = maxLambda - Math.PI;

		// Correct for scaling due to varying angle lambda
		// NOTE(review): this scales the freshly allocated grid itself; if Grid2D is
		// zero-initialized this is a no-op. Confirm whether the scaling was meant to be
		// applied to the sinogram instead.
		NumericPointwiseOperators.multiplyBy(this, (float)( maxLambda / (Math.PI)));
	}

	/**
	 * Copies redundant data within {@code OneSidedSinogram} (detector index = x,
	 * projection angle index = y) so the redundant angular range at the end of the scan
	 * is populated from the rays acquired at the start.
	 */
	private void createFullSinogram(Grid2D OneSidedSinogram)
	{
		double lambda, delta;
		// iterate over the detector elements
		for (int t = 0; t < maxTIndex; ++t) {
			// compute delta of the current ray (detector element)
			delta = Math.atan((t * deltaT - maxT / 2.d + 0.5*deltaT) / focalLength);
			// iterate over the projection angles
			for (int b = 0; b < maxLambdaIndex; ++b) {
				// compute the current lambda angle
				lambda = b * dLambda;

				// First case: Handles values for redundancies at the end of the scan
				// Copy values from redundancies at the beginning of the scan
				// (pi + 2*delta <= lambda <= pi + deltax, with a small epsilon tolerance).
				if (lambda >= ( Math.PI + 2*delta) && lambda <= (Math.PI + deltax) + 1e-12)
				{
					//double delta2 = -1.0*delta;
					// Conjugate angle of the current ray: lambda2 = lambda - pi - 2*delta.
					double lambda2 = -2.0*delta - Math.PI + lambda;
					double b2 = lambda2 / dLambda;
					//b2 = (b2 < 0) ? (0.0) : b2;
					// Mirrored detector index: the conjugate ray has fan angle -delta.
					int t2 = maxTIndex - t -1;//(int) Math.round(delta2 / deltaT);
					// Note maxTIndex - t2 - 1 == t, so this reads the interpolated value
					// at (b2, t) and writes it to the mirrored bin at angle b.
					OneSidedSinogram.setAtIndex(maxTIndex - t - 1, b, InterpolationOperators.interpolateLinear(OneSidedSinogram, b2, maxTIndex - t2 - 1));
				}
			}
		}
	}

	/** Applies the redundancy copy in place to the given one-sided sinogram. */
	public void applyToGrid(Grid2D OneSidedSinogram) {
		createFullSinogram(OneSidedSinogram);
	}

	/**
	 * Demo: forward-projects a Shepp-Logan phantom over a range of short-scan extents,
	 * applies binary redundancy weights and the redundancy copy, and shows the results.
	 */
	public static void main (String [] args){
		//fan beam bp parameters
		double maxT = 100;
		double deltaT = 1.d;
		// set focal length according to the fan angle
		double focalLength = (maxT/2.0-0.5)/Math.tan(20.0*Math.PI/180.0);

		Phantom ph = new SheppLogan(64);
		new ImageJ();

		int startBeta = 100;
		int endBeta = 260;
		Grid3D g = new Grid3D((int)maxT, 133, endBeta-startBeta +1, false);

		// Sweep the maximum rotation angle from startBeta+1 to endBeta+1 degrees.
		for (int i = startBeta; i < endBeta+1; ++i)
		{
			double maxBeta = (double)(i+1) * Math.PI * 2.0 / 360.0;
			double deltaBeta = maxBeta / 132;

			FanBeamProjector2D fbp_forward = new FanBeamProjector2D(focalLength, maxBeta, deltaBeta, maxT, deltaT);

			Grid2D halfSino = fbp_forward.projectRayDriven(ph);

			BinaryWeights BW = new BinaryWeights(focalLength, maxT, deltaT, maxBeta, deltaBeta);
			//BW.show();
			BW.applyToGrid(halfSino);
			//halfSino.show();

			copyRedundantData p = new copyRedundantData(focalLength, maxT, deltaT, maxBeta, deltaBeta);
			p.applyToGrid(halfSino);

			Grid2D dummy = new Grid2D(halfSino);
			g.setSubGrid(i-startBeta, dummy);
			//g.setSliceLabel("MaxBeta: " + Double.toString(maxBeta*180/Math.PI), i+1 - startBeta);
		}
		g.show();
	}
}
/*
* Copyright (C) 2010-2014 Andreas Maier
* CONRAD is developed as an Open Source project under the GNU General Public License (GPL).
*/ | YixingHuang/CONRAD-1 | src/edu/stanford/rsl/tutorial/fan/dynamicCollimation/copyRedundantData.java | Java | gpl-3.0 | 4,098 |
import java.util.*;
import test.*;
// Type-use annotation (ElementType.TYPE_USE, JSR 308) used below on the constructed type.
@java.lang.annotation.Target(java.lang.annotation.ElementType.TYPE_USE) @interface AssertTrue {}

// NOTE(review): this looks like rename-refactoring test fixture data ("before" state):
// `MyList` resolves via the `test.*` import while the nested `MyList123` interface is
// unused here. Do not "fix" the apparent mismatch without checking the test harness.
class Use
{
    interface MyList123{}
    final List<?> list = new @AssertTrue MyList<>();
}
| siosio/intellij-community | java/java-tests/testData/refactoring/renameClass/annotatedReference/before/Use.java | Java | apache-2.0 | 228 |
/*
* Copyright 2011 gitblit.com.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gitblit;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.Map;
import java.util.Properties;
import com.gitblit.utils.FileUtils;
/**
* Dynamically loads and reloads a properties file by keeping track of the last
* modification date.
*
* @author James Moger
*
*/
public class FileSettings extends IStoredSettings {

	// Backing properties file on disk; may be null until load()/merge() sets it.
	protected File propertiesFile;

	// In-memory cache of the file contents; refreshed lazily by read().
	private final Properties properties = new Properties();

	// Last observed modification time of the file, used to detect external changes.
	private volatile long lastModified;

	// Forces the next read() to reload even if lastModified appears unchanged
	// (works around coarse lastModified resolution on some JVMs, see issue-55).
	private volatile boolean forceReload;

	public FileSettings() {
		super(FileSettings.class);
	}

	public FileSettings(String file) {
		this();
		load(file);
	}

	// Points this instance at the given properties file without reading it yet.
	public void load(String file) {
		this.propertiesFile = new File(file);
	}

	/**
	 * Merges the provided settings into this instance. This will also
	 * set the target file for this instance IFF it is unset AND the merge
	 * source is also a FileSettings. This is a little sneaky.
	 */
	@Override
	public void merge(IStoredSettings settings) {
		super.merge(settings);

		// sneaky: set the target file from the merge source
		if (propertiesFile == null && settings instanceof FileSettings) {
			this.propertiesFile = ((FileSettings) settings).propertiesFile;
		}
	}

	/**
	 * Returns a properties object which contains the most recent contents of
	 * the properties file. Reloads only when the file exists and either a reload
	 * was forced or the file's modification time advanced; otherwise the cached
	 * properties are returned as-is.
	 */
	@Override
	protected synchronized Properties read() {
		if (propertiesFile != null && propertiesFile.exists() && (forceReload || (propertiesFile.lastModified() > lastModified))) {
			FileInputStream is = null;
			try {
				Properties props = new Properties();
				is = new FileInputStream(propertiesFile);
				props.load(is);

				// load properties after we have successfully read file
				properties.clear();
				properties.putAll(props);
				lastModified = propertiesFile.lastModified();
				forceReload = false;
			} catch (FileNotFoundException f) {
				// IGNORE - won't happen because file.exists() check above
			} catch (Throwable t) {
				logger.error("Failed to read " + propertiesFile.getName(), t);
			} finally {
				if (is != null) {
					try {
						is.close();
					} catch (Throwable t) {
						// IGNORE
					}
				}
			}
		}
		return properties;
	}

	// Removes all keys queued in `removals` from the file by deleting their lines
	// (the regex matches a whole `key = value` line, including backslash continuations).
	// NOTE(review): assumes FileUtils.readContent tolerates a missing file; verify.
	@Override
	public boolean saveSettings() {
		String content = FileUtils.readContent(propertiesFile, "\n");
		for (String key : removals) {
			String regex = "(?m)^(" + regExEscape(key) + "\\s*+=\\s*+)"
					+ "(?:[^\r\n\\\\]++|\\\\(?:\r?\n|\r|.))*+$";
			content = content.replaceAll(regex, "");
		}
		removals.clear();

		FileUtils.writeContent(propertiesFile, content);

		// manually set the forceReload flag because not all JVMs support real
		// millisecond resolution of lastModified. (issue-55)
		forceReload = true;
		return true;
	}

	/**
	 * Updates the specified settings in the settings file. Existing keys are
	 * rewritten in place; keys not present in the file are appended (issue-85).
	 */
	@Override
	public synchronized boolean saveSettings(Map<String, String> settings) {
		String content = FileUtils.readContent(propertiesFile, "\n");
		for (Map.Entry<String, String> setting:settings.entrySet()) {
			String regex = "(?m)^(" + regExEscape(setting.getKey()) + "\\s*+=\\s*+)"
					+ "(?:[^\r\n\\\\]++|\\\\(?:\r?\n|\r|.))*+$";
			String oldContent = content;
			content = content.replaceAll(regex, setting.getKey() + " = " + setting.getValue());
			if (content.equals(oldContent)) {
				// did not replace value because it does not exist in the file
				// append new setting to content (issue-85)
				content += "\n" + setting.getKey() + " = " + setting.getValue();
			}
		}
		FileUtils.writeContent(propertiesFile, content);

		// manually set the forceReload flag because not all JVMs support real
		// millisecond resolution of lastModified. (issue-55)
		forceReload = true;
		return true;
	}

	// Escapes the regex metacharacters that can appear in property keys
	// ('.', '$', '{') before embedding the key in the patterns above.
	private String regExEscape(String input) {
		return input.replace(".", "\\.").replace("$", "\\$").replace("{", "\\{");
	}

	/**
	 * @return the last modification date of the properties file
	 */
	protected long lastModified() {
		return lastModified;
	}

	/**
	 * @return the state of the force reload flag
	 */
	protected boolean forceReload() {
		return forceReload;
	}

	@Override
	public String toString() {
		return propertiesFile.getAbsolutePath();
	}
}
| firateren52/gitblit | src/main/java/com/gitblit/FileSettings.java | Java | apache-2.0 | 4,988 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
/**
 * Factory for {@code MetricsHBaseServerSource} instances, plus the shared
 * naming constants used when registering IPC metrics under JMX.
 */
public abstract class MetricsHBaseServerSourceFactory {
  /**
   * The name of the metrics
   */
  static final String METRICS_NAME = "IPC";

  /**
   * Description
   */
  static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC";

  /**
   * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under.
   *
   * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX
   */
  static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME;

  abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper);

  /**
   * From the name of the class that's starting up create the
   * context that an IPC source should register itself.
   *
   * @param serverName The name of the class that's starting up.
   * @return The Camel Cased context name.
   */
  protected static String createContextName(String serverName) {
    if (serverName.contains("HMaster")) {
      return "Master";
    }
    if (serverName.contains("HRegion")) {
      return "RegionServer";
    }
    // Neither master nor region server: fall back to the generic IPC context.
    return "IPC";
  }
}
| daidong/DominoHBase | hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java | Java | apache-2.0 | 1,960 |
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.diff.merge;
import com.intellij.diff.DiffContext;
import com.intellij.diff.FrameDiffTool;
import com.intellij.diff.contents.DiffContent;
import com.intellij.diff.requests.ContentDiffRequest;
import com.intellij.diff.requests.DiffRequest;
import com.intellij.diff.requests.ProxySimpleDiffRequest;
import com.intellij.diff.tools.binary.ThreesideBinaryDiffViewer;
import com.intellij.diff.tools.holders.BinaryEditorHolder;
import com.intellij.openapi.util.Disposer;
import com.intellij.util.concurrency.annotations.RequiresEdt;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.util.ArrayList;
import java.util.List;
/**
 * Merge tool for binary (non-text) contents: shows a three-side binary viewer
 * and lets the user resolve the merge by accepting one side wholesale.
 */
public class BinaryMergeTool implements MergeTool {
  public static final BinaryMergeTool INSTANCE = new BinaryMergeTool();

  @NotNull
  @Override
  public MergeViewer createComponent(@NotNull MergeContext context, @NotNull MergeRequest request) {
    // canShow() guarantees the request is a ThreesideMergeRequest before this cast.
    return new BinaryMergeViewer(context, (ThreesideMergeRequest)request);
  }

  @Override
  public boolean canShow(@NotNull MergeContext context, @NotNull MergeRequest request) {
    // Applicable only to three-side requests whose every content has a binary editor holder.
    if (!(request instanceof ThreesideMergeRequest)) return false;

    MergeUtil.ProxyDiffContext diffContext = new MergeUtil.ProxyDiffContext(context);
    for (DiffContent diffContent : ((ThreesideMergeRequest)request).getContents()) {
      if (!BinaryEditorHolder.BinaryEditorHolderFactory.INSTANCE.canShowContent(diffContent, diffContext)) return false;
    }

    return true;
  }

  /**
   * Viewer that adapts the merge request into a plain three-side binary diff request
   * and delegates the UI to {@link ThreesideBinaryDiffViewer}.
   */
  public static class BinaryMergeViewer implements MergeViewer {
    @NotNull private final MergeContext myMergeContext;
    @NotNull private final ThreesideMergeRequest myMergeRequest;

    // Diff-side wrappers derived from the merge request; myViewer renders myDiffRequest.
    @NotNull private final DiffContext myDiffContext;
    @NotNull private final ContentDiffRequest myDiffRequest;

    @NotNull private final MyThreesideViewer myViewer;

    public BinaryMergeViewer(@NotNull MergeContext context, @NotNull ThreesideMergeRequest request) {
      myMergeContext = context;
      myMergeRequest = request;

      myDiffContext = new MergeUtil.ProxyDiffContext(myMergeContext);
      myDiffRequest = new ProxySimpleDiffRequest(myMergeRequest.getTitle(),
                                                getDiffContents(myMergeRequest),
                                                getDiffContentTitles(myMergeRequest),
                                                myMergeRequest);

      myViewer = new MyThreesideViewer(myDiffContext, myDiffRequest);
    }

    // Copies the three merge contents into a fresh list for the diff request.
    @NotNull
    private static List<DiffContent> getDiffContents(@NotNull ThreesideMergeRequest mergeRequest) {
      return new ArrayList<>(mergeRequest.getContents());
    }

    // Titles with nulls replaced by defaults.
    @NotNull
    private static List<String> getDiffContentTitles(@NotNull ThreesideMergeRequest mergeRequest) {
      return MergeUtil.notNullizeContentTitles(mergeRequest.getContentTitles());
    }

    //
    // Impl
    //

    @NotNull
    @Override
    public JComponent getComponent() {
      return myViewer.getComponent();
    }

    @Nullable
    @Override
    public JComponent getPreferredFocusedComponent() {
      return myViewer.getPreferredFocusedComponent();
    }

    @NotNull
    @Override
    public ToolbarComponents init() {
      // Reuse the inner diff viewer's toolbar/status and add the merge close handler.
      ToolbarComponents components = new ToolbarComponents();

      FrameDiffTool.ToolbarComponents init = myViewer.init();
      components.statusPanel = init.statusPanel;
      components.toolbarActions = init.toolbarActions;

      components.closeHandler = () -> MergeUtil.showExitWithoutApplyingChangesDialog(this, myMergeRequest, myMergeContext, false);
      return components;
    }

    @Nullable
    @Override
    public Action getResolveAction(@NotNull final MergeResult result) {
      // No explicit action for RESOLVED: binary merges resolve by picking a side.
      if (result == MergeResult.RESOLVED) return null;

      return MergeUtil.createSimpleResolveAction(result, myMergeRequest, myMergeContext, this, false);
    }

    @Override
    public void dispose() {
      Disposer.dispose(myViewer);
    }

    //
    // Getters
    //

    @NotNull
    public MyThreesideViewer getViewer() {
      return myViewer;
    }

    //
    // Viewer
    //

    private static class MyThreesideViewer extends ThreesideBinaryDiffViewer {
      MyThreesideViewer(@NotNull DiffContext context, @NotNull DiffRequest request) {
        super(context, request);
      }

      @Override
      @RequiresEdt
      public void rediff(boolean trySync) {
        // Intentionally a no-op: binary contents have no diff to recompute.
      }
    }
  }
}
| jwren/intellij-community | platform/diff-impl/src/com/intellij/diff/merge/BinaryMergeTool.java | Java | apache-2.0 | 4,571 |
/**
* <copyright>
* </copyright>
*
* $Id$
*/
package org.wso2.developerstudio.eclipse.humantask.model.ht;
import org.eclipse.emf.common.util.EList;
/**
* <!-- begin-user-doc -->
* A representation of the model object '<em><b>TLogical People Group</b></em>'.
* <!-- end-user-doc -->
*
* <p>
* The following features are supported:
* <ul>
* <li>{@link org.wso2.developerstudio.eclipse.humantask.model.ht.TLogicalPeopleGroup#getParameter <em>Parameter</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.humantask.model.ht.TLogicalPeopleGroup#getName <em>Name</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.humantask.model.ht.TLogicalPeopleGroup#getReference <em>Reference</em>}</li>
* </ul>
* </p>
*
* @see org.wso2.developerstudio.eclipse.humantask.model.ht.HTPackage#getTLogicalPeopleGroup()
* @model extendedMetaData="name='tLogicalPeopleGroup' kind='elementOnly'"
* @generated
*/
public interface TLogicalPeopleGroup extends TExtensibleElements {
	/**
	 * Returns the value of the '<em><b>Parameter</b></em>' containment reference list.
	 * The list contents are of type {@link org.wso2.developerstudio.eclipse.humantask.model.ht.TParameter}.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The input parameters of this logical people group; each {@link TParameter} is a
	 * contained child element serialized as a 'parameter' element.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Parameter</em>' containment reference list.
	 * @see org.wso2.developerstudio.eclipse.humantask.model.ht.HTPackage#getTLogicalPeopleGroup_Parameter()
	 * @model containment="true"
	 *        extendedMetaData="kind='element' name='parameter' namespace='##targetNamespace'"
	 * @generated
	 */
	EList<TParameter> getParameter();

	/**
	 * Returns the value of the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The required NCName attribute that identifies this logical people group.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Name</em>' attribute.
	 * @see #setName(String)
	 * @see org.wso2.developerstudio.eclipse.humantask.model.ht.HTPackage#getTLogicalPeopleGroup_Name()
	 * @model dataType="org.eclipse.emf.ecore.xml.type.NCName" required="true"
	 *        extendedMetaData="kind='attribute' name='name'"
	 * @generated
	 */
	String getName();

	/**
	 * Sets the value of the '{@link org.wso2.developerstudio.eclipse.humantask.model.ht.TLogicalPeopleGroup#getName <em>Name</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @param value the new value of the '<em>Name</em>' attribute.
	 * @see #getName()
	 * @generated
	 */
	void setName(String value);

	/**
	 * Returns the value of the '<em><b>Reference</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <p>
	 * Optional NCName attribute. NOTE(review): presumably refers to the name of another
	 * logical people group; confirm against the WS-HumanTask specification.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Reference</em>' attribute.
	 * @see #setReference(String)
	 * @see org.wso2.developerstudio.eclipse.humantask.model.ht.HTPackage#getTLogicalPeopleGroup_Reference()
	 * @model dataType="org.eclipse.emf.ecore.xml.type.NCName"
	 *        extendedMetaData="kind='attribute' name='reference'"
	 * @generated
	 */
	String getReference();

	/**
	 * Sets the value of the '{@link org.wso2.developerstudio.eclipse.humantask.model.ht.TLogicalPeopleGroup#getReference <em>Reference</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @param value the new value of the '<em>Reference</em>' attribute.
	 * @see #getReference()
	 * @generated
	 */
	void setReference(String value);

} // TLogicalPeopleGroup
| kaviththiranga/developer-studio | humantask/org.wso2.tools.humantask.model/src/org/wso2/carbonstudio/eclipse/humantask/model/ht/TLogicalPeopleGroup.java | Java | apache-2.0 | 3,744 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Autogenerated by Thrift Compiler (0.9.2)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package org.apache.zeppelin.interpreter.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.annotation.Generated;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-10-22")
// Thrift-generated struct: carries one interpreter result message as two optional
// String fields, "type" (field id 1) and "data" (field id 2). Do not edit by hand;
// regenerate with the Thrift compiler (see the @Generated annotation above).
public class RemoteInterpreterResultMessage implements org.apache.thrift.TBase<RemoteInterpreterResultMessage, RemoteInterpreterResultMessage._Fields>, java.io.Serializable, Cloneable, Comparable<RemoteInterpreterResultMessage> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RemoteInterpreterResultMessage");
  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.STRING, (short)1);
  private static final org.apache.thrift.protocol.TField DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("data", org.apache.thrift.protocol.TType.STRING, (short)2);
  // Serialization schemes: read()/write() below pick the factory matching the protocol in use.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new RemoteInterpreterResultMessageStandardSchemeFactory());
    schemes.put(TupleScheme.class, new RemoteInterpreterResultMessageTupleSchemeFactory());
  }
  public String type; // required
  public String data; // required
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    TYPE((short)1, "type"),
    DATA((short)2, "data");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // TYPE
          return TYPE;
        case 2: // DATA
          return DATA;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.DATA, new org.apache.thrift.meta_data.FieldMetaData("data", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RemoteInterpreterResultMessage.class, metaDataMap);
  }
  public RemoteInterpreterResultMessage() {
  }
  public RemoteInterpreterResultMessage(
    String type,
    String data)
  {
    this();
    this.type = type;
    this.data = data;
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public RemoteInterpreterResultMessage(RemoteInterpreterResultMessage other) {
    if (other.isSetType()) {
      this.type = other.type;
    }
    if (other.isSetData()) {
      this.data = other.data;
    }
  }
  public RemoteInterpreterResultMessage deepCopy() {
    return new RemoteInterpreterResultMessage(this);
  }
  @Override
  public void clear() {
    this.type = null;
    this.data = null;
  }
  public String getType() {
    return this.type;
  }
  public RemoteInterpreterResultMessage setType(String type) {
    this.type = type;
    return this;
  }
  public void unsetType() {
    this.type = null;
  }
  /** Returns true if field type is set (has been assigned a value) and false otherwise */
  public boolean isSetType() {
    return this.type != null;
  }
  public void setTypeIsSet(boolean value) {
    if (!value) {
      this.type = null;
    }
  }
  public String getData() {
    return this.data;
  }
  public RemoteInterpreterResultMessage setData(String data) {
    this.data = data;
    return this;
  }
  public void unsetData() {
    this.data = null;
  }
  /** Returns true if field data is set (has been assigned a value) and false otherwise */
  public boolean isSetData() {
    return this.data != null;
  }
  public void setDataIsSet(boolean value) {
    if (!value) {
      this.data = null;
    }
  }
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case TYPE:
      if (value == null) {
        unsetType();
      } else {
        setType((String)value);
      }
      break;
    case DATA:
      if (value == null) {
        unsetData();
      } else {
        setData((String)value);
      }
      break;
    }
  }
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case TYPE:
      return getType();
    case DATA:
      return getData();
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case TYPE:
      return isSetType();
    case DATA:
      return isSetData();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof RemoteInterpreterResultMessage)
      return this.equals((RemoteInterpreterResultMessage)that);
    return false;
  }
  public boolean equals(RemoteInterpreterResultMessage that) {
    if (that == null)
      return false;
    boolean this_present_type = true && this.isSetType();
    boolean that_present_type = true && that.isSetType();
    if (this_present_type || that_present_type) {
      if (!(this_present_type && that_present_type))
        return false;
      if (!this.type.equals(that.type))
        return false;
    }
    boolean this_present_data = true && this.isSetData();
    boolean that_present_data = true && that.isSetData();
    if (this_present_data || that_present_data) {
      if (!(this_present_data && that_present_data))
        return false;
      if (!this.data.equals(that.data))
        return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    List<Object> list = new ArrayList<Object>();
    boolean present_type = true && (isSetType());
    list.add(present_type);
    if (present_type)
      list.add(type);
    boolean present_data = true && (isSetData());
    list.add(present_data);
    if (present_data)
      list.add(data);
    return list.hashCode();
  }
  @Override
  public int compareTo(RemoteInterpreterResultMessage other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetType()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetData()).compareTo(other.isSetData());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetData()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.data, other.data);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("RemoteInterpreterResultMessage(");
    boolean first = true;
    sb.append("type:");
    if (this.type == null) {
      sb.append("null");
    } else {
      sb.append(this.type);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("data:");
    if (this.data == null) {
      sb.append("null");
    } else {
      sb.append(this.data);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  // Standard scheme: tag-prefixed field encoding; unrecognized fields are skipped on read.
  private static class RemoteInterpreterResultMessageStandardSchemeFactory implements SchemeFactory {
    public RemoteInterpreterResultMessageStandardScheme getScheme() {
      return new RemoteInterpreterResultMessageStandardScheme();
    }
  }
  private static class RemoteInterpreterResultMessageStandardScheme extends StandardScheme<RemoteInterpreterResultMessage> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, RemoteInterpreterResultMessage struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // TYPE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.type = iprot.readString();
              struct.setTypeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // DATA
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.data = iprot.readString();
              struct.setDataIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, RemoteInterpreterResultMessage struct) throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.type != null) {
        oprot.writeFieldBegin(TYPE_FIELD_DESC);
        oprot.writeString(struct.type);
        oprot.writeFieldEnd();
      }
      if (struct.data != null) {
        oprot.writeFieldBegin(DATA_FIELD_DESC);
        oprot.writeString(struct.data);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  // Tuple scheme (TTupleProtocol only): a bitset of present fields followed by just those values.
  private static class RemoteInterpreterResultMessageTupleSchemeFactory implements SchemeFactory {
    public RemoteInterpreterResultMessageTupleScheme getScheme() {
      return new RemoteInterpreterResultMessageTupleScheme();
    }
  }
  private static class RemoteInterpreterResultMessageTupleScheme extends TupleScheme<RemoteInterpreterResultMessage> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, RemoteInterpreterResultMessage struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetType()) {
        optionals.set(0);
      }
      if (struct.isSetData()) {
        optionals.set(1);
      }
      oprot.writeBitSet(optionals, 2);
      if (struct.isSetType()) {
        oprot.writeString(struct.type);
      }
      if (struct.isSetData()) {
        oprot.writeString(struct.data);
      }
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, RemoteInterpreterResultMessage struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        struct.type = iprot.readString();
        struct.setTypeIsSet(true);
      }
      if (incoming.get(1)) {
        struct.data = iprot.readString();
        struct.setDataIsSet(true);
      }
    }
  }
}
| anthonycorbacho/incubator-zeppelin | zeppelin-interpreter/src/main/java/org/apache/zeppelin/interpreter/thrift/RemoteInterpreterResultMessage.java | Java | apache-2.0 | 16,116 |
/*
* Copyright 2014-2015 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.rest.exceptions;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
/**
* Mapper for illegal argument exceptions to the BAD_REQUEST response code.
*/
@Provider
public class IllegalArgumentExceptionMapper extends AbstractMapper<IllegalArgumentException> {
    /**
     * Maps an {@link IllegalArgumentException} — i.e. a malformed client
     * request — to an HTTP 400 (Bad Request) response.
     *
     * @return {@code Response.Status.BAD_REQUEST}
     */
    @Override
    protected Response.Status responseStatus() {
        return Response.Status.BAD_REQUEST;
    }
}
| packet-tracker/onos | web/api/src/main/java/org/onosproject/rest/exceptions/IllegalArgumentExceptionMapper.java | Java | apache-2.0 | 1,030 |
/*
* Copyright 2012-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.loader.util;
import java.nio.charset.Charset;
/**
* Simple wrapper around a byte array that represents an ASCII. Used for performance
* reasons to save constructing Strings for ZIP data.
*
* @author Phillip Webb
*/
public final class AsciiBytes {

	private static final Charset UTF_8 = Charset.forName("UTF-8");

	private static final int INITIAL_HASH = 7;

	private static final int MULTIPLIER = 31;

	// Backing array; instances may be *views* into a larger array, delimited
	// by offset/length, so all accesses must be offset-relative.
	private final byte[] bytes;

	private final int offset;

	private final int length;

	// Lazily-computed String form, cached by toString().
	private String string;

	/**
	 * Create a new {@link AsciiBytes} from the specified String.
	 * @param string the source string (stored as its UTF-8 bytes)
	 */
	public AsciiBytes(String string) {
		this(string.getBytes(UTF_8));
		this.string = string;
	}

	/**
	 * Create a new {@link AsciiBytes} from the specified bytes. NOTE: underlying bytes
	 * are not expected to change.
	 * @param bytes the bytes
	 */
	public AsciiBytes(byte[] bytes) {
		this(bytes, 0, bytes.length);
	}

	/**
	 * Create a new {@link AsciiBytes} from the specified bytes. NOTE: underlying bytes
	 * are not expected to change.
	 * @param bytes the bytes
	 * @param offset the offset
	 * @param length the length
	 */
	public AsciiBytes(byte[] bytes, int offset, int length) {
		if (offset < 0 || length < 0 || (offset + length) > bytes.length) {
			throw new IndexOutOfBoundsException();
		}
		this.bytes = bytes;
		this.offset = offset;
		this.length = length;
	}

	public int length() {
		return this.length;
	}

	public boolean startsWith(AsciiBytes prefix) {
		if (this == prefix) {
			return true;
		}
		if (prefix.length > this.length) {
			return false;
		}
		for (int i = 0; i < prefix.length; i++) {
			if (this.bytes[i + this.offset] != prefix.bytes[i + prefix.offset]) {
				return false;
			}
		}
		return true;
	}

	public boolean endsWith(AsciiBytes postfix) {
		if (this == postfix) {
			return true;
		}
		if (postfix.length > this.length) {
			return false;
		}
		for (int i = 0; i < postfix.length; i++) {
			if (this.bytes[this.offset + (this.length - 1) - i] != postfix.bytes[postfix.offset
					+ (postfix.length - 1) - i]) {
				return false;
			}
		}
		return true;
	}

	public AsciiBytes substring(int beginIndex) {
		return substring(beginIndex, this.length);
	}

	public AsciiBytes substring(int beginIndex, int endIndex) {
		int length = endIndex - beginIndex;
		// Bug fix: the bounds check must be expressed in view-relative terms.
		// The previous check compared (this.offset + length) against this.length,
		// which wrongly rejected valid ranges on substring views (non-zero offset).
		if (beginIndex < 0 || endIndex > this.length || length < 0) {
			throw new IndexOutOfBoundsException();
		}
		return new AsciiBytes(this.bytes, this.offset + beginIndex, length);
	}

	public AsciiBytes append(String string) {
		if (string == null || string.length() == 0) {
			return this;
		}
		return append(string.getBytes(UTF_8));
	}

	public AsciiBytes append(AsciiBytes asciiBytes) {
		if (asciiBytes == null || asciiBytes.length() == 0) {
			return this;
		}
		// Bug fix: honor the other instance's offset/length. Passing only
		// asciiBytes.bytes appended the entire backing array when the other
		// instance was a substring view.
		return append(asciiBytes.bytes, asciiBytes.offset, asciiBytes.length);
	}

	public AsciiBytes append(byte[] bytes) {
		if (bytes == null || bytes.length == 0) {
			return this;
		}
		return append(bytes, 0, bytes.length);
	}

	// Shared range-aware concatenation used by the public append overloads.
	private AsciiBytes append(byte[] bytes, int offset, int length) {
		byte[] combined = new byte[this.length + length];
		System.arraycopy(this.bytes, this.offset, combined, 0, this.length);
		System.arraycopy(bytes, offset, combined, this.length, length);
		return new AsciiBytes(combined);
	}

	@Override
	public String toString() {
		if (this.string == null) {
			this.string = new String(this.bytes, this.offset, this.length, UTF_8);
		}
		return this.string;
	}

	@Override
	public int hashCode() {
		int hash = INITIAL_HASH;
		for (int i = 0; i < this.length; i++) {
			hash = MULTIPLIER * hash + this.bytes[this.offset + i];
		}
		return hash;
	}

	@Override
	public boolean equals(Object obj) {
		if (obj == null) {
			return false;
		}
		if (this == obj) {
			return true;
		}
		if (obj.getClass().equals(AsciiBytes.class)) {
			AsciiBytes other = (AsciiBytes) obj;
			if (this.length == other.length) {
				for (int i = 0; i < this.length; i++) {
					if (this.bytes[this.offset + i] != other.bytes[other.offset + i]) {
						return false;
					}
				}
				return true;
			}
		}
		return false;
	}

}
| Vad1mo/smartuberjar | src/main/java/org/springframework/boot/loader/util/AsciiBytes.java | Java | apache-2.0 | 4,614 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sling.jackrabbit.usermanager.impl;
import java.io.UnsupportedEncodingException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Collection;
import java.util.Map;
import javax.jcr.RepositoryException;
import javax.jcr.Session;
import javax.servlet.Servlet;
import org.apache.jackrabbit.api.security.user.Authorizable;
import org.apache.jackrabbit.api.security.user.User;
import org.apache.jackrabbit.api.security.user.UserManager;
import org.apache.sling.commons.osgi.OsgiUtil;
import org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo;
import org.apache.sling.jcr.base.util.AccessControlUtil;
import org.osgi.framework.BundleContext;
import org.osgi.framework.InvalidSyntaxException;
import org.osgi.framework.ServiceReference;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class to assist in the usage of access control of users/groups from scripts.
*
* The default access control policy defined by this provider has the following
* characteristics:
* <ul>
* <li>everybody has READ permission to all items,</li>
*
* <li>every known user is allowed to modify it's own properties except for
* her/his group membership,</li>
* </ul>
*/
@Component(service=AuthorizablePrivilegesInfo.class,
    property={
        AuthorizablePrivilegesInfoImpl.PAR_USER_ADMIN_GROUP_NAME + "=" + AuthorizablePrivilegesInfoImpl.DEFAULT_USER_ADMIN_GROUP_NAME,
        AuthorizablePrivilegesInfoImpl.PAR_GROUP_ADMIN_GROUP_NAME + "=" + AuthorizablePrivilegesInfoImpl.DEFAULT_GROUP_ADMIN_GROUP_NAME
})
public class AuthorizablePrivilegesInfoImpl implements AuthorizablePrivilegesInfo {

    /** default log */
    private final Logger log = LoggerFactory.getLogger(getClass());

    /**
     * The default 'User administrator' group name
     *
     * @see #PAR_USER_ADMIN_GROUP_NAME
     */
    static final String DEFAULT_USER_ADMIN_GROUP_NAME = "UserAdmin";

    /**
     * The name of the configuration parameter providing the
     * 'User administrator' group name.
     */
    static final String PAR_USER_ADMIN_GROUP_NAME = "user.admin.group.name";

    /**
     * The default 'Group administrator' group name
     *
     * @see #PAR_GROUP_ADMIN_GROUP_NAME
     */
    static final String DEFAULT_GROUP_ADMIN_GROUP_NAME = "GroupAdmin";

    /**
     * The name of the configuration parameter providing the
     * 'Group administrator' group name.
     */
    static final String PAR_GROUP_ADMIN_GROUP_NAME = "group.admin.group.name";

    /** The bundle context; needed to look up the user-creation servlet in {@link #canAddUser}. */
    private BundleContext bundleContext;

    /**
     * Determines whether the user bound to the given session is the repository
     * administrator.
     *
     * @param jcrSession the session whose user is checked
     * @return true if the session user could be resolved to a User that reports isAdmin()
     * @throws RepositoryException if the user lookup fails
     */
    private boolean isAdminUser(Session jcrSession) throws RepositoryException {
        UserManager userManager = AccessControlUtil.getUserManager(jcrSession);
        Authorizable currentUser = userManager.getAuthorizable(jcrSession.getUserID());
        // getAuthorizable may return null; guard before casting/calling to
        // avoid a NullPointerException escaping the can* methods.
        return (currentUser instanceof User) && ((User) currentUser).isAdmin();
    }

    /* (non-Javadoc)
     * @see org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo#canAddGroup(javax.jcr.Session)
     */
    public boolean canAddGroup(Session jcrSession) {
        try {
            return isAdminUser(jcrSession); // admin user has full control
        } catch (RepositoryException e) {
            // pass the exception to the logger so the cause is not lost
            log.warn("Failed to determine if {} can add a new group", jcrSession.getUserID(), e);
        }
        return false;
    }

    /**
     * Checks whether the servlet that handles user creation advertises
     * self-registration support via its 'self.registration.enabled' property.
     * Only the first servlet declaring the property is considered.
     */
    private boolean isSelfRegistrationEnabled() throws InvalidSyntaxException {
        String filter = "(&(sling.servlet.resourceTypes=sling/users)(|(sling.servlet.methods=POST)(sling.servlet.selectors=create)))";
        Collection<ServiceReference<Servlet>> serviceReferences = bundleContext.getServiceReferences(Servlet.class, filter);
        if (serviceReferences != null) {
            String propName = "self.registration.enabled";
            for (ServiceReference<Servlet> serviceReference : serviceReferences) {
                Object propValue = serviceReference.getProperty(propName);
                if (propValue != null) {
                    return Boolean.TRUE.equals(propValue);
                }
            }
        }
        return false;
    }

    /* (non-Javadoc)
     * @see org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo#canAddUser(javax.jcr.Session)
     */
    public boolean canAddUser(Session jcrSession) {
        try {
            // if self-registration is enabled, then anyone can create a user
            if (bundleContext != null && isSelfRegistrationEnabled()) {
                return true;
            }
            return isAdminUser(jcrSession); // admin user has full control
        } catch (RepositoryException | InvalidSyntaxException e) {
            log.warn("Failed to determine if {} can add a new user", jcrSession.getUserID(), e);
        }
        return false;
    }

    /* (non-Javadoc)
     * @see org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo#canRemove(javax.jcr.Session, java.lang.String)
     */
    public boolean canRemove(Session jcrSession, String principalId) {
        try {
            return isAdminUser(jcrSession); // admin user has full control
        } catch (RepositoryException e) {
            log.warn("Failed to determine if {} can remove authorizable {}", jcrSession.getUserID(), principalId, e);
        }
        return false;
    }

    /* (non-Javadoc)
     * @see org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo#canUpdateGroupMembers(javax.jcr.Session, java.lang.String)
     */
    public boolean canUpdateGroupMembers(Session jcrSession, String groupId) {
        try {
            return isAdminUser(jcrSession); // admin user has full control
        } catch (RepositoryException e) {
            log.warn("Failed to determine if {} can update members of group {}", jcrSession.getUserID(), groupId, e);
        }
        return false;
    }

    /* (non-Javadoc)
     * @see org.apache.sling.jackrabbit.usermanager.AuthorizablePrivilegesInfo#canUpdateProperties(javax.jcr.Session, java.lang.String)
     */
    public boolean canUpdateProperties(Session jcrSession, String principalId) {
        try {
            if (jcrSession.getUserID().equals(principalId)) {
                // user is allowed to update it's own properties
                return true;
            }
            return isAdminUser(jcrSession); // admin user has full control
        } catch (RepositoryException e) {
            log.warn("Failed to determine if {} can update properties of authorizable {}", jcrSession.getUserID(), principalId, e);
        }
        return false;
    }

    // ---------- SCR Integration ----------------------------------------------

    @Activate
    protected void activate(BundleContext bundleContext, Map<String, Object> properties)
            throws InvalidKeyException, NoSuchAlgorithmException,
            IllegalStateException, UnsupportedEncodingException {
        this.bundleContext = bundleContext;
        String userAdminGroupName = OsgiUtil.toString(properties.get(PAR_USER_ADMIN_GROUP_NAME), null);
        if (userAdminGroupName != null && !DEFAULT_USER_ADMIN_GROUP_NAME.equals(userAdminGroupName)) {
            log.warn("Configuration setting for {} is deprecated and will not have any effect", PAR_USER_ADMIN_GROUP_NAME);
        }
        String groupAdminGroupName = OsgiUtil.toString(properties.get(PAR_GROUP_ADMIN_GROUP_NAME), null);
        // bug fix: compare against groupAdminGroupName (previously this
        // mistakenly re-checked userAdminGroupName, so a customized group
        // admin name never produced the deprecation warning)
        if (groupAdminGroupName != null && !DEFAULT_GROUP_ADMIN_GROUP_NAME.equals(groupAdminGroupName)) {
            log.warn("Configuration setting for {} is deprecated and will not have any effect", PAR_GROUP_ADMIN_GROUP_NAME);
        }
    }
}
| labertasch/sling | bundles/jcr/jackrabbit-usermanager/src/main/java/org/apache/sling/jackrabbit/usermanager/impl/AuthorizablePrivilegesInfoImpl.java | Java | apache-2.0 | 9,736 |
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.binding.serial.internal;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TooManyListenersException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.IOUtils;
import org.openhab.core.events.EventPublisher;
import org.openhab.core.library.items.ContactItem;
import org.openhab.core.library.items.DimmerItem;
import org.openhab.core.library.items.NumberItem;
import org.openhab.core.library.items.RollershutterItem;
import org.openhab.core.library.items.StringItem;
import org.openhab.core.library.items.SwitchItem;
import org.openhab.core.library.types.DecimalType;
import org.openhab.core.library.types.OnOffType;
import org.openhab.core.library.types.OpenClosedType;
import org.openhab.core.library.types.PercentType;
import org.openhab.core.library.types.StringType;
import org.openhab.core.transform.TransformationException;
import org.openhab.core.types.State;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import gnu.io.CommPortIdentifier;
import gnu.io.PortInUseException;
import gnu.io.SerialPort;
import gnu.io.SerialPortEvent;
import gnu.io.SerialPortEventListener;
import gnu.io.UnsupportedCommOperationException;
/**
* This class represents a serial device that is linked to one or many String, Number, Switch or Rollershutter items
*
* @author Kai Kreuzer
*
*/
public class SerialDevice implements SerialPortEventListener {
    private static final Logger logger = LoggerFactory.getLogger(SerialDevice.class);
    // Name of the serial port this device is bound to (e.g. "/dev/ttyUSB0", "COM1").
    private String port;
    // Baud rate used when opening the port; 9600 unless set via the two-arg constructor.
    private int baud = 9600;
    // openHAB event bus publisher, injected/cleared via set/unsetEventPublisher().
    private EventPublisher eventPublisher;
    // Matching RXTX port identifier, resolved in initialize(); null until then.
    private CommPortIdentifier portId;
    // Open port handle and its streams, all populated by initialize().
    private SerialPort serialPort;
    private InputStream inputStream;
    private OutputStream outputStream;
    // Per-item binding configuration, keyed by item name; created lazily in addConfig().
    private Map<String, ItemType> configMap;
class ItemType {
String pattern;
boolean base64;
String onCommand;
String offCommand;
String upCommand;
String downCommand;
String stopCommand;
String format;
Class<?> type;
}
public boolean isEmpty() {
return configMap.isEmpty();
}
public void addConfig(String itemName, Class<?> type, String pattern, boolean base64, String onCommand,
String offCommand, String upCommand, String downCommand, String stopCommand, String format) {
if (configMap == null) {
configMap = new HashMap<>();
}
ItemType typeItem = new ItemType();
typeItem.pattern = pattern;
typeItem.base64 = base64;
typeItem.type = type;
typeItem.onCommand = onCommand;
typeItem.offCommand = offCommand;
typeItem.upCommand = upCommand;
typeItem.downCommand = downCommand;
typeItem.stopCommand = stopCommand;
typeItem.format = format;
configMap.put(itemName, typeItem);
}
public void removeConfig(String itemName) {
if (configMap != null) {
ItemType type = configMap.get(itemName);
if (type.pattern != null) {
// We can safely remove any pattern
// If there are any duplicates, they will be added to cache next time they are requested
RegexPatternMatcher.removePattern(type.pattern);
}
configMap.remove(itemName);
}
}
    /**
     * Creates a device on the given serial port using the default baud rate (9600).
     *
     * @param port name of the serial port to use
     */
    public SerialDevice(String port) {
        this.port = port;
    }
    /**
     * Creates a device on the given serial port with an explicit baud rate.
     *
     * @param port name of the serial port to use
     * @param baud baud rate for the serial connection
     */
    public SerialDevice(String port, int baud) {
        this.port = port;
        this.baud = baud;
    }
    /**
     * Sets the openHAB event bus publisher for this device.
     *
     * @param eventPublisher the publisher to store
     */
    public void setEventPublisher(EventPublisher eventPublisher) {
        this.eventPublisher = eventPublisher;
    }
    /**
     * Clears the event publisher reference (unbind counterpart of
     * {@link #setEventPublisher}). The argument itself is ignored.
     *
     * @param eventPublisher ignored
     */
    public void unsetEventPublisher(EventPublisher eventPublisher) {
        this.eventPublisher = null;
    }
    /**
     * @return the name of the serial port this device is configured for
     */
    public String getPort() {
        return port;
    }
public String getOnCommand(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).onCommand;
}
return "";
}
public String getOffCommand(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).offCommand;
}
return "";
}
public String getUpCommand(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).upCommand;
}
return "";
}
public String getDownCommand(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).downCommand;
}
return "";
}
public String getStopCommand(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).stopCommand;
}
return "";
}
public String getFormat(String itemName) {
if (configMap.get(itemName) != null) {
return configMap.get(itemName).format;
}
return "";
}
    /**
     * Initialize this device and open the serial port.
     *
     * <p>Scans all ports known to the serial library for one whose name equals
     * {@link #getPort()}, opens it, registers this instance as the port's
     * event listener and configures the line to {@code baud}-8N1.</p>
     *
     * @throws InitializationException if the port does not exist or cannot be
     *         opened/configured (the original cause is wrapped)
     */
    @SuppressWarnings("rawtypes")
    public void initialize() throws InitializationException {
        // parse ports and if the default port is found, initialize the reader
        Enumeration portList = CommPortIdentifier.getPortIdentifiers();
        while (portList.hasMoreElements()) {
            CommPortIdentifier id = (CommPortIdentifier) portList.nextElement();
            if (id.getPortType() == CommPortIdentifier.PORT_SERIAL) {
                if (id.getName().equals(port)) {
                    logger.debug("Serial port '{}' has been found.", port);
                    portId = id;
                }
            }
        }
        if (portId != null) {
            // initialize serial port: "openHAB" is the owner name, 2000 the open timeout
            try {
                serialPort = portId.open("openHAB", 2000);
            } catch (PortInUseException e) {
                throw new InitializationException(e);
            }
            try {
                inputStream = serialPort.getInputStream();
            } catch (IOException e) {
                throw new InitializationException(e);
            }
            try {
                serialPort.addEventListener(this);
            } catch (TooManyListenersException e) {
                throw new InitializationException(e);
            }
            // activate the DATA_AVAILABLE notifier so serialEvent() fires on incoming bytes
            serialPort.notifyOnDataAvailable(true);
            try {
                // set port parameters: 8 data bits, 1 stop bit, no parity
                serialPort.setSerialPortParams(baud, SerialPort.DATABITS_8, SerialPort.STOPBITS_1,
                        SerialPort.PARITY_NONE);
            } catch (UnsupportedCommOperationException e) {
                throw new InitializationException(e);
            }
            try {
                // get the output stream
                outputStream = serialPort.getOutputStream();
            } catch (IOException e) {
                throw new InitializationException(e);
            }
        } else {
            // port not found: list all available serial ports in the error message
            StringBuilder sb = new StringBuilder();
            portList = CommPortIdentifier.getPortIdentifiers();
            while (portList.hasMoreElements()) {
                CommPortIdentifier id = (CommPortIdentifier) portList.nextElement();
                if (id.getPortType() == CommPortIdentifier.PORT_SERIAL) {
                    sb.append(id.getName() + "\n");
                }
            }
            throw new InitializationException(
                    "Serial port '" + port + "' could not be found. Available ports are:\n" + sb.toString());
        }
    }
/**
 * Handles events on the serial port. Only
 * {@code SerialPortEvent.DATA_AVAILABLE} is processed: available bytes are
 * read (waiting 100 ms between reads so interrupted transmissions are merged
 * into one message) and the resulting message is posted to the event bus
 * according to each configured item's type.
 *
 * @param event the serial port event to process
 */
@Override
public void serialEvent(SerialPortEvent event) {
    switch (event.getEventType()) {
        case SerialPortEvent.BI:
        case SerialPortEvent.OE:
        case SerialPortEvent.FE:
        case SerialPortEvent.PE:
        case SerialPortEvent.CD:
        case SerialPortEvent.CTS:
        case SerialPortEvent.DSR:
        case SerialPortEvent.RI:
        case SerialPortEvent.OUTPUT_BUFFER_EMPTY:
            // status events carry no payload; nothing to do
            break;
        case SerialPortEvent.DATA_AVAILABLE:
            // we get here if data has been received
            StringBuilder sb = new StringBuilder();
            byte[] readBuffer = new byte[20];
            try {
                do {
                    // read data from serial device
                    while (inputStream.available() > 0) {
                        int bytes = inputStream.read(readBuffer);
                        sb.append(new String(readBuffer, 0, bytes));
                    }
                    try {
                        // add wait states around reading the stream, so that interrupted transmissions are merged
                        Thread.sleep(100);
                    } catch (InterruptedException e) {
                        // ignore interruption
                    }
                } while (inputStream.available() > 0);
                // received data; kept final so per-item transformations below
                // cannot leak into the message seen by other configured items
                final String result = sb.toString();
                // send data to the bus
                logger.debug("Received message '{}' on serial port {}", new String[] { result, port });
                if (eventPublisher != null) {
                    if (configMap != null && !configMap.isEmpty()) {
                        for (Entry<String, ItemType> entry : configMap.entrySet()) {
                            String pattern = entry.getValue().pattern;
                            // use pattern
                            if (pattern != null) {
                                try {
                                    String[] matches = RegexPatternMatcher.getMatches(pattern, result);
                                    for (int i = 0; i < matches.length; i++) {
                                        String match = matches[i];
                                        try {
                                            State state = null;
                                            if (entry.getValue().type.equals(NumberItem.class)) {
                                                state = new DecimalType(match);
                                            } else if (entry.getValue().type == RollershutterItem.class) {
                                                state = new PercentType(match);
                                            } else {
                                                state = new StringType(match);
                                            }
                                            eventPublisher.postUpdate(entry.getKey(), state);
                                        } catch (NumberFormatException e) {
                                            // log the offending match (previously the whole
                                            // raw message was logged instead)
                                            logger.warn("Unable to convert regex result '{}' for item {} to number",
                                                    new String[] { match, entry.getKey() });
                                        }
                                    }
                                } catch (TransformationException e) {
                                    logger.warn("Unable to transform!", e);
                                }
                            } else if (entry.getValue().type == StringItem.class) {
                                // use a local copy: previously the shared 'result' variable was
                                // overwritten with its Base64 encoding, corrupting the message
                                // for all subsequently processed items
                                String value = result;
                                if (entry.getValue().base64) {
                                    value = Base64.encodeBase64String(result.getBytes());
                                }
                                eventPublisher.postUpdate(entry.getKey(), new StringType(value));
                            } else if (entry.getValue().type == SwitchItem.class) {
                                if (result.trim().isEmpty()) {
                                    // empty message: emit an ON/OFF pulse
                                    eventPublisher.postUpdate(entry.getKey(), OnOffType.ON);
                                    eventPublisher.postUpdate(entry.getKey(), OnOffType.OFF);
                                } else if (result.equals(getOnCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), OnOffType.ON);
                                } else if (result.equals(getOffCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), OnOffType.OFF);
                                }
                            } else if (entry.getValue().type == ContactItem.class) {
                                if (result.trim().isEmpty()) {
                                    // empty message: emit a CLOSED/OPEN pulse
                                    eventPublisher.postUpdate(entry.getKey(), OpenClosedType.CLOSED);
                                    eventPublisher.postUpdate(entry.getKey(), OpenClosedType.OPEN);
                                } else if (result.equals(getOnCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), OpenClosedType.CLOSED);
                                } else if (result.equals(getOffCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), OpenClosedType.OPEN);
                                }
                            } else if (entry.getValue().type == RollershutterItem.class
                                    || entry.getValue().type == DimmerItem.class) {
                                if (result.trim().isEmpty()) {
                                    eventPublisher.postUpdate(entry.getKey(), new PercentType(50));
                                } else if (result.equals(getUpCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), PercentType.HUNDRED);
                                } else if (result.equals(getDownCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), PercentType.ZERO);
                                } else if (result.equals(getStopCommand(entry.getKey()))) {
                                    eventPublisher.postUpdate(entry.getKey(), new PercentType(50));
                                }
                            }
                        }
                    }
                }
            } catch (IOException e) {
                logger.debug("Error receiving data on serial port {}: {}", new String[] { port, e.getMessage() });
            }
            break;
    }
}
/**
 * Sends a string to the serial port of this device.
 *
 * <p>A message prefixed with {@code BASE64:} is Base64-decoded and the raw
 * bytes are written; any other message is written as-is.</p>
 *
 * @param msg the string to send
 */
public void writeString(String msg) {
    logger.debug("Writing '{}' to serial port {}", new String[] { msg, port });
    try {
        // write string to serial port
        if (msg.startsWith("BASE64:")) {
            // strip the "BASE64:" prefix and send the decoded payload
            outputStream.write(Base64.decodeBase64(msg.substring(7)));
        } else {
            outputStream.write(msg.getBytes());
        }
        outputStream.flush();
    } catch (IOException e) {
        logger.error("Error writing '{}' to serial port {}: {}", new String[] { msg, port, e.getMessage() });
    }
}
/**
 * Close this serial device: detaches the event listener, closes the streams
 * and releases the serial port. Safe to call even if {@link #initialize()}
 * never completed.
 */
public void close() {
    // serialPort is null when initialize() failed or was never called;
    // previously this method threw an NPE in that state
    if (serialPort != null) {
        serialPort.removeEventListener();
    }
    IOUtils.closeQuietly(inputStream);
    IOUtils.closeQuietly(outputStream);
    if (serialPort != null) {
        serialPort.close();
    }
}
}
| lewie/openhab | bundles/binding/org.openhab.binding.serial/src/main/java/org/openhab/binding/serial/internal/SerialDevice.java | Java | epl-1.0 | 16,055 |
package com.qunar.nlp.crf;
/**
 * An edge in the CRF lattice connecting a node on the left to a node on the
 * right. A path carries the indices of the features firing on the transition
 * together with the transition cost.
 */
public final class Path {
    /** Node on the left-hand side of this transition. */
    public Node leftNode;
    /** Node on the right-hand side of this transition. */
    public Node rightNode;
    /** Indices of the features that fire on this transition. */
    public int[] feature;
    /** Cost associated with this transition. */
    public double cost;

    public Path() {}

    /**
     * Wires this path between the two given nodes and registers it with both
     * endpoints, so the lattice can be walked in either direction.
     */
    public void add(Node leftNode, Node rightNode) {
        this.rightNode = rightNode;
        this.leftNode = leftNode;
        rightNode.leftPaths.add(this);
        leftNode.rightPaths.add(this);
    }
}
| duanhongyi/nlpbamboo | exts/java/JBamboo/src/com/qunar/nlp/crf/Path.java | Java | gpl-3.0 | 342 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file.strategy;
import org.apache.camel.Exchange;
import org.apache.camel.component.file.FileEndpoint;
import org.apache.camel.component.file.GenericFile;
import org.apache.camel.component.file.GenericFileEndpoint;
import org.apache.camel.component.file.GenericFileOperations;
import org.apache.camel.support.ExchangeHelper;
/**
 * A process strategy that renames the file at the begin, commit and rollback
 * stages using the configured {@link GenericFileRenamer}s. Each renamer is
 * optional; when unset, the corresponding stage performs no rename.
 */
public class GenericFileRenameProcessStrategy<T> extends GenericFileProcessStrategySupport<T> {
    private GenericFileRenamer<T> beginRenamer;
    private GenericFileRenamer<T> failureRenamer;
    private GenericFileRenamer<T> commitRenamer;

    public GenericFileRenameProcessStrategy() {
    }

    @Override
    public boolean begin(
            GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
            throws Exception {
        // must invoke super
        boolean result = super.begin(operations, endpoint, exchange, file);
        if (!result) {
            return false;
        }
        // okay we got the file then execute the begin renamer
        if (beginRenamer != null) {
            GenericFile<T> newName = beginRenamer.renameFile(operations, exchange, file);
            GenericFile<T> to = renameFile(operations, file, newName);
            if (to != null) {
                bindFileToExchange(endpoint, exchange, to);
            }
        }
        return true;
    }

    @Override
    public void rollback(
            GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
            throws Exception {
        try {
            operations.releaseRetrievedFileResources(exchange);
            if (failureRenamer != null) {
                renameUsing(failureRenamer, operations, endpoint, exchange, file);
            }
        } finally {
            if (exclusiveReadLockStrategy != null) {
                exclusiveReadLockStrategy.releaseExclusiveReadLockOnRollback(operations, file, exchange);
            }
            deleteLocalWorkFile(exchange);
        }
    }

    @Override
    public void commit(
            GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file)
            throws Exception {
        try {
            operations.releaseRetrievedFileResources(exchange);
            if (commitRenamer != null) {
                renameUsing(commitRenamer, operations, endpoint, exchange, file);
            }
        } finally {
            deleteLocalWorkFile(exchange);
            // must release lock last
            if (exclusiveReadLockStrategy != null) {
                exclusiveReadLockStrategy.releaseExclusiveReadLockOnCommit(operations, file, exchange);
            }
        }
    }

    /**
     * Shared commit/rollback rename logic: creates a copy of the exchange,
     * binds the file to it so the renamer can evaluate the new file name,
     * and performs the rename.
     */
    private void renameUsing(
            GenericFileRenamer<T> renamer, GenericFileOperations<T> operations, GenericFileEndpoint<T> endpoint,
            Exchange exchange, GenericFile<T> file)
            throws Exception {
        // create a copy and bind the file to the exchange to be used by
        // the renamer to evaluate the file name
        Exchange copy = ExchangeHelper.createCopy(exchange, true);
        bindFileToExchange(endpoint, copy, file);
        // must preserve message id
        copy.getIn().setMessageId(exchange.getIn().getMessageId());
        copy.setExchangeId(exchange.getExchangeId());
        GenericFile<T> newName = renamer.renameFile(operations, copy, file);
        renameFile(operations, file, newName);
    }

    /**
     * Binds the file to the exchange, probing the content type when the
     * endpoint is a plain file endpoint.
     */
    private void bindFileToExchange(GenericFileEndpoint<T> endpoint, Exchange exchange, GenericFile<T> file) {
        if (endpoint instanceof FileEndpoint) {
            FileEndpoint fe = (FileEndpoint) endpoint;
            file.bindToExchange(exchange, fe.isProbeContentType());
        } else {
            file.bindToExchange(exchange);
        }
    }

    public GenericFileRenamer<T> getBeginRenamer() {
        return beginRenamer;
    }

    public void setBeginRenamer(GenericFileRenamer<T> beginRenamer) {
        this.beginRenamer = beginRenamer;
    }

    public GenericFileRenamer<T> getCommitRenamer() {
        return commitRenamer;
    }

    public void setCommitRenamer(GenericFileRenamer<T> commitRenamer) {
        this.commitRenamer = commitRenamer;
    }

    public GenericFileRenamer<T> getFailureRenamer() {
        return failureRenamer;
    }

    public void setFailureRenamer(GenericFileRenamer<T> failureRenamer) {
        this.failureRenamer = failureRenamer;
    }
}
| nikhilvibhav/camel | components/camel-file/src/main/java/org/apache/camel/component/file/strategy/GenericFileRenameProcessStrategy.java | Java | apache-2.0 | 6,056 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.stratos.cli;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import static org.apache.stratos.cli.utils.CliConstants.STRATOS_DIR;
/**
 * This class is used for input the commands through CLITool, command prompt.
 */
public class CliTool {
    private static final Logger log = LoggerFactory.getLogger(CliTool.class);

    /**
     * Here is the place all the command line inputs get processed.
     *
     * @param arguments passed to CLI tool.
     */
    void handleConsoleInputs(String[] arguments) {
        if (log.isInfoEnabled()) {
            log.info("Stratos CLI started...");
        }
        StratosApplication application = new StratosApplication(arguments);
        application.start(arguments);
    }

    /**
     * Creates the CLI configuration directory ({@code STRATOS_DIR} under the
     * user's home directory) if it does not already exist; failures are
     * logged but not fatal.
     */
    void createConfigDirectory() {
        File stratosFile = new File(System.getProperty("user.home"), STRATOS_DIR);
        if (stratosFile.exists()) {
            if (log.isInfoEnabled()) {
                log.info("Using directory: {}", stratosFile.getPath());
            }
        } else {
            if (stratosFile.mkdir()) {
                if (log.isInfoEnabled()) {
                    log.info("Created directory: {}", stratosFile.getPath());
                }
            } else if (log.isWarnEnabled()) {
                // fixed log message typo: "Failed to created" -> "Failed to create"
                log.warn("Failed to create directory: {}", stratosFile.getPath());
            }
        }
    }
}
| ravihansa3000/stratos | components/org.apache.stratos.cli/src/main/java/org/apache/stratos/cli/CliTool.java | Java | apache-2.0 | 2,225 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.io.compress;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
import net.jpountz.lz4.LZ4Exception;
import net.jpountz.lz4.LZ4Factory;
/**
 * {@code ICompressor} implementation backed by the lz4-java library.
 *
 * <p>Each compressed block is framed with a 4-byte little-endian header
 * holding the uncompressed length, which the LZ4 fast decompressor needs
 * up front.</p>
 */
public class LZ4Compressor implements ICompressor
{
    // size in bytes of the little-endian length header written before the compressed payload
    private static final int INTEGER_BYTES = 4;

    @VisibleForTesting
    public static final LZ4Compressor instance = new LZ4Compressor();

    // args are accepted for interface uniformity but ignored; the compressor is a stateless singleton
    public static LZ4Compressor create(Map<String, String> args)
    {
        return instance;
    }

    private final net.jpountz.lz4.LZ4Compressor compressor;
    private final net.jpountz.lz4.LZ4FastDecompressor decompressor;

    private LZ4Compressor()
    {
        final LZ4Factory lz4Factory = LZ4Factory.fastestInstance();
        compressor = lz4Factory.fastCompressor();
        decompressor = lz4Factory.fastDecompressor();
    }

    public int initialCompressedBufferLength(int chunkLength)
    {
        // header plus the worst-case LZ4 expansion for the chunk
        return INTEGER_BYTES + compressor.maxCompressedLength(chunkLength);
    }

    public void compress(ByteBuffer input, ByteBuffer output) throws IOException
    {
        // write the uncompressed length as a 4-byte little-endian header
        int len = input.remaining();
        output.put((byte) len);
        output.put((byte) (len >>> 8));
        output.put((byte) (len >>> 16));
        output.put((byte) (len >>> 24));
        try
        {
            compressor.compress(input, output);
        }
        catch (LZ4Exception e)
        {
            throw new IOException(e);
        }
    }

    public int uncompress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset) throws IOException
    {
        // read back the little-endian length header written by compress()
        final int decompressedLength =
        (input[inputOffset] & 0xFF)
        | ((input[inputOffset + 1] & 0xFF) << 8)
        | ((input[inputOffset + 2] & 0xFF) << 16)
        | ((input[inputOffset + 3] & 0xFF) << 24);
        final int compressedLength;
        try
        {
            compressedLength = decompressor.decompress(input, inputOffset + INTEGER_BYTES,
                                                       output, outputOffset, decompressedLength);
        }
        catch (LZ4Exception e)
        {
            throw new IOException(e);
        }
        // the compressed payload must consume exactly the input after the header
        if (compressedLength != inputLength - INTEGER_BYTES)
        {
            throw new IOException("Compressed lengths mismatch");
        }
        return decompressedLength;
    }

    public void uncompress(ByteBuffer input, ByteBuffer output) throws IOException
    {
        // read the little-endian length header, advancing the input position
        final int decompressedLength = (input.get() & 0xFF)
        | ((input.get() & 0xFF) << 8)
        | ((input.get() & 0xFF) << 16)
        | ((input.get() & 0xFF) << 24);
        try
        {
            int compressedLength = decompressor.decompress(input, input.position(), output, output.position(), decompressedLength);
            input.position(input.position() + compressedLength);
            output.position(output.position() + decompressedLength);
        }
        catch (LZ4Exception e)
        {
            throw new IOException(e);
        }
        // after a successful decompression the input must be fully consumed
        if (input.remaining() > 0)
        {
            throw new IOException("Compressed lengths mismatch - "+input.remaining()+" bytes remain");
        }
    }

    public Set<String> supportedOptions()
    {
        return new HashSet<>(Arrays.asList(CompressionParameters.CRC_CHECK_CHANCE));
    }

    public BufferType preferredBufferType()
    {
        return BufferType.OFF_HEAP;
    }

    // works with both on-heap and off-heap buffers
    public boolean supports(BufferType bufferType)
    {
        return true;
    }
}
| lynchlee/play-jmx | src/main/java/org/apache/cassandra/io/compress/LZ4Compressor.java | Java | apache-2.0 | 4,466 |
package jp.wasabeef.recyclerview.animators;
/**
* Copyright (C) 2015 Wasabeef
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.support.v4.view.ViewCompat;
import android.support.v7.widget.RecyclerView;
import android.view.animation.Interpolator;
/**
 * Item animator where items "land": added items fade in while scaling down
 * from 1.5x to their natural size, and removed items fade out while scaling
 * up to 1.5x.
 */
public class LandingAnimator extends BaseItemAnimator {

  public LandingAnimator() {
  }

  /**
   * @param interpolator interpolator applied to both the add and remove animations
   */
  public LandingAnimator(Interpolator interpolator) {
    mInterpolator = interpolator;
  }

  // Removal: fade out while growing to 1.5x.
  @Override protected void animateRemoveImpl(final RecyclerView.ViewHolder holder) {
    ViewCompat.animate(holder.itemView)
        .alpha(0)
        .scaleX(1.5f)
        .scaleY(1.5f)
        .setDuration(getRemoveDuration())
        .setInterpolator(mInterpolator)
        .setListener(new DefaultRemoveVpaListener(holder))
        .start();
  }

  // Prepare the added view invisible and enlarged so animateAddImpl can land it.
  @Override protected void preAnimateAddImpl(RecyclerView.ViewHolder holder) {
    ViewCompat.setAlpha(holder.itemView, 0);
    ViewCompat.setScaleX(holder.itemView, 1.5f);
    ViewCompat.setScaleY(holder.itemView, 1.5f);
  }

  // Addition: fade in while shrinking back to the natural 1.0 scale.
  @Override protected void animateAddImpl(final RecyclerView.ViewHolder holder) {
    ViewCompat.animate(holder.itemView)
        .alpha(1)
        .scaleX(1)
        .scaleY(1)
        .setDuration(getAddDuration())
        .setInterpolator(mInterpolator)
        .setListener(new DefaultAddVpaListener(holder))
        .start();
  }
}
| hejunbinlan/recyclerview-animators | animators/src/main/java/jp/wasabeef/recyclerview/animators/LandingAnimator.java | Java | apache-2.0 | 1,873 |
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiType;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.groovy.lang.resolve.api.GroovyMethodCallReference;
public interface GrUnaryExpression extends GrExpression {

  /**
   * @return the method call reference representing the operator invocation of this unary expression
   */
  @NotNull
  @Override
  GroovyMethodCallReference getReference();

  /**
   * @return type of this expression with regard to whether this expression is prefix or postfix
   */
  @Nullable
  @Override
  default PsiType getType() {
    return GrExpression.super.getType();
  }

  /**
   * @return type of operator call performed by this expression independently of whether this expression is prefix or postfix
   */
  @Nullable
  PsiType getOperationType();

  /**
   * @return the element type of the operation token (see {@link #getOperationToken()})
   */
  @NotNull
  IElementType getOperationTokenType();

  /**
   * @return the PSI element of the operator token itself
   */
  @NotNull
  PsiElement getOperationToken();

  /**
   * @return the operand expression, or {@code null} if the expression is incomplete
   */
  @Nullable
  GrExpression getOperand();

  /**
   * @return {@code true} if the operator follows the operand (postfix form), {@code false} for the prefix form
   */
  boolean isPostfix();
}
| dahlstrom-g/intellij-community | plugins/groovy/groovy-psi/src/org/jetbrains/plugins/groovy/lang/psi/api/statements/expressions/GrUnaryExpression.java | Java | apache-2.0 | 1,176 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.micrometer.messagehistory;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.camel.component.micrometer.MicrometerConstants.DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME;
import static org.apache.camel.component.micrometer.MicrometerConstants.NODE_ID_TAG;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Verifies that the Micrometer message history factory records one timer per
 * route node and that the statistics can be dumped as JSON.
 */
public class MicrometerMessageHistoryTest extends CamelTestSupport {

    protected final Logger log = LoggerFactory.getLogger(getClass());

    // registry shared between the message history factory and the assertions below
    private MeterRegistry registry = new SimpleMeterRegistry();

    @Override
    protected CamelContext createCamelContext() throws Exception {
        CamelContext context = super.createCamelContext();
        // install a Micrometer-backed message history factory on the context
        MicrometerMessageHistoryFactory factory = new MicrometerMessageHistoryFactory();
        factory.setMeterRegistry(registry);
        factory.setPrettyPrint(true);
        context.setMessageHistoryFactory(factory);
        return context;
    }

    @Test
    public void testMetricsHistory() throws Exception {
        int count = 10;
        getMockEndpoint("mock:foo").expectedMessageCount(count / 2);
        getMockEndpoint("mock:bar").expectedMessageCount(count / 2);
        getMockEndpoint("mock:baz").expectedMessageCount(count / 2);
        // alternate messages between the two routes so each node sees count/2
        for (int i = 0; i < count; i++) {
            if (i % 2 == 0) {
                template.sendBody("direct:foo", "Hello " + i);
            } else {
                template.sendBody("direct:bar", "Hello " + i);
            }
        }
        assertMockEndpointsSatisfied();
        // there should be 3 names
        assertEquals(3, registry.getMeters().size());
        Timer fooTimer = registry.find(DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME).tag(NODE_ID_TAG, "foo").timer();
        assertEquals(count / 2, fooTimer.count());
        Timer barTimer = registry.find(DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME).tag(NODE_ID_TAG, "bar").timer();
        assertEquals(count / 2, barTimer.count());
        Timer bazTimer = registry.find(DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME).tag(NODE_ID_TAG, "baz").timer();
        assertEquals(count / 2, bazTimer.count());
        // get the message history service
        MicrometerMessageHistoryService service = context.hasService(MicrometerMessageHistoryService.class);
        assertNotNull(service);
        // the JSON dump should contain an entry per node id
        String json = service.dumpStatisticsAsJson();
        assertNotNull(json);
        log.info(json);
        assertTrue(json.contains("\"nodeId\" : \"foo\""));
        assertTrue(json.contains("\"nodeId\" : \"bar\""));
        assertTrue(json.contains("\"nodeId\" : \"baz\""));
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                // route 1: a single node "foo"; route 2: two nodes "bar" and "baz"
                from("direct:foo")
                        .to("mock:foo").id("foo");
                from("direct:bar")
                        .to("mock:bar").id("bar")
                        .to("mock:baz").id("baz");
            }
        };
    }
}
| gnodet/camel | components/camel-micrometer/src/test/java/org/apache/camel/component/micrometer/messagehistory/MicrometerMessageHistoryTest.java | Java | apache-2.0 | 4,303 |
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
class Foo {
{
List<? extends List<? extends Future<?>>> list = new ArrayList<>();
list.stream().map(l -> l.stream().map(f -> f.isD<caret>
}
} | siosio/intellij-community | java/java-tests/testData/codeInsight/completion/normal/NoCastForCompatibleCapture.java | Java | apache-2.0 | 241 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.util.lang;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteOutClosure;
/**
 * Convenient out-closure subclass that allows for thrown grid exception. This class
 * implements {@link #apply()} method that calls {@link #applyx()} method and properly
 * wraps {@link IgniteCheckedException} into {@link GridClosureException} instance.
 */
public abstract class IgniteOutClosureX<T> implements IgniteOutClosure<T> {
    /** Serial version UID. */
    private static final long serialVersionUID = 0L;

    /** {@inheritDoc} */
    @Override public T apply() {
        try {
            return applyx();
        }
        catch (IgniteCheckedException e) {
            // F.wrap converts the checked exception into an unchecked
            // GridClosureException so it can cross the IgniteOutClosure contract.
            throw F.wrap(e);
        }
    }

    /**
     * Out-closure body that can throw {@link IgniteCheckedException}.
     *
     * @return Element.
     * @throws IgniteCheckedException Thrown in case of any error condition inside of the closure.
     */
    public abstract T applyx() throws IgniteCheckedException;
}
| samaitra/ignite | modules/core/src/main/java/org/apache/ignite/internal/util/lang/IgniteOutClosureX.java | Java | apache-2.0 | 1,885 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.