code
stringlengths 3
1.05M
| repo_name
stringlengths 4
116
| path
stringlengths 3
942
| language
stringclasses 30
values | license
stringclasses 15
values | size
int32 3
1.05M
|
---|---|---|---|---|---|
import chaincode_pb2
import fabric_next_pb2
import bdd_test_util
import bdd_grpc_util
def getChaincodeSpec(ccType, path, args):
    """Build a ChaincodeSpec proto for a chaincode to be deployed.

    :param ccType: chaincode type enum value (e.g. ChaincodeSpec.GOLANG)
    :param path: source path identifying the chaincode
    :param args: constructor arguments for the chaincode
    :return: a populated chaincode_pb2.ChaincodeSpec
    """
    chaincode_id = chaincode_pb2.ChaincodeID(path=path)
    ctor_msg = chaincode_pb2.ChaincodeInput(args=args)
    return chaincode_pb2.ChaincodeSpec(type=ccType,
                                       chaincodeID=chaincode_id,
                                       ctorMsg=ctor_msg)
def createPropsalId():
    """Return a placeholder proposal id.

    Real id generation is still TODO; note the (intentional, since callers
    depend on it) misspelling of 'Proposal' in the function name.
    """
    return 'TODO proposal Id'
def createDeploymentProposalForBDD(ccDeploymentSpec):
    """Wrap a chaincode deployment spec in a CHAINCODE proposal.

    The deployment spec is serialized and handed as an argument to an
    invocation of the lifecycle chaincode ("lccc"); the resulting
    invocation spec becomes the proposal payload.
    """
    deploy_args = ['deploy', 'default', ccDeploymentSpec.SerializeToString()]
    lccc_spec = chaincode_pb2.ChaincodeSpec(
        type=chaincode_pb2.ChaincodeSpec.GOLANG,
        chaincodeID=chaincode_pb2.ChaincodeID(name="lccc"),
        ctorMsg=chaincode_pb2.ChaincodeInput(args=deploy_args))
    invocation_spec = chaincode_pb2.ChaincodeInvocationSpec(
        chaincodeSpec=lccc_spec)
    # Build the proposal and attach the serialized invocation as its payload.
    proposal = fabric_next_pb2.Proposal(
        type=fabric_next_pb2.Proposal.CHAINCODE, id=createPropsalId())
    proposal.payload = invocation_spec.SerializeToString()
    return proposal
def getEndorserStubs(context, composeServices):
    """Create one endorser gRPC stub per docker-compose service name.

    :param context: behave context holding the compose containers
    :param composeServices: iterable of compose service names
    :return: list of endorser stubs, in the same order as composeServices
    """
    endorser_stubs = []
    for service_name in composeServices:
        ip = bdd_test_util.ipFromContainerNamePart(
            service_name, context.compose_containers)
        channel = bdd_grpc_util.getGRPCChannel(ip)
        endorser_stubs.append(fabric_next_pb2.beta_create_Endorser_stub(channel))
    return endorser_stubs
| vpaprots/fabric | bddtests/steps/endorser_util.py | Python | apache-2.0 | 1,469 |
// MacOSXAPIChecker.h - Checks proper use of various MacOS X APIs --*- C++ -*-//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This defines MacOSXAPIChecker, which is an assortment of checks on calls
// to various, widely used Apple APIs.
//
// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
// to here, using the new Checker interface.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
// Path-sensitive checker for misuse of widely used macOS APIs.  The only
// check implemented in this file flags dispatch_once()/dispatch_once_f()
// calls whose predicate lives in transient (non-global) storage.
class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
  // Lazily-created bug type shared by all dispatch_once reports.
  mutable std::unique_ptr<BugType> BT_dispatchOnce;

  // Walks up the region hierarchy looking for an enclosing ObjC ivar region.
  static const ObjCIvarRegion *getParentIvarRegion(const MemRegion *R);

public:
  // Entry point: dispatches to the per-API sub-check by callee name.
  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;

  void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
                         StringRef FName) const;

  // Pointer-to-member type used to select a sub-check in checkPreStmt.
  typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
                                               const CallExpr *,
                                               StringRef FName) const;
};
} //end anonymous namespace
//===----------------------------------------------------------------------===//
// dispatch_once and dispatch_once_f
//===----------------------------------------------------------------------===//
const ObjCIvarRegion *
MacOSXAPIChecker::getParentIvarRegion(const MemRegion *R) {
  // Walk up the sub-region chain; stop when we either find an instance
  // variable region or run out of sub-regions.
  for (const SubRegion *Sub = dyn_cast<SubRegion>(R); Sub;
       Sub = dyn_cast<SubRegion>(Sub->getSuperRegion())) {
    if (const ObjCIvarRegion *IvarReg = dyn_cast<ObjCIvarRegion>(Sub))
      return IvarReg;
  }
  return nullptr;
}
void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
                                         StringRef FName) const {
  // dispatch_once() takes the predicate as its first argument; with no
  // arguments there is nothing to check.
  if (CE->getNumArgs() < 1)
    return;

  // Check if the first argument is improperly allocated. If so, issue a
  // warning because that's likely to be bad news.
  const MemRegion *R = C.getSVal(CE->getArg(0)).getAsRegion();
  if (!R)
    return;

  // Global variables are fine.
  const MemRegion *RB = R->getBaseRegion();
  const MemSpaceRegion *RS = RB->getMemorySpace();
  if (isa<GlobalsSpaceRegion>(RS))
    return;

  // Handle _dispatch_once. In some versions of the OS X SDK we have the case
  // that dispatch_once is a macro that wraps a call to _dispatch_once.
  // _dispatch_once is then a function which then calls the real dispatch_once.
  // Users do not care; they just want the warning at the top-level call.
  if (CE->getBeginLoc().isMacroID()) {
    StringRef TrimmedFName = FName.ltrim('_');
    if (TrimmedFName != FName)
      FName = TrimmedFName;
  }

  // Build the diagnostic message incrementally; the exact wording depends on
  // what kind of transient storage the predicate lives in.
  SmallString<256> S;
  llvm::raw_svector_ostream os(S);
  bool SuggestStatic = false;
  os << "Call to '" << FName << "' uses";
  if (const VarRegion *VR = dyn_cast<VarRegion>(RB)) {
    const VarDecl *VD = VR->getDecl();
    // FIXME: These should have correct memory space and thus should be filtered
    // out earlier. This branch only fires when we're looking from a block,
    // which we analyze as a top-level declaration, onto a static local
    // in a function that contains the block.
    if (VD->isStaticLocal())
      return;
    // We filtered out globals earlier, so it must be a local variable
    // or a block variable which is under UnknownSpaceRegion.
    if (VR != R)
      os << " memory within";
    if (VD->hasAttr<BlocksAttr>())
      os << " the block variable '";
    else
      os << " the local variable '";
    os << VR->getDecl()->getName() << '\'';
    // Only for plain variables does declaring them 'static' make sense as a fix.
    SuggestStatic = true;
  } else if (const ObjCIvarRegion *IVR = getParentIvarRegion(R)) {
    if (IVR != R)
      os << " memory within";
    os << " the instance variable '" << IVR->getDecl()->getName() << '\'';
  } else if (isa<HeapSpaceRegion>(RS)) {
    os << " heap-allocated memory";
  } else if (isa<UnknownSpaceRegion>(RS)) {
    // Presence of an IVar superregion has priority over this branch, because
    // ObjC objects are on the heap even if the core doesn't realize this.
    // Presence of a block variable base region has priority over this branch,
    // because block variables are known to be either on stack or on heap
    // (might actually move between the two, hence UnknownSpace).
    return;
  } else {
    os << " stack allocated memory";
  }
  os << " for the predicate value. Using such transient memory for "
        "the predicate is potentially dangerous.";
  if (SuggestStatic)
    os << " Perhaps you intended to declare the variable as 'static'?";

  // generateErrorNode() returns null if this state was already covered by an
  // existing error node, in which case no report should be emitted.
  ExplodedNode *N = C.generateErrorNode();
  if (!N)
    return;

  // Lazily create the shared bug type on first report.
  if (!BT_dispatchOnce)
    BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
                                      "API Misuse (Apple)"));
  auto report =
      std::make_unique<PathSensitiveBugReport>(*BT_dispatchOnce, os.str(), N);
  report->addRange(CE->getArg(0)->getSourceRange());
  C.emitReport(std::move(report));
}
//===----------------------------------------------------------------------===//
// Central dispatch function.
//===----------------------------------------------------------------------===//
void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
StringRef Name = C.getCalleeName(CE);
if (Name.empty())
return;
SubChecker SC =
llvm::StringSwitch<SubChecker>(Name)
.Cases("dispatch_once",
"_dispatch_once",
"dispatch_once_f",
&MacOSXAPIChecker::CheckDispatchOnce)
.Default(nullptr);
if (SC)
(this->*SC)(C, CE, Name);
}
//===----------------------------------------------------------------------===//
// Registration.
//===----------------------------------------------------------------------===//
// Registers this checker with the analyzer's checker manager.
void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
  mgr.registerChecker<MacOSXAPIChecker>();
}
// This checker has no language-option prerequisites, so it is always eligible.
bool ento::shouldRegisterMacOSXAPIChecker(const LangOptions &LO) {
  return true;
}
| llvm-mirror/clang | lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp | C++ | apache-2.0 | 6,737 |
#region License
/*
* Copyright © 2002-2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#endregion
#region Imports
using System;
using System.Configuration;
using System.Reflection;
using System.Xml;
using NUnit.Framework;
using Spring.Objects;
using Spring.Proxy;
using Spring.Objects.Factory.Support;
using Spring.Util;
#endregion
namespace Spring.Context.Support
{
/// <summary>
/// Unit tests for the ContextRegistry class.
/// </summary>
/// <author>Rick Evans</author>
[TestFixture]
public sealed class ContextRegistryTests
{
    [SetUp]
    public void SetUp()
    {
        // Every test starts from an empty registry and a pristine
        // System.Configuration initialization state.
        ContextRegistry.Clear();
        ResetConfigurationSystem();
    }

    private static void ResetConfigurationSystem()
    {
        // Mono does not expose the private init-state field poked below.
        if (SystemUtils.MonoRuntime)
        {
            return;
        }
        // Reset ConfigurationManager's private "s_initState" via reflection so
        // configuration sections are re-read on the next access.
        FieldInfo initStateRef = typeof(ConfigurationManager).GetField("s_initState", BindingFlags.NonPublic | BindingFlags.Static);
        object notStarted = Activator.CreateInstance(initStateRef.FieldType);
        initStateRef.SetValue(null, notStarted);
    }

    /// <summary>
    /// This handler simulates an undefined configuration section
    /// </summary>
    private static object GetNullSection(object parent, object context, XmlNode section)
    {
        return null;
    }

    /// <summary>
    /// This handler simulates calls to ContextRegistry during context creation
    /// </summary>
    private static object GetContextRecursive(object parent, object context, XmlNode section)
    {
        return ContextRegistry.GetContext(); // this must fail!
    }

    [Test]
    public void ThrowsInvalidOperationExceptionOnRecursiveCallsToGetContext()
    {
        using (new HookableContextHandler.Guard(new HookableContextHandler.CreateContextFromSectionHandler(GetContextRecursive)))
        {
            try
            {
                ContextRegistry.GetContext("somename");
                Assert.Fail("Should throw an exception");
            }
            catch (ConfigurationException ex)
            {
                // The InvalidOperationException raised during recursive creation
                // is wrapped in a ConfigurationException; unwrap and inspect it.
                InvalidOperationException rootCause = ex.GetBaseException() as InvalidOperationException;
                Assert.IsNotNull(rootCause);
                // NOTE(review): Substring(0, 38) assumes the expected prefix is
                // exactly 38 characters long.
                Assert.AreEqual("root context is currently in creation.", rootCause.Message.Substring(0, 38));
            }
        }
    }

    [Test]
    public void RegisterRootContext()
    {
        MockApplicationContext ctx = new MockApplicationContext();
        ContextRegistry.RegisterContext(ctx);
        IApplicationContext context = ContextRegistry.GetContext();
        Assert.IsNotNull(context,
            "Root context is null even though a context has been registered.");
        Assert.IsTrue(Object.ReferenceEquals(ctx, context),
            "Root context was not the same as the first context registered (it must be).");
    }

    [Test]
    public void RegisterNamedRootContext()
    {
        // The first context registered becomes the root regardless of its name.
        const string ctxName = "bingo";
        MockApplicationContext ctx = new MockApplicationContext(ctxName);
        ContextRegistry.RegisterContext(ctx);
        IApplicationContext rootContext = ContextRegistry.GetContext();
        Assert.IsNotNull(rootContext,
            "Root context is null even though a context has been registered.");
        Assert.AreEqual(ctxName, rootContext.Name,
            "Root context name is different even though the root context has been registered under the lookup name.");
    }

    [Test]
    public void RegisterNamedContext()
    {
        const string ctxName = "bingo";
        MockApplicationContext ctx = new MockApplicationContext(ctxName);
        ContextRegistry.RegisterContext(ctx);
        IApplicationContext context = ContextRegistry.GetContext(ctxName);
        Assert.IsNotNull(context,
            "Named context is null even though a context has been registered under the lookup name.");
        Assert.IsTrue(Object.ReferenceEquals(ctx, context),
            "Named context was not the same as the registered context (it must be).");
    }

    [Test]
    [ExpectedException(typeof(ArgumentException))]
    public void GetContextWithNullName()
    {
        ContextRegistry.GetContext(null);
    }

    [Test]
    [ExpectedException(typeof(ArgumentException))]
    public void GetContextWithEmptyName()
    {
        ContextRegistry.GetContext("");
    }

    [Test]
    // [Ignore("How can we test that one ???")]
    [ExpectedException(typeof(ApplicationContextException),
        ExpectedMessage = "No context registered. Use the 'RegisterContext' method or the 'spring/context' section from your configuration file.")]
    public void GetRootContextNotRegisteredThrowsException()
    {
        // GetNullSection simulates a missing 'spring/context' config section.
        using (new HookableContextHandler.Guard(new HookableContextHandler.CreateContextFromSectionHandler(GetNullSection)))
        {
            IApplicationContext context = ContextRegistry.GetContext();
        }
    }

    [Test]
    [ExpectedException(typeof(ApplicationContextException),
        ExpectedMessage = "No context registered under name 'bingo'. Use the 'RegisterContext' method or the 'spring/context' section from your configuration file.")]
    public void GetContextByNameNotRegisteredThrowsException()
    {
        IApplicationContext context = ContextRegistry.GetContext("bingo");
    }

    [Test]
    public void CanBuildProxyForClassWithProtectedConstructor()
    {
        // Just asserts that proxy generation does not throw.
        CompositionProxyTypeBuilder typeBuilder = new CompositionProxyTypeBuilder();
        typeBuilder.TargetType = typeof(ClassWithProtectedCtor);
        typeBuilder.BuildProxyType();
    }

    [Test]
    public void ClearWithDynamicProxies()
    {
        // Verifies that a dynamically generated proxy type remains usable in a
        // fresh context after the registry has been cleared.
        CompositionProxyTypeBuilder typeBuilder = new CompositionProxyTypeBuilder();
        typeBuilder.TargetType = typeof(TestObject);
        Type proxyType = typeBuilder.BuildProxyType();
        DefaultListableObjectFactory of = new DefaultListableObjectFactory();
        RootObjectDefinition od1 = new RootObjectDefinition(proxyType, false);
        od1.PropertyValues.Add("Name", "Bruno");
        of.RegisterObjectDefinition("testObject", od1);
        GenericApplicationContext ctx1 = new GenericApplicationContext(of);
        ContextRegistry.RegisterContext(ctx1);
        ITestObject to1 = ContextRegistry.GetContext().GetObject("testObject") as ITestObject;
        Assert.IsNotNull(to1);
        Assert.AreEqual("Bruno", to1.Name);
        DefaultListableObjectFactory of2 = new DefaultListableObjectFactory();
        RootObjectDefinition od2 = new RootObjectDefinition(proxyType, false);
        od2.PropertyValues.Add("Name", "Baia");
        of2.RegisterObjectDefinition("testObject", od2);
        GenericApplicationContext ctx2 = new GenericApplicationContext(of2);
        ContextRegistry.Clear();
        ITestObject to2 = ctx2.GetObject("testObject") as ITestObject;
        Assert.IsNotNull(to2);
        Assert.AreEqual("Baia", to2.Name);
    }

    // TODO : Add support for .NET 1.x
    [Test]
    public void ClearWithConfigurationSection()
    {
        // After Clear(), GetContext() must build a new context from config
        // rather than returning the stale instance.
        IApplicationContext ctx1 = ContextRegistry.GetContext();
        ContextRegistry.Clear();
        IApplicationContext ctx2 = ContextRegistry.GetContext();
        Assert.AreNotSame(ctx1, ctx2);
    }

    [Test(Description = "SPRNET-105")]
    [ExpectedException(typeof(ApplicationContextException))]
    public void ChokesIfChildContextRegisteredUnderNameOfAnExistingContext()
    {
        MockApplicationContext original = new MockApplicationContext("original");
        ContextRegistry.RegisterContext(original);
        MockApplicationContext duplicate = new MockApplicationContext("original");
        ContextRegistry.RegisterContext(duplicate);
    }

    [Test]
    public void RemovesContextFromRegistryWhenContextCloses()
    {
        // Disposing a registered context must unregister it.
        StaticApplicationContext appCtx = new StaticApplicationContext();
        appCtx.Name = "myCtx";
        ContextRegistry.RegisterContext(appCtx);
        Assert.IsTrue(ContextRegistry.IsContextRegistered(appCtx.Name));
        appCtx.Dispose();
        Assert.IsFalse(ContextRegistry.IsContextRegistered(appCtx.Name));
    }

    /// <summary>
    /// Tests for the path-style names the registry synthesizes when
    /// hierarchically related contexts are registered with default names.
    /// </summary>
    [TestFixture]
    public class WhenHierarchicalContextsAllHaveDefaultNames
    {
        private MockApplicationContext _parentContext;
        private MockApplicationContext _childContext;
        private MockApplicationContext _grandChildContext;
        private MockApplicationContext _greatGrandChildContext;
        private string _expectedParentName;
        private string _expectedChildName;
        private string _expectedGrandChildName;
        private string _expectedGreatGrandChildName;

        [TestFixtureSetUp]
        public void InitializeAllTests()
        {
            // Each generation's expected name embeds the full names of its
            // ancestors, separated by '/'.
            _expectedParentName = AbstractApplicationContext.DefaultRootContextName;
            _expectedChildName = string.Format("{0}/{1}", _expectedParentName, AbstractApplicationContext.DefaultRootContextName);
            _expectedGrandChildName = string.Format("{0}/{1}/{2}",_expectedParentName, _expectedChildName, AbstractApplicationContext.DefaultRootContextName);
            _expectedGreatGrandChildName = string.Format("{0}/{1}/{2}/{3}",_expectedParentName, _expectedChildName, _expectedGrandChildName, AbstractApplicationContext.DefaultRootContextName);
        }

        [SetUp]
        public void Setup()
        {
            //ensure prior-registered contexts are removed
            ContextRegistry.Clear();
            _parentContext = new MockApplicationContext();
            _parentContext.MockName = "parent";
            _childContext = new MockApplicationContext(_parentContext);
            _childContext.MockName = "child";
            _childContext.ParentContext = _parentContext;
            _grandChildContext = new MockApplicationContext(_childContext);
            _grandChildContext.MockName = "grandchild";
            _grandChildContext.ParentContext = _childContext;
            _greatGrandChildContext = new MockApplicationContext(_grandChildContext);
            _greatGrandChildContext.MockName = "greatgrandchild";
            _greatGrandChildContext.ParentContext = _grandChildContext;
        }

        [Test]
        public void RegisterContext_ConstructsNestedPathBasedNames_IfRegisterdInHierarchicalOrder()
        {
            ContextRegistry.RegisterContext(_parentContext);
            ContextRegistry.RegisterContext(_childContext);
            ContextRegistry.RegisterContext(_grandChildContext);
            ContextRegistry.RegisterContext(_greatGrandChildContext);
            Assert.AreEqual(_expectedParentName, ContextRegistry.GetContext().Name);
            Assert.AreEqual(_expectedChildName, ContextRegistry.GetContext(_expectedChildName).Name);
            Assert.AreEqual(_expectedGrandChildName, ContextRegistry.GetContext(_expectedGrandChildName).Name);
            Assert.AreEqual(_expectedGreatGrandChildName, ContextRegistry.GetContext(_expectedGreatGrandChildName).Name);
        }

        [Test]
        public void RegisterContext_ConstructsNestedPathBasedNames_IfRegisteringAMixOfDefaultAndExplicitNamedContexts()
        {
            //modify the expected names for the decendent contexts for this one test
            string childContextInitialName = AbstractApplicationContext.DefaultRootContextName + "_CUSTOM";
            _expectedChildName = string.Format("{0}/{1}", _expectedParentName, childContextInitialName);
            _expectedGrandChildName = string.Format("{0}/{1}/{2}", _expectedParentName, _expectedChildName, AbstractApplicationContext.DefaultRootContextName);
            _expectedGreatGrandChildName = string.Format("{0}/{1}/{2}/{3}", _expectedParentName, _expectedChildName, _expectedGrandChildName, AbstractApplicationContext.DefaultRootContextName);
            //setup custom child instance for this one test
            _childContext = new MockApplicationContext(_expectedChildName);
            _childContext.MockName = "child";
            _childContext.ParentContext = _parentContext;
            _grandChildContext.ParentContext = _childContext;
            //register contexts in conflict with hierarchical order
            ContextRegistry.RegisterContext(_parentContext);
            ContextRegistry.RegisterContext(_childContext);
            ContextRegistry.RegisterContext(_grandChildContext);
            ContextRegistry.RegisterContext(_greatGrandChildContext);
            Assert.AreEqual(_expectedParentName, ContextRegistry.GetContext(_expectedParentName).Name);
            Assert.AreEqual(_expectedChildName, ContextRegistry.GetContext(_expectedChildName).Name);
            Assert.AreEqual(_expectedGrandChildName, ContextRegistry.GetContext(_expectedGrandChildName).Name);
            Assert.AreEqual(_expectedGreatGrandChildName, ContextRegistry.GetContext(_expectedGreatGrandChildName).Name);
        }
    }
}
}
| yonglehou/spring-net | test/Spring/Spring.Core.Tests/Context/Support/ContextRegistryTests.cs | C# | apache-2.0 | 14,719 |
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
/*
* @author max
*/
package com.intellij.lang.html;
import com.intellij.lang.ASTNode;
import com.intellij.lang.PsiBuilder;
import com.intellij.lang.PsiParser;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
public class HTMLParser implements PsiParser {
@Override
@NotNull
public ASTNode parse(@NotNull final IElementType root, @NotNull final PsiBuilder builder) {
parseWithoutBuildingTree(root, builder, createHtmlParsing(builder));
return builder.getTreeBuilt();
}
public static void parseWithoutBuildingTree(@NotNull IElementType root, @NotNull PsiBuilder builder) {
parseWithoutBuildingTree(root, builder, new HtmlParsing(builder));
}
private static void parseWithoutBuildingTree(@NotNull IElementType root, @NotNull PsiBuilder builder,
@NotNull HtmlParsing htmlParsing) {
builder.enforceCommentTokens(TokenSet.EMPTY);
final PsiBuilder.Marker file = builder.mark();
htmlParsing.parseDocument();
file.done(root);
}
// to be able to manage what tags treated as single
@NotNull
protected HtmlParsing createHtmlParsing(@NotNull PsiBuilder builder) {
return new HtmlParsing(builder);
}
} | goodwinnk/intellij-community | xml/xml-psi-impl/src/com/intellij/lang/html/HTMLParser.java | Java | apache-2.0 | 1,414 |
/**
* CollectionSizeErrorReason.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.dfp.axis.v201502;
// NOTE: Axis-generated "typesafe enum" wrapper; manual edits are normally lost
// when the stubs are regenerated from WSDL.
public class CollectionSizeErrorReason implements java.io.Serializable {
    // Wire value of this enumeration constant.
    private java.lang.String _value_;
    // Maps wire values to singleton instances; populated by the constructor, so
    // the static constant initializers below must run before any lookup.
    private static java.util.HashMap _table_ = new java.util.HashMap();

    // Constructor
    protected CollectionSizeErrorReason(java.lang.String value) {
        _value_ = value;
        _table_.put(_value_,this);
    }

    public static final java.lang.String _TOO_LARGE = "TOO_LARGE";
    public static final java.lang.String _UNKNOWN = "UNKNOWN";
    public static final CollectionSizeErrorReason TOO_LARGE = new CollectionSizeErrorReason(_TOO_LARGE);
    public static final CollectionSizeErrorReason UNKNOWN = new CollectionSizeErrorReason(_UNKNOWN);

    public java.lang.String getValue() { return _value_;}

    // Returns the singleton for the given wire value; throws if unrecognized.
    public static CollectionSizeErrorReason fromValue(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        CollectionSizeErrorReason enumeration = (CollectionSizeErrorReason)
            _table_.get(value);
        if (enumeration==null) throw new java.lang.IllegalArgumentException();
        return enumeration;
    }

    public static CollectionSizeErrorReason fromString(java.lang.String value)
          throws java.lang.IllegalArgumentException {
        return fromValue(value);
    }

    // Identity comparison is safe because instances are interned in _table_.
    public boolean equals(java.lang.Object obj) {return (obj == this);}
    public int hashCode() { return toString().hashCode();}
    public java.lang.String toString() { return _value_;}

    // Preserves the singleton property across Java serialization.
    public java.lang.Object readResolve() throws java.io.ObjectStreamException { return fromValue(_value_);}

    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumSerializer(
            _javaType, _xmlType);
    }

    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.EnumDeserializer(
            _javaType, _xmlType);
    }

    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(CollectionSizeErrorReason.class);

    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201502", "CollectionSizeError.Reason"));
    }

    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }
}
| shyTNT/googleads-java-lib | modules/dfp_axis/src/main/java/com/google/api/ads/dfp/axis/v201502/CollectionSizeErrorReason.java | Java | apache-2.0 | 2,880 |
package io.dropwizard.testing.junit5;
import io.dropwizard.testing.app.ContextInjectionResource;
import org.glassfish.jersey.test.grizzly.GrizzlyWebTestContainerFactory;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import static org.assertj.core.api.Assertions.assertThat;
@ExtendWith(DropwizardExtensionsSupport.class)
class ResourceExtensionWithGrizzlyTest {
    // Discovered reflectively by DropwizardExtensionsSupport.  Uses the Grizzly
    // test container so requests go through a real HTTP server (needed, e.g.,
    // for the PATCH verb), and registers a custom client exception mapper.
    private ResourceExtension resources = ResourceExtension.builder()
        .addResource(ContextInjectionResource::new)
        .setTestContainerFactory(new GrizzlyWebTestContainerFactory())
        .setClientConfigurator(clientConfig -> clientConfig.register(DummyExceptionMapper.class))
        .build();

    @Test
    void testClientSupportsPatchMethod() {
        // PATCH is not a standard JAX-RS client verb; exercise it via method().
        final String resp = resources.target("test")
            .request()
            .method("PATCH", Entity.text("Patch is working"), String.class);
        assertThat(resp).isEqualTo("Patch is working");
    }

    @Test
    void testCustomClientConfigurator() {
        assertThat(resources.client().getConfiguration().isRegistered(DummyExceptionMapper.class)).isTrue();
    }

    // Mapper used only to verify client configuration; never actually invoked.
    private static class DummyExceptionMapper implements ExceptionMapper<WebApplicationException> {
        @Override
        public Response toResponse(WebApplicationException e) {
            throw new UnsupportedOperationException();
        }
    }
}
| dropwizard/dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/ResourceExtensionWithGrizzlyTest.java | Java | apache-2.0 | 1,562 |
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.skyframe;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.devtools.build.lib.actions.FileStateType;
import com.google.devtools.build.lib.actions.FileStateValue;
import com.google.devtools.build.lib.actions.FileValue;
import com.google.devtools.build.lib.pkgcache.PathPackageLocator;
import com.google.devtools.build.lib.util.Pair;
import com.google.devtools.build.lib.vfs.Path;
import com.google.devtools.build.lib.vfs.PathFragment;
import com.google.devtools.build.lib.vfs.RootedPath;
import com.google.devtools.build.skyframe.SkyFunction;
import com.google.devtools.build.skyframe.SkyFunctionException;
import com.google.devtools.build.skyframe.SkyFunctionException.Transience;
import com.google.devtools.build.skyframe.SkyKey;
import java.io.IOException;
import java.util.ArrayList;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
/**
* A {@link SkyFunction} for {@link FileValue}s.
*
* <p>Most of the complexity in the implementation results from wanting incremental correctness in
* the presence of symlinks, esp. ancestor directory symlinks.
*/
public class FileFunction implements SkyFunction {
  // Shared package-path configuration; read on each lookup so later changes to
  // the locator are observed.
  private final AtomicReference<PathPackageLocator> pkgLocator;
  // Optional debugging hook; may be null (see NonexistentFileReceiver).
  @Nullable private final NonexistentFileReceiver nonexistentFileReceiver;

  /** Temporary interface to help track down why files are missing in some cases. */
  public interface NonexistentFileReceiver {
    void accept(
        RootedPath rootedPath,
        RootedPath realRootedPath,
        RootedPath parentRootedPath,
        FileValue parentFileValue);
  }

  // Production constructor: no nonexistent-file debugging hook.
  public FileFunction(AtomicReference<PathPackageLocator> pkgLocator) {
    this(pkgLocator, null);
  }

  // Package-private constructor allowing a debugging receiver to be injected.
  FileFunction(
      AtomicReference<PathPackageLocator> pkgLocator,
      @Nullable NonexistentFileReceiver nonexistentFileReceiver) {
    this.pkgLocator = pkgLocator;
    this.nonexistentFileReceiver = nonexistentFileReceiver;
  }
  /**
   * Computes the {@link FileValue} for the given {@link RootedPath} key: first resolves
   * ancestor-directory symlinks, then chases the symlink chain at the path itself.
   * Returns null when Skyframe dependencies are missing (Skyframe will restart us).
   */
  @Override
  public FileValue compute(SkyKey skyKey, Environment env)
      throws FileFunctionException, InterruptedException {
    RootedPath rootedPath = (RootedPath) skyKey.argument();

    // Suppose we have a path p. One of the goals of FileFunction is to resolve the "real path", if
    // any, of p. The basic algorithm is to use the fully resolved path of p's parent directory to
    // determine the fully resolved path of p. This is complicated when symlinks are involved, and
    // is especially complicated when ancestor directory symlinks are involved.
    //
    // Since FileStateValues are the roots of invalidation, care has to be taken to ensuring we
    // declare the proper FileStateValue deps. As a concrete example, let p = a/b and imagine (i) a
    // is a direct symlink to c and also (ii) c/b is an existing file. Among other direct deps, we
    // want to have a direct dep on FileStateValue(c/b), since that's the node that will be changed
    // if the actual contents of a/b (aka c/b) changes. To rephrase: a dep on FileStateValue(a/b)
    // won't do anything productive since that path will never be in the Skyframe diff.
    //
    // In the course of resolving the real path of p, there will be a logical chain of paths we
    // consider. Going with the example from above, the full chain of paths we consider is
    // [a/b, c/b].
    ArrayList<RootedPath> logicalChain = new ArrayList<>();
    // Same contents as 'logicalChain', except stored as an sorted TreeSet for efficiency reasons.
    // See the usage in checkPathSeenDuringPartialResolutionInternal.
    TreeSet<Path> sortedLogicalChain = Sets.newTreeSet();

    // Fully resolve the path of the parent directory, but only if the current file is not the
    // filesystem root (has no parent) or a package path root (treated opaquely and handled by
    // skyframe's DiffAwareness interface).
    //
    // This entails resolving ancestor symlinks fully. Note that this is the first thing we do - if
    // an ancestor is part of a symlink cycle, we want to detect that quickly as it gives a more
    // informative error message than we'd get doing bogus filesystem operations.
    PartialResolutionResult resolveFromAncestorsResult =
        resolveFromAncestors(rootedPath, sortedLogicalChain, logicalChain, env);
    if (resolveFromAncestorsResult == null) {
      // Missing Skyframe deps.
      return null;
    }
    RootedPath rootedPathFromAncestors = resolveFromAncestorsResult.rootedPath;
    FileStateValue fileStateValueFromAncestors = resolveFromAncestorsResult.fileStateValue;
    if (fileStateValueFromAncestors.getType() == FileStateType.NONEXISTENT) {
      // The path (or one of its ancestors) does not exist; report a nonexistent file.
      return FileValue.value(
          ImmutableList.copyOf(logicalChain),
          rootedPath,
          FileStateValue.NONEXISTENT_FILE_STATE_NODE,
          rootedPathFromAncestors,
          fileStateValueFromAncestors);
    }

    // The ancestor-resolved path itself may be a symlink (or chain of symlinks);
    // follow it to the real path, accumulating the logical chain as we go.
    RootedPath realRootedPath = rootedPathFromAncestors;
    FileStateValue realFileStateValue = fileStateValueFromAncestors;
    while (realFileStateValue.getType().isSymlink()) {
      PartialResolutionResult getSymlinkTargetRootedPathResult =
          getSymlinkTargetRootedPath(
              realRootedPath,
              realFileStateValue.getSymlinkTarget(),
              sortedLogicalChain,
              logicalChain,
              env);
      if (getSymlinkTargetRootedPathResult == null) {
        // Missing Skyframe deps.
        return null;
      }
      realRootedPath = getSymlinkTargetRootedPathResult.rootedPath;
      realFileStateValue = getSymlinkTargetRootedPathResult.fileStateValue;
    }
    return FileValue.value(
        ImmutableList.copyOf(logicalChain),
        rootedPath,
        // TODO(b/123922036): This is a bug. Should be 'fileStateValueFromAncestors'.
        // NOTE(review): this TODO looks stale -- the argument below already is
        // 'fileStateValueFromAncestors'; confirm against b/123922036 before removing.
        fileStateValueFromAncestors,
        realRootedPath,
        realFileStateValue);
  }
private static RootedPath getChild(RootedPath parentRootedPath, String baseName) {
return RootedPath.toRootedPath(
parentRootedPath.getRoot(), parentRootedPath.getRootRelativePath().getChild(baseName));
}
  // Converts 'path' to a RootedPath, rooting it under one of the configured
  // package-path entries where possible.
  private RootedPath toRootedPath(Path path) {
    return RootedPath.toRootedPathMaybeUnderRoot(path, pkgLocator.get().getPathEntries());
  }
/**
* Returns the path and file state of {@code rootedPath}, accounting for ancestor symlinks, or
* {@code null} if there was a missing dep.
*/
@Nullable
private PartialResolutionResult resolveFromAncestors(
RootedPath rootedPath,
TreeSet<Path> sortedLogicalChain,
ArrayList<RootedPath> logicalChain,
Environment env)
throws InterruptedException, FileFunctionException {
RootedPath parentRootedPath = rootedPath.getParentDirectory();
return parentRootedPath != null
? resolveFromAncestorsWithParent(
rootedPath, parentRootedPath, sortedLogicalChain, logicalChain, env)
: resolveFromAncestorsNoParent(rootedPath, sortedLogicalChain, logicalChain, env);
}
  /**
   * Resolves {@code rootedPath} via the already-computed {@link FileValue} of its parent
   * directory, so that symlinks anywhere in the ancestry are taken into account.
   *
   * <p>Returns {@code null} if a Skyframe dependency is missing.
   */
  @Nullable
  private PartialResolutionResult resolveFromAncestorsWithParent(
      RootedPath rootedPath,
      RootedPath parentRootedPath,
      TreeSet<Path> sortedLogicalChain,
      ArrayList<RootedPath> logicalChain,
      Environment env)
      throws InterruptedException, FileFunctionException {
    PathFragment relativePath = rootedPath.getRootRelativePath();
    RootedPath rootedPathFromAncestors;
    String baseName = relativePath.getBaseName();
    FileValue parentFileValue = (FileValue) env.getValue(FileValue.key(parentRootedPath));
    if (parentFileValue == null) {
      // Missing Skyframe dep; caller must restart.
      return null;
    }
    // Re-root this path under the parent's fully resolved ("real") location.
    rootedPathFromAncestors = getChild(parentFileValue.realRootedPath(), baseName);
    if (!parentFileValue.exists() || !parentFileValue.isDirectory()) {
      // A child of a nonexistent path (or of a non-directory) cannot itself exist.
      if (nonexistentFileReceiver != null) {
        nonexistentFileReceiver.accept(
            rootedPath, rootedPathFromAncestors, parentRootedPath, parentFileValue);
      }
      return new PartialResolutionResult(
          rootedPathFromAncestors, FileStateValue.NONEXISTENT_FILE_STATE_NODE);
    }
    // Each partial resolution of the parent implies a partial resolution of this path; record
    // them all so symlink cycles and unbounded expansions through the ancestry are detected.
    for (RootedPath parentPartialRootedPath : parentFileValue.logicalChainDuringResolution()) {
      checkAndNotePathSeenDuringPartialResolution(
          getChild(parentPartialRootedPath, baseName), sortedLogicalChain, logicalChain, env);
      if (env.valuesMissing()) {
        return null;
      }
    }
    FileStateValue fileStateValueFromAncestors =
        (FileStateValue) env.getValue(FileStateValue.key(rootedPathFromAncestors));
    if (fileStateValueFromAncestors == null) {
      // Missing Skyframe dep; caller must restart.
      return null;
    }
    return new PartialResolutionResult(rootedPathFromAncestors, fileStateValueFromAncestors);
  }
@Nullable
private PartialResolutionResult resolveFromAncestorsNoParent(
RootedPath rootedPath,
TreeSet<Path> sortedLogicalChain,
ArrayList<RootedPath> logicalChain,
Environment env)
throws InterruptedException, FileFunctionException {
checkAndNotePathSeenDuringPartialResolution(rootedPath, sortedLogicalChain, logicalChain, env);
if (env.valuesMissing()) {
return null;
}
FileStateValue realFileStateValue =
(FileStateValue) env.getValue(FileStateValue.key(rootedPath));
if (realFileStateValue == null) {
return null;
}
return new PartialResolutionResult(rootedPath, realFileStateValue);
}
  /** Result of one step of partial path resolution: a path plus the file state observed there. */
  private static final class PartialResolutionResult {
    private final RootedPath rootedPath;
    private final FileStateValue fileStateValue;
    private PartialResolutionResult(RootedPath rootedPath, FileStateValue fileStateValue) {
      this.rootedPath = rootedPath;
      this.fileStateValue = fileStateValue;
    }
  }
  /**
   * Returns the symlink target and file state of {@code rootedPath}'s symlink to {@code
   * symlinkTarget}, accounting for ancestor symlinks, or {@code null} if there was a missing dep.
   */
  @Nullable
  private PartialResolutionResult getSymlinkTargetRootedPath(
      RootedPath rootedPath,
      PathFragment symlinkTarget,
      TreeSet<Path> sortedLogicalChain,
      ArrayList<RootedPath> logicalChain,
      Environment env)
      throws FileFunctionException, InterruptedException {
    Path path = rootedPath.asPath();
    Path symlinkTargetPath;
    if (symlinkTarget.isAbsolute()) {
      // Absolute targets resolve independently of where the symlink lives.
      symlinkTargetPath = path.getRelative(symlinkTarget);
    } else {
      // Relative targets resolve against the symlink's parent directory; when there is no
      // parent, fall back to resolving against the path itself.
      Path parentPath = path.getParentDirectory();
      symlinkTargetPath =
          parentPath != null
              ? parentPath.getRelative(symlinkTarget)
              : path.getRelative(symlinkTarget);
    }
    RootedPath symlinkTargetRootedPath = toRootedPath(symlinkTargetPath);
    // Ensure following the symlink would not create a cycle or unbounded expansion.
    checkPathSeenDuringPartialResolution(
        symlinkTargetRootedPath, sortedLogicalChain, logicalChain, env);
    if (env.valuesMissing()) {
      return null;
    }
    // The symlink target could have a different parent directory, which itself could be a directory
    // symlink (or have an ancestor directory symlink)!
    return resolveFromAncestors(symlinkTargetRootedPath, sortedLogicalChain, logicalChain, env);
  }
  /**
   * Runs the symlink cycle / infinite-expansion checks of {@link
   * #checkPathSeenDuringPartialResolutionInternal} for {@code rootedPath} and records it in both
   * representations of the logical chain. May leave deps missing in {@code env}; callers must
   * check {@link Environment#valuesMissing}.
   */
  private void checkAndNotePathSeenDuringPartialResolution(
      RootedPath rootedPath,
      TreeSet<Path> sortedLogicalChain,
      ArrayList<RootedPath> logicalChain,
      Environment env)
      throws FileFunctionException, InterruptedException {
    Path path = rootedPath.asPath();
    checkPathSeenDuringPartialResolutionInternal(
        rootedPath, path, sortedLogicalChain, logicalChain, env);
    sortedLogicalChain.add(path);
    logicalChain.add(rootedPath);
  }
private void checkPathSeenDuringPartialResolution(
RootedPath rootedPath,
TreeSet<Path> sortedLogicalChain,
ArrayList<RootedPath> logicalChain,
Environment env)
throws FileFunctionException, InterruptedException {
checkPathSeenDuringPartialResolutionInternal(
rootedPath, rootedPath.asPath(), sortedLogicalChain, logicalChain, env);
}
  /**
   * Checks whether considering {@code rootedPath} / {@code path} as the next step of partial real
   * path resolution would introduce a symlink cycle or unbounded symlink expansion, throwing a
   * {@link FileFunctionException} wrapping the appropriate {@link FileSymlinkException} if so.
   *
   * <p>May leave deps missing in {@code env} (callers must check {@link
   * Environment#valuesMissing}); the extra dep exists only so each unique symlink error is
   * reported exactly once.
   */
  private void checkPathSeenDuringPartialResolutionInternal(
      RootedPath rootedPath,
      Path path,
      TreeSet<Path> sortedLogicalChain,
      ArrayList<RootedPath> logicalChain,
      Environment env)
      throws FileFunctionException, InterruptedException {
    // We are about to perform another step of partial real path resolution. 'logicalChain' is the
    // chain of paths we've considered so far, and 'rootedPath' / 'path' is the proposed next path
    // we consider.
    //
    // Before we proceed with 'rootedPath', we need to ensure there won't be a problem. There are
    // three sorts of issues, all stemming from symlinks:
    //   (i) Symlink cycle:
    //     p -> p1 -> p2 -> p1
    //   (ii) Unbounded expansion caused by a symlink to a descendant of a member of the chain:
    //     p -> a/b -> c/d -> a/b/e
    //   (iii) Unbounded expansion caused by a symlink to an ancestor of a member of the chain:
    //     p -> a/b -> c/d -> a
    //
    // We can detect all three of these symlink issues via inspection of the proposed new element.
    // Here is our incremental algorithm:
    //   If 'path' is in 'sortedLogicalChain' then we have a found a cycle (i).
    //   If 'path' is a descendant of any path p in 'sortedLogicalChain' then we have unbounded
    //   expansion (ii).
    //   If 'path' is an ancestor of any path p in 'sortedLogicalChain' then we have unbounded
    //   expansion (iii).
    // We can check for these cases efficiently (read: sublinear time) by finding the extremal
    // candidate p for (ii) and (iii).
    SkyKey uniquenessKey = null;
    FileSymlinkException fse = null;
    // floor/ceiling on the TreeSet give the closest chain members below/above 'path' in sorted
    // order; these are the only candidates for the descendant/ancestor checks.
    Path seenFloorPath = sortedLogicalChain.floor(path);
    Path seenCeilingPath = sortedLogicalChain.ceiling(path);
    if (sortedLogicalChain.contains(path)) {
      // 'rootedPath' is [transitively] a symlink to a previous element in the symlink chain (i).
      Pair<ImmutableList<RootedPath>, ImmutableList<RootedPath>> pathAndChain =
          CycleUtils.splitIntoPathAndChain(isPathPredicate(path), logicalChain);
      FileSymlinkCycleException fsce =
          new FileSymlinkCycleException(pathAndChain.getFirst(), pathAndChain.getSecond());
      uniquenessKey = FileSymlinkCycleUniquenessFunction.key(fsce.getCycle());
      fse = fsce;
    } else if (seenFloorPath != null && path.startsWith(seenFloorPath)) {
      // 'rootedPath' is [transitively] a symlink to a descendant of a previous element in the
      // symlink chain (ii).
      Pair<ImmutableList<RootedPath>, ImmutableList<RootedPath>> pathAndChain =
          CycleUtils.splitIntoPathAndChain(
              isPathPredicate(seenFloorPath),
              ImmutableList.copyOf(Iterables.concat(logicalChain, ImmutableList.of(rootedPath))));
      uniquenessKey = FileSymlinkInfiniteExpansionUniquenessFunction.key(pathAndChain.getSecond());
      fse = new FileSymlinkInfiniteExpansionException(
          pathAndChain.getFirst(), pathAndChain.getSecond());
    } else if (seenCeilingPath != null && seenCeilingPath.startsWith(path)) {
      // 'rootedPath' is [transitively] a symlink to an ancestor of a previous element in the
      // symlink chain (iii).
      Pair<ImmutableList<RootedPath>, ImmutableList<RootedPath>> pathAndChain =
          CycleUtils.splitIntoPathAndChain(
              isPathPredicate(seenCeilingPath),
              ImmutableList.copyOf(Iterables.concat(logicalChain, ImmutableList.of(rootedPath))));
      uniquenessKey = FileSymlinkInfiniteExpansionUniquenessFunction.key(pathAndChain.getSecond());
      fse =
          new FileSymlinkInfiniteExpansionException(
              pathAndChain.getFirst(), pathAndChain.getSecond());
    }
    if (uniquenessKey != null) {
      // Note that this dependency is merely to ensure that each unique symlink error gets
      // reported exactly once.
      env.getValue(uniquenessKey);
      if (env.valuesMissing()) {
        return;
      }
      throw new FileFunctionException(
          Preconditions.checkNotNull(fse, rootedPath), Transience.PERSISTENT);
    }
  }
private static final Predicate<RootedPath> isPathPredicate(final Path path) {
return new Predicate<RootedPath>() {
@Override
public boolean apply(RootedPath rootedPath) {
return rootedPath.asPath().equals(path);
}
};
}
  /** Always returns {@code null}: computations of this function carry no tag. */
  @Nullable
  @Override
  public String extractTag(SkyKey skyKey) {
    return null;
  }
  /**
   * Used to declare all the exception types that can be wrapped in the exception thrown by
   * {@link FileFunction#compute}. Currently only {@link IOException} causes are wrapped.
   */
  private static final class FileFunctionException extends SkyFunctionException {
    public FileFunctionException(IOException e, Transience transience) {
      super(e, transience);
    }
  }
}
| dslomov/bazel | src/main/java/com/google/devtools/build/lib/skyframe/FileFunction.java | Java | apache-2.0 | 17,418 |
/*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * @fileoverview Required pieces of Closure's base.js: just enough of the
 * `goog` namespace for code under test that calls goog.provide/goog.require.
 */
/** @const */
var goog = goog || {};
/**
 * No-op stand-in for Closure's goog.provide.
 * @param {string} name
 */
goog.provide = function(name) {};
/**
 * No-op stand-in for Closure's goog.require.
 * @param {string} name
 */
goog.require = function(name) {};
| Pimm/closure-compiler | test/com/google/javascript/refactoring/examples/testdata/goog_base.js | JavaScript | apache-2.0 | 838 |
# Encoding: utf-8
# Cloud Foundry Java Buildpack
# Copyright 2013-2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
require 'component_helper'
require 'java_buildpack/framework/your_kit_profiler'
describe JavaBuildpack::Framework::YourKitProfiler do
  # Shared context wiring up `component`, `configuration`, `sandbox`, `version`, etc.
  include_context 'component_helper'
  # With the default (empty) configuration the framework must stay inactive.
  it 'does not detect if not enabled' do
    expect(component.detect).to be_nil
  end
  context do
    # Enable the profiler for the remaining examples.
    let(:configuration) { { 'enabled' => true } }
    it 'detects when enabled' do
      expect(component.detect).to eq("your-kit-profiler=#{version}")
    end
    it 'downloads YourKit agent',
       cache_fixture: 'stub-your-kit-profiler.so' do
      component.compile
      expect(sandbox + "your_kit_profiler-#{version}").to exist
    end
    context do
      # Default port (10001) and session name derived from the application name.
      it 'updates JAVA_OPTS' do
        component.release
        expect(java_opts).to include("-agentpath:$PWD/.java-buildpack/your_kit_profiler/your_kit_profiler-#{version}=" \
          'dir=$PWD/.java-buildpack/your_kit_profiler/snapshots,logdir=$PWD/.java-buildpack/your_kit_profiler/logs,' \
          'port=10001,sessionname=test-application-name')
      end
      context do
        let(:configuration) { super().merge 'port' => 10_002 }
        it 'adds port from configuration to JAVA_OPTS if specified' do
          component.release
          expect(java_opts).to include('-agentpath:$PWD/.java-buildpack/your_kit_profiler/your_kit_profiler-' \
            "#{version}=dir=$PWD/.java-buildpack/your_kit_profiler/snapshots,logdir=$PWD/.java-buildpack/" \
            'your_kit_profiler/logs,port=10002,sessionname=test-application-name')
        end
      end
      context do
        let(:configuration) { super().merge 'default_session_name' => 'alternative-session-name' }
        it 'adds session name from configuration to JAVA_OPTS if specified' do
          component.release
          expect(java_opts).to include('-agentpath:$PWD/.java-buildpack/your_kit_profiler/your_kit_profiler-' \
            "#{version}=dir=$PWD/.java-buildpack/your_kit_profiler/snapshots,logdir=$PWD/.java-buildpack/" \
            'your_kit_profiler/logs,port=10001,sessionname=alternative-session-name')
        end
      end
    end
  end
end
| Service-Flow/java-buildpack-cloudfoundry | spec/java_buildpack/framework/your_kit_profiler_spec.rb | Ruby | apache-2.0 | 2,749 |
<!-- Nacionales-Top-728x90px -->
<!-- Google Publisher Tag (GPT) ad slot render: asks googletag to fill this div.
     NOTE(review): the slot itself is presumably defined elsewhere via
     googletag.defineSlot with the same div id - verify against the page header. -->
<div id='div-gpt-ad-1403196057127-0' class="topleftad">
<script type='text/javascript'>
googletag.cmd.push(function() { googletag.display('div-gpt-ad-1403196057127-0'); });
</script>
</div>
| Doap/sinkjuice.com | wp-content/themes/prensiguia/banner-ad-widget-nacionales-728x90.php | PHP | apache-2.0 | 223 |
/**
* Copyright (C) 2015 Fernando Cejas Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.fernandocejas.android10.sample.presentation.internal.di.modules;
import com.fernandocejas.android10.sample.domain.executor.PostExecutionThread;
import com.fernandocejas.android10.sample.domain.executor.ThreadExecutor;
import com.fernandocejas.android10.sample.domain.interactor.GetUserDetailsUseCase;
import com.fernandocejas.android10.sample.domain.interactor.GetUserListUseCase;
import com.fernandocejas.android10.sample.domain.interactor.UseCase;
import com.fernandocejas.android10.sample.domain.repository.UserRepository;
import com.fernandocejas.android10.sample.presentation.internal.di.PerActivity;
import dagger.Module;
import dagger.Provides;
import javax.inject.Named;
/**
* Dagger module that provides user related collaborators.
*/
@Module
public class UserModule {
  // -1 is the value used when no user id was supplied (default constructor);
  // NOTE(review): presumably GetUserDetailsUseCase treats it as "no user" - confirm.
  private int userId = -1;
  public UserModule() {}
  public UserModule(int userId) {
    this.userId = userId;
  }
  /** Provides the use case that retrieves the list of users. */
  @Provides @PerActivity @Named("userList") UseCase provideGetUserListUseCase(
      GetUserListUseCase getUserListUseCase) {
    return getUserListUseCase;
  }
  /** Provides the use case that retrieves details for the user id this module was built with. */
  @Provides @PerActivity @Named("userDetails") UseCase provideGetUserDetailsUseCase(
      UserRepository userRepository, ThreadExecutor threadExecutor,
      PostExecutionThread postExecutionThread) {
    return new GetUserDetailsUseCase(userId, userRepository, threadExecutor, postExecutionThread);
  }
} | fordjm/Android-CleanArchitecture | presentation/src/main/java/com/fernandocejas/android10/sample/presentation/internal/di/modules/UserModule.java | Java | apache-2.0 | 2,006 |
<?php
/*+**********************************************************************************
* The contents of this file are subject to the vtiger CRM Public License Version 1.0
* ("License"); You may not use this file except in compliance with the License
* The Original Code is: vtiger CRM Open Source
* The Initial Developer of the Original Code is vtiger.
* Portions created by vtiger are Copyright (C) vtiger.
* All Rights Reserved.
************************************************************************************/
// UI labels for the Picklist settings module. Keys are referenced at runtime;
// do not rename them (including the misspelled MESSGAE/RESPOSITION keys).
$languageStrings = array(
	'LBL_SELECT_PICKLIST_IN' => 'Select Picklist in' , // TODO: Review
	'LBL_ADD_VALUE' => 'Add Value' , // TODO: Review
	'LBL_RENAME_VALUE' => 'Rename Value' , // TODO: Review
	'LBL_DELETE_VALUE' => 'Delete Value' , // TODO: Review
	'LBL_ITEMS' => 'Values' , // TODO: Review
	'LBL_DRAG_ITEMS_TO_RESPOSITION' => 'Drag items to reposition them', // TODO: Review
	'LBL_SELECT_AN_ITEM_TO_RENAME_OR_DELETE' => 'Select an item to rename or delete', // TODO: Review
	'LBL_TO_DELETE_MULTIPLE_HOLD_CONTROL_KEY' => 'To Delete multiple items hold Ctrl key down while selecting items', // TODO: Review
	'LBL_ADD_ITEM_TO' => 'Add Item to' , // TODO: Review
	'LBL_ITEM_VALUE' => 'Item value' , // TODO: Review
	'LBL_ITEM_TO_RENAME' => 'Item to rename' , // TODO: Review
	'LBL_ENTER_NEW_NAME' => 'Enter new Name' , // TODO: Review
	'LBL_RENAME_PICKLIST_ITEM' => 'Rename Picklist Item' , // TODO: Review
	'LBL_DELETE_PICKLIST_ITEMS' => 'Delete Picklist Items' , // TODO: Review
	'LBL_ITEMS_TO_DELETE' => 'Items to Delete' , // TODO: Review
	'LBL_REPLACE_IT_WITH' => 'Replace it with' , // TODO: Review
	'LBL_ASSIGN_TO_ROLE' => 'Assign to Role' , // TODO: Review
	'LBL_ALL_ROLES' => 'All Roles' , // TODO: Review
	'LBL_CHOOSE_ROLES' => 'Choose Roles' , // TODO: Review
	'LBL_ALL_VALUES' => 'All values' , // TODO: Review
	'LBL_VALUES_ASSIGNED_TO_A_ROLE' => 'Values assigned to a role' , // TODO: Review
	'LBL_ASSIGN_VALUE' => 'Assign Value' , // TODO: Review
	'LBL_SAVE_ORDER' => 'Save Order' , // TODO: Review
	'LBL_ROLE_NAME' => 'Role Name' , // TODO: Review
	'LBL_SELECTED_VALUES_MESSGAE' => 'will appear for the user with this role', // TODO: Review
	'LBL_ENABLE/DISABLE_MESSGAE' => 'Click on value to Enable/Disable it. Then click "Save"', // TODO: Review
	'LBL_ASSIGN_VALUES_TO_ROLES' => 'Assign Values to Roles' , // TODO: Review
	'LBL_SELECTED_VALUES' => 'Selected Values' , // TODO: Review
	'NO_PICKLIST_FIELDS' => 'do not have any picklist fields', // TODO: Review
	//Translation for module
	'Calendar' => 'To Do',
);
// Strings surfaced to JavaScript (alerts/notifications in the Picklist editor).
$jsLanguageStrings = array(
	'JS_ITEM_RENAMED_SUCCESSFULLY' => 'Item renamed successfully' , // TODO: Review
	'JS_ITEM_ADDED_SUCCESSFULLY' => 'Item added successfully' , // TODO: Review
	'JS_NO_ITEM_SELECTED' => 'No item selected' , // TODO: Review
	'JS_MORE_THAN_ONE_ITEM_SELECTED' => 'More than one item selected' , // TODO: Review
	'JS_ITEMS_DELETED_SUCCESSFULLY' => 'Items deleted successfully' , // TODO: Review
	'JS_YOU_CANNOT_DELETE_ALL_THE_VALUES' => 'You cannot delete all the values', // TODO: Review
	'JS_ALL_ROLES_SELECTED' => 'All Roles selected' , // TODO: Review
	'JS_LIST_UPDATED_SUCCESSFULLY' => 'List updated successfully' , // TODO: Review
	'JS_SEQUENCE_UPDATED_SUCCESSFULLY' => 'Sequence updated successfully', // TODO: Review
	'JS_VALUE_ASSIGNED_SUCCESSFULLY' => 'Value assigned successfully' , // TODO: Review
	'JS_PLEASE_SELECT_MODULE' => 'Please select module' , // TODO: Review
); | basiljose1/byjcrm | pkg/vtiger/translations/BritishLanguagePack_br_br/modules/Settings/Picklist.php | PHP | apache-2.0 | 4,090 |
package DDG::Goodie::Minecraft;
# ABSTRACT: Minecraft crafting recipes.
use strict;
use DDG::Goodie;
use JSON;
# Zero-click-info metadata for this Goodie.
zci answer_type => 'minecraft';
zci is_cached => 1;
primary_example_queries 'cake minecraft';
secondary_example_queries 'how do i craft a cake in minecraft';
name 'Minecraft';
description 'Minecraft crafting recipes.';
source 'http://thejool.com/api/crafting-guide.json';
category 'special';
topics 'words_and_games';
code_url 'https://github.com/duckduckgo/zeroclickinfo-goodies/blob/master/lib/DDG/Goodie/Minecraft.pm';
attribution
    web => ['http://engvik.nu', 'Lars Jansøn Engvik'],
    github => [ 'larseng', 'Lars Jansøn Engvik'];
# Fire when the query starts or ends with "minecraft".
triggers startend => "minecraft";
# Fetch and store recipes in a hash, keyed by lower-cased recipe name.
my $json = share('crafting-guide.json')->slurp;
my $decoded = decode_json($json);
my %recipes = map{ lc $_->{'name'} => $_ } (@{ $decoded->{'items'} });
# Good words: All the words that recipe names consist of.
# Okay words: Words that are in the good words list, but also could be a part of the query.
# Bad words: Words related to Minecraft, but not related to recipes.
my %good_words = map { $_ => 1 } map { split /\s+/ } (keys %recipes);
my %okay_words = map { $_ => 1 } (qw(a crafting));
my %bad_words = map { $_ => 1 } (qw(download server tutorial mod mods skins skin texture pack packs project projects));
handle remainder => sub {
    my @query = split /\s+/, lc $_; # Split on whitespaces.
    my @lookup;
    # Loop through the query, bailing out on non-recipe queries and keeping recipe words.
    foreach (@query) {
        return if(exists($bad_words{$_})); # Not looking for a recipe.
        push (@lookup, $_) if(exists($good_words{$_})); # Word exists in a recipe, add it.
    }
    # Try the full word list first, then retry without the "okay" filler words.
    my $recipe = $recipes{join(' ', @lookup)} || $recipes{join(' ', grep { !$okay_words{$_} } @lookup)};
    return unless $recipe;
    # Recipe found, let's return an answer.
    my $plaintext = 'Minecraft ' . $recipe->{'name'} . ' ingredients: ' . $recipe->{'ingredients'} . '.';
    return $plaintext,
    structured_answer => {
        id => 'minecraft',
        name => 'Minecraft',
        data => {
            title => $recipe->{'name'},
            subtitle => "Ingredients: " . $recipe->{'ingredients'},
            description => $recipe->{'description'},
            image => 'https://duckduckgo.com/iu/?u=' . uri_esc( $recipe->{'image'} )
        },
        meta => {
            sourceName => "Minecraft XL",
            sourceUrl => "http://www.minecraftxl.com/crafting-recipes/"
        },
        templates => {
            group => 'info',
            options => {
                moreAt => 1
            }
        }
    };
};
1;
| wongalvis/zeroclickinfo-goodies | lib/DDG/Goodie/Minecraft.pm | Perl | apache-2.0 | 2,620 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.session.db;
import com.google.common.collect.ImmutableList;
import io.airlift.log.Logger;
import io.trino.plugin.session.SessionMatchSpec;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
/**
 * Test implementation of DbSpecsProvider.
 * {@link SessionPropertiesDao#getSessionMatchSpecs} is invoked every time the get() method is called.
 */
public class TestingDbSpecsProvider
        implements DbSpecsProvider
{
    private static final Logger log = Logger.get(TestingDbSpecsProvider.class);
    // Last successfully loaded specs; retained when a reload fails.
    private final AtomicReference<List<SessionMatchSpec>> sessionMatchSpecs = new AtomicReference<>(ImmutableList.of());
    private final AtomicBoolean destroyed = new AtomicBoolean(false);
    private final SessionPropertiesDao dao;

    @Inject
    public TestingDbSpecsProvider(SessionPropertiesDao dao)
    {
        requireNonNull(dao, "dao is null");
        this.dao = dao;
        // Ensure the backing tables exist before any specs are read.
        dao.createSessionSpecsTable();
        dao.createSessionClientTagsTable();
        dao.createSessionPropertiesTable();
    }

    /** Marks the provider destroyed; subsequent {@link #get} calls fail the state check. */
    @PreDestroy
    public void destroy()
    {
        destroyed.compareAndSet(false, true);
    }

    /**
     * Reloads the specs from the DAO on every call, returning the last successfully loaded
     * list when the reload throws.
     */
    @Override
    public List<SessionMatchSpec> get()
    {
        checkState(!destroyed.get(), "provider already destroyed");
        try {
            sessionMatchSpecs.set(dao.getSessionMatchSpecs());
        }
        catch (RuntimeException e) {
            // Swallow exceptions
            log.error(e, "Error reloading configuration");
        }
        return ImmutableList.copyOf(this.sessionMatchSpecs.get());
    }
}
| ebyhr/presto | plugin/trino-session-property-managers/src/test/java/io/trino/plugin/session/db/TestingDbSpecsProvider.java | Java | apache-2.0 | 2,388 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.testing.statistics;
/**
 * Strategy for deciding whether an estimated statistic value is an acceptable match for the
 * actual value.
 */
public interface MetricComparisonStrategy
{
    /** Returns {@code true} if {@code estimate} is considered a match for {@code actual}. */
    boolean matches(double actual, double estimate);
}
| electrum/presto | testing/trino-testing/src/main/java/io/trino/testing/statistics/MetricComparisonStrategy.java | Java | apache-2.0 | 700 |
// <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>
namespace Microsoft.Azure.Management.Redis.Models
{
using Newtonsoft.Json;
using System.Linq;
    /// <summary>
    /// Details of single instance of redis.
    /// </summary>
    /// <remarks>
    /// Auto-generated by AutoRest (see file header); manual edits will be lost
    /// on regeneration. All properties have private setters because values are
    /// populated from the service response only.
    /// </remarks>
    public partial class RedisInstanceDetails
    {
        /// <summary>
        /// Initializes a new instance of the RedisInstanceDetails class.
        /// </summary>
        public RedisInstanceDetails()
        {
            CustomInit();
        }

        /// <summary>
        /// Initializes a new instance of the RedisInstanceDetails class.
        /// </summary>
        /// <param name="sslPort">Redis instance SSL port.</param>
        /// <param name="nonSslPort">If enableNonSslPort is true, provides
        /// Redis instance Non-SSL port.</param>
        /// <param name="zone">If the Cache uses availability zones, specifies
        /// availability zone where this instance is located.</param>
        /// <param name="shardId">If clustering is enabled, the Shard ID of
        /// Redis Instance</param>
        /// <param name="isMaster">Specifies whether the instance is a master
        /// node.</param>
        public RedisInstanceDetails(int? sslPort = default(int?), int? nonSslPort = default(int?), string zone = default(string), int? shardId = default(int?), bool? isMaster = default(bool?))
        {
            SslPort = sslPort;
            NonSslPort = nonSslPort;
            Zone = zone;
            ShardId = shardId;
            IsMaster = isMaster;
            CustomInit();
        }

        /// <summary>
        /// An initialization method that performs custom operations like setting defaults
        /// </summary>
        // Partial hook: implement in a separate partial-class file to survive regeneration.
        partial void CustomInit();

        /// <summary>
        /// Gets redis instance SSL port.
        /// </summary>
        [JsonProperty(PropertyName = "sslPort")]
        public int? SslPort { get; private set; }

        /// <summary>
        /// Gets if enableNonSslPort is true, provides Redis instance Non-SSL
        /// port.
        /// </summary>
        [JsonProperty(PropertyName = "nonSslPort")]
        public int? NonSslPort { get; private set; }

        /// <summary>
        /// Gets if the Cache uses availability zones, specifies availability
        /// zone where this instance is located.
        /// </summary>
        [JsonProperty(PropertyName = "zone")]
        public string Zone { get; private set; }

        /// <summary>
        /// Gets if clustering is enabled, the Shard ID of Redis Instance
        /// </summary>
        [JsonProperty(PropertyName = "shardId")]
        public int? ShardId { get; private set; }

        /// <summary>
        /// Gets specifies whether the instance is a master node.
        /// </summary>
        [JsonProperty(PropertyName = "isMaster")]
        public bool? IsMaster { get; private set; }
    }
}
| yugangw-msft/azure-sdk-for-net | sdk/redis/Microsoft.Azure.Management.RedisCache/src/Generated/Models/RedisInstanceDetails.cs | C# | apache-2.0 | 3,180 |
/**
* OperatingSystemTargeting.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.dfp.axis.v201505;
/**
 * Represents operating systems that are being targeted or excluded
 * by the
 * {@link LineItem}.
 *
 * <p>Auto-generated by Apache Axis WSDL2Java (see file header); manual edits will be lost on
 * regeneration.
 */
public class OperatingSystemTargeting  implements java.io.Serializable {
    /* Indicates whether operating systems should be targeted or excluded.
     * This
     * attribute is optional and defaults to {@code true}. */
    private java.lang.Boolean isTargeted;
    /* Operating systems that are being targeted or excluded by the
     * {@link LineItem}. */
    private com.google.api.ads.dfp.axis.v201505.Technology[] operatingSystems;
    public OperatingSystemTargeting() {
    }
    public OperatingSystemTargeting(
           java.lang.Boolean isTargeted,
           com.google.api.ads.dfp.axis.v201505.Technology[] operatingSystems) {
           this.isTargeted = isTargeted;
           this.operatingSystems = operatingSystems;
    }
    /**
     * Gets the isTargeted value for this OperatingSystemTargeting.
     *
     * @return isTargeted   * Indicates whether operating systems should be targeted or excluded.
     * This
     * attribute is optional and defaults to {@code true}.
     */
    public java.lang.Boolean getIsTargeted() {
        return isTargeted;
    }
    /**
     * Sets the isTargeted value for this OperatingSystemTargeting.
     *
     * @param isTargeted   * Indicates whether operating systems should be targeted or excluded.
     * This
     * attribute is optional and defaults to {@code true}.
     */
    public void setIsTargeted(java.lang.Boolean isTargeted) {
        this.isTargeted = isTargeted;
    }
    /**
     * Gets the operatingSystems value for this OperatingSystemTargeting.
     *
     * @return operatingSystems   * Operating systems that are being targeted or excluded by the
     * {@link LineItem}.
     */
    public com.google.api.ads.dfp.axis.v201505.Technology[] getOperatingSystems() {
        return operatingSystems;
    }
    /**
     * Sets the operatingSystems value for this OperatingSystemTargeting.
     *
     * @param operatingSystems   * Operating systems that are being targeted or excluded by the
     * {@link LineItem}.
     */
    public void setOperatingSystems(com.google.api.ads.dfp.axis.v201505.Technology[] operatingSystems) {
        this.operatingSystems = operatingSystems;
    }
    public com.google.api.ads.dfp.axis.v201505.Technology getOperatingSystems(int i) {
        return this.operatingSystems[i];
    }
    public void setOperatingSystems(int i, com.google.api.ads.dfp.axis.v201505.Technology _value) {
        this.operatingSystems[i] = _value;
    }
    // Axis-generated guard used by equals() to break recursion on cyclic object graphs.
    private java.lang.Object __equalsCalc = null;
    public synchronized boolean equals(java.lang.Object obj) {
        if (!(obj instanceof OperatingSystemTargeting)) return false;
        OperatingSystemTargeting other = (OperatingSystemTargeting) obj;
        if (obj == null) return false;
        if (this == obj) return true;
        if (__equalsCalc != null) {
            return (__equalsCalc == obj);
        }
        __equalsCalc = obj;
        boolean _equals;
        _equals = true &&
            ((this.isTargeted==null && other.getIsTargeted()==null) ||
             (this.isTargeted!=null &&
              this.isTargeted.equals(other.getIsTargeted()))) &&
            ((this.operatingSystems==null && other.getOperatingSystems()==null) ||
             (this.operatingSystems!=null &&
              java.util.Arrays.equals(this.operatingSystems, other.getOperatingSystems())));
        __equalsCalc = null;
        return _equals;
    }
    // Axis-generated guard used by hashCode() to break recursion on cyclic object graphs.
    private boolean __hashCodeCalc = false;
    public synchronized int hashCode() {
        if (__hashCodeCalc) {
            return 0;
        }
        __hashCodeCalc = true;
        int _hashCode = 1;
        if (getIsTargeted() != null) {
            _hashCode += getIsTargeted().hashCode();
        }
        if (getOperatingSystems() != null) {
            for (int i=0;
                 i<java.lang.reflect.Array.getLength(getOperatingSystems());
                 i++) {
                java.lang.Object obj = java.lang.reflect.Array.get(getOperatingSystems(), i);
                if (obj != null &&
                    !obj.getClass().isArray()) {
                    _hashCode += obj.hashCode();
                }
            }
        }
        __hashCodeCalc = false;
        return _hashCode;
    }
    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(OperatingSystemTargeting.class, true);
    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201505", "OperatingSystemTargeting"));
        org.apache.axis.description.ElementDesc elemField = new org.apache.axis.description.ElementDesc();
        elemField.setFieldName("isTargeted");
        elemField.setXmlName(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201505", "isTargeted"));
        elemField.setXmlType(new javax.xml.namespace.QName("http://www.w3.org/2001/XMLSchema", "boolean"));
        elemField.setMinOccurs(0);
        elemField.setNillable(false);
        typeDesc.addFieldDesc(elemField);
        elemField = new org.apache.axis.description.ElementDesc();
        elemField.setFieldName("operatingSystems");
        elemField.setXmlName(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201505", "operatingSystems"));
        elemField.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201505", "Technology"));
        elemField.setMinOccurs(0);
        elemField.setNillable(false);
        elemField.setMaxOccursUnbounded(true);
        typeDesc.addFieldDesc(elemField);
    }
    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }
    /**
     * Get Custom Serializer
     */
    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.BeanSerializer(
            _javaType, _xmlType, typeDesc);
    }
    /**
     * Get Custom Deserializer
     */
    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new org.apache.axis.encoding.ser.BeanDeserializer(
            _javaType, _xmlType, typeDesc);
    }
}
| stoksey69/googleads-java-lib | modules/dfp_axis/src/main/java/com/google/api/ads/dfp/axis/v201505/OperatingSystemTargeting.java | Java | apache-2.0 | 6,903 |
/*
* DO NOT EDIT. THIS FILE IS GENERATED FROM /builds/tinderbox/Xr-Mozilla1.9-Release/Linux_2.6.18-53.1.13.el5_Depend/mozilla/dom/public/idl/range/nsIDOMDocumentRange.idl
*/
#ifndef __gen_nsIDOMDocumentRange_h__
#define __gen_nsIDOMDocumentRange_h__

#ifndef __gen_domstubs_h__
#include "domstubs.h" /* DOM forward declarations and scriptability macros */
#endif

/* For IDL files that don't want to include root IDL files. */
#ifndef NS_NO_VTABLE
#define NS_NO_VTABLE
#endif

/* starting interface:    nsIDOMDocumentRange */
/* The interface IID, both as a string and as a structured nsIID initializer. */
#define NS_IDOMDOCUMENTRANGE_IID_STR "7b9badc6-c9bc-447a-8670-dbd195aed24b"

#define NS_IDOMDOCUMENTRANGE_IID \
  {0x7b9badc6, 0xc9bc, 0x447a, \
    { 0x86, 0x70, 0xdb, 0xd1, 0x95, 0xae, 0xd2, 0x4b }}
/**
 * The nsIDOMDocumentRange interface is an interface to a document
 * object that supports ranges in the Document Object Model.
 *
 * For more information on this interface please see
 * http://www.w3.org/TR/DOM-Level-2-Traversal-Range/
 *
 * @status FROZEN — generated from IDL; the interface must not be changed.
 */
class NS_NO_VTABLE NS_SCRIPTABLE nsIDOMDocumentRange : public nsISupports {
 public:

  NS_DECLARE_STATIC_IID_ACCESSOR(NS_IDOMDOCUMENTRANGE_IID)

  /* nsIDOMRange createRange (); */
  /* Returns a new range via the _retval out-parameter; per the DOM Level 2
     Range spec the new range is collapsed at the beginning of the document. */
  NS_SCRIPTABLE NS_IMETHOD CreateRange(nsIDOMRange **_retval) = 0;

};

NS_DEFINE_STATIC_IID_ACCESSOR(nsIDOMDocumentRange, NS_IDOMDOCUMENTRANGE_IID)
/* Use this macro when declaring classes that implement this interface. */
#define NS_DECL_NSIDOMDOCUMENTRANGE \
NS_SCRIPTABLE NS_IMETHOD CreateRange(nsIDOMRange **_retval);
/* Use this macro to declare functions that forward the behavior of this interface to another object. */
#define NS_FORWARD_NSIDOMDOCUMENTRANGE(_to) \
NS_SCRIPTABLE NS_IMETHOD CreateRange(nsIDOMRange **_retval) { return _to CreateRange(_retval); }
/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. */
#define NS_FORWARD_SAFE_NSIDOMDOCUMENTRANGE(_to) \
NS_SCRIPTABLE NS_IMETHOD CreateRange(nsIDOMRange **_retval) { return !_to ? NS_ERROR_NULL_POINTER : _to->CreateRange(_retval); }
#if 0
/* Use the code below as a template for the implementation class for this interface. */
/* NOTE: this entire region is excluded by the #if 0 above and is never compiled;
   copy it into your own header/source files and fill in the stubs. */

/* Header file */
class nsDOMDocumentRange : public nsIDOMDocumentRange
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSIDOMDOCUMENTRANGE

  nsDOMDocumentRange();

private:
  ~nsDOMDocumentRange();

protected:
  /* additional members */
};

/* Implementation file */
NS_IMPL_ISUPPORTS1(nsDOMDocumentRange, nsIDOMDocumentRange)

nsDOMDocumentRange::nsDOMDocumentRange()
{
  /* member initializers and constructor code */
}

nsDOMDocumentRange::~nsDOMDocumentRange()
{
  /* destructor code */
}

/* nsIDOMRange createRange (); */
/* Template stub: replace with a real implementation; the generated default
   simply reports the method as unimplemented. */
NS_IMETHODIMP nsDOMDocumentRange::CreateRange(nsIDOMRange **_retval)
{
    return NS_ERROR_NOT_IMPLEMENTED;
}

/* End of implementation class template. */
#endif
#endif /* __gen_nsIDOMDocumentRange_h__ */
| mfazekas/safaridriver | third_party/gecko-1.9.0.11/linux/include/nsIDOMDocumentRange.h | C | apache-2.0 | 2,835 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import java.util.Map;
/**
 * Sibling pipeline aggregator that computes extended statistics (count, sum,
 * min, max, sum of squares and sigma-based bounds) over the values of the
 * buckets selected by {@code bucketsPaths}.
 */
public class ExtendedStatsBucketPipelineAggregator extends BucketMetricsPipelineAggregator {

    /** Number of standard deviations used for the std-deviation bounds of the result. */
    private final double sigma;

    // Running statistics over the collected bucket values. They are (re)set in
    // preCollection() before each pass; the field initializers below must match
    // that reset state. (sumOfSqrs was previously initialized to 1, which was
    // inconsistent with preCollection() and with the other accumulators.)
    private double sum = 0;
    private long count = 0;
    private double min = Double.POSITIVE_INFINITY;
    private double max = Double.NEGATIVE_INFINITY;
    private double sumOfSqrs = 0;

    ExtendedStatsBucketPipelineAggregator(String name, String[] bucketsPaths, double sigma, GapPolicy gapPolicy,
                                          DocValueFormat formatter, Map<String, Object> metadata) {
        super(name, bucketsPaths, gapPolicy, formatter, metadata);
        this.sigma = sigma;
    }

    /** Resets all running statistics so the aggregator can start a fresh pass. */
    @Override
    protected void preCollection() {
        sum = 0;
        count = 0;
        min = Double.POSITIVE_INFINITY;
        max = Double.NEGATIVE_INFINITY;
        sumOfSqrs = 0;
    }

    /** Folds a single bucket's value into the running extended statistics. */
    @Override
    protected void collectBucketValue(String bucketKey, Double bucketValue) {
        sum += bucketValue;
        min = Math.min(min, bucketValue);
        max = Math.max(max, bucketValue);
        count += 1;
        sumOfSqrs += bucketValue * bucketValue;
    }

    /** Builds the resulting extended-stats bucket from the collected statistics. */
    @Override
    protected InternalAggregation buildAggregation(Map<String, Object> metadata) {
        return new InternalExtendedStatsBucket(name(), count, sum, min, max, sumOfSqrs, sigma, format, metadata);
    }
}
| nknize/elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java | Java | apache-2.0 | 2,402 |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.firealarm;
/**
 * A sprinkler installed in a {@link Room}. Identity (equals/hashCode) is
 * defined by the room only — the on/off state is deliberately ignored so a
 * sprinkler keeps its identity when it is switched on or off.
 */
public class Sprinkler {

    private Room room;

    private boolean on = false;

    public Sprinkler() { }

    public Sprinkler(Room room) {
        this.room = room;
    }

    public Room getRoom() {
        return room;
    }

    public void setRoom(Room room) {
        this.room = room;
    }

    public boolean isOn() {
        return on;
    }

    public void setOn(boolean on) {
        this.on = on;
    }

    @Override
    public int hashCode() {
        // Null-safe: a sprinkler built with the no-arg constructor has no room
        // yet; the previous implementation threw a NullPointerException here.
        return room == null ? 0 : room.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) { return true; }
        if (!(obj instanceof Sprinkler)) { return false; }
        Room otherRoom = ((Sprinkler) obj).getRoom();
        // Consistent with hashCode(): compare by room, tolerating null rooms.
        return room == null ? otherRoom == null : room.equals(otherRoom);
    }

    @Override
    public String toString() {
        return "Sprinkler for " + room;
    }
}
| winklerm/droolsjbpm-integration | kie-maven-plugin-example/src/main/java/org/kie/firealarm/Sprinkler.java | Java | apache-2.0 | 1,380 |
<!DOCTYPE html>
<html lang="en">
<head>
<title>SysProcess Class Reference</title>
<link rel="stylesheet" type="text/css" href="../css/jazzy.css" />
<link rel="stylesheet" type="text/css" href="../css/highlight.css" />
<meta charset='utf-8'>
<script src="../js/jquery.min.js" defer></script>
<script src="../js/jazzy.js" defer></script>
</head>
<body>
<a name="//apple_ref/swift/Class/SysProcess" class="dashAnchor"></a>
<a title="SysProcess Class Reference"></a>
<header>
<div class="content-wrapper">
<p><a href="../index.html"> Docs</a> (88% documented)</p>
</div>
</header>
<div class="content-wrapper">
<p id="breadcrumbs">
<a href="../index.html"> Reference</a>
<img id="carat" src="../img/carat.png" />
SysProcess Class Reference
</p>
</div>
<div class="content-wrapper">
<nav class="sidebar">
<ul class="nav-groups">
<li class="nav-group-name">
<a href="../Classes.html">Classes</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Classes/Bytes.html">Bytes</a>
</li>
<li class="nav-group-task">
<a href="../Classes/CURL.html">CURL</a>
</li>
<li class="nav-group-task">
<a href="../Classes.html#/s:C10PerfectLib6Cookie">Cookie</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Dir.html">Dir</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Encoding.html">Encoding</a>
</li>
<li class="nav-group-task">
<a href="../Classes/FastCGIServer.html">FastCGIServer</a>
</li>
<li class="nav-group-task">
<a href="../Classes/File.html">File</a>
</li>
<li class="nav-group-task">
<a href="../Classes/HTTPServer.html">HTTPServer</a>
</li>
<li class="nav-group-task">
<a href="../Classes/ICU.html">ICU</a>
</li>
<li class="nav-group-task">
<a href="../Classes/JSONArrayType.html">JSONArrayType</a>
</li>
<li class="nav-group-task">
<a href="../Classes/JSONDecode.html">JSONDecode</a>
</li>
<li class="nav-group-task">
<a href="../Classes/JSONDictionaryType.html">JSONDictionaryType</a>
</li>
<li class="nav-group-task">
<a href="../Classes/JSONEncode.html">JSONEncode</a>
</li>
<li class="nav-group-task">
<a href="../Classes/JSONNull.html">JSONNull</a>
</li>
<li class="nav-group-task">
<a href="../Classes.html#/s:C10PerfectLib10LogManager">LogManager</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MimeReader.html">MimeReader</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheEvaluationContext.html">MustacheEvaluationContext</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheEvaluationOutputCollector.html">MustacheEvaluationOutputCollector</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheGroupTag.html">MustacheGroupTag</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheParser.html">MustacheParser</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustachePartialTag.html">MustachePartialTag</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustachePragmaTag.html">MustachePragmaTag</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheTag.html">MustacheTag</a>
</li>
<li class="nav-group-task">
<a href="../Classes/MustacheTemplate.html">MustacheTemplate</a>
</li>
<li class="nav-group-task">
<a href="../Classes/NetNamedPipe.html">NetNamedPipe</a>
</li>
<li class="nav-group-task">
<a href="../Classes/NetTCP.html">NetTCP</a>
</li>
<li class="nav-group-task">
<a href="../Classes/NetTCPSSL.html">NetTCPSSL</a>
</li>
<li class="nav-group-task">
<a href="../Classes/PageHandlerRegistry.html">PageHandlerRegistry</a>
</li>
<li class="nav-group-task">
<a href="../Classes/PerfectServer.html">PerfectServer</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Routing.html">Routing</a>
</li>
<li class="nav-group-task">
<a href="../Classes/SQLite.html">SQLite</a>
</li>
<li class="nav-group-task">
<a href="../Classes/SQLiteStmt.html">SQLiteStmt</a>
</li>
<li class="nav-group-task">
<a href="../Classes/SessionManager.html">SessionManager</a>
</li>
<li class="nav-group-task">
<a href="../Classes/StaticFileHandler.html">StaticFileHandler</a>
</li>
<li class="nav-group-task">
<a href="../Classes/SysProcess.html">SysProcess</a>
</li>
<li class="nav-group-task">
<a href="../Classes/Threading.html">Threading</a>
</li>
<li class="nav-group-task">
<a href="../Classes/UTF16Encoding.html">UTF16Encoding</a>
</li>
<li class="nav-group-task">
<a href="../Classes/UTF8Encoding.html">UTF8Encoding</a>
</li>
<li class="nav-group-task">
<a href="../Classes/WebRequest.html">WebRequest</a>
</li>
<li class="nav-group-task">
<a href="../Classes/WebResponse.html">WebResponse</a>
</li>
<li class="nav-group-task">
<a href="../Classes/WebSocket.html">WebSocket</a>
</li>
<li class="nav-group-task">
<a href="../Classes/WebSocketHandler.html">WebSocketHandler</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Global Variables.html">Global Variables</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Global Variables.html#/s:v10PerfectLib22serverPerfectLibrariesSS">serverPerfectLibraries</a>
</li>
<li class="nav-group-task">
<a href="../Global Variables.html#/s:v10PerfectLib15serverSQLiteDBsSS">serverSQLiteDBs</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Enums.html">Enums</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Enums/JSONError.html">JSONError</a>
</li>
<li class="nav-group-task">
<a href="../Enums/MustacheError.html">MustacheError</a>
</li>
<li class="nav-group-task">
<a href="../Enums/PerfectError.html">PerfectError</a>
</li>
<li class="nav-group-task">
<a href="../Enums/SQLiteError.html">SQLiteError</a>
</li>
<li class="nav-group-task">
<a href="../Enums/SessionResult.html">SessionResult</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Extensions.html">Extensions</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Extensions/String.html">String</a>
</li>
<li class="nav-group-task">
<a href="../Extensions/UnicodeScalar.html">UnicodeScalar</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Functions.html">Functions</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Functions.html#/s:F10PerfectLib10FileStderrFT_CS_4File">FileStderr()</a>
</li>
<li class="nav-group-task">
<a href="../Functions.html#/s:F10PerfectLib9FileStdinFT_CS_4File">FileStdin()</a>
</li>
<li class="nav-group-task">
<a href="../Functions.html#/s:F10PerfectLib10FileStdoutFT_CS_4File">FileStdout()</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Protocols.html">Protocols</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Protocols/Closeable.html">Closeable</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/PageHandler.html">PageHandler</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/RequestHandler.html">RequestHandler</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/WebConnection.html">WebConnection</a>
</li>
<li class="nav-group-task">
<a href="../Protocols/WebSocketSessionHandler.html">WebSocketSessionHandler</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a href="../Structs.html">Structs</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a href="../Structs/GenerateFromPointer.html">GenerateFromPointer</a>
</li>
<li class="nav-group-task">
<a href="../Structs/RouteMap.html">RouteMap</a>
</li>
<li class="nav-group-task">
<a href="../Structs/SessionConfiguration.html">SessionConfiguration</a>
</li>
<li class="nav-group-task">
<a href="../Structs.html#/s:V10PerfectLib20SocketFileDescriptor">SocketFileDescriptor</a>
</li>
</ul>
</li>
</ul>
</nav>
<article class="main-content">
<section>
<section class="section">
<h1>SysProcess</h1>
<div class="declaration">
<div class="language">
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">class</span> <span class="kt">SysProcess</span> <span class="p">:</span> <span class="kt">Closeable</span></code></pre>
</div>
</div>
<p>This class permits an external process to be launched given a set of command line arguments and environment variables.
The standard in, out and err file streams are made available. The process can be terminated or permitted to be run to completion.</p>
</section>
<section class="section task-group-section">
<div class="task-group">
<ul>
<li class="item">
<div>
<code>
<a name="/s:vC10PerfectLib10SysProcess5stdinGSqCS_4File_"></a>
<a name="//apple_ref/swift/Property/stdin" class="dashAnchor"></a>
<a class="token" href="#/s:vC10PerfectLib10SysProcess5stdinGSqCS_4File_">stdin</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The standard in file stream.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">stdin</span><span class="p">:</span> <span class="kt">File</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:vC10PerfectLib10SysProcess6stdoutGSqCS_4File_"></a>
<a name="//apple_ref/swift/Property/stdout" class="dashAnchor"></a>
<a class="token" href="#/s:vC10PerfectLib10SysProcess6stdoutGSqCS_4File_">stdout</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The standard out file stream.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">stdout</span><span class="p">:</span> <span class="kt">File</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:vC10PerfectLib10SysProcess6stderrGSqCS_4File_"></a>
<a name="//apple_ref/swift/Property/stderr" class="dashAnchor"></a>
<a class="token" href="#/s:vC10PerfectLib10SysProcess6stderrGSqCS_4File_">stderr</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The standard err file stream.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">stderr</span><span class="p">:</span> <span class="kt">File</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:vC10PerfectLib10SysProcess3pidVSs5Int32"></a>
<a name="//apple_ref/swift/Property/pid" class="dashAnchor"></a>
<a class="token" href="#/s:vC10PerfectLib10SysProcess3pidVSs5Int32">pid</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The process identifier.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">pid</span> <span class="o">=</span> <span class="nf">pid_t</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcesscFMS0_FzTSS4argsGSqGSaSS__3envGSqGSaTSSSS____S0_"></a>
<a name="//apple_ref/swift/Method/init(_:args:env:)" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcesscFMS0_FzTSS4argsGSqGSaSS__3envGSqGSaTSSSS____S0_">init(_:args:env:)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Initialize the object and launch the process.
- parameter cmd: The path to the process which will be launched.
- parameter args: An optional array of String arguments which will be given to the process.
- parameter env: An optional array of environment variable name and value pairs.
- throws: <code>PerfectError.SystemError</code></p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="nf">init</span><span class="p">(</span><span class="n">_</span> <span class="nv">cmd</span><span class="p">:</span> <span class="kt">String</span><span class="p">,</span> <span class="nv">args</span><span class="p">:</span> <span class="p">[</span><span class="kt">String</span><span class="p">]?,</span> <span class="nv">env</span><span class="p">:</span> <span class="p">[(</span><span class="kt">String</span><span class="p">,</span><span class="kt">String</span><span class="p">)]?)</span> <span class="n">throws</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>cmd</em>
</code>
</td>
<td>
<div>
<p>The path to the process which will be launched.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>args</em>
</code>
</td>
<td>
<div>
<p>An optional array of String arguments which will be given to the process.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>env</em>
</code>
</td>
<td>
<div>
<p>An optional array of environment variable name and value pairs.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcess6isOpenFS0_FT_Sb"></a>
<a name="//apple_ref/swift/Method/isOpen()" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcess6isOpenFS0_FT_Sb">isOpen()</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Returns true if the process was opened and was running at some point.
Note that the process may not be currently running. Use <code>wait(false)</code> to check if the process is currently running.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">isOpen</span><span class="p">()</span> <span class="o">-></span> <span class="kt">Bool</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcess5closeFS0_FT_T_"></a>
<a name="//apple_ref/swift/Method/close()" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcess5closeFS0_FT_T_">close()</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Terminate the process and clean up.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">close</span><span class="p">()</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcess6detachFS0_FT_T_"></a>
<a name="//apple_ref/swift/Method/detach()" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcess6detachFS0_FT_T_">detach()</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Detach from the process such that it will not be manually terminated when this object is deinitialized.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">detach</span><span class="p">()</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcess4waitFS0_FzTSb_VSs5Int32"></a>
<a name="//apple_ref/swift/Method/wait(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcess4waitFS0_FzTSb_VSs5Int32">wait(_:)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Determine if the process has completed running and retrieve its result code.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">wait</span><span class="p">(</span><span class="nv">hang</span><span class="p">:</span> <span class="kt">Bool</span> <span class="o">=</span> <span class="kc">true</span><span class="p">)</span> <span class="n">throws</span> <span class="o">-></span> <span class="kt">Int32</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:FC10PerfectLib10SysProcess4killFS0_FzTVSs5Int32_S1_"></a>
<a name="//apple_ref/swift/Method/kill(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:FC10PerfectLib10SysProcess4killFS0_FzTVSs5Int32_S1_">kill(_:)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Terminate the process and return its result code.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">kill</span><span class="p">(</span><span class="nv">signal</span><span class="p">:</span> <span class="kt">Int32</span> <span class="o">=</span> <span class="kt">SIGTERM</span><span class="p">)</span> <span class="n">throws</span> <span class="o">-></span> <span class="kt">Int32</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
</section>
</section>
<section id="footer">
<p>© 2016 <a class="link" href="" target="_blank" rel="external"></a>. All rights reserved. (Last updated: 2016-01-15)</p>
<p>Generated by <a class="link" href="https://github.com/realm/jazzy" target="_blank" rel="external">jazzy ♪♫ v0.5.0</a>, a <a class="link" href="http://realm.io" target="_blank" rel="external">Realm</a> project.</p>
</section>
</article>
</div>
</body>
</div>
</html>
| lokinfey/MyPerfectframework | PerfectLib/docs/docsets/.docset/Contents/Resources/Documents/Classes/SysProcess.html | HTML | apache-2.0 | 28,750 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.unitime.timetable.solver.service;
import java.util.HashSet;
import java.util.StringTokenizer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.unitime.timetable.defaults.SessionAttribute;
import org.unitime.timetable.security.SessionContext;
import org.unitime.timetable.solver.CachedClassAssignmentProxy;
import org.unitime.timetable.solver.ClassAssignmentProxy;
import org.unitime.timetable.solver.SolutionClassAssignmentProxy;
import org.unitime.timetable.solver.SolverProxy;
/**
* @author Tomas Muller
*/
@Service("classAssignmentService")
public class ClassAssignmentService implements AssignmentService<ClassAssignmentProxy> {
	@Autowired SessionContext sessionContext;
	
	@Autowired SolverService<SolverProxy> courseTimetablingSolverService;

	@Override
	public ClassAssignmentProxy getAssignment() {
		// A loaded course timetabling solver always takes precedence; wrap it in
		// a caching proxy so repeated lookups do not hit the solver each time.
		SolverProxy solver = courseTimetablingSolverService.getSolver();
		if (solver != null)
			return new CachedClassAssignmentProxy(solver);

		// Otherwise fall back to the solution(s) the user selected, stored in
		// the session as a comma-separated list of solution ids.
		HashSet<Long> selectedIds = new HashSet<Long>();
		String idList = (String)sessionContext.getAttribute(SessionAttribute.SelectedSolution);
		if (idList != null) {
			StringTokenizer tokens = new StringTokenizer(idList, ",");
			while (tokens.hasMoreTokens())
				selectedIds.add(Long.valueOf(tokens.nextToken()));
		}

		// Reuse the proxy cached in the session when it was built for the very
		// same set of solution ids; otherwise build a fresh one and cache it.
		ProxyHolder<HashSet<Long>, SolutionClassAssignmentProxy> cached =
				(ProxyHolder<HashSet<Long>, SolutionClassAssignmentProxy>)sessionContext.getAttribute(SessionAttribute.ClassAssignment);
		if (cached != null && cached.isValid(selectedIds))
			return cached.getProxy();

		SolutionClassAssignmentProxy created = new SolutionClassAssignmentProxy(selectedIds);
		sessionContext.setAttribute(SessionAttribute.ClassAssignment, new ProxyHolder<HashSet<Long>, SolutionClassAssignmentProxy>(selectedIds, created));
		return created;
	}
}
| nikeshmhr/unitime | JavaSource/org/unitime/timetable/solver/service/ClassAssignmentService.java | Java | apache-2.0 | 2,687 |
extern crate hellodep;
extern crate proc_macro;
use proc_macro::TokenStream;
use std::str::FromStr;
#[proc_macro_derive(HelloWorld)]
pub fn hello_world(_input: TokenStream) -> TokenStream {
    // Exercise the dependency crate so the build proves it is linked in.
    println!("hellodep returned: {}", hellodep::hellodep());
    // Empty token stream = derive expands to nothing. TokenStream::new() is the
    // canonical, infallible way to say that — no FromStr round-trip, no unwrap.
    TokenStream::new()
}
| facebook/buck | test/com/facebook/buck/features/rust/testdata/procmacro/helloworld_derive.rs | Rust | apache-2.0 | 303 |
/**
* Autogenerated by Thrift Compiler (0.7.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*/
package org.apache.hadoop.thriftfs.api;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MalformedInputException extends Exception implements org.apache.thrift.TBase<MalformedInputException, MalformedInputException._Fields>, java.io.Serializable, Cloneable {
// Wire-level struct descriptor used when (de)serializing this exception.
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MalformedInputException");
// Descriptor for the single 'message' field: id 1, Thrift type STRING.
private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

// Human-readable description of the malformed input; may be null when unset.
public String message; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
  /** The single field of this exception: the message text (Thrift field id 1). */
  MESSAGE((short)1, "message");

  // Name-to-constant lookup table, populated once when the enum class loads.
  private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

  static {
    for (_Fields field : EnumSet.allOf(_Fields.class)) {
      byName.put(field.getFieldName(), field);
    }
  }

  /**
   * Find the _Fields constant that matches fieldId, or null if it is not found.
   */
  public static _Fields findByThriftId(int fieldId) {
    switch(fieldId) {
      case 1: // MESSAGE
        return MESSAGE;
      default:
        return null;
    }
  }

  /**
   * Find the _Fields constant that matches fieldId, throwing an exception
   * if it is not found.
   */
  public static _Fields findByThriftIdOrThrow(int fieldId) {
    _Fields fields = findByThriftId(fieldId);
    if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
    return fields;
  }

  /**
   * Find the _Fields constant that matches name, or null if it is not found.
   */
  public static _Fields findByName(String name) {
    return byName.get(name);
  }

  // The Thrift field id and field name backing this constant.
  private final short _thriftId;
  private final String _fieldName;

  _Fields(short thriftId, String fieldName) {
    _thriftId = thriftId;
    _fieldName = fieldName;
  }

  public short getThriftFieldId() {
    return _thriftId;
  }

  public String getFieldName() {
    return _fieldName;
  }
}
// isset id assignments
// (No primitive fields in this struct, so no isset bit ids are generated.)

// Immutable per-field metadata (requirement level and value type), registered
// with the global Thrift metadata registry at class-load time.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
  Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
  tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
      new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
  metaDataMap = Collections.unmodifiableMap(tmpMap);
  org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MalformedInputException.class, metaDataMap);
}
  /** Default constructor; leaves the message field unset (null). */
  public MalformedInputException() {
  }
  /** Convenience constructor that also assigns the message field. */
  public MalformedInputException(
    String message)
  {
    this();
    this.message = message;
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public MalformedInputException(MalformedInputException other) {
    // String is immutable, so sharing the reference is a sufficient "deep" copy.
    if (other.isSetMessage()) {
      this.message = other.message;
    }
  }
  /** Returns a field-by-field copy of this struct (Thrift TBase contract). */
  public MalformedInputException deepCopy() {
    return new MalformedInputException(this);
  }
@Override
public void clear() {
this.message = null;
}
  /** @return the message field, or null when unset. */
  public String getMessage() {
    return this.message;
  }
  /** Sets the message field; returns this for call chaining. */
  public MalformedInputException setMessage(String message) {
    this.message = message;
    return this;
  }
  /** Clears the message field (null means "unset" for this field). */
  public void unsetMessage() {
    this.message = null;
  }
  /** Returns true if field message is set (has been assigned a value) and false otherwise */
  public boolean isSetMessage() {
    return this.message != null;
  }
  /** Thrift isset hook; for object fields only unsetting has an effect. */
  public void setMessageIsSet(boolean value) {
    if (!value) {
      this.message = null;
    }
  }
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case MESSAGE:
if (value == null) {
unsetMessage();
} else {
setMessage((String)value);
}
break;
}
}
  /** Generic, reflection-style accessor used by the Thrift runtime. */
  public Object getFieldValue(_Fields field) {
    switch (field) {
      case MESSAGE:
        return getMessage();
    }
    // Unreachable for a valid (non-null) field; null field throws NPE from the switch.
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
      case MESSAGE:
        return isSetMessage();
    }
    // Unreachable: every enum constant is handled above.
    throw new IllegalStateException();
  }
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof MalformedInputException)
return this.equals((MalformedInputException)that);
return false;
}
public boolean equals(MalformedInputException that) {
if (that == null)
return false;
boolean this_present_message = true && this.isSetMessage();
boolean that_present_message = true && that.isSetMessage();
if (this_present_message || that_present_message) {
if (!(this_present_message && that_present_message))
return false;
if (!this.message.equals(that.message))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
public int compareTo(MalformedInputException other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
MalformedInputException typedOther = (MalformedInputException)other;
lastComparison = Boolean.valueOf(isSetMessage()).compareTo(typedOther.isSetMessage());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMessage()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, typedOther.message);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
  /** Maps a numeric wire field id to its _Fields constant, or null if unknown. */
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  /**
   * Deserializes this struct from the given protocol. Unknown fields and
   * type mismatches are skipped so newer/older peers stay wire-compatible.
   */
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TField field;
    iprot.readStructBegin();
    while (true)
    {
      field = iprot.readFieldBegin();
      // STOP marks the end of the field list.
      if (field.type == org.apache.thrift.protocol.TType.STOP) { 
        break;
      }
      switch (field.id) {
        case 1: // MESSAGE
          if (field.type == org.apache.thrift.protocol.TType.STRING) {
            this.message = iprot.readString();
          } else { 
            // Declared type mismatch: skip the value rather than fail.
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
          }
          break;
        default:
          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    // check for required fields of primitive type, which can't be checked in the validate method
    validate();
  }
  /**
   * Serializes this struct to the given protocol. An unset (null) message is
   * simply omitted from the output.
   */
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    validate();
    oprot.writeStructBegin(STRUCT_DESC);
    if (this.message != null) {
      oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
      oprot.writeString(this.message);
      oprot.writeFieldEnd();
    }
    oprot.writeFieldStop();
    oprot.writeStructEnd();
  }
@Override
public String toString() {
StringBuilder sb = new StringBuilder("MalformedInputException(");
boolean first = true;
sb.append("message:");
if (this.message == null) {
sb.append("null");
} else {
sb.append(this.message);
}
first = false;
sb.append(")");
return sb.toString();
}
  /** Validates required fields; this struct has none, so this is a no-op. */
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
  }
  // Java serialization hook: delegate to the compact Thrift wire format.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      // Java serialization only allows IOException; wrap the Thrift failure.
      throw new java.io.IOException(te);
    }
  }
  // Java deserialization hook: read back the compact Thrift wire format.
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
}
| shakamunyi/hadoop-20 | src/contrib/thriftfs/gen-java/org/apache/hadoop/thriftfs/api/MalformedInputException.java | Java | apache-2.0 | 8,986 |
package org.cocos2d.menus;
import org.cocos2d.nodes.CCLabel;
import org.cocos2d.nodes.CCNode;
/** A CCMenuItemFont
 Helper class that creates a CCMenuItemLabel class with a Label
 */
public class CCMenuItemFont extends CCMenuItemLabel {

    /** Point size applied to items created after this value is changed. */
    static int _fontSize = kItemSize;
    /** Font face applied to items created after this value is changed. */
    static String _fontName = "DroidSans";

    /** set font size */
    public static void setFontSize(int s) {
        _fontSize = s;
    }

    /** get font size */
    public static int fontSize() {
        return _fontSize;
    }

    /** set the font name */
    public static void setFontName(String n) {
        _fontName = n;
    }

    /** get the font name */
    public static String fontName() {
        return _fontName;
    }

    /** creates a menu item from a string without target/selector.
     * To be used with CCMenuItemToggle */
    public static CCMenuItemFont item(String value) {
        CCLabel label = CCLabel.makeLabel(value, _fontName, _fontSize);
        return new CCMenuItemFont(label, null, null);
    }

    /** creates a menu item from a string with a target/selector */
    public static CCMenuItemFont item(String value, CCNode rec, String cb) {
        return new CCMenuItemFont(CCLabel.makeLabel(value, _fontName, _fontSize), rec, cb);
    }

    /** initializes a menu item from a string with a target/selector */
    protected CCMenuItemFont(CCLabel label, CCNode rec, String cb) {
        super(label, rec, cb);
    }
}
| ouyangwenyuan/androidapp | tofuflee/src/org/cocos2d/menus/CCMenuItemFont.java | Java | apache-2.0 | 1,468 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.configuration;
import org.apache.flink.annotation.PublicEvolving;
import static org.apache.flink.configuration.ConfigOptions.key;
/**
 * The set of configuration options relating to TaskManager and Task settings.
 */
@PublicEvolving
public class TaskManagerOptions {
	// ------------------------------------------------------------------------
	// General TaskManager Options
	// ------------------------------------------------------------------------
	// @TODO Migrate 'taskmanager.*' config options from ConfigConstants
	/**
	 * JVM heap size (in megabytes) for the TaskManagers
	 */
	public static final ConfigOption<Integer> TASK_MANAGER_HEAP_MEMORY =
			key("taskmanager.heap.mb")
			.defaultValue(1024);
	/**
	 * Whether to kill the TaskManager when the task thread throws an OutOfMemoryError
	 */
	public static final ConfigOption<Boolean> KILL_ON_OUT_OF_MEMORY =
			key("taskmanager.jvm-exit-on-oom")
			.defaultValue(false);
	/**
	 * Whether the quarantine monitor for task managers shall be started. The quarantine monitor
	 * shuts down the actor system if it detects that it has quarantined another actor system
	 * or if it has been quarantined by another actor system.
	 */
	public static final ConfigOption<Boolean> EXIT_ON_FATAL_AKKA_ERROR =
			key("taskmanager.exit-on-fatal-akka-error")
			.defaultValue(false);
	/**
	 * The default network port range the task manager expects incoming IPC connections. The {@code "0"} means that
	 * the TaskManager searches for a free port.
	 */
	public static final ConfigOption<String> RPC_PORT =
		key("taskmanager.rpc.port")
			.defaultValue("0");
	// ------------------------------------------------------------------------
	// Managed Memory Options
	// ------------------------------------------------------------------------
	/**
	 * Size of memory buffers used by the network stack and the memory manager (in bytes).
	 * Default: 32 KiB.
	 */
	public static final ConfigOption<Integer> MEMORY_SEGMENT_SIZE =
			key("taskmanager.memory.segment-size")
			.defaultValue(32768);
	/**
	 * Amount of memory to be allocated by the task manager's memory manager (in megabytes). If not
	 * set, a relative fraction will be allocated, as defined by {@link #MANAGED_MEMORY_FRACTION}.
	 * The {@code -1} default means "not set".
	 */
	public static final ConfigOption<Long> MANAGED_MEMORY_SIZE =
			key("taskmanager.memory.size")
			.defaultValue(-1L);
	/**
	 * Fraction of free memory allocated by the memory manager if {@link #MANAGED_MEMORY_SIZE} is
	 * not set.
	 */
	public static final ConfigOption<Float> MANAGED_MEMORY_FRACTION =
			key("taskmanager.memory.fraction")
			.defaultValue(0.7f);
	/**
	 * Memory allocation method (JVM heap or off-heap), used for managed memory of the TaskManager
	 * as well as the network buffers.
	 **/
	public static final ConfigOption<Boolean> MEMORY_OFF_HEAP =
			key("taskmanager.memory.off-heap")
			.defaultValue(false);
	/**
	 * Whether TaskManager managed memory should be pre-allocated when the TaskManager is starting.
	 */
	public static final ConfigOption<Boolean> MANAGED_MEMORY_PRE_ALLOCATE =
			key("taskmanager.memory.preallocate")
			.defaultValue(false);
	// ------------------------------------------------------------------------
	// Network Options
	// ------------------------------------------------------------------------
	/**
	 * Number of buffers used in the network stack. This defines the number of possible tasks and
	 * shuffles.
	 *
	 * @deprecated use {@link #NETWORK_BUFFERS_MEMORY_FRACTION}, {@link #NETWORK_BUFFERS_MEMORY_MIN},
	 * and {@link #NETWORK_BUFFERS_MEMORY_MAX} instead
	 */
	@Deprecated
	public static final ConfigOption<Integer> NETWORK_NUM_BUFFERS =
			key("taskmanager.network.numberOfBuffers")
			.defaultValue(2048);
	/**
	 * Fraction of JVM memory to use for network buffers.
	 */
	public static final ConfigOption<Float> NETWORK_BUFFERS_MEMORY_FRACTION =
			key("taskmanager.network.memory.fraction")
			.defaultValue(0.1f);
	/**
	 * Minimum memory size for network buffers (in bytes)
	 */
	public static final ConfigOption<Long> NETWORK_BUFFERS_MEMORY_MIN =
			key("taskmanager.network.memory.min")
			.defaultValue(64L << 20); // 64 MB
	/**
	 * Maximum memory size for network buffers (in bytes)
	 */
	public static final ConfigOption<Long> NETWORK_BUFFERS_MEMORY_MAX =
			key("taskmanager.network.memory.max")
			.defaultValue(1024L << 20); // 1 GB
	/**
	 * Number of network buffers to use for each outgoing/incoming channel (subpartition/input channel).
	 *
	 * Reasoning: 1 buffer for in-flight data in the subpartition + 1 buffer for parallel serialization
	 */
	public static final ConfigOption<Integer> NETWORK_BUFFERS_PER_CHANNEL =
			key("taskmanager.network.memory.buffers-per-channel")
			.defaultValue(2);
	/**
	 * Number of extra network buffers to use for each outgoing/incoming gate (result partition/input gate).
	 */
	public static final ConfigOption<Integer> NETWORK_EXTRA_BUFFERS_PER_GATE =
			key("taskmanager.network.memory.floating-buffers-per-gate")
			.defaultValue(8);
	/**
	 * Minimum backoff for partition requests of input channels (in milliseconds).
	 */
	public static final ConfigOption<Integer> NETWORK_REQUEST_BACKOFF_INITIAL =
			key("taskmanager.network.request-backoff.initial")
			.defaultValue(100)
			.withDeprecatedKeys("taskmanager.net.request-backoff.initial");
	/**
	 * Maximum backoff for partition requests of input channels (in milliseconds).
	 */
	public static final ConfigOption<Integer> NETWORK_REQUEST_BACKOFF_MAX =
			key("taskmanager.network.request-backoff.max")
			.defaultValue(10000)
			.withDeprecatedKeys("taskmanager.net.request-backoff.max");
	/**
	 * Boolean flag to enable/disable more detailed metrics about inbound/outbound network queue
	 * lengths.
	 */
	public static final ConfigOption<Boolean> NETWORK_DETAILED_METRICS =
			key("taskmanager.network.detailed-metrics")
			.defaultValue(false);
	// ------------------------------------------------------------------------
	// Task Options
	// ------------------------------------------------------------------------
	/**
	 * Time interval in milliseconds between two successive task cancellation
	 * attempts.
	 */
	public static final ConfigOption<Long> TASK_CANCELLATION_INTERVAL =
			key("task.cancellation.interval")
			.defaultValue(30000L)
			.withDeprecatedKeys("task.cancellation-interval");
	/**
	 * Timeout in milliseconds after which a task cancellation times out and
	 * leads to a fatal TaskManager error. A value of <code>0</code> deactivates
	 * the watch dog.
	 */
	public static final ConfigOption<Long> TASK_CANCELLATION_TIMEOUT =
			key("task.cancellation.timeout")
			.defaultValue(180000L);
	/**
	 * The maximum number of bytes that a checkpoint alignment may buffer.
	 * If the checkpoint alignment buffers more than the configured amount of
	 * data, the checkpoint is aborted (skipped).
	 *
	 * <p>The default value of {@code -1} indicates that there is no limit.
	 */
	public static final ConfigOption<Long> TASK_CHECKPOINT_ALIGNMENT_BYTES_LIMIT =
			key("task.checkpoint.alignment.max-size")
			.defaultValue(-1L);
	// ------------------------------------------------------------------------
	/** Not intended to be instantiated */
	private TaskManagerOptions() {}
}
| zimmermatt/flink | flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java | Java | apache-2.0 | 8,025 |
<!--
* Copyright 2010 The Glib Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
-->
<!DOCTYPE html>
<html>
<head>
<title>Cute</title>
</head>
<body bgcolor="black">
<div id='flash_player'></div>
<script src="cutegameflash/cutegameflash.nocache.js"></script>
</body>
</html>
| billy93/forplay | sample/cute/core/war/CuteGameFlash.html | HTML | apache-2.0 | 813 |
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/**
* The String.prototype.toUpperCase.length property has the attribute ReadOnly
*
* @path ch15/15.5/15.5.4/15.5.4.18/S15.5.4.18_A10.js
* @description Checking if varying the String.prototype.toUpperCase.length property fails
*/
//////////////////////////////////////////////////////////////////////////////
//CHECK#1
// Sanity check: the length property must exist before we can test its attributes.
if (!(String.prototype.toUpperCase.hasOwnProperty('length'))) {
  $FAIL('#1: String.prototype.toUpperCase.hasOwnProperty(\'length\') return true. Actual: '+String.prototype.toUpperCase.hasOwnProperty('length'));
}
//
//////////////////////////////////////////////////////////////////////////////
var __obj = String.prototype.toUpperCase.length;
// Attempt to overwrite length; because the property is ReadOnly the
// assignment must be silently ignored in non-strict code.
String.prototype.toUpperCase.length = function(){return "shifted";};
//////////////////////////////////////////////////////////////////////////////
//CHECK#2
// The property must still hold its original value after the write attempt.
if (String.prototype.toUpperCase.length !== __obj) {
  $ERROR('#2: __obj = String.prototype.toUpperCase.length; String.prototype.toUpperCase.length = function(){return "shifted";}; String.prototype.toUpperCase.length === __obj. Actual: '+String.prototype.toUpperCase.length );
}
//
//////////////////////////////////////////////////////////////////////////////
| popravich/typescript | tests/Fidelity/test262/suite/ch15/15.5/15.5.4/15.5.4.18/S15.5.4.18_A10.js | JavaScript | apache-2.0 | 1,326 |
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
#outputPageHeader()
</head>
<body style="margin:0px; overflow:hidden">
<label id="doradoView" style="display: none" />
#outputPageFooter()
#outputException()
</body>
</html> | muxiangqiu/bdf3 | bdf3-starter-parent/bdf3-sample/src/main/resources/dorado-home/w3c-html5-template.html | HTML | apache-2.0 | 292 |
#pragma once
#include <chrono>
#include "envoy/common/pure.h"
namespace Envoy {
/**
 * Less typing for common system time and steady time type.
 *
 * SystemTime should be used when getting a time to present to the user, e.g. for logging.
 * MonotonicTime should be used when tracking time for computing an interval.
 */
// Modernized from `typedef` to `using` aliases (the file already requires
// C++11 via <chrono>); semantics are identical.
using SystemTime = std::chrono::time_point<std::chrono::system_clock>;
using MonotonicTime = std::chrono::time_point<std::chrono::steady_clock>;
/**
 * Abstraction for getting the current system time. Useful for testing.
 */
class SystemTimeSource {
public:
  // `= default` instead of an empty body: expresses intent and lets the
  // compiler generate the (still virtual) destructor.
  virtual ~SystemTimeSource() = default;

  /**
   * @return the current system time.
   */
  virtual SystemTime currentTime() PURE;
};
/**
 * Abstraction for getting the current monotonically increasing time. Useful for testing.
 */
class MonotonicTimeSource {
public:
  // `= default` instead of an empty body: expresses intent and lets the
  // compiler generate the (still virtual) destructor.
  virtual ~MonotonicTimeSource() = default;

  /**
   * @return the current monotonic time.
   */
  virtual MonotonicTime currentTime() PURE;
};
} // namespace Envoy
| craffert0/envoy | include/envoy/common/time.h | C | apache-2.0 | 1,003 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.distributed.impl;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Objects;
import java.util.function.Predicate;
import com.google.common.io.Closeables;
import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.utils.AbstractIterator;
import org.apache.cassandra.distributed.api.LogAction;
import org.apache.cassandra.distributed.api.LineIterator;
/**
 * {@link LogAction} backed by a log file on disk. {@link #mark()} returns the
 * current file length so callers can later match only lines appended after it.
 */
public class FileLogAction implements LogAction
{
    private final File file;

    public FileLogAction(File file)
    {
        this.file = Objects.requireNonNull(file);
    }

    /**
     * @return the current end-of-file position, usable as a start position for
     * a later {@link #match(long, Predicate)} call.
     */
    @Override
    public long mark()
    {
        return file.length();
    }

    /**
     * Returns an iterator over lines from {@code startPosition} onwards that
     * satisfy {@code fn}. The returned iterator owns the underlying reader and
     * must be closed by the caller.
     */
    @Override
    public LineIterator match(long startPosition, Predicate<String> fn)
    {
        RandomAccessReader reader = RandomAccessReader.open(file);
        try
        {
            if (startPosition > 0) // -1 used to disable, so ignore any negative values or 0 (default offset)
            {
                reader.seek(startPosition);
            }
            return new FileLineIterator(reader, fn);
        }
        catch (Throwable t)
        {
            // Bug fix: a failing seek() used to leak the open reader; close it
            // before propagating the failure.
            try
            {
                Closeables.close(reader, true);
            }
            catch (IOException impossible)
            {
                // swallowIOException=true means close() cannot actually throw
                throw new AssertionError(impossible);
            }
            throw t;
        }
    }

    /**
     * Lazily walks the file, surfacing only lines accepted by the predicate.
     */
    private static final class FileLineIterator extends AbstractIterator<String> implements LineIterator
    {
        private final RandomAccessReader reader;
        private final Predicate<String> fn;

        private FileLineIterator(RandomAccessReader reader, Predicate<String> fn)
        {
            this.reader = reader;
            this.fn = fn;
        }

        /**
         * @return the reader's current byte offset within the file.
         */
        @Override
        public long mark()
        {
            return reader.getFilePointer();
        }

        // Advances to the next matching line; signals end-of-data at EOF.
        @Override
        protected String computeNext()
        {
            try
            {
                String s;
                while ((s = reader.readLine()) != null)
                {
                    if (fn.test(s))
                        return s;
                }
                return endOfData();
            }
            catch (IOException e)
            {
                throw new UncheckedIOException(e);
            }
        }

        @Override
        public void close()
        {
            try
            {
                Closeables.close(reader, true);
            }
            catch (IOException impossible)
            {
                throw new AssertionError(impossible);
            }
        }
    }
}
| instaclustr/cassandra | test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java | Java | apache-2.0 | 3,173 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.azure.queue;
import java.net.URI;
import com.microsoft.azure.storage.StorageCredentials;
import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
import com.microsoft.azure.storage.core.Base64;
import com.microsoft.azure.storage.queue.CloudQueue;
import org.apache.camel.Endpoint;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.junit.jupiter.api.Test;
import static org.apache.camel.component.azure.queue.QueueServiceComponent.MISSING_QUEUE_CREDNTIALS_EXCEPTION_MESSAGE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.fail;
/** Verifies that azure-queue endpoint URIs resolve into the expected {@code QueueServiceConfiguration}. */
public class QueueServiceComponentClientConfigurationTest extends CamelTestSupport {
    // Endpoint configured with a pre-built client only: client wins, no credentials stored.
    @Test
    public void testCreateEndpointWithMinConfigForClientOnly() throws Exception {
        CloudQueue client = new CloudQueue(
                URI.create("https://camelazure.queue.core.windows.net/testqueue/messages"),
                newAccountKeyCredentials());
        context.getRegistry().bind("azureQueueClient", client);
        QueueServiceEndpoint endpoint = (QueueServiceEndpoint) context.getEndpoint("azure-queue://camelazure/testqueue");
        doTestCreateEndpointWithMinConfig(endpoint, true);
    }
    // Endpoint configured with registry credentials only: credentials stored, no client.
    @Test
    public void testCreateEndpointWithMinConfigForCredsOnly() throws Exception {
        registerCredentials();
        QueueServiceEndpoint endpoint
                = (QueueServiceEndpoint) context.getEndpoint("azure-queue://camelazure/testqueue?credentials=#creds");
        doTestCreateEndpointWithMinConfig(endpoint, false);
    }
    // All supported URI options at once.
    @Test
    public void testCreateEndpointWithMaxConfig() throws Exception {
        registerCredentials();
        QueueServiceEndpoint endpoint = (QueueServiceEndpoint) context
                .getEndpoint("azure-queue://camelazure/testqueue?credentials=#creds"
                             + "&operation=addMessage&queuePrefix=prefix&messageTimeToLive=100&messageVisibilityDelay=10");
        doTestCreateEndpointWithMaxConfig(endpoint, false);
    }
    // Asserts the defaults that apply when only account/queue (plus client or creds) are given.
    private void doTestCreateEndpointWithMinConfig(QueueServiceEndpoint endpoint, boolean clientExpected)
            throws Exception {
        assertEquals("camelazure", endpoint.getConfiguration().getAccountName());
        assertEquals("testqueue", endpoint.getConfiguration().getQueueName());
        if (clientExpected) {
            assertNotNull(endpoint.getConfiguration().getAzureQueueClient());
            assertNull(endpoint.getConfiguration().getCredentials());
        } else {
            assertNull(endpoint.getConfiguration().getAzureQueueClient());
            assertNotNull(endpoint.getConfiguration().getCredentials());
        }
        // listQueues is the default operation; numeric options default to 0.
        assertEquals(QueueServiceOperations.listQueues, endpoint.getConfiguration().getOperation());
        assertNull(endpoint.getConfiguration().getQueuePrefix());
        assertEquals(0, endpoint.getConfiguration().getMessageTimeToLive());
        assertEquals(0, endpoint.getConfiguration().getMessageVisibilityDelay());
        createConsumer(endpoint);
    }
    // Asserts that every explicitly supplied URI option is reflected in the configuration.
    private void doTestCreateEndpointWithMaxConfig(QueueServiceEndpoint endpoint, boolean clientExpected)
            throws Exception {
        assertEquals("camelazure", endpoint.getConfiguration().getAccountName());
        assertEquals("testqueue", endpoint.getConfiguration().getQueueName());
        if (clientExpected) {
            assertNotNull(endpoint.getConfiguration().getAzureQueueClient());
            assertNull(endpoint.getConfiguration().getCredentials());
        } else {
            assertNull(endpoint.getConfiguration().getAzureQueueClient());
            assertNotNull(endpoint.getConfiguration().getCredentials());
        }
        assertEquals(QueueServiceOperations.addMessage, endpoint.getConfiguration().getOperation());
        assertEquals("prefix", endpoint.getConfiguration().getQueuePrefix());
        assertEquals(100, endpoint.getConfiguration().getMessageTimeToLive());
        assertEquals(10, endpoint.getConfiguration().getMessageVisibilityDelay());
        createConsumer(endpoint);
    }
    // A URI path with more than account/queue segments must be rejected.
    @Test
    public void testTooManyPathSegments() throws Exception {
        try {
            context.getEndpoint("azure-queue://camelazure/testqueue/1");
            fail();
        } catch (Exception ex) {
            assertEquals("Only the account and queue names must be specified.", ex.getCause().getMessage());
        }
    }
    // A URI without queue name (and no client) must fail with the missing-credentials message.
    @Test
    public void testTooFewPathSegments() throws Exception {
        try {
            context.getEndpoint("azure-queue://camelazure?operation=addMessage");
            fail();
        } catch (Exception ex) {
            assertEquals(MISSING_QUEUE_CREDNTIALS_EXCEPTION_MESSAGE, ex.getCause().getMessage());
        }
    }
    // Creating a consumer exercises endpoint validation beyond construction.
    private static void createConsumer(Endpoint endpoint) throws Exception {
        endpoint.createConsumer(exchange -> {
            // noop
        });
    }
    private void registerCredentials() {
        context.getRegistry().bind("creds", newAccountKeyCredentials());
    }
    private StorageCredentials newAccountKeyCredentials() {
        return new StorageCredentialsAccountAndKey(
                "camelazure",
                Base64.encode("key".getBytes()));
    }
}
| adessaigne/camel | components/camel-azure/src/test/java/org/apache/camel/component/azure/queue/QueueServiceComponentClientConfigurationTest.java | Java | apache-2.0 | 6,207 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.protocol.core.impl.wireformat;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.core.protocol.core.impl.PacketImpl;
/**
 * Replication packet signalling that the transfer of a large message with the
 * given id has completed.
 */
public class ReplicationLargeMessageEndMessage extends PacketImpl {

   long messageId;

   public ReplicationLargeMessageEndMessage() {
      super(PacketImpl.REPLICATION_LARGE_MESSAGE_END);
   }

   public ReplicationLargeMessageEndMessage(final long messageId) {
      this();
      this.messageId = messageId;
   }

   @Override
   public void encodeRest(final ActiveMQBuffer buffer) {
      // Payload is just the 8-byte message id.
      buffer.writeLong(messageId);
   }

   @Override
   public void decodeRest(final ActiveMQBuffer buffer) {
      messageId = buffer.readLong();
   }

   /**
    * @return the messageId
    */
   public long getMessageId() {
      return messageId;
   }

   @Override
   public int hashCode() {
      final int prime = 31;
      int hash = super.hashCode();
      hash = prime * hash + (int) (messageId ^ (messageId >>> 32));
      return hash;
   }

   @Override
   public String toString() {
      StringBuilder sb = new StringBuilder("ReplicationLargeMessageEndMessage{");
      sb.append("messageId=").append(messageId).append('}');
      return sb.toString();
   }

   @Override
   public boolean equals(Object obj) {
      if (this == obj) {
         return true;
      }
      if (!super.equals(obj) || getClass() != obj.getClass()) {
         return false;
      }
      ReplicationLargeMessageEndMessage that = (ReplicationLargeMessageEndMessage) obj;
      return messageId == that.messageId;
   }
}
| lburgazzoli/apache-activemq-artemis | artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationLargeMessageEndMessage.java | Java | apache-2.0 | 2,399 |
/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.77 2008/03/01 03:26:34 tgl Exp $ */
/*
* The aim is to get a simpler inteface to the database routines.
* All the tidieous messing around with tuples is supposed to be hidden
* by this function.
*/
/* Author: Linus Tolke
   (actually most of the code is "borrowed" from the distribution and just
slightly modified)
*/
/* Taken over as part of PostgreSQL by Michael Meskes <[email protected]>
on Feb. 5th, 1998 */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#include <locale.h>
#include "pg_type.h"
#include "ecpgtype.h"
#include "ecpglib.h"
#include "ecpgerrno.h"
#include "extern.h"
#include "sqlca.h"
#include "sql3types.h"
#include "pgtypes_numeric.h"
#include "pgtypes_date.h"
#include "pgtypes_timestamp.h"
#include "pgtypes_interval.h"
/*
 * This function returns a newly malloced string that has ' and \
 * escaped.
 */
static char *
quote_postgres(char *arg, bool quote, int lineno)
{
	char	   *res;
	size_t		length;
	size_t		escaped_len;
	size_t		buffer_len;

	/*
	 * if quote is false we just need to store things in a descriptor they
	 * will be quoted once they are inserted in a statement
	 */
	if (!quote)
		return arg;
	else
	{
		length = strlen(arg);
		/* worst case: every input byte doubles when escaped */
		buffer_len = 2 * length + 1;
		/* +3 leaves room for the leading E, the two quotes and the NUL */
		res = (char *) ecpg_alloc(buffer_len + 3, lineno);
		if (!res)
			/* NOTE(review): on allocation failure "arg" is not freed here —
			 * presumably the caller cleans up; verify. */
			return (res);
		escaped_len = PQescapeString(res + 1, arg, buffer_len);
		if (length == escaped_len)
		{
			/* nothing was escaped: plain 'string' form is sufficient */
			res[0] = res[escaped_len + 1] = '\'';
			res[escaped_len + 2] = '\0';
		}
		else
		{
			/*
			 * We don't know if the target database is using
			 * standard_conforming_strings, so we always use E'' strings.
			 */
			memmove(res + 2, res + 1, escaped_len);
			res[0] = ESCAPE_STRING_SYNTAX;
			res[1] = res[escaped_len + 2] = '\'';
			res[escaped_len + 3] = '\0';
		}
		/* ownership of the original buffer ends here */
		ecpg_free(arg);
		return res;
	}
}
/*
 * Release every node of a linked list of variables.
 */
static void
free_variable(struct variable * var)
{
	while (var != NULL)
	{
		struct variable *next = var->next;

		ecpg_free(var);
		var = next;
	}
}
/* Release a statement structure and the variable lists hanging off it. */
static void
free_statement(struct statement * stmt)
{
	if (stmt == NULL)
		return;
	free_variable(stmt->inlist);
	free_variable(stmt->outlist);
	ecpg_free(stmt->command);
	ecpg_free(stmt->name);
	ecpg_free(stmt);
}
/*
 * Scan "text" starting at offset "pos" for the next input-parameter
 * placeholder that lies outside any single-quoted string literal.
 *
 * A placeholder is either "$n" (digits not followed by an identifier
 * character — otherwise it would be a dollar-quote delimiter) or, when
 * "questionmarks" is true, an old-style "?".
 *
 * Returns the offset of the placeholder, or -1 if none is found.
 * (Cleanup: the quote-state toggle now uses "!string" instead of the
 * roundabout "string ? false : true".)
 */
static int
next_insert(char *text, int pos, bool questionmarks)
{
	bool		string = false;
	int			p;

	for (p = pos; text[p] != '\0'; p++)
	{
		if (text[p] == '\\')	/* escape character */
			p++;
		else if (text[p] == '\'')
			string = !string;	/* toggle in/out of a quoted literal */
		else if (!string)
		{
			if (text[p] == '$' && isdigit((unsigned char) text[p + 1]))
			{
				/* this can be either a dollar quote or a variable */
				int			i;

				for (i = p + 1; isdigit((unsigned char) text[i]); i++)
					/* empty loop body */ ;
				if (!isalpha((unsigned char) text[i]) &&
					isascii((unsigned char) text[i]) && text[i] != '_')
					/* not dollar delimited quote */
					return p;
			}
			else if (questionmarks && text[p] == '?')
			{
				/* also allow old style placeholders */
				return p;
			}
		}
	}

	return -1;
}
/*
 * Prepend a new entry describing type "oid" to the per-connection type
 * cache. Returns false if the allocation fails.
 */
static bool
ecpg_type_infocache_push(struct ECPGtype_information_cache ** cache, int oid, bool isarray, int lineno)
{
	struct ECPGtype_information_cache *entry;

	entry = (struct ECPGtype_information_cache *) ecpg_alloc(sizeof(struct ECPGtype_information_cache), lineno);
	if (entry == NULL)
		return false;

	entry->oid = oid;
	entry->isarray = isarray;
	entry->next = *cache;
	*cache = entry;
	return true;
}
static enum ARRAY_TYPE
ecpg_is_type_an_array(int type, const struct statement * stmt, const struct variable * var)
{
char *array_query;
enum ARRAY_TYPE isarray = ECPG_ARRAY_NOT_SET;
PGresult *query;
struct ECPGtype_information_cache *cache_entry;
if ((stmt->connection->cache_head) == NULL)
{
/*
* Text like types are not an array for ecpg, but postgres counts them
* as an array. This define reminds you to not 'correct' these values.
*/
#define not_an_array_in_ecpg ECPG_ARRAY_NONE
/* populate cache with well known types to speed things up */
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BOOLOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BYTEAOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CHAROID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), NAMEOID, not_an_array_in_ecpg, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT8OID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT2OID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT2VECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT4OID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), REGPROCOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TEXTOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), OIDOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIDOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), XIDOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIDOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), OIDVECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), POINTOID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), LSEGOID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), PATHOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BOXOID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), POLYGONOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), LINEOID, ECPG_ARRAY_VECTOR, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), FLOAT4OID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), FLOAT8OID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), ABSTIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), RELTIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TINTERVALOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), UNKNOWNOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIRCLEOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CASHOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INETOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIDROID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BPCHAROID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), VARCHAROID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), DATEOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMESTAMPOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMESTAMPTZOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INTERVALOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMETZOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), ZPBITOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), VARBITOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), NUMERICOID, ECPG_ARRAY_NONE, stmt->lineno))
return (ECPG_ARRAY_ERROR);
}
for (cache_entry = (stmt->connection->cache_head); cache_entry != NULL; cache_entry = cache_entry->next)
{
if (cache_entry->oid == type)
return cache_entry->isarray;
}
array_query = (char *) ecpg_alloc(strlen("select typlen from pg_type where oid= and typelem<>0") + 11, stmt->lineno);
if (array_query == NULL)
return (ECPG_ARRAY_ERROR);
sprintf(array_query, "select typlen from pg_type where oid=%d and typelem<>0", type);
query = PQexec(stmt->connection->connection, array_query);
ecpg_free(array_query);
if (!ecpg_check_PQresult(query, stmt->lineno, stmt->connection->connection, stmt->compat))
return (ECPG_ARRAY_ERROR);
else if (PQresultStatus(query) == PGRES_TUPLES_OK)
{
if (PQntuples(query) == 0)
isarray = ECPG_ARRAY_NONE;
else
{
isarray = (atol((char *) PQgetvalue(query, 0, 0)) == -1) ? ECPG_ARRAY_ARRAY : ECPG_ARRAY_VECTOR;
if (ecpg_dynamic_type(type) == SQL3_CHARACTER ||
ecpg_dynamic_type(type) == SQL3_CHARACTER_VARYING)
{
/*
* arrays of character strings are not yet implemented
*/
isarray = ECPG_ARRAY_NONE;
}
}
PQclear(query);
}
else
return (ECPG_ARRAY_ERROR);
ecpg_type_infocache_push(&(stmt->connection->cache_head), type, isarray, stmt->lineno);
ecpg_log("ecpg_is_type_an_array on line %d: type (%d); C (%d); array (%s)\n", stmt->lineno, type, var->type, isarray ? "yes" : "no");
return isarray;
}
/*
 * ecpg_store_result
 *		Copy every tuple of result column "act_field" into host variable
 *		"var", allocating storage for pointer-style variables on demand.
 *
 * Returns false on array/size mismatches, allocation failure, or a data
 * conversion error from ecpg_get_data().
 */
bool
ecpg_store_result(const PGresult *results, int act_field,
				  const struct statement * stmt, struct variable * var)
{
	enum ARRAY_TYPE isarray;
	int			act_tuple,
				ntuples = PQntuples(results);
	bool		status = true;

	/* ECPG_ARRAY_ERROR here means the oid lookup itself failed */
	if ((isarray = ecpg_is_type_an_array(PQftype(results, act_field), stmt, var)) == ECPG_ARRAY_ERROR)
	{
		ecpg_raise(stmt->lineno, ECPG_OUT_OF_MEMORY, ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL);
		return false;
	}
	if (isarray == ECPG_ARRAY_NONE)
	{
		/*
		 * if we don't have enough space, we cannot read all tuples
		 */
		if ((var->arrsize > 0 && ntuples > var->arrsize) || (var->ind_arrsize > 0 && ntuples > var->ind_arrsize))
		{
			ecpg_log("ecpg_store_result on line %d: incorrect number of matches; %d don't fit into array of %d\n",
					 stmt->lineno, ntuples, var->arrsize);
			ecpg_raise(stmt->lineno, INFORMIX_MODE(stmt->compat) ? ECPG_INFORMIX_SUBSELECT_NOT_ONE : ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
			return false;
		}
	}
	else
	{
		/*
		 * since we read an array, the variable has to be an array too
		 */
		if (var->arrsize == 0)
		{
			ecpg_raise(stmt->lineno, ECPG_NO_ARRAY, ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
			return false;
		}
	}

	/*
	 * allocate memory for NULL pointers
	 */
	if ((var->arrsize == 0 || var->varcharsize == 0) && var->value == NULL)
	{
		int			len = 0;

		/* PQfformat() == 0 means text format; size by string lengths then */
		if (!PQfformat(results, act_field))
		{
			switch (var->type)
			{
				case ECPGt_char:
				case ECPGt_unsigned_char:
					if (!var->varcharsize && !var->arrsize)
					{
						/* special mode for handling char**foo=0 */
						/* total bytes of all strings plus the pointer table */
						for (act_tuple = 0; act_tuple < ntuples; act_tuple++)
							len += strlen(PQgetvalue(results, act_tuple, act_field)) + 1;
						len *= var->offset; /* should be 1, but YMNK */
						len += (ntuples + 1) * sizeof(char *);
					}
					else
					{
						var->varcharsize = 0;
						/* check strlen for each tuple */
						/* NB: this inner "len" deliberately shadows the outer one */
						for (act_tuple = 0; act_tuple < ntuples; act_tuple++)
						{
							int			len = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;

							if (len > var->varcharsize)
								var->varcharsize = len;
						}
						var->offset *= var->varcharsize;
						len = var->offset * ntuples;
					}
					break;
				case ECPGt_varchar:
					len = ntuples * (var->varcharsize + sizeof(int));
					break;
				default:
					len = var->offset * ntuples;
					break;
			}
		}
		else
		{
			/* binary format: the server already knows each field's length */
			for (act_tuple = 0; act_tuple < ntuples; act_tuple++)
				len += PQgetlength(results, act_tuple, act_field);
		}
		ecpg_log("ecpg_store_result on line %d: allocating memory for %d tuples\n", stmt->lineno, ntuples);
		var->value = (char *) ecpg_alloc(len, stmt->lineno);
		if (!var->value)
			return false;
		/* publish the buffer through the caller's pointer and auto-free list */
		*((char **) var->pointer) = var->value;
		ecpg_add_mem(var->value, stmt->lineno);
	}

	/* allocate indicator variable if needed */
	if ((var->ind_arrsize == 0 || var->ind_varcharsize == 0) && var->ind_value == NULL && var->ind_pointer != NULL)
	{
		int			len = var->ind_offset * ntuples;

		var->ind_value = (char *) ecpg_alloc(len, stmt->lineno);
		if (!var->ind_value)
			return false;
		*((char **) var->ind_pointer) = var->ind_value;
		ecpg_add_mem(var->ind_value, stmt->lineno);
	}

	/* fill the variable with the tuple(s) */
	if (!var->varcharsize && !var->arrsize &&
		(var->type == ECPGt_char || var->type == ECPGt_unsigned_char))
	{
		/* special mode for handling char**foo=0 */
		/* filling the array of (char*)s */
		char	  **current_string = (char **) var->value;

		/* storing the data (after the last array element) */
		char	   *current_data_location = (char *) &current_string[ntuples + 1];

		for (act_tuple = 0; act_tuple < ntuples && status; act_tuple++)
		{
			int			len = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;

			if (!ecpg_get_data(results, act_tuple, act_field, stmt->lineno,
							   var->type, var->ind_type, current_data_location,
							   var->ind_value, len, 0, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
				status = false;
			else
			{
				/* record the string's location, then advance past it */
				*current_string = current_data_location;
				current_data_location += len;
				current_string++;
			}
		}
		/* terminate the list */
		*current_string = NULL;
	}
	else
	{
		for (act_tuple = 0; act_tuple < ntuples && status; act_tuple++)
		{
			if (!ecpg_get_data(results, act_tuple, act_field, stmt->lineno,
							   var->type, var->ind_type, var->value,
							   var->ind_value, var->varcharsize, var->offset, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
				status = false;
		}
	}
	return status;
}
/*
 * ecpg_store_input
 *		Convert one host variable into the textual form that is sent to the
 *		server as a query parameter.
 *
 * On success *tobeinserted_p points at a malloc'ed string (or the constant
 * "" for descriptors), or is set to NULL when the indicator marks the value
 * as SQL NULL.  Returns false on allocation failure or unsupported type.
 *
 * lineno          - statement's source line, for error reporting
 * force_indicator - if false, "no indicator" null representation is honoured
 * var             - host variable descriptor (type, value, array size, ...)
 * tobeinserted_p  - out: textual representation of the value
 * quote           - whether string types must be quoted/escaped
 */
bool
ecpg_store_input(const int lineno, const bool force_indicator, const struct variable * var,
				 char **tobeinserted_p, bool quote)
{
	char	   *mallocedval = NULL;
	char	   *newcopy = NULL;

	/*
	 * arrays are not possible unless the attribute is an array too FIXME: we
	 * do not know if the attribute is an array here
	 */
#if 0
	if (var->arrsize > 1 &&...)
	{
		ecpg_raise(lineno, ECPG_ARRAY_INSERT, ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
		return false;
	}
#endif

	/*
	 * Some special treatment is needed for records since we want their
	 * contents to arrive in a comma-separated list on insert (I think).
	 */
	*tobeinserted_p = "";

	/* check for null value and set input buffer accordingly */
	switch (var->ind_type)
	{
		case ECPGt_short:
		case ECPGt_unsigned_short:
			if (*(short *) var->ind_value < 0)
				*tobeinserted_p = NULL;
			break;
		case ECPGt_int:
		case ECPGt_unsigned_int:
			if (*(int *) var->ind_value < 0)
				*tobeinserted_p = NULL;
			break;
		case ECPGt_long:
		case ECPGt_unsigned_long:
			if (*(long *) var->ind_value < 0L)
				*tobeinserted_p = NULL;
			break;
#ifdef HAVE_LONG_LONG_INT_64
		case ECPGt_long_long:
		case ECPGt_unsigned_long_long:
			if (*(long long int *) var->ind_value < (long long) 0)
				*tobeinserted_p = NULL;
			break;
#endif   /* HAVE_LONG_LONG_INT_64 */
		case ECPGt_NO_INDICATOR:
			if (force_indicator == false)
			{
				if (ECPGis_noind_null(var->type, var->value))
					*tobeinserted_p = NULL;
			}
			break;
		default:
			break;
	}

	/* a NULL value needs no text representation */
	if (*tobeinserted_p != NULL)
	{
		int			asize = var->arrsize ? var->arrsize : 1;

		switch (var->type)
		{
				int			element;

			case ECPGt_short:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%hd,", ((short *) var->value)[element]);
					/* replace the trailing comma with the closing bracket */
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%hd", *((short *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_int:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					/* NOTE(review): int arrays use "{...}" while the other
					 * numeric types use "array [...]" -- kept as-is */
					strcpy(mallocedval, "{");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%d,", ((int *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "}");
				}
				else
					sprintf(mallocedval, "%d", *((int *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_unsigned_short:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%hu,", ((unsigned short *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%hu", *((unsigned short *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_unsigned_int:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%u,", ((unsigned int *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%u", *((unsigned int *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_long:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%ld,", ((long *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%ld", *((long *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_unsigned_long:
				if (!(mallocedval = ecpg_alloc(asize * 20, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%lu,", ((unsigned long *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%lu", *((unsigned long *) var->value));
				*tobeinserted_p = mallocedval;
				break;
#ifdef HAVE_LONG_LONG_INT_64
			case ECPGt_long_long:
				if (!(mallocedval = ecpg_alloc(asize * 30, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%lld,", ((long long *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%lld", *((long long *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_unsigned_long_long:
				if (!(mallocedval = ecpg_alloc(asize * 30, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%llu,", ((unsigned long long *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%llu", *((unsigned long long *) var->value));
				*tobeinserted_p = mallocedval;
				break;
#endif   /* HAVE_LONG_LONG_INT_64 */
			case ECPGt_float:
				if (!(mallocedval = ecpg_alloc(asize * 25, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%.14g,", ((float *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%.14g", *((float *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_double:
				if (!(mallocedval = ecpg_alloc(asize * 25, lineno)))
					return false;
				if (asize > 1)
				{
					strcpy(mallocedval, "array [");
					for (element = 0; element < asize; element++)
						sprintf(mallocedval + strlen(mallocedval), "%.14g,", ((double *) var->value)[element]);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
					sprintf(mallocedval, "%.14g", *((double *) var->value));
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_bool:

				/*
				 * Each array element contributes up to two characters
				 * ('t'/'f' plus separator); the old "arrsize + sizeof(...)"
				 * sizing was one byte short per element beyond the first.
				 */
				if (!(mallocedval = ecpg_alloc(var->arrsize * 2 + sizeof("array []"), lineno)))
					return false;
				if (var->arrsize > 1)
				{
					strcpy(mallocedval, "array [");
					if (var->offset == sizeof(char))
						for (element = 0; element < var->arrsize; element++)
							sprintf(mallocedval + strlen(mallocedval), "%c,", (((char *) var->value)[element]) ? 't' : 'f');

					/*
					 * this is necessary since sizeof(C++'s bool)==sizeof(int)
					 */
					else if (var->offset == sizeof(int))
						for (element = 0; element < var->arrsize; element++)
							sprintf(mallocedval + strlen(mallocedval), "%c,", (((int *) var->value)[element]) ? 't' : 'f');
					else
						ecpg_raise(lineno, ECPG_CONVERT_BOOL, ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
					strcpy(mallocedval + strlen(mallocedval) - 1, "]");
				}
				else
				{
					if (var->offset == sizeof(char))
						sprintf(mallocedval, "%c", (*((char *) var->value)) ? 't' : 'f');
					else if (var->offset == sizeof(int))
						sprintf(mallocedval, "%c", (*((int *) var->value)) ? 't' : 'f');
					else
						ecpg_raise(lineno, ECPG_CONVERT_BOOL, ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
					/* NOTE(review): after the raise above the buffer is
					 * returned unfilled -- confirm ecpg_raise's behaviour */
				}
				*tobeinserted_p = mallocedval;
				break;
			case ECPGt_char:
			case ECPGt_unsigned_char:
				{
					/* set slen to string length if type is char * */
					int			slen = (var->varcharsize == 0) ? strlen((char *) var->value) : (unsigned int) var->varcharsize;

					if (!(newcopy = ecpg_alloc(slen + 1, lineno)))
						return false;

					strncpy(newcopy, (char *) var->value, slen);
					newcopy[slen] = '\0';

					mallocedval = quote_postgres(newcopy, quote, lineno);
					if (!mallocedval)
						return false;

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_const:
			case ECPGt_char_variable:
				{
					int			slen = strlen((char *) var->value);

					if (!(mallocedval = ecpg_alloc(slen + 1, lineno)))
						return false;

					strncpy(mallocedval, (char *) var->value, slen);
					mallocedval[slen] = '\0';

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_varchar:
				{
					struct ECPGgeneric_varchar *variable =
					(struct ECPGgeneric_varchar *) (var->value);

					if (!(newcopy = (char *) ecpg_alloc(variable->len + 1, lineno)))
						return false;

					strncpy(newcopy, variable->arr, variable->len);
					newcopy[variable->len] = '\0';

					mallocedval = quote_postgres(newcopy, quote, lineno);
					if (!mallocedval)
						return false;

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_decimal:
			case ECPGt_numeric:
				{
					char	   *str = NULL;
					int			slen;
					numeric    *nval;

					if (var->arrsize > 1)
					{
						for (element = 0; element < var->arrsize; element++)
						{
							nval = PGTYPESnumeric_new();
							if (!nval)
								return false;

							/*
							 * NOTE(review): stepping the descriptor pointer
							 * by offset bytes looks odd but is kept as-is.
							 */
							if (var->type == ECPGt_numeric)
								PGTYPESnumeric_copy((numeric *) ((var + var->offset * element)->value), nval);
							else
								PGTYPESnumeric_from_decimal((decimal *) ((var + var->offset * element)->value), nval);

							str = PGTYPESnumeric_to_asc(nval, nval->dscale);
							slen = strlen(str);
							PGTYPESnumeric_free(nval);

							/* mallocedval is NULL on the first pass: don't strlen() it */
							if (!(mallocedval = ecpg_realloc(mallocedval, (mallocedval ? strlen(mallocedval) : 0) + slen + sizeof("array [] "), lineno)))
							{
								ecpg_free(str);
								return false;
							}

							if (!element)
								strcpy(mallocedval, "array [");

							strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
							strcpy(mallocedval + strlen(mallocedval), ",");
							ecpg_free(str);
						}
						strcpy(mallocedval + strlen(mallocedval) - 1, "]");
					}
					else
					{
						nval = PGTYPESnumeric_new();
						if (!nval)
							return false;

						if (var->type == ECPGt_numeric)
							PGTYPESnumeric_copy((numeric *) (var->value), nval);
						else
							PGTYPESnumeric_from_decimal((decimal *) (var->value), nval);

						str = PGTYPESnumeric_to_asc(nval, nval->dscale);
						slen = strlen(str);
						PGTYPESnumeric_free(nval);

						if (!(mallocedval = ecpg_alloc(slen + 1, lineno)))
						{
							/* use ecpg_free like every other error path here */
							ecpg_free(str);
							return false;
						}

						strncpy(mallocedval, str, slen);
						mallocedval[slen] = '\0';
						ecpg_free(str);
					}

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_interval:
				{
					char	   *str = NULL;
					int			slen;

					if (var->arrsize > 1)
					{
						for (element = 0; element < var->arrsize; element++)
						{
							str = quote_postgres(PGTYPESinterval_to_asc((interval *) ((var + var->offset * element)->value)), quote, lineno);
							if (!str)
								return false;
							slen = strlen(str);

							/* mallocedval is NULL on the first pass: don't strlen() it */
							if (!(mallocedval = ecpg_realloc(mallocedval, (mallocedval ? strlen(mallocedval) : 0) + slen + sizeof("array [],interval "), lineno)))
							{
								ecpg_free(str);
								return false;
							}

							if (!element)
								strcpy(mallocedval, "array [");

							strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
							strcpy(mallocedval + strlen(mallocedval), ",");
							ecpg_free(str);
						}
						strcpy(mallocedval + strlen(mallocedval) - 1, "]");
					}
					else
					{
						str = quote_postgres(PGTYPESinterval_to_asc((interval *) (var->value)), quote, lineno);
						if (!str)
							return false;
						slen = strlen(str);

						if (!(mallocedval = ecpg_alloc(slen + sizeof("interval ") + 1, lineno)))
						{
							ecpg_free(str);
							return false;
						}

						/* also copy trailing '\0' */
						strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
						ecpg_free(str);
					}

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_date:
				{
					char	   *str = NULL;
					int			slen;

					if (var->arrsize > 1)
					{
						for (element = 0; element < var->arrsize; element++)
						{
							str = quote_postgres(PGTYPESdate_to_asc(*(date *) ((var + var->offset * element)->value)), quote, lineno);
							if (!str)
								return false;
							slen = strlen(str);

							/* mallocedval is NULL on the first pass: don't strlen() it */
							if (!(mallocedval = ecpg_realloc(mallocedval, (mallocedval ? strlen(mallocedval) : 0) + slen + sizeof("array [],date "), lineno)))
							{
								ecpg_free(str);
								return false;
							}

							if (!element)
								strcpy(mallocedval, "array [");

							strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
							strcpy(mallocedval + strlen(mallocedval), ",");
							ecpg_free(str);
						}
						strcpy(mallocedval + strlen(mallocedval) - 1, "]");
					}
					else
					{
						str = quote_postgres(PGTYPESdate_to_asc(*(date *) (var->value)), quote, lineno);
						if (!str)
							return false;
						slen = strlen(str);

						if (!(mallocedval = ecpg_alloc(slen + sizeof("date ") + 1, lineno)))
						{
							ecpg_free(str);
							return false;
						}

						/* also copy trailing '\0' */
						strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
						ecpg_free(str);
					}

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_timestamp:
				{
					char	   *str = NULL;
					int			slen;

					if (var->arrsize > 1)
					{
						for (element = 0; element < var->arrsize; element++)
						{
							str = quote_postgres(PGTYPEStimestamp_to_asc(*(timestamp *) ((var + var->offset * element)->value)), quote, lineno);
							if (!str)
								return false;
							slen = strlen(str);

							/* mallocedval is NULL on the first pass: don't strlen() it */
							if (!(mallocedval = ecpg_realloc(mallocedval, (mallocedval ? strlen(mallocedval) : 0) + slen + sizeof("array [], timestamp "), lineno)))
							{
								ecpg_free(str);
								return false;
							}

							if (!element)
								strcpy(mallocedval, "array [");

							strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
							strcpy(mallocedval + strlen(mallocedval), ",");
							ecpg_free(str);
						}
						strcpy(mallocedval + strlen(mallocedval) - 1, "]");
					}
					else
					{
						str = quote_postgres(PGTYPEStimestamp_to_asc(*(timestamp *) (var->value)), quote, lineno);
						if (!str)
							return false;
						slen = strlen(str);

						if (!(mallocedval = ecpg_alloc(slen + sizeof("timestamp") + 1, lineno)))
						{
							ecpg_free(str);
							return false;
						}

						/* also copy trailing '\0' */
						strncpy(mallocedval + strlen(mallocedval), str, slen + 1);
						ecpg_free(str);
					}

					*tobeinserted_p = mallocedval;
				}
				break;
			case ECPGt_descriptor:
				/* descriptors are expanded by the caller; keep the "" marker */
				break;
			default:
				/* Not implemented yet */
				ecpg_raise(lineno, ECPG_UNSUPPORTED, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, (char *) ecpg_type_name(var->type));
				return false;
				break;
		}
	}
	return true;
}
/*
 * free_params
 *		Release the query-parameter array built by ecpg_execute.
 *
 * Frees every individual parameter string and then the array itself.
 * When "print" is set, each parameter is logged (NULL parameters are
 * reported as "null") before being freed.
 */
static void
free_params(const char **paramValues, int nParams, bool print, int lineno)
{
	int			idx;

	for (idx = 0; idx < nParams; idx++)
	{
		if (print)
			ecpg_log("free_params on line %d: parameter %d = %s\n",
					 lineno, idx + 1,
					 paramValues[idx] ? paramValues[idx] : "null");
		ecpg_free((void *) (paramValues[idx]));
	}
	ecpg_free(paramValues);
}
/*
 * insert_tobeinserted
 *		Splice the textual parameter "tobeinserted" into stmt->command,
 *		replacing the ph_len-character placeholder at 1-based "position".
 *
 * Takes ownership of "tobeinserted" (it is freed on both paths).  Returns
 * false on allocation failure.  Note the ordering below: the tail of the
 * old command string is appended (strcat) from the old buffer BEFORE that
 * buffer is freed.
 */
static bool
insert_tobeinserted(int position, int ph_len, struct statement * stmt, char *tobeinserted)
{
	char	   *newcopy;

	/* large enough: new text replaces ph_len >= 0 placeholder characters */
	if (!(newcopy = (char *) ecpg_alloc(strlen(stmt->command)
										+ strlen(tobeinserted)
										+ 1, stmt->lineno)))
	{
		ecpg_free(tobeinserted);
		return false;
	}

	strcpy(newcopy, stmt->command);
	/* overwrite from the placeholder onward with the parameter text */
	strcpy(newcopy + position - 1, tobeinserted);

	/*
	 * The strange thing in the second argument is the rest of the string from
	 * the old string
	 */
	strcat(newcopy,
		   stmt->command
		   + position
		   + ph_len - 1);

	ecpg_free(stmt->command);
	stmt->command = newcopy;

	ecpg_free((char *) tobeinserted);
	return true;
}
static bool
ecpg_execute(struct statement * stmt)
{
bool status = false;
char *cmdstat;
PGresult *results;
PGnotify *notify;
struct variable *var;
int desc_counter = 0;
const char **paramValues = NULL;
int nParams = 0;
int position = 0;
struct sqlca_t *sqlca = ECPGget_sqlca();
bool clear_result = true;
/*
* If the type is one of the fill in types then we take the argument and
* enter it to our parameter array at the first position. Then if there
* are any more fill in types we add more parameters.
*/
var = stmt->inlist;
while (var)
{
char *tobeinserted;
int counter = 1;
tobeinserted = NULL;
/*
* A descriptor is a special case since it contains many variables but
* is listed only once.
*/
if (var->type == ECPGt_descriptor)
{
/*
* We create an additional variable list here, so the same logic
* applies.
*/
struct variable desc_inlist;
struct descriptor *desc;
struct descriptor_item *desc_item;
desc = ecpg_find_desc(stmt->lineno, var->pointer);
if (desc == NULL)
return false;
desc_counter++;
for (desc_item = desc->items; desc_item; desc_item = desc_item->next)
{
if (desc_item->num == desc_counter)
{
desc_inlist.type = ECPGt_char;
desc_inlist.value = desc_item->data;
desc_inlist.pointer = &(desc_item->data);
desc_inlist.varcharsize = strlen(desc_item->data);
desc_inlist.arrsize = 1;
desc_inlist.offset = 0;
if (!desc_item->indicator)
{
desc_inlist.ind_type = ECPGt_NO_INDICATOR;
desc_inlist.ind_value = desc_inlist.ind_pointer = NULL;
desc_inlist.ind_varcharsize = desc_inlist.ind_arrsize = desc_inlist.ind_offset = 0;
}
else
{
desc_inlist.ind_type = ECPGt_int;
desc_inlist.ind_value = &(desc_item->indicator);
desc_inlist.ind_pointer = &(desc_inlist.ind_value);
desc_inlist.ind_varcharsize = desc_inlist.ind_arrsize = 1;
desc_inlist.ind_offset = 0;
}
if (!ecpg_store_input(stmt->lineno, stmt->force_indicator, &desc_inlist, &tobeinserted, false))
return false;
break;
}
}
if (desc->count == desc_counter)
desc_counter = 0;
}
else
{
if (!ecpg_store_input(stmt->lineno, stmt->force_indicator, var, &tobeinserted, false))
return false;
}
/*
* now tobeinserted points to an area that contains the next parameter
* now find the positin in the string where it belongs
*/
if ((position = next_insert(stmt->command, position, stmt->questionmarks) + 1) == 0)
{
/*
* We have an argument but we dont have the matched up placeholder
* in the string
*/
ecpg_raise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS,
ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS,
NULL);
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
/*
* if var->type=ECPGt_char_variable we have a dynamic cursor we have
* to simulate a dynamic cursor because there is no backend
* functionality for it
*/
if (var->type == ECPGt_char_variable)
{
int ph_len = (stmt->command[position] == '?') ? strlen("?") : strlen("$1");
if (!insert_tobeinserted(position, ph_len, stmt, tobeinserted))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
tobeinserted = NULL;
}
/*
* if the placeholder is '$0' we have to replace it on the client side
* this is for places we want to support variables at that are not
* supported in the backend
*/
else if (stmt->command[position] == '0')
{
if (!insert_tobeinserted(position, 2, stmt, tobeinserted))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
tobeinserted = NULL;
}
else
{
nParams++;
if (!(paramValues = (const char **) ecpg_realloc(paramValues, sizeof(const char *) * nParams, stmt->lineno)))
{
ecpg_free(paramValues);
return false;
}
paramValues[nParams - 1] = tobeinserted;
/* let's see if this was an old style placeholder */
if (stmt->command[position] == '?')
{
/* yes, replace with new style */
int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the
* size we need */
if (!(tobeinserted = (char *) ecpg_alloc(buffersize, stmt->lineno)))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
snprintf(tobeinserted, buffersize, "$%d", counter++);
if (!insert_tobeinserted(position, 2, stmt, tobeinserted))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
tobeinserted = NULL;
}
}
if (desc_counter == 0)
var = var->next;
}
/* Check if there are unmatched things left. */
if (next_insert(stmt->command, position, stmt->questionmarks) >= 0)
{
ecpg_raise(stmt->lineno, ECPG_TOO_FEW_ARGUMENTS,
ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL);
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
/* The request has been build. */
if (stmt->connection->committed && !stmt->connection->autocommit)
{
results = PQexec(stmt->connection->connection, "begin transaction");
if (!ecpg_check_PQresult(results, stmt->lineno, stmt->connection->connection, stmt->compat))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
PQclear(results);
stmt->connection->committed = false;
}
ecpg_log("ecpg_execute on line %d: query: %s; with %d parameter(s) on connection %s\n", stmt->lineno, stmt->command, nParams, stmt->connection->name);
if (stmt->statement_type == ECPGst_execute)
{
results = PQexecPrepared(stmt->connection->connection, stmt->name, nParams, paramValues, NULL, NULL, 0);
ecpg_log("ecpg_execute on line %d: using PQexecPrepared for \"%s\"\n", stmt->lineno, stmt->command);
}
else
{
if (nParams == 0)
{
results = PQexec(stmt->connection->connection, stmt->command);
ecpg_log("ecpg_execute on line %d: using PQexec\n", stmt->lineno);
}
else
{
results = PQexecParams(stmt->connection->connection, stmt->command, nParams, NULL, paramValues, NULL, NULL, 0);
ecpg_log("ecpg_execute on line %d: using PQexecParams\n", stmt->lineno);
}
}
free_params(paramValues, nParams, true, stmt->lineno);
if (!ecpg_check_PQresult(results, stmt->lineno, stmt->connection->connection, stmt->compat))
return (false);
var = stmt->outlist;
switch (PQresultStatus(results))
{
int nfields,
ntuples,
act_field;
case PGRES_TUPLES_OK:
nfields = PQnfields(results);
sqlca->sqlerrd[2] = ntuples = PQntuples(results);
ecpg_log("ecpg_execute on line %d: correctly got %d tuples with %d fields\n", stmt->lineno, ntuples, nfields);
status = true;
if (ntuples < 1)
{
if (ntuples)
ecpg_log("ecpg_execute on line %d: incorrect number of matches (%d)\n",
stmt->lineno, ntuples);
ecpg_raise(stmt->lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL);
status = false;
break;
}
if (var != NULL && var->type == ECPGt_descriptor)
{
struct descriptor *desc = ecpg_find_desc(stmt->lineno, var->pointer);
if (desc == NULL)
status = false;
else
{
if (desc->result)
PQclear(desc->result);
desc->result = results;
clear_result = false;
ecpg_log("ecpg_execute on line %d: putting result (%d tuples) into descriptor %s\n",
stmt->lineno, PQntuples(results), (const char *) var->pointer);
}
var = var->next;
}
else
for (act_field = 0; act_field < nfields && status; act_field++)
{
if (var != NULL)
{
status = ecpg_store_result(results, act_field, stmt, var);
var = var->next;
}
else if (!INFORMIX_MODE(stmt->compat))
{
ecpg_raise(stmt->lineno, ECPG_TOO_FEW_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_TARGETS, NULL);
return (false);
}
}
if (status && var != NULL)
{
ecpg_raise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_TARGETS, NULL);
status = false;
}
break;
case PGRES_COMMAND_OK:
status = true;
cmdstat = PQcmdStatus(results);
sqlca->sqlerrd[1] = PQoidValue(results);
sqlca->sqlerrd[2] = atol(PQcmdTuples(results));
ecpg_log("ecpg_execute on line %d: OK: %s\n", stmt->lineno, cmdstat);
if (stmt->compat != ECPG_COMPAT_INFORMIX_SE &&
!sqlca->sqlerrd[2] &&
(!strncmp(cmdstat, "UPDATE", 6)
|| !strncmp(cmdstat, "INSERT", 6)
|| !strncmp(cmdstat, "DELETE", 6)))
ecpg_raise(stmt->lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL);
break;
case PGRES_COPY_OUT:
{
char *buffer;
int res;
ecpg_log("ecpg_execute on line %d: COPY OUT data transfer in progress\n", stmt->lineno);
while ((res = PQgetCopyData(stmt->connection->connection,
&buffer, 0)) > 0)
{
printf("%s", buffer);
PQfreemem(buffer);
}
if (res == -1)
{
/* COPY done */
PQclear(results);
results = PQgetResult(stmt->connection->connection);
if (PQresultStatus(results) == PGRES_COMMAND_OK)
ecpg_log("ecpg_execute on line %d: got PGRES_COMMAND_OK after PGRES_COPY_OUT\n", stmt->lineno);
else
ecpg_log("ecpg_execute on line %d: got error after PGRES_COPY_OUT: %s", PQresultErrorMessage(results));
}
break;
}
default:
/*
* execution should never reach this code because it is already
* handled in ECPGcheck_PQresult()
*/
ecpg_log("ecpg_execute on line %d: unknown execution status type\n",
stmt->lineno);
ecpg_raise_backend(stmt->lineno, results, stmt->connection->connection, stmt->compat);
status = false;
break;
}
if (clear_result)
PQclear(results);
/* check for asynchronous returns */
notify = PQnotifies(stmt->connection->connection);
if (notify)
{
ecpg_log("ecpg_execute on line %d: asynchronous notification of \"%s\" from backend pid %d received\n",
stmt->lineno, notify->relname, notify->be_pid);
PQfreemem(notify);
}
return status;
}
/*
 * ECPGdo
 *		Execute one embedded SQL statement on the given connection.
 *
 * lineno			- source line, used for error reporting
 * compat			- compatibility mode (native, Informix, ...)
 * force_indicator	- whether NULLs require an indicator variable
 * connection_name	- connection to run on (NULL means current)
 * questionmarks	- whether '?' placeholders are in use
 * st				- ECPG_statement_type of this statement
 * query			- the statement text (or prepared-statement name)
 * ...				- variadic host-variable descriptors: input variables,
 *					  ECPGt_EOIT, output variables, ECPGt_EORT; see the
 *					  per-variable field list in the comment below.
 *
 * Returns true on success.  Every error exit restores the saved
 * LC_NUMERIC locale and releases all resources acquired so far.
 */
bool
ECPGdo(const int lineno, const int compat, const int force_indicator, const char *connection_name, const bool questionmarks, const int st, const char *query,...)
{
	va_list		args;
	struct statement *stmt;
	struct connection *con;
	bool		status;
	char	   *oldlocale;
	enum ECPGttype type;
	struct variable **list;
	enum ECPG_statement_type statement_type = (enum ECPG_statement_type) st;
	char	   *prepname;

	if (!query)
	{
		ecpg_raise(lineno, ECPG_EMPTY, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, NULL);
		return (false);
	}

	/*
	 * Make sure we do NOT honor the locale for numeric input/output since
	 * the database wants the standard decimal point.  The previous locale
	 * is restored on every exit path below.
	 */
	oldlocale = ecpg_strdup(setlocale(LC_NUMERIC, NULL), lineno);
	setlocale(LC_NUMERIC, "C");

#ifdef ENABLE_THREAD_SAFETY
	ecpg_pthreads_init();
#endif

	con = ecpg_get_connection(connection_name);

	if (!ecpg_init(con, connection_name, lineno))
	{
		setlocale(LC_NUMERIC, oldlocale);
		ecpg_free(oldlocale);
		return (false);
	}

	/* construct statement in our own structure */
	va_start(args, query);

	/*
	 * create a list of variables The variables are listed with input
	 * variables preceding outputvariables The end of each group is marked by
	 * an end marker. per variable we list: type - as defined in ecpgtype.h
	 * value - where to store the data varcharsize - length of string in case
	 * we have a stringvariable, else 0 arraysize - 0 for pointer (we don't
	 * know the size of the array), 1 for simple variable, size for arrays
	 * offset - offset between ith and (i+1)th entry in an array, normally
	 * that means sizeof(type) ind_type - type of indicator variable ind_value
	 * - pointer to indicator variable ind_varcharsize - empty ind_arraysize -
	 * arraysize of indicator array ind_offset - indicator offset
	 */
	if (!(stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno)))
	{
		setlocale(LC_NUMERIC, oldlocale);
		ecpg_free(oldlocale);
		va_end(args);
		return false;
	}

	/*
	 * If statement type is ECPGst_prepnormal we are supposed to prepare the
	 * statement before executing them
	 */
	if (statement_type == ECPGst_prepnormal)
	{
		if (!ecpg_auto_prepare(lineno, connection_name, compat, &prepname, query))
		{
			/*
			 * Bug fix: this path previously returned without restoring the
			 * locale or releasing stmt/args; clean up like every other
			 * error exit.  (ecpg_alloc zero-fills, so free_statement is
			 * safe on the not-yet-populated struct.)
			 */
			setlocale(LC_NUMERIC, oldlocale);
			ecpg_free(oldlocale);
			free_statement(stmt);
			va_end(args);
			return (false);
		}

		/*
		 * statement is now prepared, so instead of the query we have to
		 * execute the name
		 */
		stmt->command = prepname;
		statement_type = ECPGst_execute;
	}
	else
		stmt->command = ecpg_strdup(query, lineno);

	stmt->name = NULL;

	if (statement_type == ECPGst_execute)
	{
		/* if we have an EXECUTE command, only the name is send */
		char	   *command = ecpg_prepared(stmt->command, con);

		if (command)
		{
			stmt->name = stmt->command;
			stmt->command = ecpg_strdup(command, lineno);
		}
		else
		{
			ecpg_raise(lineno, ECPG_INVALID_STMT, ECPG_SQLSTATE_INVALID_SQL_STATEMENT_NAME, stmt->command);

			/*
			 * Bug fix: previously execution fell through and tried to run
			 * the unknown statement name anyway; fail cleanly instead.
			 */
			setlocale(LC_NUMERIC, oldlocale);
			ecpg_free(oldlocale);
			free_statement(stmt);
			va_end(args);
			return (false);
		}
	}

	stmt->connection = con;
	stmt->lineno = lineno;
	stmt->compat = compat;
	stmt->force_indicator = force_indicator;
	stmt->questionmarks = questionmarks;
	stmt->statement_type = statement_type;

	/* Collect the variadic host-variable descriptors into in/out lists. */
	list = &(stmt->inlist);

	type = va_arg(args, enum ECPGttype);

	while (type != ECPGt_EORT)
	{
		if (type == ECPGt_EOIT)
			list = &(stmt->outlist);	/* switch from input to output list */
		else
		{
			struct variable *var,
					   *ptr;

			if (!(var = (struct variable *) ecpg_alloc(sizeof(struct variable), lineno)))
			{
				setlocale(LC_NUMERIC, oldlocale);
				ecpg_free(oldlocale);
				free_statement(stmt);
				va_end(args);
				return false;
			}

			var->type = type;
			var->pointer = va_arg(args, char *);

			var->varcharsize = va_arg(args, long);
			var->arrsize = va_arg(args, long);
			var->offset = va_arg(args, long);

			/* pointer-valued or varchar-of-unknown-size: dereference once */
			if (var->arrsize == 0 || var->varcharsize == 0)
				var->value = *((char **) (var->pointer));
			else
				var->value = var->pointer;

			/*
			 * negative values are used to indicate an array without given
			 * bounds
			 */
			/* reset to zero for us */
			if (var->arrsize < 0)
				var->arrsize = 0;
			if (var->varcharsize < 0)
				var->varcharsize = 0;

			var->next = NULL;

			var->ind_type = va_arg(args, enum ECPGttype);
			var->ind_pointer = va_arg(args, char *);
			var->ind_varcharsize = va_arg(args, long);
			var->ind_arrsize = va_arg(args, long);
			var->ind_offset = va_arg(args, long);

			if (var->ind_type != ECPGt_NO_INDICATOR
				&& (var->ind_arrsize == 0 || var->ind_varcharsize == 0))
				var->ind_value = *((char **) (var->ind_pointer));
			else
				var->ind_value = var->ind_pointer;

			/*
			 * negative values are used to indicate an array without given
			 * bounds
			 */
			/* reset to zero for us */
			if (var->ind_arrsize < 0)
				var->ind_arrsize = 0;
			if (var->ind_varcharsize < 0)
				var->ind_varcharsize = 0;

			/* if variable is NULL, the statement hasn't been prepared */
			if (var->pointer == NULL)
			{
				ecpg_raise(lineno, ECPG_INVALID_STMT, ECPG_SQLSTATE_INVALID_SQL_STATEMENT_NAME, NULL);
				ecpg_free(var);
				setlocale(LC_NUMERIC, oldlocale);
				ecpg_free(oldlocale);
				free_statement(stmt);
				va_end(args);
				return false;
			}

			/* append var at the end of the current list */
			for (ptr = *list; ptr && ptr->next; ptr = ptr->next);

			if (ptr == NULL)
				*list = var;
			else
				ptr->next = var;
		}

		type = va_arg(args, enum ECPGttype);
	}

	va_end(args);

	/* are we connected? */
	if (con == NULL || con->connection == NULL)
	{
		free_statement(stmt);
		ecpg_raise(lineno, ECPG_NOT_CONN, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, (con) ? con->name : ecpg_gettext("<empty>"));
		setlocale(LC_NUMERIC, oldlocale);
		ecpg_free(oldlocale);
		return false;
	}

	/* initialize auto_mem struct */
	ecpg_clear_auto_mem();

	status = ecpg_execute(stmt);
	free_statement(stmt);

	/* and reset locale value so our application is not affected */
	setlocale(LC_NUMERIC, oldlocale);
	ecpg_free(oldlocale);

	return (status);
}
/*
 * Old descriptor interface: run QUERY on CONNECTION and store the whole
 * result set into the named SQL DESCRIPTOR.  Kept for backward
 * compatibility; new code calls ECPGdo() directly.
 *
 * Cleanups versus the previous version: ECPGdo() already takes a
 * "const char *" query, so the const-discarding cast is unnecessary, and
 * the bool questionmarks argument is spelled "false" instead of the
 * equivalent-but-misleading '\0' character literal.
 */
bool
ECPGdo_descriptor(int line, const char *connection,
				  const char *descriptor, const char *query)
{
	return ECPGdo(line, ECPG_COMPAT_PGSQL, true, connection, false, 0, query, ECPGt_EOIT,
				  ECPGt_descriptor, descriptor, 0L, 0L, 0L,
				  ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
}
| edespino/gpdb | src/interfaces/ecpg/ecpglib/execute.c | C | apache-2.0 | 48,194 |
import util
class test_empty:
    """Test harness class exercising operations on empty arrays.

    ``init`` is a generator of setup snippets; each ``test_*`` method
    appends the operation under test to a setup snippet and returns the
    combined command string for the BDD runner to execute.
    """

    def init(self):
        # Single setup snippet: bind `a` to an empty array (assigned twice,
        # matching the original command string byte-for-byte).
        yield "a = M.array([]); a = M.array([]); "

    def test_add(self, cmd):
        # Append an element-wise addition of the empty array with itself.
        return cmd + "res = a + a"
| bh107/bohrium | test/python/tests/test_emptiness.py | Python | apache-2.0 | 198 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.test.blueprint;
import org.apache.camel.Consume;
import org.apache.camel.Produce;
import org.apache.camel.ProducerTemplate;
/**
 * Test service bean used by the camel-test-blueprint tests: the
 * {@code @Consume}-annotated method receives message bodies and forwards
 * them unchanged through the injected {@link ProducerTemplate}.
 * <p>
 * NOTE(review): neither annotation carries an explicit URI here, so the
 * consume/produce endpoints are presumably resolved by Camel from the
 * {@code fooEndpoint}/{@code barEndpoint} bean properties — confirm
 * against the blueprint XML that wires this bean.
 */
public class FooService {
    // URI of the endpoint this service consumes from.
    private String fooEndpoint;
    // URI of the endpoint consumed messages are forwarded to.
    private String barEndpoint;
    // Producer injected by Camel; used to forward bodies in onFoo().
    @Produce
    private ProducerTemplate bar;
    /** @return the configured consume-endpoint URI */
    public String getFooEndpoint() {
        return fooEndpoint;
    }
    /** @param fooEndpoint the consume-endpoint URI to use */
    public void setFooEndpoint(String fooEndpoint) {
        this.fooEndpoint = fooEndpoint;
    }
    /** @return the configured forward-endpoint URI */
    public String getBarEndpoint() {
        return barEndpoint;
    }
    /** @param barEndpoint the forward-endpoint URI to use */
    public void setBarEndpoint(String barEndpoint) {
        this.barEndpoint = barEndpoint;
    }
    /**
     * Receives a message body from the consume endpoint and sends it,
     * unmodified, to the producer template's endpoint.
     *
     * @param input the received message body
     */
    @Consume
    public void onFoo(String input) {
        bar.sendBody(input);
    }
}
| ullgren/camel | components/camel-test-blueprint/src/test/java/org/apache/camel/test/blueprint/FooService.java | Java | apache-2.0 | 1,536 |
---
external help file: Microsoft.Azure.Commands.Compute.dll-Help.xml
online version: https://docs.microsoft.com/en-us/powershell/module/azurerm.compute/new-azurermdiskupdateconfig
schema: 2.0.0
---
# New-AzureRmDiskUpdateConfig
## SYNOPSIS
Creates a configurable disk update object.
## SYNTAX
```
New-AzureRmDiskUpdateConfig [[-SkuName] <StorageAccountTypes>] [[-OsType] <OperatingSystemTypes>]
[[-DiskSizeGB] <Int32>] [[-Tag] <Hashtable>] [-EncryptionSettingsEnabled <Boolean>]
[-DiskEncryptionKey <KeyVaultAndSecretReference>] [-KeyEncryptionKey <KeyVaultAndKeyReference>]
[-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm] [<CommonParameters>]
```
## DESCRIPTION
The **New-AzureRmDiskUpdateConfig** cmdlet creates a configurable disk update object.
## EXAMPLES
### Example 1
```
PS C:\> $diskupdateconfig = New-AzureRmDiskUpdateConfig -DiskSizeGB 10 -AccountType PremiumLRS -OsType Windows -CreateOption Empty -EncryptionSettingsEnabled $true;
PS C:\> $secretUrl = 'https://myvault.vault-int.azure-int.net/secrets/123/';
PS C:\> $secretId = '/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/ResourceGroup01/providers/Microsoft.KeyVault/vaults/TestVault123';
PS C:\> $keyUrl = 'https://myvault.vault-int.azure-int.net/keys/456';
PS C:\> $keyId = '/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/ResourceGroup01/providers/Microsoft.KeyVault/vaults/TestVault456';
PS C:\> $diskupdateconfig = Set-AzureRmDiskUpdateDiskEncryptionKey -DiskUpdate $diskupdateconfig -SecretUrl $secretUrl -SourceVaultId $secretId;
PS C:\> $diskupdateconfig = Set-AzureRmDiskUpdateKeyEncryptionKey -DiskUpdate $diskupdateconfig -KeyUrl $keyUrl -SourceVaultId $keyId;
PS C:\> Update-AzureRmDisk -ResourceGroupName 'ResourceGroup01' -DiskName 'Disk01' -DiskUpdate $diskupdateconfig;
```
The first command creates a local empty disk update object with size 10GB in Premium_LRS storage
account type. It also sets Windows OS type and enables encryption settings. The second and third
commands set the disk encryption key and key encryption key settings for the disk update object.
The last command takes the disk update object and updates an existing disk with name 'Disk01' in
resource group 'ResourceGroup01'.
### Example 2
```
PS C:\> New-AzureRmDiskUpdateConfig -DiskSizeGB 10 | Update-AzureRmDisk -ResourceGroupName 'ResourceGroup01' -DiskName 'Disk01';
```
This command updates an existing disk with name 'Disk01' in resource group 'ResourceGroup01' to 10 GB disk size.
## PARAMETERS
### -DefaultProfile
The credentials, account, tenant, and subscription used for communication with azure.
```yaml
Type: IAzureContextContainer
Parameter Sets: (All)
Aliases: AzureRmContext, AzureCredential
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -DiskEncryptionKey
Specifies the disk encryption key object on a disk.
```yaml
Type: KeyVaultAndSecretReference
Parameter Sets: (All)
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -DiskSizeGB
Specifies the size of the disk in GB.
```yaml
Type: Int32
Parameter Sets: (All)
Aliases:
Required: False
Position: 2
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -EncryptionSettingsEnabled
Enable encryption settings.
```yaml
Type: Boolean
Parameter Sets: (All)
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -KeyEncryptionKey
Specifies the Key encryption key on a disk.
```yaml
Type: KeyVaultAndKeyReference
Parameter Sets: (All)
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -OsType
Specifies the OS type.
```yaml
Type: OperatingSystemTypes
Parameter Sets: (All)
Aliases:
Accepted values: Windows, Linux
Required: False
Position: 1
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -SkuName
Specifies the Sku name of the storage account.
```yaml
Type: StorageAccountTypes
Parameter Sets: (All)
Aliases: AccountType
Accepted values: StandardLRS, PremiumLRS
Required: False
Position: 0
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -Tag
Key-value pairs in the form of a hash table. For example:
@{key0="value0";key1=$null;key2="value2"}
```yaml
Type: Hashtable
Parameter Sets: (All)
Aliases:
Required: False
Position: 3
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -Confirm
Prompts you for confirmation before running the cmdlet.
```yaml
Type: SwitchParameter
Parameter Sets: (All)
Aliases: cf
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -WhatIf
Shows what would happen if the cmdlet runs. The cmdlet is not run.
```yaml
Type: SwitchParameter
Parameter Sets: (All)
Aliases: wi
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### CommonParameters
This cmdlet supports the common parameters: -Debug, -ErrorAction, -ErrorVariable, -InformationAction, -InformationVariable, -OutVariable, -OutBuffer, -PipelineVariable, -Verbose, -WarningAction, and -WarningVariable. For more information, see about_CommonParameters (http://go.microsoft.com/fwlink/?LinkID=113216).
## INPUTS
## OUTPUTS
### Microsoft.Azure.Commands.Compute.Automation.Models.PSDiskUpdate
## NOTES
## RELATED LINKS
| ClogenyTechnologies/azure-powershell | src/StackAdmin/Compute/Commands.Compute/help/New-AzureRmDiskUpdateConfig.md | Markdown | apache-2.0 | 5,761 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cmath>
#include <limits>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/round.h"
namespace tflite {
namespace {
// These constants are used to manipulate the binary representation of doubles.
// Double-precision binary64 floating point format is:
// Bit | 63 | 62-52 | 51-0 |
// | Sign | Exponent | Fraction |
// To avoid 64-bit integers as much as possible, I break this into high and
// low 32-bit chunks. High is:
// Bit | 31 | 30-20 | 19-0 |
// | Sign | Exponent | High Fraction |
// Low is:
// Bit | 31-0 |
// | Low Fraction |
// We then access the components through logical bit-wise operations to
// extract the parts needed, with the positions and masks derived from the
// layout shown above.
constexpr uint64_t kSignMask = 0x8000000000000000LL;
constexpr uint64_t kExponentMask = 0x7ff0000000000000LL;
constexpr int32_t kExponentShift = 52;
constexpr int32_t kExponentBias = 1023;
// An all-ones exponent field marks a NaN or infinity in binary64.
constexpr uint32_t kExponentIsBadNum = 0x7ff;
// Top 30 fraction bits (bits 51..22): the portion kept by IntegerFrExp().
constexpr uint64_t kFractionMask = 0x000fffffffc00000LL;
constexpr uint32_t kFractionShift = 22;
// The discarded low 22 fraction bits, and the half-way point above which
// IntegerFrExp() rounds the kept fraction up.
constexpr uint32_t kFractionRoundingMask = 0x003fffff;
constexpr uint32_t kFractionRoundingThreshold = 0x00200000;
}  // namespace
// Decomposes `double_multiplier` into a 32-bit fixed-point significand
// (*quantized_multiplier, Q0.31) and a base-2 exponent (*shift) such that
// double_multiplier ~= quantized_multiplier * 2^(shift - 31).
// A zero multiplier yields (0, 0); multipliers smaller than roughly 2^-31
// are flushed to (0, 0) as well (see comment below).
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
                        int* shift) {
  if (double_multiplier == 0.) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
#ifdef TFLITE_EMULATE_FLOAT
  // If we're trying to avoid the use of floating-point instructions (for
  // example on microcontrollers) then use an alternative implementation
  // that only requires integer and bitwise operations. To enable this, you
  // need to set the define during the build process for your platform.
  int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else   // TFLITE_EMULATE_FLOAT
  const double q = std::frexp(double_multiplier, shift);
  auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1ll << 31)));
#endif  // TFLITE_EMULATE_FLOAT
  TFLITE_CHECK(q_fixed <= (1ll << 31));
  // Rounding can push q (in [0.5, 1)) up to exactly 1.0; renormalize by
  // halving the significand and bumping the exponent.
  if (q_fixed == (1ll << 31)) {
    q_fixed /= 2;
    ++*shift;
  }
  TFLITE_CHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
  // A shift amount smaller than -31 would cause all bits to be shifted out
  // and thus all results would be zero. We implement that instead with
  // q_fixed==0, so as to avoid hitting issues with right-shift
  // operations with shift amounts greater than 31. Note that this happens
  // roughly when abs(double_multiplier) < 2^-31 and the present handling means
  // that we're effectively flushing tiny double_multiplier's to zero.
  // We could conceivably handle values in the range (roughly) [32, 63]
  // as 'denormals' i.e. (shift==0, q_fixed < 2^30). In that point of view
  // the present handling is just doing 'flush denormals to zero'. We could
  // reconsider and actually generate nonzero denormals if a need arises.
  if (*shift < -31) {
    *shift = 0;
    q_fixed = 0;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
// Variant of QuantizeMultiplier() restricted to multipliers strictly greater
// than 1, so the resulting exponent is checked to be a non-negative left
// shift.
void QuantizeMultiplierGreaterThanOne(double double_multiplier,
                                      int32_t* quantized_multiplier,
                                      int* left_shift) {
  TFLITE_CHECK_GT(double_multiplier, 1.);
  QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift);
  TFLITE_CHECK_GE(*left_shift, 0);
}
// Variant of QuantizeMultiplier() restricted to multipliers in (0, 1), so
// the resulting exponent is checked to be non-positive before being
// reported through *left_shift.
void QuantizeMultiplierSmallerThanOneExp(double double_multiplier,
                                         int32_t* quantized_multiplier,
                                         int* left_shift) {
  TFLITE_CHECK_LT(double_multiplier, 1.);
  TFLITE_CHECK_GT(double_multiplier, 0.);
  int exponent;
  QuantizeMultiplier(double_multiplier, quantized_multiplier, &exponent);
  TFLITE_CHECK_LE(exponent, 0);
  *left_shift = exponent;
}
// Integer-only replacement for std::frexp(): decomposes `input` into a
// scaled fraction (approximately round(frexp_fraction * 2^31), i.e. in
// [2^30, 2^31) for positive normal inputs) and a power-of-two exponent
// written to *shift.  Special encodings: zero -> (0, 0); NaN -> (0,
// INT_MAX); +/-infinity -> (INT64_MAX/INT64_MIN, INT_MAX).
int64_t IntegerFrExp(double input, int* shift) {
  // Make sure our assumptions about the double layout hold.
  TFLITE_CHECK_EQ(8, sizeof(double));
  // We want to access the bits of the input double value directly, which is
  // tricky to do safely, so use a union to handle the casting.
  union {
    double double_value;
    uint64_t double_as_uint;
  } cast_union;
  cast_union.double_value = input;
  const uint64_t u = cast_union.double_as_uint;
  // If the bitfield is all zeros apart from the sign bit, this is a normalized
  // zero value, so return standard values for this special case.
  if ((u & ~kSignMask) == 0) {
    *shift = 0;
    return 0;
  }
  // Deal with NaNs and Infs, which are always indicated with a fixed pattern in
  // the exponent, and distinguished by whether the fractions are zero or
  // non-zero.
  const uint32_t exponent_part = ((u & kExponentMask) >> kExponentShift);
  if (exponent_part == kExponentIsBadNum) {
    *shift = std::numeric_limits<int>::max();
    if (u & kFractionMask) {
      // NaN, so just return zero (with the exponent set to INT_MAX).
      return 0;
    } else {
      // Infinity, so return +/- INT_MAX.
      if (u & kSignMask) {
        return std::numeric_limits<int64_t>::min();
      } else {
        return std::numeric_limits<int64_t>::max();
      }
    }
  }
  // The shift is fairly easy to extract from the high bits of the double value,
  // just by masking it out and applying a bias. The std::frexp() implementation
  // always returns values between 0.5 and 1.0 though, whereas the exponent
  // assumes 1.0 to 2.0 is the standard range, so I add on one to match that
  // interface.
  *shift = (exponent_part - kExponentBias) + 1;
  // There's an implicit high bit in the double format definition, so make sure
  // we include that at the top, and then reconstruct the rest of the fractional
  // value from the remaining fragments.
  int64_t fraction = 0x40000000 + ((u & kFractionMask) >> kFractionShift);
  // We're cutting off some bits at the bottom, so to exactly match the standard
  // frexp implementation here we'll apply rounding by adding one to the least
  // significant bit of the result if the discarded portion is over half of the
  // maximum.
  if ((u & kFractionRoundingMask) > kFractionRoundingThreshold) {
    fraction += 1;
  }
  // Negate the fraction if the sign bit was set.
  if (u & kSignMask) {
    fraction *= -1;
  }
  return fraction;
}
// Inverse of IntegerFrExp(): reassembles a double from an integer fraction
// and a power-of-two exponent using only integer operations, i.e. the
// result is approximately fraction * 2^(shift - 31).
double DoubleFromFractionAndShift(int64_t fraction, int shift) {
  union {
    double double_value;
    uint64_t double_as_uint;
  } result;

  // Propagate the special encodings produced by IntegerFrExp(): a shift of
  // INT_MAX marks NaN (zero fraction) or +/-infinity (non-zero fraction).
  if (shift == std::numeric_limits<int>::max()) {
    if (fraction == 0) {
      return NAN;
    }
    return (fraction > 0) ? INFINITY : -INFINITY;
  }

  // A zero fraction always maps to a normalized +0.0 bit pattern.
  if (fraction == 0) {
    result.double_as_uint = 0;
    return result.double_value;
  }

  const bool is_negative = (fraction < 0);
  int64_t significand = is_negative ? -fraction : fraction;
  int64_t biased_exponent = shift - 1;
  // Renormalize until the significand sits in [0x40000000, 0x80000000],
  // compensating each doubling/halving in the exponent.
  while (significand < 0x40000000) {
    significand *= 2;
    biased_exponent -= 1;
  }
  while (significand > 0x80000000) {
    significand /= 2;
    biased_exponent += 1;
  }
  // Drop the implicit leading bit before packing the mantissa field.
  significand -= 0x40000000;
  // Clamp to the exponent range representable in binary64.
  if (biased_exponent < -1022) {
    biased_exponent = -1023;
  } else if (biased_exponent > 1022) {
    biased_exponent = 1023;
  }
  biased_exponent += kExponentBias;
  const uint64_t sign_bits = is_negative ? kSignMask : 0;
  result.double_as_uint = sign_bits | (biased_exponent << kExponentShift) |
                          (significand << kFractionShift);
  return result.double_value;
}
// Multiplies two doubles without floating-point instructions by working on
// their IntegerFrExp() decompositions.  NaN is returned if either operand
// is NaN or infinite.
double IntegerDoubleMultiply(double a, double b) {
  int lhs_shift;
  const int64_t lhs_fraction = IntegerFrExp(a, &lhs_shift);
  int rhs_shift;
  const int64_t rhs_fraction = IntegerFrExp(b, &rhs_shift);
  // A shift of INT_MAX is the marker IntegerFrExp() uses for NaN/infinity.
  if (lhs_shift == std::numeric_limits<int>::max() ||
      (rhs_shift == std::numeric_limits<int>::max())) {
    return NAN;
  }
  // Discarding the low 32 bits of the 62-bit product and adding one to the
  // combined exponent keeps the result in the same fraction scale the
  // decomposition uses.
  const int product_shift = lhs_shift + rhs_shift + 1;
  const int64_t product_fraction = (lhs_fraction * rhs_fraction) >> 32;
  return DoubleFromFractionAndShift(product_fraction, product_shift);
}
// Three-way comparison of two doubles via their IntegerFrExp()
// decompositions: returns -1, 0, or 1 for a < b, a == b, a > b.  NaN or
// infinity in either operand yields 1, matching the original behavior.
int IntegerDoubleCompare(double a, double b) {
  int lhs_shift;
  const int64_t lhs_fraction = IntegerFrExp(a, &lhs_shift);
  int rhs_shift;
  const int64_t rhs_fraction = IntegerFrExp(b, &rhs_shift);

  // A shift of INT_MAX marks NaN/infinity from IntegerFrExp().
  if (lhs_shift == std::numeric_limits<int>::max() ||
      (rhs_shift == std::numeric_limits<int>::max())) {
    return 1;
  }

  // Zero vs. a negative number is decided on sign alone, since zero is
  // encoded with shift 0 and would compare wrongly by exponent below.
  if ((lhs_fraction == 0) && (rhs_fraction < 0)) {
    return 1;
  }
  if ((lhs_fraction < 0) && (rhs_fraction == 0)) {
    return -1;
  }

  // Otherwise order by exponent first, then by significand.
  if (lhs_shift != rhs_shift) {
    return (lhs_shift < rhs_shift) ? -1 : 1;
  }
  if (lhs_fraction != rhs_fraction) {
    return (lhs_fraction < rhs_fraction) ? -1 : 1;
  }
  return 0;
}
// Computes the quantized multiplier/shift pair used to scale quantized
// input differences inside softmax: it quantizes
// beta * input_scale * 2^(31 - input_integer_bits), capped at 2^31 - 1.
// The TFLITE_EMULATE_FLOAT branch reproduces the same computation with
// integer-only arithmetic.
void PreprocessSoftmaxScaling(double beta, double input_scale,
                              int input_integer_bits,
                              int32_t* quantized_multiplier, int* left_shift) {
  // If the overall multiplier (input and beta) is large, then exp() of an
  // input difference of 1 scaled by this will be large. In other words, we
  // can cap the multiplier and know that, when it is used, the output will be
  // (round to) zero wherever the input is not at the maximum value.
  // If the overall scale is less than one, and input_integer_bits=0, then the
  // result is double equivalent of Q0.31 (actually with more precision). Thus
  // this generates a Q(input_integer_bits).(31-input_integer_bits)
  // representation.
#ifdef TFLITE_EMULATE_FLOAT
  const double input_beta = IntegerDoubleMultiply(beta, input_scale);
  int shift;
  int64_t fraction = IntegerFrExp(input_beta, &shift);
  // Scaling by 2^(31 - input_integer_bits) is a pure exponent adjustment in
  // the IntegerFrExp representation.
  shift += (31 - input_integer_bits);
  double input_beta_real_multiplier =
      DoubleFromFractionAndShift(fraction, shift);
  if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) {
    input_beta_real_multiplier = (1ll << 31) - 1.0;
  }
#else   // TFLITE_EMULATE_FLOAT
  const double input_beta_real_multiplier = std::min(
      beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0);
#endif  // TFLITE_EMULATE_FLOAT

  QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
                                   quantized_multiplier, left_shift);
}
// Computes the forward softmax scaling (as PreprocessSoftmaxScaling) and,
// in addition, quantizes its reciprocal so log-softmax can undo the input
// scaling.
void PreprocessLogSoftmaxScalingExp(double beta, double input_scale,
                                    int input_integer_bits,
                                    int32_t* quantized_multiplier,
                                    int* left_shift,
                                    int32_t* reverse_scaling_divisor,
                                    int* reverse_scaling_left_shift) {
  PreprocessSoftmaxScaling(beta, input_scale, input_integer_bits,
                           quantized_multiplier, left_shift);

  // Invert the effective forward multiplier: multiplier / 2^(31 - shift)
  // inverted is 2^(31 - shift) / multiplier.
  const double forward_multiplier = static_cast<double>(*quantized_multiplier);
  const double real_reverse_scaling_divisor =
      (1 << (31 - *left_shift)) / forward_multiplier;
  tflite::QuantizeMultiplierSmallerThanOneExp(real_reverse_scaling_divisor,
                                              reverse_scaling_divisor,
                                              reverse_scaling_left_shift);
}
// Returns the largest quantized-input magnitude whose rescaled value
// (scaled up by 2^(total_signed_bits - input_integer_bits) and down by
// 2^input_left_shift) still fits strictly inside the signed fixed-point
// range.  The TFLITE_EMULATE_FLOAT branch computes the same quantity with
// integer shifts only.
int CalculateInputRadius(int input_integer_bits, int input_left_shift,
                         int total_signed_bits) {
#ifdef TFLITE_EMULATE_FLOAT
  int64_t result = (1 << input_integer_bits) - 1;
  result <<= (total_signed_bits - input_integer_bits);
  result >>= input_left_shift;
  return result;
#else   // TFLITE_EMULATE_FLOAT
  const double max_input_rescaled =
      1.0 * ((1 << input_integer_bits) - 1) *
      (1ll << (total_signed_bits - input_integer_bits)) /
      (1ll << input_left_shift);
  // Tighten bound using floor.  Suppose that we could use the exact value.
  // After scaling the difference, the result would be at the maximum.  Thus we
  // must ensure that our value has lower magnitude.
  return static_cast<int>(std::floor(max_input_rescaled));
#endif  // TFLITE_EMULATE_FLOAT
}
// Nudges the requested real-valued [min, max] range so that its zero point
// lands exactly on an integer quantization level in [quant_min, quant_max],
// writing the adjusted range and the quantization scale to the out-params.
// Ported from tensorflow/core/kernels/fake_quant_ops_functor.h.
void NudgeQuantizationRange(const float min, const float max,
                            const int quant_min, const int quant_max,
                            float* nudged_min, float* nudged_max,
                            float* nudged_scale) {
  const float qmin_float = static_cast<float>(quant_min);
  const float qmax_float = static_cast<float>(quant_max);
  *nudged_scale = (max - min) / (qmax_float - qmin_float);
  // Ideal (real-valued) zero point, then clamped/rounded onto the grid.
  const float zero_point_from_min = qmin_float - min / *nudged_scale;
  uint16 nudged_zero_point;
  if (zero_point_from_min < qmin_float) {
    nudged_zero_point = static_cast<uint16>(quant_min);
  } else if (zero_point_from_min > qmax_float) {
    nudged_zero_point = static_cast<uint16>(quant_max);
  } else {
    nudged_zero_point = static_cast<uint16>(TfLiteRound(zero_point_from_min));
  }
  // Re-derive the representable range from the snapped zero point.
  *nudged_min = (qmin_float - nudged_zero_point) * (*nudged_scale);
  *nudged_max = (qmax_float - nudged_zero_point) * (*nudged_scale);
}
// Fake-quantizes `size` floats: each input is clamped into the nudged
// range, snapped to the nearest quantization level, and mapped back into
// real-valued space.  Ported from
// tensorflow/core/kernels/fake_quant_ops_functor.h.
void FakeQuantizeArray(const float nudged_scale, const float nudged_min,
                       const float nudged_max, const float* input_data,
                       float* output_data, const float size) {
  const float scale_reciprocal = 1.0f / nudged_scale;
  for (int i = 0; i < size; i++) {
    const float clamped =
        std::min(nudged_max, std::max(nudged_min, input_data[i]));
    // Index of the nearest quantization level, relative to nudged_min.
    const float grid_index =
        TfLiteRound((clamped - nudged_min) * scale_reciprocal);
    output_data[i] = grid_index * nudged_scale + nudged_min;
  }
}
// Writes round(log2(x)) to *log2_result and reports whether x is (within a
// small tolerance) an exact power of two.  TfLiteRound and std::log are
// used instead of std::round/std::log2 because the latter were missing
// from a toolchain used by some TensorFlow tests as of May 2018.
bool CheckedLog2(const float x, int* log2_result) {
  const float log2_x = std::log(x) * (1.0f / std::log(2.0f));
  const float rounded = TfLiteRound(log2_x);
  *log2_result = static_cast<int>(rounded);
  // Accept only values whose log2 is within 1e-3 of an integer.
  return std::abs(log2_x - rounded) < 1e-3;
}
// Element-wise QuantizeMultiplier() over `size` parallel arrays: for each
// scale, the fixed-point significand and shift are written to the
// corresponding output slots.
void QuantizeMultiplierArray(const double* effective_scales, size_t size,
                             int32_t* effective_scale_significand,
                             int* effective_shift) {
  for (size_t idx = 0; idx < size; ++idx) {
    QuantizeMultiplier(effective_scales[idx],
                       &effective_scale_significand[idx],
                       &effective_shift[idx]);
  }
}
} // namespace tflite
| chemelnucfin/tensorflow | tensorflow/lite/kernels/internal/quantization_util.cc | C++ | apache-2.0 | 15,557 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
    // Pre-1.0 Rust syntax: `box 100i` heap-allocates an old-style `int`
    // (the `i` suffix) and vec!(...) builds a one-element vector of boxes.
    let vect = vec!(box 100i);
    // Indexing yields the boxed value; equality compares through the box
    // against a freshly allocated equal value.
    assert!(vect[0] == box 100);
}
| quornian/rust | src/test/run-pass/unique-in-vec.rs | Rust | apache-2.0 | 549 |
/*
* Copyright 2015-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.security.store;
import org.onosproject.event.EventListener;
/**
 * Listener for {@link SecurityModeEvent}s emitted by the Security-Mode
 * ONOS store.  Marker interface: it adds no methods beyond
 * {@link org.onosproject.event.EventListener}.
 */
public interface SecurityModeListener extends EventListener<SecurityModeEvent> {
}
| gkatsikas/onos | core/security/src/main/java/org/onosproject/security/store/SecurityModeListener.java | Java | apache-2.0 | 834 |
<?php
use Mockery as m;
class AuthDatabaseReminderRepositoryTest extends PHPUnit_Framework_TestCase {
	/**
	 * Close Mockery after each test so recorded expectations are verified
	 * and mock state does not leak between tests.
	 */
	public function tearDown()
	{
		m::close();
	}
	/**
	 * create() should insert a row into the reminder table and return the
	 * generated token as a non-trivial string.
	 */
	public function testCreateInsertsNewRecordIntoTable()
	{
		// getRepo() presumably wires the repository to a mocked connection
		// with table name 'table' — helper defined elsewhere in this class.
		$repo = $this->getRepo();
		$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
		$query->shouldReceive('insert')->once();
		$user = m::mock('Illuminate\Auth\Reminders\RemindableInterface');
		$user->shouldReceive('getReminderEmail')->andReturn('email');
		$results = $repo->create($user);
		// The token is randomly generated, so only its type/length is checked.
		$this->assertTrue(is_string($results) and strlen($results) > 1);
	}
	/**
	 * exists() should return false when no reminder row matches the
	 * user's email and token.
	 */
	public function testExistReturnsFalseIfNoRowFoundForUser()
	{
		$repo = $this->getRepo();
		$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
		$query->shouldReceive('where')->once()->with('email', 'email')->andReturn($query);
		$query->shouldReceive('where')->once()->with('token', 'token')->andReturn($query);
		// No matching row in the table.
		$query->shouldReceive('first')->andReturn(null);
		$user = m::mock('Illuminate\Auth\Reminders\RemindableInterface');
		$user->shouldReceive('getReminderEmail')->andReturn('email');
		$this->assertFalse($repo->exists($user, 'token'));
	}
public function testExistReturnsFalseIfRecordIsExpired()
{
$repo = $this->getRepo();
$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
$query->shouldReceive('where')->once()->with('email', 'email')->andReturn($query);
$query->shouldReceive('where')->once()->with('token', 'token')->andReturn($query);
$date = date('Y-m-d H:i:s', time() - 300000);
$query->shouldReceive('first')->andReturn((object) array('created_at' => $date));
$user = m::mock('Illuminate\Auth\Reminders\RemindableInterface');
$user->shouldReceive('getReminderEmail')->andReturn('email');
$this->assertFalse($repo->exists($user, 'token'));
}
public function testExistReturnsTrueIfValidRecordExists()
{
$repo = $this->getRepo();
$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
$query->shouldReceive('where')->once()->with('email', 'email')->andReturn($query);
$query->shouldReceive('where')->once()->with('token', 'token')->andReturn($query);
$date = date('Y-m-d H:i:s', time() - 600);
$query->shouldReceive('first')->andReturn((object) array('created_at' => $date));
$user = m::mock('Illuminate\Auth\Reminders\RemindableInterface');
$user->shouldReceive('getReminderEmail')->andReturn('email');
$this->assertTrue($repo->exists($user, 'token'));
}
public function testDeleteMethodDeletesByToken()
{
$repo = $this->getRepo();
$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
$query->shouldReceive('where')->once()->with('token', 'token')->andReturn($query);
$query->shouldReceive('delete')->once();
$repo->delete('token');
}
public function testDeleteExpiredMethodDeletesExpiredTokens()
{
$repo = $this->getRepo();
$repo->getConnection()->shouldReceive('table')->once()->with('table')->andReturn($query = m::mock('StdClass'));
$query->shouldReceive('where')->once()->with('created_at', '<', m::any())->andReturn($query);
$query->shouldReceive('delete')->once();
$repo->deleteExpired();
}
protected function getRepo()
{
return new Illuminate\Auth\Reminders\DatabaseReminderRepository(m::mock('Illuminate\Database\Connection'), 'table', 'key');
}
}
| kacperlukawski/data-processing-frontend | vendor/laravel/framework/tests/Auth/AuthDatabaseReminderRepositoryTest.php | PHP | apache-2.0 | 3,544 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.rave.portal.service.impl;
import org.apache.rave.model.Authority;
import org.apache.rave.rest.model.SearchResult;
import org.apache.rave.portal.repository.AuthorityRepository;
import org.apache.rave.portal.service.AuthorityService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
@Service
public class DefaultAuthorityService implements AuthorityService {

    /** Backing store for authority entities. */
    private final AuthorityRepository repository;

    /**
     * Creates the service backed by the supplied {@link AuthorityRepository}.
     */
    @Autowired
    public DefaultAuthorityService(AuthorityRepository repository) {
        this.repository = repository;
    }

    /** Looks up a single authority by its entity id. */
    @Override
    public Authority getAuthorityById(String entityId) {
        return repository.get(entityId);
    }

    /** Looks up a single authority by its (unique) name. */
    @Override
    public Authority getAuthorityByName(String authorityName) {
        return repository.getByAuthority(authorityName);
    }

    /**
     * Returns every known authority together with the repository's total
     * count.
     */
    @Override
    public SearchResult<Authority> getAllAuthorities() {
        final int totalCount = repository.getCountAll();
        final List<Authority> allAuthorities = repository.getAll();
        return new SearchResult<Authority>(allAuthorities, totalCount);
    }

    /**
     * Returns the authorities marked as defaults; the list size doubles as
     * the result's total count.
     */
    @Override
    public SearchResult<Authority> getDefaultAuthorities() {
        final List<Authority> defaultAuthorities = repository.getAllDefault();
        return new SearchResult<Authority>(defaultAuthorities, defaultAuthorities.size());
    }
}
| kidaa/rave | rave-components/rave-core/src/main/java/org/apache/rave/portal/service/impl/DefaultAuthorityService.java | Java | apache-2.0 | 2,221 |
// Copyright JS Foundation and other contributors, http://js.foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Regression test: a lone (unpaired) surrogate code unit must be emitted
// as a Unicode escape sequence ("\ud834"), not as a raw code unit —
// per the ECMAScript "well-formed JSON.stringify" behaviour.
assert(JSON.stringify("\uD834") === '"\\ud834"');
| jerryscript-project/jerryscript | tests/jerry/regression-test-issue-4129.js | JavaScript | apache-2.0 | 680 |
package com.gemstone.gemfire.internal.tools.gfsh.app.commands;
import com.gemstone.gemfire.internal.tools.gfsh.app.CommandExecutable;
import com.gemstone.gemfire.internal.tools.gfsh.app.Gfsh;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * gfsh "help" command: prints the description of a single command, or of
 * every registered command when no argument is given.
 */
public class help implements CommandExecutable
{
	// Shell instance used for all output.
	private Gfsh gfsh;

	public help(Gfsh gfsh)
	{
		this.gfsh = gfsh;
	}

	/**
	 * Prints usage information for the "help" command itself.
	 */
	@SuppressFBWarnings(value="NM_METHOD_CONSTRUCTOR_CONFUSION",justification="This is method and not constructor")
	public void help()
	{
		gfsh.println("help or ?");
		gfsh.println("     List command descriptions");
		gfsh.println();
	}

	/**
	 * Executes the command line: "help -?" prints this command's own usage,
	 * "help <cmd>" shows help for that command, and a bare "help" lists all
	 * commands.
	 */
	public void execute(String command) throws Exception
	{
		if (command.startsWith("help -?")) {
			help();
			return;
		}
		String[] tokens = command.split(" ");
		if (tokens.length > 1) {
			gfsh.showHelp(tokens[1]);
		} else {
			gfsh.showHelp();
		}
	}
}
| ameybarve15/incubator-geode | gemfire-core/src/main/java/com/gemstone/gemfire/internal/tools/gfsh/app/commands/help.java | Java | apache-2.0 | 909 |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <codecvt>
// template <class Elem, unsigned long Maxcode = 0x10ffff,
// codecvt_mode Mode = (codecvt_mode)0>
// class codecvt_utf8
// : public codecvt<Elem, char, mbstate_t>
// {
// // unspecified
// };
// Not a portable test
#include <codecvt>
#include <cstdlib>
#include <cassert>
#include "count_new.h"
#include "test_macros.h"
int main(int, char**)
{
    using Facet = std::codecvt_utf8<wchar_t>;

    // Nothing may be outstanding on the heap before we start.
    assert(globalMemCounter.checkOutstandingNewEq(0));
    {
        // Constructing the facet directly must not allocate.
        Facet f;
        ((void)f);
        assert(globalMemCounter.checkOutstandingNewEq(0));
    }
    {
        // Installing a heap-allocated facet into a locale hands ownership to
        // the locale, and the allocation must be visible while it lives.
        std::locale loc(std::locale::classic(), new Facet);
        ((void)loc);
        assert(globalMemCounter.checkOutstandingNewNotEq(0));
    }
    // The locale's destructor must have released the facet.
    assert(globalMemCounter.checkOutstandingNewEq(0));

    return 0;
}
| llvm-mirror/libcxx | test/std/localization/locale.stdcvt/codecvt_utf8.pass.cpp | C++ | apache-2.0 | 1,183 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*jslint browser: true, es5: true */
/*globals $: true, rootPath: true */
(function() {
    "use strict";
    // This mapping table should match the discriminants of
    // `rustdoc::html::item_type::ItemType` type in Rust.
    // Index in this array == numeric `ty` value stored in the search index.
    var itemTypes = ["mod",
                     "externcrate",
                     "import",
                     "struct",
                     "enum",
                     "fn",
                     "type",
                     "static",
                     "trait",
                     "impl",
                     "tymethod",
                     "method",
                     "structfield",
                     "variant",
                     "macro",
                     "primitive",
                     "associatedtype",
                     "constant",
                     "associatedconstant"];
    // used for special search precedence
    var TY_PRIMITIVE = itemTypes.indexOf("primitive");
    // Reveal elements that only make sense when JavaScript is enabled.
    $('.js-only').removeClass('js-only');
function getQueryStringParams() {
var params = {};
window.location.search.substring(1).split("&").
map(function(s) {
var pair = s.split("=");
params[decodeURIComponent(pair[0])] =
typeof pair[1] === "undefined" ?
null : decodeURIComponent(pair[1]);
});
return params;
}
function browserSupportsHistoryApi() {
return window.history && typeof window.history.pushState === "function";
}
function highlightSourceLines(ev) {
var i, from, to, match = window.location.hash.match(/^#?(\d+)(?:-(\d+))?$/);
if (match) {
from = parseInt(match[1], 10);
to = Math.min(50000, parseInt(match[2] || match[1], 10));
from = Math.min(from, to);
if ($('#' + from).length === 0) {
return;
}
if (ev === null) { $('#' + from)[0].scrollIntoView(); };
$('.line-numbers span').removeClass('line-highlighted');
for (i = from; i <= to; ++i) {
$('#' + i).addClass('line-highlighted');
}
}
}
highlightSourceLines(null);
$(window).on('hashchange', highlightSourceLines);
// Gets the human-readable string for the virtual-key code of the
// given KeyboardEvent, ev.
//
// This function is meant as a polyfill for KeyboardEvent#key,
// since it is not supported in Trident. We also test for
// KeyboardEvent#keyCode because the handleShortcut handler is
// also registered for the keydown event, because Blink doesn't fire
// keypress on hitting the Escape key.
//
// So I guess you could say things are getting pretty interoperable.
function getVirtualKey(ev) {
if ("key" in ev && typeof ev.key != "undefined")
return ev.key;
var c = ev.charCode || ev.keyCode;
if (c == 27)
return "Escape";
return String.fromCharCode(c);
}
function handleShortcut(ev) {
if (document.activeElement.tagName == "INPUT")
return;
switch (getVirtualKey(ev)) {
case "Escape":
if (!$("#help").hasClass("hidden")) {
ev.preventDefault();
$("#help").addClass("hidden");
$("body").removeClass("blur");
} else if (!$("#search").hasClass("hidden")) {
ev.preventDefault();
$("#search").addClass("hidden");
$("#main").removeClass("hidden");
}
break;
case "s":
case "S":
ev.preventDefault();
focusSearchBar();
break;
case "?":
if (ev.shiftKey && $("#help").hasClass("hidden")) {
ev.preventDefault();
$("#help").removeClass("hidden");
$("body").addClass("blur");
}
break;
}
}
$(document).on("keypress", handleShortcut);
$(document).on("keydown", handleShortcut);
$(document).on("click", function(ev) {
if (!$(e.target).closest("#help > div").length) {
$("#help").addClass("hidden");
$("body").removeClass("blur");
}
});
$('.version-selector').on('change', function() {
var i, match,
url = document.location.href,
stripped = '',
len = rootPath.match(/\.\.\//g).length + 1;
for (i = 0; i < len; ++i) {
match = url.match(/\/[^\/]*$/);
if (i < len - 1) {
stripped = match[0] + stripped;
}
url = url.substring(0, url.length - match[0].length);
}
url += '/' + $('.version-selector').val() + stripped;
document.location.href = url;
});
/**
* A function to compute the Levenshtein distance between two strings
* Licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported
* Full License can be found at http://creativecommons.org/licenses/by-sa/3.0/legalcode
* This code is an unmodified version of the code written by Marco de Wit
* and was found at http://stackoverflow.com/a/18514751/745719
*/
var levenshtein = (function() {
var row2 = [];
return function(s1, s2) {
if (s1 === s2) {
return 0;
}
var s1_len = s1.length, s2_len = s2.length;
if (s1_len && s2_len) {
var i1 = 0, i2 = 0, a, b, c, c2, row = row2;
while (i1 < s1_len) {
row[i1] = ++i1;
}
while (i2 < s2_len) {
c2 = s2.charCodeAt(i2);
a = i2;
++i2;
b = i2;
for (i1 = 0; i1 < s1_len; ++i1) {
c = a + (s1.charCodeAt(i1) !== c2 ? 1 : 0);
a = row[i1];
b = b < a ? (b < c ? b + 1 : c) : (a < c ? a + 1 : c);
row[i1] = b;
}
}
return b;
}
return s1_len + s2_len;
};
})();
    /**
     * Wires the search box up against the given raw search index: builds
     * the word index, installs the input/history handlers, and runs an
     * initial search when the URL carries a ?search= parameter.
     */
    function initSearch(rawSearchIndex) {
        var currentResults, index, searchIndex;
        // Maximum edit distance for a fuzzy match to count as a result.
        var MAX_LEV_DISTANCE = 3;
        var params = getQueryStringParams();
        // Populate search bar with query string search term when provided,
        // but only if the input bar is empty. This avoid the obnoxious issue
        // where you start trying to do a search, and the index loads, and
        // suddenly your search is gone!
        if ($(".search-input")[0].value === "") {
            $(".search-input")[0].value = params.search || '';
        }
/**
* Executes the query and builds an index of results
* @param {[Object]} query [The user query]
* @param {[type]} max [The maximum results returned]
* @param {[type]} searchWords [The list of search words to query
* against]
* @return {[type]} [A search index of results]
*/
function execQuery(query, max, searchWords) {
var valLower = query.query.toLowerCase(),
val = valLower,
typeFilter = itemTypeFromName(query.type),
results = [],
split = valLower.split("::");
// remove empty keywords
for (var j = 0; j < split.length; ++j) {
split[j].toLowerCase();
if (split[j] === "") {
split.splice(j, 1);
}
}
function typePassesFilter(filter, type) {
// No filter
if (filter < 0) return true;
// Exact match
if (filter === type) return true;
// Match related items
var name = itemTypes[type];
switch (itemTypes[filter]) {
case "constant":
return (name == "associatedconstant");
case "fn":
return (name == "method" || name == "tymethod");
case "type":
return (name == "primitive");
}
// No match
return false;
}
// quoted values mean literal search
var nSearchWords = searchWords.length;
if ((val.charAt(0) === "\"" || val.charAt(0) === "'") &&
val.charAt(val.length - 1) === val.charAt(0))
{
val = val.substr(1, val.length - 2);
for (var i = 0; i < nSearchWords; ++i) {
if (searchWords[i] === val) {
// filter type: ... queries
if (typePassesFilter(typeFilter, searchIndex[i].ty)) {
results.push({id: i, index: -1});
}
}
if (results.length === max) {
break;
}
}
// searching by type
} else if (val.search("->") > -1) {
var trimmer = function (s) { return s.trim(); };
var parts = val.split("->").map(trimmer);
var input = parts[0];
// sort inputs so that order does not matter
var inputs = input.split(",").map(trimmer).sort();
var output = parts[1];
for (var i = 0; i < nSearchWords; ++i) {
var type = searchIndex[i].type;
if (!type) {
continue;
}
// sort index inputs so that order does not matter
var typeInputs = type.inputs.map(function (input) {
return input.name;
}).sort();
// allow searching for void (no output) functions as well
var typeOutput = type.output ? type.output.name : "";
if (inputs.toString() === typeInputs.toString() &&
output == typeOutput) {
results.push({id: i, index: -1, dontValidate: true});
}
}
} else {
// gather matching search results up to a certain maximum
val = val.replace(/\_/g, "");
for (var i = 0; i < split.length; ++i) {
for (var j = 0; j < nSearchWords; ++j) {
var lev_distance;
if (searchWords[j].indexOf(split[i]) > -1 ||
searchWords[j].indexOf(val) > -1 ||
searchWords[j].replace(/_/g, "").indexOf(val) > -1)
{
// filter type: ... queries
if (typePassesFilter(typeFilter, searchIndex[j].ty)) {
results.push({
id: j,
index: searchWords[j].replace(/_/g, "").indexOf(val),
lev: 0,
});
}
} else if (
(lev_distance = levenshtein(searchWords[j], val)) <=
MAX_LEV_DISTANCE) {
if (typePassesFilter(typeFilter, searchIndex[j].ty)) {
results.push({
id: j,
index: 0,
// we want lev results to go lower than others
lev: lev_distance,
});
}
}
if (results.length === max) {
break;
}
}
}
}
var nresults = results.length;
for (var i = 0; i < nresults; ++i) {
results[i].word = searchWords[results[i].id];
results[i].item = searchIndex[results[i].id] || {};
}
// if there are no results then return to default and fail
if (results.length === 0) {
return [];
}
results.sort(function sortResults(aaa, bbb) {
var a, b;
// Sort by non levenshtein results and then levenshtein results by the distance
// (less changes required to match means higher rankings)
a = (aaa.lev);
b = (bbb.lev);
if (a !== b) { return a - b; }
// sort by crate (non-current crate goes later)
a = (aaa.item.crate !== window.currentCrate);
b = (bbb.item.crate !== window.currentCrate);
if (a !== b) { return a - b; }
// sort by exact match (mismatch goes later)
a = (aaa.word !== valLower);
b = (bbb.word !== valLower);
if (a !== b) { return a - b; }
// sort by item name length (longer goes later)
a = aaa.word.length;
b = bbb.word.length;
if (a !== b) { return a - b; }
// sort by item name (lexicographically larger goes later)
a = aaa.word;
b = bbb.word;
if (a !== b) { return (a > b ? +1 : -1); }
// sort by index of keyword in item name (no literal occurrence goes later)
a = (aaa.index < 0);
b = (bbb.index < 0);
if (a !== b) { return a - b; }
// (later literal occurrence, if any, goes later)
a = aaa.index;
b = bbb.index;
if (a !== b) { return a - b; }
// special precedence for primitive pages
if ((aaa.item.ty === TY_PRIMITIVE) && (bbb.item.ty !== TY_PRIMITIVE)) {
return -1;
}
// sort by description (no description goes later)
a = (aaa.item.desc === '');
b = (bbb.item.desc === '');
if (a !== b) { return a - b; }
// sort by type (later occurrence in `itemTypes` goes later)
a = aaa.item.ty;
b = bbb.item.ty;
if (a !== b) { return a - b; }
// sort by path (lexicographically larger goes later)
a = aaa.item.path;
b = bbb.item.path;
if (a !== b) { return (a > b ? +1 : -1); }
// que sera, sera
return 0;
});
// remove duplicates, according to the data provided
for (var i = results.length - 1; i > 0; i -= 1) {
if (results[i].word === results[i - 1].word &&
results[i].item.ty === results[i - 1].item.ty &&
results[i].item.path === results[i - 1].item.path &&
(results[i].item.parent || {}).name === (results[i - 1].item.parent || {}).name)
{
results[i].id = -1;
}
}
for (var i = 0; i < results.length; ++i) {
var result = results[i],
name = result.item.name.toLowerCase(),
path = result.item.path.toLowerCase(),
parent = result.item.parent;
// this validation does not make sense when searching by types
if (result.dontValidate) {
continue;
}
var valid = validateResult(name, path, split, parent);
if (!valid) {
result.id = -1;
}
}
return results;
}
/**
* Validate performs the following boolean logic. For example:
* "File::open" will give IF A PARENT EXISTS => ("file" && "open")
* exists in (name || path || parent) OR => ("file" && "open") exists in
* (name || path )
*
* This could be written functionally, but I wanted to minimise
* functions on stack.
*
* @param {[string]} name [The name of the result]
* @param {[string]} path [The path of the result]
* @param {[string]} keys [The keys to be used (["file", "open"])]
* @param {[object]} parent [The parent of the result]
* @return {[boolean]} [Whether the result is valid or not]
*/
function validateResult(name, path, keys, parent) {
for (var i = 0; i < keys.length; ++i) {
// each check is for validation so we negate the conditions and invalidate
if (!(
// check for an exact name match
name.toLowerCase().indexOf(keys[i]) > -1 ||
// then an exact path match
path.toLowerCase().indexOf(keys[i]) > -1 ||
// next if there is a parent, check for exact parent match
(parent !== undefined &&
parent.name.toLowerCase().indexOf(keys[i]) > -1) ||
// lastly check to see if the name was a levenshtein match
levenshtein(name.toLowerCase(), keys[i]) <=
MAX_LEV_DISTANCE)) {
return false;
}
}
return true;
}
function getQuery() {
var matches, type, query, raw = $('.search-input').val();
query = raw;
matches = query.match(/^(fn|mod|struct|enum|trait|type|const|macro)\s*:\s*/i);
if (matches) {
type = matches[1].replace(/^const$/, 'constant');
query = query.substring(matches[0].length);
}
return {
raw: raw,
query: query,
type: type,
id: query + type
};
}
function initSearchNav() {
var hoverTimeout, $results = $('.search-results .result');
$results.on('click', function() {
var dst = $(this).find('a')[0];
if (window.location.pathname === dst.pathname) {
$('#search').addClass('hidden');
$('#main').removeClass('hidden');
document.location.href = dst.href;
}
}).on('mouseover', function() {
var $el = $(this);
clearTimeout(hoverTimeout);
hoverTimeout = setTimeout(function() {
$results.removeClass('highlighted');
$el.addClass('highlighted');
}, 20);
});
$(document).off('keydown.searchnav');
$(document).on('keydown.searchnav', function(e) {
var $active = $results.filter('.highlighted');
if (e.which === 38) { // up
e.preventDefault();
if (!$active.length || !$active.prev()) {
return;
}
$active.prev().addClass('highlighted');
$active.removeClass('highlighted');
} else if (e.which === 40) { // down
e.preventDefault();
if (!$active.length) {
$results.first().addClass('highlighted');
} else if ($active.next().length) {
$active.next().addClass('highlighted');
$active.removeClass('highlighted');
}
} else if (e.which === 13) { // return
e.preventDefault();
if ($active.length) {
document.location.href = $active.find('a').prop('href');
}
} else {
$active.removeClass('highlighted');
}
});
}
function escape(content) {
return $('<h1/>').text(content).html();
}
function showResults(results) {
var output, shown, query = getQuery();
currentResults = query.id;
output = '<h1>Results for ' + escape(query.query) +
(query.type ? ' (type: ' + escape(query.type) + ')' : '') + '</h1>';
output += '<table class="search-results">';
if (results.length > 0) {
shown = [];
results.forEach(function(item) {
var name, type, href, displayPath;
if (shown.indexOf(item) !== -1) {
return;
}
shown.push(item);
name = item.name;
type = itemTypes[item.ty];
if (type === 'mod') {
displayPath = item.path + '::';
href = rootPath + item.path.replace(/::/g, '/') + '/' +
name + '/index.html';
} else if (type === 'static' || type === 'reexport') {
displayPath = item.path + '::';
href = rootPath + item.path.replace(/::/g, '/') +
'/index.html';
} else if (item.parent !== undefined) {
var myparent = item.parent;
var anchor = '#' + type + '.' + name;
displayPath = item.path + '::' + myparent.name + '::';
href = rootPath + item.path.replace(/::/g, '/') +
'/' + itemTypes[myparent.ty] +
'.' + myparent.name +
'.html' + anchor;
} else {
displayPath = item.path + '::';
href = rootPath + item.path.replace(/::/g, '/') +
'/' + type + '.' + name + '.html';
}
output += '<tr class="' + type + ' result"><td>' +
'<a href="' + href + '">' +
displayPath + '<span class="' + type + '">' +
name + '</span></a></td><td>' +
'<a href="' + href + '">' +
'<span class="desc">' + item.desc +
' </span></a></td></tr>';
});
} else {
output += 'No results :( <a href="https://duckduckgo.com/?q=' +
encodeURIComponent('rust ' + query.query) +
'">Try on DuckDuckGo?</a>';
}
output += "</p>";
$('#main.content').addClass('hidden');
$('#search.content').removeClass('hidden').html(output);
$('#search .desc').width($('#search').width() - 40 -
$('#search td:first-child').first().width());
initSearchNav();
}
function search(e) {
var query,
filterdata = [],
obj, i, len,
results = [],
maxResults = 200,
resultIndex;
var params = getQueryStringParams();
query = getQuery();
if (e) {
e.preventDefault();
}
if (!query.query || query.id === currentResults) {
return;
}
// Update document title to maintain a meaningful browser history
$(document).prop("title", "Results for " + query.query + " - Rust");
// Because searching is incremental by character, only the most
// recent search query is added to the browser history.
if (browserSupportsHistoryApi()) {
if (!history.state && !params.search) {
history.pushState(query, "", "?search=" +
encodeURIComponent(query.raw));
} else {
history.replaceState(query, "", "?search=" +
encodeURIComponent(query.raw));
}
}
resultIndex = execQuery(query, 20000, index);
len = resultIndex.length;
for (i = 0; i < len; ++i) {
if (resultIndex[i].id > -1) {
obj = searchIndex[resultIndex[i].id];
filterdata.push([obj.name, obj.ty, obj.path, obj.desc]);
results.push(obj);
}
if (results.length >= maxResults) {
break;
}
}
showResults(results);
}
function itemTypeFromName(typename) {
for (var i = 0; i < itemTypes.length; ++i) {
if (itemTypes[i] === typename) { return i; }
}
return -1;
}
function buildIndex(rawSearchIndex) {
searchIndex = [];
var searchWords = [];
for (var crate in rawSearchIndex) {
if (!rawSearchIndex.hasOwnProperty(crate)) { continue; }
// an array of [(Number) item type,
// (String) name,
// (String) full path or empty string for previous path,
// (String) description,
// (Number | null) the parent path index to `paths`]
// (Object | null) the type of the function (if any)
var items = rawSearchIndex[crate].items;
// an array of [(Number) item type,
// (String) name]
var paths = rawSearchIndex[crate].paths;
// convert `paths` into an object form
var len = paths.length;
for (var i = 0; i < len; ++i) {
paths[i] = {ty: paths[i][0], name: paths[i][1]};
}
// convert `items` into an object form, and construct word indices.
//
// before any analysis is performed lets gather the search terms to
// search against apart from the rest of the data. This is a quick
// operation that is cached for the life of the page state so that
// all other search operations have access to this cached data for
// faster analysis operations
var len = items.length;
var lastPath = "";
for (var i = 0; i < len; ++i) {
var rawRow = items[i];
var row = {crate: crate, ty: rawRow[0], name: rawRow[1],
path: rawRow[2] || lastPath, desc: rawRow[3],
parent: paths[rawRow[4]], type: rawRow[5]};
searchIndex.push(row);
if (typeof row.name === "string") {
var word = row.name.toLowerCase();
searchWords.push(word);
} else {
searchWords.push("");
}
lastPath = row.path;
}
}
return searchWords;
}
function startSearch() {
var keyUpTimeout;
$('.do-search').on('click', search);
$('.search-input').on('keyup', function() {
clearTimeout(keyUpTimeout);
keyUpTimeout = setTimeout(search, 500);
});
// Push and pop states are used to add search results to the browser
// history.
if (browserSupportsHistoryApi()) {
// Store the previous <title> so we can revert back to it later.
var previousTitle = $(document).prop("title");
$(window).on('popstate', function(e) {
var params = getQueryStringParams();
// When browsing back from search results the main page
// visibility must be reset.
if (!params.search) {
$('#main.content').removeClass('hidden');
$('#search.content').addClass('hidden');
}
// Revert to the previous title manually since the History
// API ignores the title parameter.
$(document).prop("title", previousTitle);
// When browsing forward to search results the previous
// search will be repeated, so the currentResults are
// cleared to ensure the search is successful.
currentResults = null;
// Synchronize search bar with query string state and
// perform the search. This will empty the bar if there's
// nothing there, which lets you really go back to a
// previous state with nothing in the bar.
$('.search-input').val(params.search);
// Some browsers fire 'onpopstate' for every page load
// (Chrome), while others fire the event only when actually
// popping a state (Firefox), which is why search() is
// called both here and at the end of the startSearch()
// function.
search();
});
}
search();
}
function plainSummaryLine(markdown) {
markdown.replace(/\n/g, ' ')
.replace(/'/g, "\'")
.replace(/^#+? (.+?)/, "$1")
.replace(/\[(.*?)\]\(.*?\)/g, "$1")
.replace(/\[(.*?)\]\[.*?\]/g, "$1");
}
        index = buildIndex(rawSearchIndex);
        startSearch();
        // Draw a convenient sidebar of known crates if we have a listing
        if (rootPath === '../') {
            var sidebar = $('.sidebar');
            var div = $('<div>').attr('class', 'block crate');
            div.append($('<h2>').text('Crates'));
            var crates = [];
            for (var crate in rawSearchIndex) {
                if (!rawSearchIndex.hasOwnProperty(crate)) { continue; }
                crates.push(crate);
            }
            crates.sort();
            for (var i = 0; i < crates.length; ++i) {
                var klass = 'crate';
                // highlight the crate whose docs are currently shown
                if (crates[i] === window.currentCrate) {
                    klass += ' current';
                }
                if (rawSearchIndex[crates[i]].items[0]) {
                    // items[0][3] is the crate's own description string
                    var desc = rawSearchIndex[crates[i]].items[0][3];
                    div.append($('<a>', {'href': '../' + crates[i] + '/index.html',
                                         'title': plainSummaryLine(desc),
                                         'class': klass}).text(crates[i]));
                }
            }
            sidebar.append(div);
        }
    }
    // Exposed on window so the separately loaded search index script can
    // bootstrap the search once its data is available.
    window.initSearch = initSearch;
// delayed sidebar rendering.
function initSidebarItems(items) {
var sidebar = $('.sidebar');
var current = window.sidebarCurrent;
function block(shortty, longty) {
var filtered = items[shortty];
if (!filtered) { return; }
var div = $('<div>').attr('class', 'block ' + shortty);
div.append($('<h2>').text(longty));
for (var i = 0; i < filtered.length; ++i) {
var item = filtered[i];
var name = item[0];
var desc = item[1]; // can be null
var klass = shortty;
if (name === current.name && shortty === current.ty) {
klass += ' current';
}
var path;
if (shortty === 'mod') {
path = name + '/index.html';
} else {
path = shortty + '.' + name + '.html';
}
div.append($('<a>', {'href': current.relpath + path,
'title': desc,
'class': klass}).text(name));
}
sidebar.append(div);
}
block("mod", "Modules");
block("struct", "Structs");
block("enum", "Enums");
block("trait", "Traits");
block("fn", "Functions");
block("macro", "Macros");
}
window.initSidebarItems = initSidebarItems;
window.register_implementors = function(imp) {
var list = $('#implementors-list');
var libs = Object.getOwnPropertyNames(imp);
for (var i = 0; i < libs.length; ++i) {
if (libs[i] === currentCrate) { continue; }
var structs = imp[libs[i]];
for (var j = 0; j < structs.length; ++j) {
var code = $('<code>').append(structs[j]);
$.each(code.find('a'), function(idx, a) {
var href = $(a).attr('href');
if (href && href.indexOf('http') !== 0) {
$(a).attr('href', rootPath + href);
}
});
var li = $('<li>').append(code);
list.append(li);
}
}
};
if (window.pending_implementors) {
window.register_implementors(window.pending_implementors);
}
// See documentation in html/render.rs for what this is doing.
var query = getQueryStringParams();
if (query['gotosrc']) {
window.location = $('#src-' + query['gotosrc']).attr('href');
}
if (query['gotomacrosrc']) {
window.location = $('.srclink').attr('href');
}
function labelForToggleButton(sectionIsCollapsed) {
if (sectionIsCollapsed) {
// button will expand the section
return "+";
}
// button will collapse the section
// note that this text is also set in the HTML template in render.rs
return "\u2212"; // "\u2212" is '−' minus sign
}
$("#toggle-all-docs").on("click", function() {
var toggle = $("#toggle-all-docs");
if (toggle.hasClass("will-expand")) {
toggle.removeClass("will-expand");
toggle.children(".inner").text(labelForToggleButton(false));
toggle.attr("title", "collapse all docs");
$(".docblock").show();
$(".toggle-label").hide();
$(".toggle-wrapper").removeClass("collapsed");
$(".collapse-toggle").children(".inner").text(labelForToggleButton(false));
} else {
toggle.addClass("will-expand");
toggle.children(".inner").text(labelForToggleButton(true));
toggle.attr("title", "expand all docs");
$(".docblock").hide();
$(".toggle-label").show();
$(".toggle-wrapper").addClass("collapsed");
$(".collapse-toggle").children(".inner").text(labelForToggleButton(true));
}
});
$(document).on("click", ".collapse-toggle", function() {
var toggle = $(this);
var relatedDoc = toggle.parent().next();
if (relatedDoc.is(".stability")) {
relatedDoc = relatedDoc.next();
}
if (relatedDoc.is(".docblock")) {
if (relatedDoc.is(":visible")) {
relatedDoc.slideUp({duration: 'fast', easing: 'linear'});
toggle.parent(".toggle-wrapper").addClass("collapsed");
toggle.children(".inner").text(labelForToggleButton(true));
toggle.children(".toggle-label").fadeIn();
} else {
relatedDoc.slideDown({duration: 'fast', easing: 'linear'});
toggle.parent(".toggle-wrapper").removeClass("collapsed");
toggle.children(".inner").text(labelForToggleButton(false));
toggle.children(".toggle-label").hide();
}
}
});
$(function() {
var toggle = $("<a/>", {'href': 'javascript:void(0)', 'class': 'collapse-toggle'})
.html("[<span class='inner'></span>]");
toggle.children(".inner").text(labelForToggleButton(false));
$(".method").each(function() {
if ($(this).next().is(".docblock") ||
($(this).next().is(".stability") && $(this).next().next().is(".docblock"))) {
$(this).children().first().after(toggle.clone());
}
});
var mainToggle =
$(toggle).append(
$('<span/>', {'class': 'toggle-label'})
.css('display', 'none')
.html(' Expand description'));
var wrapper = $("<div class='toggle-wrapper'>").append(mainToggle);
$("#main > .docblock").before(wrapper);
});
$('pre.line-numbers').on('click', 'span', function() {
var prev_id = 0;
function set_fragment(name) {
if (history.replaceState) {
history.replaceState(null, null, '#' + name);
$(window).trigger('hashchange');
} else {
location.replace('#' + name);
}
}
return function(ev) {
var cur_id = parseInt(ev.target.id, 10);
if (ev.shiftKey && prev_id) {
if (prev_id > cur_id) {
var tmp = prev_id;
prev_id = cur_id;
cur_id = tmp;
}
set_fragment(prev_id + '-' + cur_id);
} else {
prev_id = cur_id;
set_fragment(cur_id);
}
};
}());
}());
// Moves keyboard focus to the search input at the top of the page.
function focusSearchBar() {
    var searchInput = $('.search-input');
    searchInput.focus();
}
| dwillmer/rust | src/librustdoc/html/static/main.js | JavaScript | apache-2.0 | 38,862 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.UI.OpenGL.Label import Label
# Build and run a small scene demonstrating the OpenGL Label component:
# four labels with different styling options, all wired to a single console
# echoer so anything each label sends on its "outbox" is printed.
Graphline(
    Label1 = Label(caption="That", size=(2,2,1), sidecolour=(0,200,0), position=(-3,0,-10)),
    Label2 = Label(caption="Boy", bgcolour=(200,100,0), position=(3,0,-10)),
    Label3 = Label(caption="Needs", margin=15, position=(-1,0,-10), rotation=(30,0,10)),
    Label4 = Label(caption="Therapy!", fontsize=20, size=(1.3,0.3,1), position=(1,0,-10)),
    ECHO = ConsoleEchoer(),
    # Every label's outbox feeds the same console echoer inbox.
    linkages = {
        ("Label1", "outbox") : ("ECHO", "inbox"),
        ("Label2", "outbox") : ("ECHO", "inbox"),
        ("Label3", "outbox") : ("ECHO", "inbox"),
        ("Label4", "outbox") : ("ECHO", "inbox"),
    }
).run()
# Licensed to the BBC under a Contributor Agreement: THF
| sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/OpenGL/MiniExamples/Label.py | Python | apache-2.0 | 1,734 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
/**
* Represents the ResourceManager's view of an application container. See
* {@link RMContainerImpl} for an implementation. Containers may be in one
* of several states, given in {@link RMContainerState}. An RMContainer
* instance may exist even if there is no actual running container, such as
* when resources are being reserved to fill space for a future container
* allocation.
*/
public interface RMContainer extends EventHandler<RMContainerEvent> {
  /** @return the id of this container. */
  ContainerId getContainerId();
  /** @return the application attempt this container belongs to. */
  ApplicationAttemptId getApplicationAttemptId();
  /** @return the current {@link RMContainerState} of this container. */
  RMContainerState getState();
  /** @return the underlying {@link Container} record. */
  Container getContainer();
  /** @return the resource reserved for this container, when it represents a
   *  reservation rather than a running container (see class javadoc). */
  Resource getReservedResource();
  /** @return the node on which the reservation was made. */
  NodeId getReservedNode();
  /** @return the priority of the reservation. */
  Priority getReservedPriority();
  /** @return the resource currently allocated to this container. */
  Resource getAllocatedResource();
  /** @return the last confirmed resource allocation; see RMContainerImpl for
   *  the exact confirmation semantics. */
  Resource getLastConfirmedResource();
  /** @return the node this container is allocated on. */
  NodeId getAllocatedNode();
  /** @return the priority this container was allocated at. */
  Priority getAllocatedPriority();
  /** @return the time at which this container was created. */
  long getCreationTime();
  /** @return the time at which this container finished. */
  long getFinishTime();
  /** @return the diagnostics string recorded for this container. */
  String getDiagnosticsInfo();
  /** @return the URL of the container logs, if available. */
  String getLogURL();
  /** @return the container's exit status. */
  int getContainerExitStatus();
  /** @return the {@link ContainerState} of this container. */
  ContainerState getContainerState();
  /** @return a {@link ContainerReport} snapshot built from this container. */
  ContainerReport createContainerReport();
  /** @return true if this container hosts the ApplicationMaster. */
  boolean isAMContainer();
  /** @return the resource requests associated with this container; see
   *  RMContainerImpl for when these are populated. */
  List<ResourceRequest> getResourceRequests();
  /** @return the http address of the node this container runs on. */
  String getNodeHttpAddress();
  /** @return the node label expression associated with this container. */
  String getNodeLabelExpression();
  /** @return true if a resource-increase reservation is pending for this
   *  container. */
  boolean hasIncreaseReservation();
  /** Cancels any pending resource-increase reservation. */
  void cancelIncreaseReservation();
  /** @return the name of the scheduler queue this container belongs to. */
  String getQueueName();
}
| robzor92/hops | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java | Java | apache-2.0 | 2,859 |
#!/usr/bin/python2.4
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes that define services returned by service_factory."""
class UserInfo(object):
  """Value object describing a single user, as returned by UserInfoService.

  Attributes:
    name: str, display name of the user.
    primary_email: str, canonical email address for the user.
    title: str, the user's work title.
    department: str, name of the user's department.
    employee_type: one of the utils.EmployeeType choices.
    photo_url: str, http URL of the user's photograph.
    location: str, display text for the user's desk/building location.
    country_city: str, 'country-city' formatted location code (e.g. US-SFO);
        should be one of timezone_helper.GetLocationCodes().
  """

  def __init__(self, name, primary_email, title, department, employee_type,
               photo_url, location, country_city):
    # Passive data holder: copy every field verbatim.
    self.name = name
    self.primary_email = primary_email
    self.title = title
    self.department = department
    self.employee_type = employee_type
    self.photo_url = photo_url
    self.location = location
    self.country_city = country_city
class UserInfoService(object):
  """Abstract interface for looking up information about users.

  Concrete implementations are obtained via service_factory.
  """

  def GetManagerInfo(self, email):
    """Looks up the UserInfo of the given user's manager.

    Args:
      email: str, email of the user whose manager is wanted.

    Returns:
      UserInfo for the manager, or None when the user is not valid or does
      not have a valid manager.

    Raises:
      errors.ServiceCriticalError: on critical failures such as an
          unreachable datasource or invalid schema in the datasource.
    """
    raise NotImplementedError

  def GetUserInfoMulti(self, email_list):
    """Looks up UserInfo for each of the given user emails.

    Args:
      email_list: list of str user emails.

    Returns:
      Dict mapping str email -> UserInfo. Emails for which no UserInfo was
      found are simply absent from the dict.

    Raises:
      errors.ServiceCriticalError: on critical failures such as an
          unreachable datasource or invalid schema in the datasource.
    """
    raise NotImplementedError
class DatastoreSyncService(object):
  """Base class for service that syncs entities with external storage."""

  def SyncEntity(self, entity):
    """Write entity to external storage that needs to sync from datastore.

    Args:
      entity: db.model instance to be synced with an external system.

    Raises:
      errors.ServiceCriticalError: The request failed due to a critical error
          like not being able to access a datasource, or finds invalid schema
          in the datasource etc.
    """
    raise NotImplementedError

  def IsModelSynced(self, model_class):
    """Indicates if the Sync service wants models of a class to be synced.

    Args:
      model_class: db.Model subclass from models.py

    Returns:
      True if entities of model_class should be synced.
    """
    # Bug fix: this previously did `raise NotImplemented`. NotImplemented is
    # the builtin comparison sentinel, not an exception, so raising it fails
    # with a TypeError instead of signalling "abstract method" to callers.
    # Raise NotImplementedError like the other abstract methods here.
    raise NotImplementedError
class SearchResult(object):
  """Display info for one program returned from a search.

  Attributes:
    program_key: str, db.Key representation of a models.Program entity.
    program_name: str, name of the models.Program entity.
    program_description: str, description of the models.Program entity.
  """

  def __init__(self, program_key, program_name, program_description):
    # Simple value object; fields are stored as-is.
    self.program_key = program_key
    self.program_name = program_name
    self.program_description = program_description
class SearchService(object):
  """Abstract interface for searching programs."""

  def Search(self, search_text='', search_location='',
             search_start_time=None, search_end_time=None,
             max_results=20):
    """Finds programs matching the given criteria.

    Args:
      search_text: str, text to match against a program's name and/or
          description.
      search_location: str, location tag; matching programs have a
          models.Activity at this location.
      search_start_time: datetime; matching programs have a models.Activity
          starting after this time.
      search_end_time: datetime; matching programs have a models.Activity
          starting before this time.
      max_results: int, maximum number of SearchResults to return.

    Returns:
      Array of SearchResult objects.

    Raises:
      errors.ServiceCriticalError: on critical failures such as an
          unreachable datasource or invalid schema in the datasource.
    """
    raise NotImplementedError
class RoomInfo(object):
  """Room information returned by the RoomInfoService.

  Attributes:
    room_id: str, unique identifier for the room.
    name: str, display name identifying the room.
    calendar_email: str, google calendar room resource email id.
    location: str, display text for the room's building/city location,
        e.g. 'New York', 'NewYork/US', 'BLD1/NewYork/US'.
    country_city: str, 'country-city' formatted location code (e.g. US-SFO);
        should be one of timezone_helper.GetLocationCodes().
  """

  def __init__(self, room_id, name, country_city, calendar_email='',
               location=''):
    self.room_id = room_id
    self.name = name
    self.country_city = country_city
    # An empty/falsy location falls back to the country-city code.
    self.location = location if location else country_city
    self.calendar_email = calendar_email
class RoomInfoService(object):
  """Abstract service that exposes room information."""

  def GetRoomInfoMulti(self, start_offset, num_rooms):
    """Fetches a batch of room information.

    There may be many rooms (on the order of a thousand), so callers page
    through them in batches of ``num_rooms`` starting at ``start_offset``.
    This is used through the admin interface to refresh rooms stored in the
    datastore; rooms change over time, and paging lets the system pull
    information about new ones.

    Args:
      start_offset: int, zero-indexed offset of the first room to return.
      num_rooms: int, number of rooms to return; fewer may be returned when
          the end of the room list is reached.

    Returns:
      List of RoomInfo objects.

    Raises:
      errors.ServiceCriticalError: on critical failures such as an
          unreachable datasource or invalid schema in the datasource.
    """
    raise NotImplementedError
| Chase235/cloudcourse | core/service_interfaces.py | Python | apache-2.0 | 7,183 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf.loaders
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.tf.Tensorflow.typeAttr
import com.intel.analytics.bigdl.utils.tf.TensorflowSpecHelper
import org.tensorflow.framework.{DataType, NodeDef}
class IsFiniteSpec extends TensorflowSpecHelper {
  "IsFinite" should "be correct for float tensor" in {
    // Random 4x4 tensor seeded with one -Inf and one +Inf so that both
    // non-finite cases are exercised alongside ordinary finite values.
    val t = Tensor[Float](4, 4).rand()
    t.setValue(2, 3, Float.NegativeInfinity)
    t.setValue(4, 4, Float.PositiveInfinity)
    // Build an IsFinite NodeDef and fetch output index 0 via the spec
    // helper; t1/t2 are the two results getResult compares (see
    // TensorflowSpecHelper.getResult for the exact contract).
    val (t1, t2) = getResult[Float, Boolean](
      NodeDef.newBuilder()
        .setName("isfinite_test")
        .putAttr("T", typeAttr(DataType.DT_FLOAT))
        .setOp("IsFinite"),
      Seq(t),
      0
    )
    // Element-wise equality: every entry of t1 must match t2.
    t1.map(t2, (a, b) => { a should be(b); b})
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/tf/loaders/IsFiniteSpec.scala | Scala | apache-2.0 | 1,381 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.waveprotocol.wave.client.widget.button;
import com.google.gwt.dom.client.Element;
import com.google.gwt.user.client.ui.Widget;
import java.util.ArrayList;
import java.util.List;
/**
* Helper class for handling multiple orthogonal CSS classnames for an element
* simultaneously.
*
* The motivating case for this class is styles on a button. Buttons might have
* two CSS class names applied at any given time - one class to represent the
* button state (NORMAL, HOVER, DOWN) and one to represent the high level
* 'style' of the button (BLUE_BUTTON, ADD_BUTTON, etc) - and we achieve a
* different look by defining CSS classes like this:
*
* .normal.blue_button { // normal blue button style }
*
* .hover.blue_button { // hovered blue button style }
*
* With {@link StyleAxis} this would be represented as:
*
* StyleAxis buttonState = new StyleAxis(getElement()); StyleAxis buttonStyle =
* new StyleAxis(getElement());
*
* Then we can move along one axis without affecting the other:
*
* buttonState.setStyle("normal"); buttonStyle.setStyle("red_button");
*
*/
public class StyleAxis {
  /** Elements that receive the CSS class changes along this axis. */
  private final List<Element> elements = new ArrayList<Element>();

  /** CSS class currently applied along this axis, or {@code null} for none. */
  private String currentStyle = null;

  /**
   * @param widgets The widgets to apply styles to.
   */
  public StyleAxis(Widget... widgets) {
    for (Widget widget : widgets) {
      if (widget != null) {
        elements.add(widget.getElement());
      }
    }
  }

  /**
   * @param elements The elements to apply styles to.
   */
  public StyleAxis(Element... elements) {
    for (Element element : elements) {
      if (element != null) {
        this.elements.add(element);
      }
    }
  }

  /**
   * Replaces any current style that may be applied with the given style.
   *
   * @param styleName The CSS style to change to, or {@code null} to change to
   *        no style.
   */
  public void setStyle(String styleName) {
    if (isEquivalentStyle(styleName)) {
      return; // Nothing to change.
    }
    // Strip the outgoing class name, if a meaningful one was applied.
    if (currentStyle != null && currentStyle.trim().length() != 0) {
      for (Element element : elements) {
        element.removeClassName(currentStyle);
      }
    }
    // Apply the incoming class name, unless this axis is being cleared.
    if (styleName != null) {
      for (Element element : elements) {
        element.addClassName(styleName);
      }
    }
    currentStyle = styleName;
  }

  /**
   * Returns true if the provided style is equivalent to the current style.
   *
   * @param styleName The style name to compare to the current style
   */
  private boolean isEquivalentStyle(String styleName) {
    if (styleName == null) {
      return currentStyle == null;
    }
    return styleName.equals(currentStyle);
  }
}
| vega113/incubator-wave | wave/src/main/java/org/waveprotocol/wave/client/widget/button/StyleAxis.java | Java | apache-2.0 | 3,630 |
# How to contribute
etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers, and other resources to make getting your contribution into etcd easier.
# Email and chat
- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
## Getting started
- Fork the repository on GitHub
- Read the README.md for build instructions
## Contribution flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work. This is usually master.
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request to coreos/etcd.
Thanks for your contributions!
### Code style
The coding style suggested by the Golang community is used in etcd. See [style doc](https://code.google.com/p/go-wiki/wiki/Style) for details.
Please follow this style to make etcd easy to review, maintain and develop.
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
| wulonghui/kubernetes-release | src/etcd/CONTRIBUTING.md | Markdown | apache-2.0 | 2,008 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import timedelta # noqa
from django.conf import settings
from django.utils import datetime_safe
from keystoneclient import access
from keystoneclient.v2_0 import ec2
from keystoneclient.v2_0 import roles
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from keystoneclient.v3 import domains
from keystoneclient.v3 import groups
from openstack_auth import user as auth_user
from openstack_dashboard.test.test_data import utils
# Dummy service catalog with all service.
# All endpoint URLs should point to example.com.
# Try to keep them as accurate to real data as possible (ports, URIs, etc.)
SERVICE_CATALOG = [
{"type": "compute",
"name": "nova",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8774/v2",
"internalURL": "http://int.nova.example.com:8774/v2",
"publicURL": "http://public.nova.example.com:8774/v2"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova2.example.com:8774/v2",
"internalURL": "http://int.nova2.example.com:8774/v2",
"publicURL": "http://public.nova2.example.com:8774/v2"}]},
{"type": "volume",
"name": "cinder",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"}]},
{"type": "image",
"name": "glance",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.glance.example.com:9292/v1",
"internalURL": "http://int.glance.example.com:9292/v1",
"publicURL": "http://public.glance.example.com:9292/v1"}]},
{"type": "identity",
"name": "keystone",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.keystone.example.com:35357/v2.0",
"internalURL": "http://int.keystone.example.com:5000/v2.0",
"publicURL": "http://public.keystone.example.com:5000/v2.0"}]},
{"type": "object-store",
"name": "swift",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.swift.example.com:8080/",
"internalURL": "http://int.swift.example.com:8080/",
"publicURL": "http://public.swift.example.com:8080/"}]},
{"type": "network",
"name": "neutron",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.neutron.example.com:9696/",
"internalURL": "http://int.neutron.example.com:9696/",
"publicURL": "http://public.neutron.example.com:9696/"}]},
{"type": "ec2",
"name": "EC2 Service",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8773/services/Admin",
"publicURL": "http://public.nova.example.com:8773/services/Cloud",
"internalURL": "http://int.nova.example.com:8773/services/Cloud"}]},
{"type": "metering",
"name": "ceilometer",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.ceilometer.example.com:8777",
"publicURL": "http://public.ceilometer.example.com:8777",
"internalURL": "http://int.ceilometer.example.com:8777"}]},
{"type": "orchestration",
"name": "Heat",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.heat.example.com:8004/v1",
"publicURL": "http://public.heat.example.com:8004/v1",
"internalURL": "http://int.heat.example.com:8004/v1"}]},
{"type": "database",
"name": "Trove",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.trove.example.com:8779/v1.0",
"publicURL": "http://public.trove.example.com:8779/v1.0",
"internalURL": "http://int.trove.example.com:8779/v1.0"}]}
]
def data(TEST):
    """Populates TEST with keystone fixtures: service catalog, roles,
    domains, users, groups, tenants, tokens and EC2 credentials."""
    # Make a deep copy of the catalog to avoid persisting side-effects
    # when tests modify the catalog.
    TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    TEST.tokens = utils.TestDataContainer()
    TEST.domains = utils.TestDataContainer()
    TEST.users = utils.TestDataContainer()
    TEST.groups = utils.TestDataContainer()
    TEST.tenants = utils.TestDataContainer()
    TEST.roles = utils.TestDataContainer()
    TEST.ec2 = utils.TestDataContainer()
    # Roles: an admin role plus the configured default member role.
    admin_role_dict = {'id': '1',
                       'name': 'admin'}
    admin_role = roles.Role(roles.RoleManager, admin_role_dict)
    member_role_dict = {'id': "2",
                        'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE}
    member_role = roles.Role(roles.RoleManager, member_role_dict)
    TEST.roles.add(admin_role, member_role)
    TEST.roles.admin = admin_role
    TEST.roles.member = member_role
    # Domains: one enabled, one disabled.
    domain_dict = {'id': "1",
                   'name': 'test_domain',
                   'description': "a test domain.",
                   'enabled': True}
    domain_dict_2 = {'id': "2",
                     'name': 'disabled_domain',
                     'description': "a disabled test domain.",
                     'enabled': False}
    domain = domains.Domain(domains.DomainManager, domain_dict)
    disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
    TEST.domains.add(domain, disabled_domain)
    TEST.domain = domain  # Your "current" domain
    # Users: five fixtures spread over both domains; user5 has no email.
    user_dict = {'id': "1",
                 'name': 'test_user',
                 'email': 'test@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user = users.User(users.UserManager(None), user_dict)
    user_dict = {'id': "2",
                 'name': 'user_two',
                 'email': 'two@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user2 = users.User(users.UserManager(None), user_dict)
    user_dict = {'id': "3",
                 'name': 'user_three',
                 'email': 'three@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user3 = users.User(users.UserManager(None), user_dict)
    user_dict = {'id': "4",
                 'name': 'user_four',
                 'email': 'four@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "2"}
    user4 = users.User(users.UserManager(None), user_dict)
    user_dict = {'id': "5",
                 'name': 'user_five',
                 'email': None,
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "1"}
    user5 = users.User(users.UserManager(None), user_dict)
    TEST.users.add(user, user2, user3, user4, user5)
    TEST.user = user  # Your "current" user
    TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    # Groups: four fixtures across both domains.
    group_dict = {'id': "1",
                  'name': 'group_one',
                  'description': 'group one description',
                  'project_id': '1',
                  'domain_id': '1'}
    group = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "2",
                  'name': 'group_two',
                  'description': 'group two description',
                  'project_id': '1',
                  'domain_id': '1'}
    group2 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "3",
                  'name': 'group_three',
                  'description': 'group three description',
                  'project_id': '1',
                  'domain_id': '1'}
    group3 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "4",
                  'name': 'group_four',
                  'description': 'group four description',
                  'project_id': '2',
                  'domain_id': '2'}
    group4 = groups.Group(groups.GroupManager(None), group_dict)
    TEST.groups.add(group, group2, group3, group4)
    # Tenants: enabled, disabled, and one with a unicode name.
    tenant_dict = {'id': "1",
                   'name': 'test_tenant',
                   'description': "a test tenant.",
                   'enabled': True,
                   'domain_id': '1',
                   'domain_name': 'test_domain'}
    tenant_dict_2 = {'id': "2",
                     'name': 'disabled_tenant',
                     'description': "a disabled test tenant.",
                     'enabled': False,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant_dict_3 = {'id': "3",
                     'name': u'\u4e91\u89c4\u5219',
                     'description': "an unicode-named tenant.",
                     'enabled': True,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
    disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
    tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)
    TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
    TEST.tenant = tenant  # Your "current" tenant
    # Tokens: both a scoped (has a tenant) and an unscoped token, expiring
    # tomorrow so they are always valid for the duration of a test run.
    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = tomorrow.isoformat()
    scoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration,
                'tenant': tenant_dict,
                'tenants': [tenant_dict]},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    scoped_access_info = access.AccessInfo.factory(resp=None,
                                                   body=scoped_token_dict)
    unscoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    unscoped_access_info = access.AccessInfo.factory(resp=None,
                                                     body=unscoped_token_dict)
    scoped_token = auth_user.Token(scoped_access_info)
    unscoped_token = auth_user.Token(unscoped_access_info)
    TEST.tokens.add(scoped_token, unscoped_token)
    TEST.token = scoped_token  # your "current" token.
    TEST.tokens.scoped_token = scoped_token
    TEST.tokens.unscoped_token = unscoped_token
    # EC2 credentials fixture.
    access_secret = ec2.EC2(ec2.CredentialsManager, {"access": "access",
                                                     "secret": "secret"})
    TEST.ec2.add(access_secret)
| spring-week-topos/horizon-week | openstack_dashboard/test/test_data/keystone_data.py | Python | apache-2.0 | 12,387 |
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def refine_date_col(data, col):
    # Expand the date column `col` of an H2O frame into calendar feature
    # columns, mutating `data` in place.
    data["Day"] = data[col].day()
    data["Month"] = data[col].month() + 1  # Since H2O indexes from 0
    data["Year"] = data[col].year() + 1900  # Start of epoch is 1900
    data["WeekNum"] = data[col].week()
    data["WeekDay"] = data[col].dayOfWeek()
    data["HourOfDay"] = data[col].hour()
    # Create weekend and season cols
    # Spring = Mar, Apr, May. Summer = Jun, Jul, Aug. Autumn = Sep, Oct. Winter = Nov, Dec, Jan, Feb.
    # data["Weekend"] = [1 if x in ("Sun", "Sat") else 0 for x in data["WeekDay"]]
    # Frame-level boolean expression; `|` is H2O's element-wise OR.
    data["Weekend"] = (data["WeekDay"] == "Sun") | (data["WeekDay"] == "Sat")
    assert data["Weekend"].min() < data["Weekend"].max()  # Not a constant result
    # Bucket month numbers into season labels; Winter spans the year boundary,
    # hence it appears at both ends of the label list.
    data["Season"] = data["Month"].cut([0, 2, 5, 7, 10, 12], ["Winter", "Spring", "Summer", "Autumn", "Winter"])
def date_munge():
    # End-to-end check: import the Chicago crimes sample dataset, expand its
    # "Date" column into calendar features, then drop the original column.
    crimes_path = pyunit_utils.locate("smalldata/chicago/chicagoCrimes10k.csv.zip")
    crimes = h2o.import_file(path=crimes_path)
    crimes.describe()
    refine_date_col(crimes, "Date")
    crimes = crimes.drop("Date")
    crimes.describe()
# When invoked directly, run under the pyunit harness; otherwise (imported
# by a test runner that already set up H2O) just execute the test inline.
if __name__ == "__main__":
    pyunit_utils.standalone_test(date_munge)
else:
    date_munge()
| mathemage/h2o-3 | h2o-py/tests/testdir_munging/pyunit_date_munge.py | Python | apache-2.0 | 1,258 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from twitter.common import log
from twitter.common.quantity import Amount, Time
from apache.aurora.common.health_check.http_signaler import HttpSignaler
from .common.task_runner import TaskError, TaskRunner
class HttpLifecycleManager(TaskRunner):
"""A wrapper around a TaskRunner that performs HTTP lifecycle management."""
DEFAULT_ESCALATION_WAIT = Amount(5, Time.SECONDS)
WAIT_POLL_INTERVAL = Amount(1, Time.SECONDS)
  @classmethod
  def wrap(cls, runner, task_instance, portmap):
    """Return a task runner that manages the http lifecycle if lifecycle is present.

    When the task config has no http lifecycle stanza, or the configured
    lifecycle port is not bound in ``portmap``, the original ``runner`` is
    returned unwrapped.
    """
    if not task_instance.has_lifecycle() or not task_instance.lifecycle().has_http():
      return runner
    http_lifecycle = task_instance.lifecycle().http()
    http_lifecycle_port = http_lifecycle.port().get()
    # Each escalation stage gets its configured wait budget, falling back to
    # the 5-second default when unspecified.
    graceful_shutdown_wait_secs = (
        Amount(http_lifecycle.graceful_shutdown_wait_secs().get(), Time.SECONDS)
        if http_lifecycle.has_graceful_shutdown_wait_secs()
        else cls.DEFAULT_ESCALATION_WAIT)
    shutdown_wait_secs = (
        Amount(http_lifecycle.shutdown_wait_secs().get(), Time.SECONDS)
        if http_lifecycle.has_shutdown_wait_secs()
        else cls.DEFAULT_ESCALATION_WAIT)
    if not portmap or http_lifecycle_port not in portmap:
      # If DefaultLifecycle is ever to disable task lifecycle by default, we should
      # raise a TaskError here, since the user has requested http lifecycle without
      # binding a port to send lifecycle commands.
      return runner
    # Escalation order: graceful endpoint first, then the hard shutdown
    # endpoint, each paired with its own wait budget.
    escalation_endpoints = [
        (http_lifecycle.graceful_shutdown_endpoint().get(), graceful_shutdown_wait_secs),
        (http_lifecycle.shutdown_endpoint().get(), shutdown_wait_secs)
    ]
    return cls(runner, portmap[http_lifecycle_port], escalation_endpoints)
def __init__(self,
runner,
lifecycle_port,
escalation_endpoints,
clock=time):
self._runner = runner
self._lifecycle_port = lifecycle_port
self._escalation_endpoints = escalation_endpoints
self._clock = clock
self.__started = False
def _terminate_http(self):
http_signaler = HttpSignaler(self._lifecycle_port)
for endpoint, wait_time in self._escalation_endpoints:
handled, _ = http_signaler(endpoint, use_post_method=True)
log.info('Killing task, calling %s and waiting %s, handled is %s' % (
endpoint, str(wait_time), str(handled)))
waited = Amount(0, Time.SECONDS)
while handled:
if self._runner.status is not None:
return True
if waited >= wait_time:
break
self._clock.sleep(self.WAIT_POLL_INTERVAL.as_(Time.SECONDS))
waited += self.WAIT_POLL_INTERVAL
# --- public interface
def start(self, timeout=None):
self.__started = True
return self._runner.start(timeout=timeout if timeout is not None else self._runner.MAX_WAIT)
def stop(self, timeout=None):
"""Stop the runner. If it's already completed, no-op. If it's still running, issue a kill."""
if not self.__started:
raise TaskError('Failed to call TaskRunner.start.')
log.info('Invoking runner HTTP teardown.')
self._terminate_http()
return self._runner.stop(timeout=timeout if timeout is not None else self._runner.MAX_WAIT)
@property
def status(self):
"""Return the StatusResult of this task runner. This returns None as
long as no terminal state is reached."""
return self._runner.status
| thinker0/aurora | src/main/python/apache/aurora/executor/http_lifecycle.py | Python | apache-2.0 | 4,038 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.spark.SparkContext
/** This trait is a convenience helper for persisting your model to the local
* filesystem. This trait and [[LocalFileSystemPersistentModelLoader]] contain
* concrete implementation and need not be implemented.
*
* The underlying implementation is [[Utils.save]].
*
* {{{
* class MyModel extends LocalFileSystemPersistentModel[MyParams] {
* ...
* }
*
* object MyModel extends LocalFileSystemPersistentModelLoader[MyParams, MyModel] {
* ...
* }
* }}}
*
* @tparam AP Algorithm parameters class.
* @see [[LocalFileSystemPersistentModelLoader]]
* @group Algorithm
*/
trait LocalFileSystemPersistentModel[AP <: Params] extends PersistentModel[AP] {
  def save(id: String, params: AP, sc: SparkContext): Boolean = {
    // Persist this model instance to the local filesystem, keyed by engine instance ID.
    Utils.save(id, this)
    // Always reports success; Utils.save raises on failure rather than returning false.
    true
  }
}
/** Implement an object that extends this trait for PredictionIO to support
* loading a persisted model from local filesystem during serving deployment.
*
* The underlying implementation is [[Utils.load]].
*
* @tparam AP Algorithm parameters class.
* @tparam M Model class.
* @see [[LocalFileSystemPersistentModel]]
* @group Algorithm
*/
trait LocalFileSystemPersistentModelLoader[AP <: Params, M]
  extends PersistentModelLoader[AP, M] {
  def apply(id: String, params: AP, sc: Option[SparkContext]): M = {
    // Read back what LocalFileSystemPersistentModel.save wrote and cast to the
    // concrete model type expected by the caller.
    Utils.load(id).asInstanceOf[M]
  }
}
/** DEPRECATED. Use [[LocalFileSystemPersistentModel]] instead.
  * Retained only for backward compatibility with pre-0.9.2 code.
  *
  * @group Algorithm */
@deprecated("Use LocalFileSystemPersistentModel instead.", "0.9.2")
trait IFSPersistentModel[AP <: Params] extends LocalFileSystemPersistentModel[AP]
/** DEPRECATED. Use [[LocalFileSystemPersistentModelLoader]] instead.
  * Retained only for backward compatibility with pre-0.9.2 code.
  *
  * @group Algorithm */
@deprecated("Use LocalFileSystemPersistentModelLoader instead.", "0.9.2")
trait IFSPersistentModelLoader[AP <: Params, M] extends LocalFileSystemPersistentModelLoader[AP, M]
| pferrel/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/LocalFileSystemPersistentModel.scala | Scala | apache-2.0 | 2,761 |
/*
* Copyright (c) 1994, 1997, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.lang;
/**
* Thrown when the Java Virtual Machine cannot allocate an object
* because it is out of memory, and no more memory could be made
* available by the garbage collector.
*
* @author unascribed
* @since JDK1.0
*/
public
class OutOfMemoryError extends VirtualMachineError {
    /**
     * Constructs an {@code OutOfMemoryError} with no detail message.
     */
    public OutOfMemoryError() {
        super();
    }
    /**
     * Constructs an {@code OutOfMemoryError} with the specified
     * detail message.
     *
     * @param s the detail message.
     */
    public OutOfMemoryError(String s) {
        super(s);
    }
}
| andreagenso/java2scala | test/J2s/java/openjdk-6-src-b27/jdk/src/share/classes/java/lang/OutOfMemoryError.java | Java | apache-2.0 | 1,886 |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from mock import patch
import six
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata, _update_options
from cassandra.cqlengine.models import Model
from tests.integration.cqlengine.base import BaseCassEngTestCase
class LeveledCompactionTestTable(Model):
    # Shared fixture model for the tests below: a table created with leveled
    # compaction and a 64 MB sstable size target.
    __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                  'sstable_size_in_mb': '64'}}
    user_id = columns.UUID(primary_key=True)
    name = columns.Text()
class AlterTableTest(BaseCassEngTestCase):
    """Tests that sync_table detects table option changes and issues ALTERs."""

    def test_alter_is_called_table(self):
        """Re-syncing an existing table must go through the option-update path."""
        drop_table(LeveledCompactionTestTable)
        sync_table(LeveledCompactionTestTable)
        with patch('cassandra.cqlengine.management._update_options') as mock:
            sync_table(LeveledCompactionTestTable)
            # Mock.called is a bool; assert it directly instead of comparing to 1
            # so unittest reports a proper failure message.
            self.assertTrue(mock.called)

    def test_compaction_not_altered_without_changes_leveled(self):
        """No ALTER should be generated when leveled-compaction options are unchanged."""
        class LeveledCompactionChangesDetectionTest(Model):
            __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                          'sstable_size_in_mb': '160',
                                          'tombstone_threshold': '0.125',
                                          'tombstone_compaction_interval': '3600'}}
            pk = columns.Integer(primary_key=True)

        drop_table(LeveledCompactionChangesDetectionTest)
        sync_table(LeveledCompactionChangesDetectionTest)
        # _update_options returns False when the declared options already match.
        self.assertFalse(_update_options(LeveledCompactionChangesDetectionTest))

    def test_compaction_not_altered_without_changes_sizetiered(self):
        """No ALTER should be generated when size-tiered options are unchanged."""
        class SizeTieredCompactionChangesDetectionTest(Model):
            __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                                          'bucket_high': '20',
                                          'bucket_low': '10',
                                          'max_threshold': '200',
                                          'min_threshold': '100',
                                          'min_sstable_size': '1000',
                                          'tombstone_threshold': '0.125',
                                          'tombstone_compaction_interval': '3600'}}
            pk = columns.Integer(primary_key=True)

        drop_table(SizeTieredCompactionChangesDetectionTest)
        sync_table(SizeTieredCompactionChangesDetectionTest)
        self.assertFalse(_update_options(SizeTieredCompactionChangesDetectionTest))

    def test_alter_actually_alters(self):
        """Changing the compaction class and re-syncing must alter the live table."""
        tmp = copy.deepcopy(LeveledCompactionTestTable)
        drop_table(tmp)
        sync_table(tmp)
        tmp.__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}}
        sync_table(tmp)
        table_meta = _get_table_metadata(tmp)
        self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*')

    def test_alter_options(self):
        """Changing a single compaction sub-option must be reflected in table metadata."""
        class AlterTable(Model):
            __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                          'sstable_size_in_mb': '64'}}
            user_id = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AlterTable)
        sync_table(AlterTable)
        table_meta = _get_table_metadata(AlterTable)
        self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*")
        AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128'
        sync_table(AlterTable)
        table_meta = _get_table_metadata(AlterTable)
        self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*")
class OptionsTest(BaseCassEngTestCase):
    # Verifies that every declared __options__ value survives a round trip
    # through sync_table and shows up in the exported CREATE TABLE statement.
    def _verify_options(self, table_meta, expected_options):
        """Assert each expected option (scalar or sub-option dict) appears in the
        table's exported CQL."""
        cql = table_meta.export_as_string()
        for name, value in expected_options.items():
            if isinstance(value, six.string_types):
                # Scalar option: rendered as  name = 'value'
                self.assertIn("%s = '%s'" % (name, value), cql)
            else:
                # Dict-valued option (e.g. compaction): locate its {...} span and
                # check every sub-option lands inside that span.
                start = cql.find("%s = {" % (name,))
                end = cql.find('}', start)
                for subname, subvalue in value.items():
                    attr = "'%s': '%s'" % (subname, subvalue)
                    found_at = cql.find(attr, start)
                    self.assertTrue(found_at > start)
                    self.assertTrue(found_at < end)
    def test_all_size_tiered_options(self):
        # Every documented size-tiered sub-option should round-trip.
        class AllSizeTieredOptionsModel(Model):
            __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                                          'bucket_low': '.3',
                                          'bucket_high': '2',
                                          'min_threshold': '2',
                                          'max_threshold': '64',
                                          'tombstone_compaction_interval': '86400'}}
            cid = columns.UUID(primary_key=True)
            name = columns.Text()
        drop_table(AllSizeTieredOptionsModel)
        sync_table(AllSizeTieredOptionsModel)
        table_meta = _get_table_metadata(AllSizeTieredOptionsModel)
        self._verify_options(table_meta, AllSizeTieredOptionsModel.__options__)
    def test_all_leveled_options(self):
        # Leveled-compaction sub-options should round-trip as well.
        class AllLeveledOptionsModel(Model):
            __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                          'sstable_size_in_mb': '64'}}
            cid = columns.UUID(primary_key=True)
            name = columns.Text()
        drop_table(AllLeveledOptionsModel)
        sync_table(AllLeveledOptionsModel)
        table_meta = _get_table_metadata(AllLeveledOptionsModel)
        self._verify_options(table_meta, AllLeveledOptionsModel.__options__)
| Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/tests/integration/cqlengine/management/test_compaction_settings.py | Python | apache-2.0 | 6,656 |
/* See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Esri Inc. licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.esri.gpt.control.rest.repositories;
import com.esri.gpt.framework.context.RequestContext;
import com.esri.gpt.framework.sql.ManagedConnection;
import com.esri.gpt.framework.util.Val;
import com.esri.gpt.framework.xml.XmlIoUtil;
import com.esri.gpt.framework.xml.XsltTemplate;
import com.esri.gpt.framework.xml.XsltTemplates;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.xml.transform.TransformerException;
import org.xml.sax.SAXException;
/**
 * Provides utility functions associated with registered CSW repositories.
 */
public class CswRepository {
  /** class variables ============================================================ */
  /** Cached templates */
  private static XsltTemplates XSLTTEMPLATES = new XsltTemplates();
  /** XSLT for transforming CSW capabilities to HTML: "gpt/metadata/ogc/csw-to-html.xslt" */
  public static final String XSLT_CSW_TO_HTML = "gpt/metadata/ogc/csw-to-html.xslt";
  /** constructors ============================================================ */
  /** Default constructor. */
  public CswRepository() {}
  /** methods ================================================================= */
  /**
   * Gets a compiled XSLT template.
   * Compiled templates are cached in the static XSLTTEMPLATES map keyed by path;
   * the method is synchronized to guard the cache against concurrent compilation.
   * @param xsltPath the path to an XSLT
   * @return the compiled template
   * @throws IOException if an IO exception occurs
   * @throws TransformerException if a transformation exception occurs
   * @throws SAXException if a SAX parsing exception occurs
   */
  private synchronized XsltTemplate getCompiledTemplate(String xsltPath)
    throws TransformerException {
    String sKey = xsltPath;
    XsltTemplate template = XSLTTEMPLATES.get(sKey);
    if (template == null) {
      template = XsltTemplate.makeTemplate(xsltPath);
      XSLTTEMPLATES.put(sKey,template);
    }
    return template;
  }
  /**
   * Returns the URL associated with a registered CSW repository.
   * Returns null when the id is empty, unknown, or registered with a
   * non-CSW protocol.
   * @param context request context
   * @param rid the remote CSW repository id
   * @return the registered URL for the remote repository
   * @throws SQLException if an exception occurs
   */
  public String queryCswUrl(RequestContext context, String rid) throws SQLException {
    PreparedStatement st = null;
    ManagedConnection mcon = null;
    rid = Val.chkStr(rid);
    if (rid.length() > 0) {
      try {
        // rid may be either a numeric ID or a UUID; choose the matching column.
        int nId = -1;
        String field = "UUID";
        try {
          nId = Integer.parseInt(rid);
          field = "ID";
        } catch (NumberFormatException nfe) {}
        String table = context.getCatalogConfiguration().getResourceTableName();
        String sql = "SELECT PROTOCOL_TYPE,HOST_URL FROM "+table+" WHERE "+field+"=?";
        mcon = context.getConnectionBroker().returnConnection("");
        st = mcon.getJdbcConnection().prepareStatement(sql);
        if (field.equalsIgnoreCase("ID")) {
          st.setInt(1,nId);
        } else {
          st.setString(1,rid);
        }
        ResultSet rs = st.executeQuery();
        if (rs.next()) {
          // Only CSW-protocol records qualify; anything else yields null.
          if (Val.chkStr(rs.getString(1)).equalsIgnoreCase("CSW")) {
            return rs.getString(2);
          }
        }
      } finally {
        // Closing the statement also releases its ResultSet.
        try {if (st != null) st.close();} catch (Exception ef) {}
        context.getConnectionBroker().closeConnection(mcon);
      }
    }
    return null;
  }
  /**
   * Transforms the response of a CSW GetCapabilities request to an HTML
   * suitable for display within the GPT search page.
   * <br/>gpt/metadata/ogc/csw-to-html.xslt
   * @param url the CSW GetCapabilities URL
   * @return the HTML representation
   * @throws TransformerException if an exception occurs
   */
  public String transformToHtml(String url) throws TransformerException {
    XsltTemplate template = getCompiledTemplate(CswRepository.XSLT_CSW_TO_HTML);
    String xml = template.transform(XmlIoUtil.readXml(url));
    return xml;
  }
}
| GeoinformationSystems/GeoprocessingAppstore | src/com/esri/gpt/control/rest/repositories/CswRepository.java | Java | apache-2.0 | 4,647 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.micrometer;
import java.util.Map;
import java.util.stream.Stream;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.Tags;
import org.apache.camel.Endpoint;
import org.apache.camel.impl.UriEndpointComponent;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.Registry;
import org.apache.camel.util.StringHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Represents the component that manages Micrometer endpoints.
*/
public class MicrometerComponent extends UriEndpointComponent {
    // Meter type used when the endpoint URI does not carry an explicit "type:" prefix.
    public static final Meter.Type DEFAULT_METER_TYPE = Meter.Type.COUNTER;
    private static final Logger LOG = LoggerFactory.getLogger(MicrometerComponent.class);
    @Metadata(label = "advanced")
    private MeterRegistry metricsRegistry;
    public MicrometerComponent() {
        super(MicrometerEndpoint.class);
    }
    @Override
    protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
        // Lazily resolve the registry from the Camel registry on first use
        // unless one was injected via setMetricsRegistry().
        if (metricsRegistry == null) {
            Registry camelRegistry = getCamelContext().getRegistry();
            metricsRegistry = MicrometerUtils.getOrCreateMeterRegistry(camelRegistry, MicrometerConstants.METRICS_REGISTRY_NAME);
        }
        String metricsName = getMetricsName(remaining);
        Meter.Type metricsType = getMetricsType(remaining);
        Iterable<Tag> tags = getMetricsTag(parameters);
        LOG.debug("Metrics type: {}; name: {}; tags: {}", metricsType, metricsName, tags);
        Endpoint endpoint = new MicrometerEndpoint(uri, this, metricsRegistry, metricsType, metricsName, tags);
        setProperties(endpoint, parameters);
        return endpoint;
    }
    // Extracts the meter name from "type:name"; the whole string when no ':' is present.
    String getMetricsName(String remaining) {
        String name = StringHelper.after(remaining, ":");
        return name == null ? remaining : name;
    }
    // Extracts the meter type from "type:name"; falls back to DEFAULT_METER_TYPE.
    Meter.Type getMetricsType(String remaining) {
        String type = StringHelper.before(remaining, ":");
        return type == null
                ? DEFAULT_METER_TYPE
                : MicrometerUtils.getByName(type);
    }
    // Parses the "tags" URI parameter ("k1=v1,k2=v2") into Micrometer Tags.
    Iterable<Tag> getMetricsTag(Map<String, Object> parameters) {
        String tagsString = getAndRemoveParameter(parameters, "tags", String.class, "");
        if (tagsString != null && !tagsString.isEmpty()) {
            String[] tagStrings = tagsString.split("\\s*,\\s*");
            return Stream.of(tagStrings)
                    .map(s -> Tags.of(s.split("\\s*=\\s*")))
                    .reduce(Tags.empty(), Tags::and);
        }
        return Tags.empty();
    }
    public MeterRegistry getMetricsRegistry() {
        return metricsRegistry;
    }
    /**
     * To use a custom configured MetricRegistry.
     */
    public void setMetricsRegistry(MeterRegistry metricsRegistry) {
        this.metricsRegistry = metricsRegistry;
    }
}
| onders86/camel | components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/MicrometerComponent.java | Java | apache-2.0 | 3,798 |
/*
* Copyright 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef vaapidecoder_mpeg2_h
#define vaapidecoder_mpeg2_h
#include "codecparsers/mpeg2_parser.h"
#include "common/Functional.h"
#include "vaapidecoder_base.h"
#include "vaapidecpicture.h"
#include <list>
#include <va/va.h>
#include <vector>
namespace YamiMediaCodec {
// Picture structure values; numbering matches the MPEG-2 picture coding
// extension's picture_structure field (top = 1, bottom = 2, frame = 3).
enum Mpeg2PictureStructureType {
    kTopField = 1,
    kBottomField,
    kFramePicture,
};
enum {
    kMaxRefPictures = 2, // MPEG-2 needs at most two reference pictures (fwd + bwd)
    kMinSurfaces = 8,    // minimum decode surfaces requested from the allocator
};
// Non-owning pointers to the four quantiser matrices currently in effect;
// updated as sequence headers / quant matrix extensions are parsed.
struct IQMatricesRefs {
    IQMatricesRefs();
    const uint8_t* intra_quantiser_matrix;
    const uint8_t* non_intra_quantiser_matrix;
    const uint8_t* chroma_intra_quantiser_matrix;
    const uint8_t* chroma_non_intra_quantiser_matrix;
};
class VaapiDecPictureMpeg2;
// VA-API based MPEG-2 video decoder.
class VaapiDecoderMPEG2 : public VaapiDecoderBase {
public:
    typedef SharedPtr<VaapiDecPictureMpeg2> PicturePtr;
    VaapiDecoderMPEG2();
    virtual ~VaapiDecoderMPEG2();
    virtual YamiStatus start(VideoConfigBuffer*);
    virtual YamiStatus reset(VideoConfigBuffer*);
    virtual void stop(void);
    virtual void flush(void);
    virtual YamiStatus decode(VideoDecodeBuffer*);
private:
    // mpeg2 DPB class
    class DPB {
        // Callback invoked when a decoded picture is ready for output.
        typedef std::function<YamiStatus(const PicturePtr&)>
            OutputCallback;
    public:
        DPB(OutputCallback callback)
            : m_numberSurfaces(kMaxRefPictures)
            , m_outputPicture(callback)
        {
        }
        void flush();
        bool isEmpty() { return m_referencePictures.empty(); }
        YamiStatus insertPicture(const PicturePtr& picture);
        YamiStatus insertPictureToReferences(const PicturePtr& picture);
        YamiStatus getReferencePictures(const PicturePtr& current_picture,
            PicturePtr& previous_picture,
            PicturePtr& next_picture);
        YamiStatus callOutputPicture(const PicturePtr& picture)
        {
            return m_outputPicture(picture);
        }
        YamiStatus outputPreviousPictures(const PicturePtr& picture,
            bool empty = false);
    private:
        // set to minimum number of surfaces required to operate
        uint32_t m_numberSurfaces;
        std::list<PicturePtr> m_referencePictures;
        OutputCallback m_outputPicture;
    };
    // mpeg2 DPB class
    typedef SharedPtr<YamiParser::MPEG2::Parser> ParserPtr;
    typedef SharedPtr<YamiParser::MPEG2::StreamHeader> StreamHdrPtr;
    friend class FactoryTest<IVideoDecoder, VaapiDecoderMPEG2>;
    friend class VaapiDecoderMPEG2Test;
    // Helpers that translate parsed bitstream structures into VA-API buffers.
    void fillSliceParams(VASliceParameterBufferMPEG2* slice_param,
        const YamiParser::MPEG2::Slice* slice);
    void fillPictureParams(VAPictureParameterBufferMPEG2* param,
        const PicturePtr& picture);
    YamiStatus fillConfigBuffer();
    YamiStatus
    convertToVAProfile(const YamiParser::MPEG2::ProfileType& profile);
    YamiStatus checkLevel(const YamiParser::MPEG2::LevelType& level);
    bool isSliceCode(YamiParser::MPEG2::StartCodeType next_code);
    // Top-level decode stages.
    YamiStatus processConfigBuffer();
    YamiStatus processDecodeBuffer();
    YamiStatus preDecode(StreamHdrPtr shdr);
    YamiStatus processSlice();
    YamiStatus decodeGOP();
    YamiStatus assignSurface();
    YamiStatus assignPicture();
    YamiStatus createPicture();
    YamiStatus loadIQMatrix();
    bool updateIQMatrix(const YamiParser::MPEG2::QuantMatrices* refIQMatrix,
        bool reset = false);
    YamiStatus decodePicture();
    YamiStatus outputPicture(const PicturePtr& picture);
    YamiStatus findReusePicture(std::list<PicturePtr>& list, bool& reuse);
    // Parser state and the most recently parsed bitstream headers.
    ParserPtr m_parser;
    StreamHdrPtr m_stream;
    const YamiParser::MPEG2::SeqHeader* m_sequenceHeader;
    const YamiParser::MPEG2::SeqExtension* m_sequenceExtension;
    const YamiParser::MPEG2::GOPHeader* m_GOPHeader; //check what's the use here
    const YamiParser::MPEG2::PictureHeader* m_pictureHeader;
    const YamiParser::MPEG2::PictureCodingExtension* m_pictureCodingExtension;
    const YamiParser::MPEG2::QuantMatrixExtension* m_quantMatrixExtension;
    DPB m_DPB;
    VASliceParameterBufferMPEG2* mpeg2SliceParams;
    // Decoding-state flags.
    bool m_VAStart;
    bool m_isParsingSlices;
    bool m_loadNewIQMatrix;
    bool m_canCreatePicture;
    PicturePtr m_currentPicture;
    YamiParser::MPEG2::StartCodeType m_previousStartCode;
    YamiParser::MPEG2::StartCodeType m_nextStartCode;
    IQMatricesRefs m_IQMatrices;
    VAProfile m_VAProfile;
    uint64_t m_currentPTS;
    // Pending field pictures awaiting their matching opposite-parity field.
    std::list<PicturePtr> m_topFieldPictures;
    std::list<PicturePtr> m_bottomFieldPictures;
    /**
     * VaapiDecoderFactory registration result. This decoder is registered in
     * vaapidecoder_host.cpp
     */
    static const bool s_registered;
};
} // namespace YamiMediaCodec
#endif
| lizhong1008/libyami | decoder/vaapidecoder_mpeg2.h | C | apache-2.0 | 5,338 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sharded_mutable_dense_hashtable.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sharded_mutable_dense_hashtable import ShardedMutableDenseHashTable
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class ShardedMutableDenseHashTableTest(TensorFlowTestCase):
  """Tests for the ShardedMutableHashTable class."""
  def testShardedMutableHashTable(self):
    # Insert/lookup of scalar int64 values should behave identically for any
    # shard count.
    for num_shards in [1, 3, 10]:
      with self.test_session():
        default_val = -1
        empty_key = 0
        keys = tf.constant([11, 12, 13], tf.int64)
        values = tf.constant([0, 1, 2], tf.int64)
        table = ShardedMutableDenseHashTable(
            tf.int64, tf.int64, default_val, empty_key, num_shards=num_shards)
        self.assertAllEqual(0, table.size().eval())
        table.insert(keys, values).run()
        self.assertAllEqual(3, table.size().eval())
        # Key 14 is absent, so its lookup yields default_val (-1).
        input_string = tf.constant([11, 12, 14], tf.int64)
        output = table.lookup(input_string)
        self.assertAllEqual([3], output.get_shape())
        self.assertAllEqual([0, 1, -1], output.eval())
  def testShardedMutableHashTableVectors(self):
    # Same as above but with vector keys and float vector values.
    for num_shards in [1, 3, 10]:
      with self.test_session():
        default_val = [-0.1, 0.2]
        empty_key = [0, 1]
        keys = tf.constant([[11, 12], [13, 14], [15, 16]], tf.int64)
        values = tf.constant([[0.5, 0.6], [1.5, 1.6], [2.5, 2.6]], tf.float32)
        table = ShardedMutableDenseHashTable(
            tf.int64, tf.float32, default_val, empty_key, num_shards=num_shards)
        self.assertAllEqual(0, table.size().eval())
        table.insert(keys, values).run()
        self.assertAllEqual(3, table.size().eval())
        # [11, 14] is not an inserted key pair, so the default vector comes back.
        input_string = tf.constant([[11, 12], [13, 14], [11, 14]], tf.int64)
        output = table.lookup(input_string)
        self.assertAllEqual([3, 2], output.get_shape())
        self.assertAllClose([[0.5, 0.6], [1.5, 1.6], [-0.1, 0.2]],
                            output.eval())
  def testExportSharded(self):
    # export_sharded returns per-shard key/value tensors, including the
    # placeholder entries for empty buckets.
    with self.test_session():
      empty_key = -2
      default_val = -1
      num_shards = 2
      keys = tf.constant([10, 11, 12], tf.int64)
      values = tf.constant([2, 3, 4], tf.int64)
      table = ShardedMutableDenseHashTable(
          tf.int64, tf.int64, default_val, empty_key, num_shards=num_shards)
      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())
      keys_list, values_list = table.export_sharded()
      self.assertAllEqual(num_shards, len(keys_list))
      self.assertAllEqual(num_shards, len(values_list))
      # Exported keys include empty key buckets set to the empty_key
      self.assertAllEqual(set([-2, 10, 12]), set(keys_list[0].eval().flatten()))
      self.assertAllEqual(set([-2, 11]), set(keys_list[1].eval().flatten()))
      # Exported values include empty value buckets set to 0
      self.assertAllEqual(set([0, 2, 4]), set(values_list[0].eval().flatten()))
      self.assertAllEqual(set([0, 3]), set(values_list[1].eval().flatten()))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| laosiaudi/tensorflow | tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable_test.py | Python | apache-2.0 | 4,000 |
package org.acme.quarkus.sample;
import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.acme.common.CommonBean;
@Path("/hello")
public class HelloResource {
@Inject
CommonBean common;
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
return "hello " + common.getName();
}
} | quarkusio/quarkus | integration-tests/gradle/src/main/resources/uber-jar-for-multi-module-project/application/src/main/java/org/acme/quarkus/sample/HelloResource.java | Java | apache-2.0 | 418 |
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// ServiceEndpointPoliciesClient is the network Client for service endpoint
// policy operations. It embeds BaseClient, which carries the shared autorest
// configuration (base URI, subscription, authorizer).
type ServiceEndpointPoliciesClient struct {
	BaseClient
}
// NewServiceEndpointPoliciesClient creates an instance of the ServiceEndpointPoliciesClient client
// targeting DefaultBaseURI (the public Azure cloud endpoint).
func NewServiceEndpointPoliciesClient(subscriptionID string) ServiceEndpointPoliciesClient {
	return NewServiceEndpointPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewServiceEndpointPoliciesClientWithBaseURI creates an instance of the ServiceEndpointPoliciesClient
// client against a caller-supplied base URI (e.g. a sovereign or stack cloud).
func NewServiceEndpointPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ServiceEndpointPoliciesClient {
	return ServiceEndpointPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a service Endpoint Policies.
// This is a long-running operation: the returned future must be waited on
// for the final result.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy.
// parameters - parameters supplied to the create or update service endpoint policy operation.
func (client ServiceEndpointPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters ServiceEndpointPolicy) (result ServiceEndpointPoliciesCreateOrUpdateFuture, err error) {
	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serviceEndpointPolicyName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
		return
	}

	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ServiceEndpointPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters ServiceEndpointPolicy) (*http.Request, error) {
	// URL path segments identifying the policy being created or updated.
	pathParams := map[string]interface{}{
		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
		"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	// PUT with a JSON body containing the policy definition.
	p := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}", pathParams),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) CreateOrUpdateSender(req *http.Request) (future ServiceEndpointPoliciesCreateOrUpdateFuture, err error) {
	resp, sendErr := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if sendErr != nil {
		err = sendErr
		return
	}
	// Wrap the initial response in a pollable future for the long-running operation.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ServiceEndpointPolicy, err error) {
	// 200 (updated) and 201 (created) are both success; anything else is an error.
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// Delete deletes the specified service endpoint policy.
// This is a long-running operation: the returned future tracks the deletion
// started by the service and must be polled for completion.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy.
func (client ServiceEndpointPoliciesClient) Delete(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string) (result ServiceEndpointPoliciesDeleteFuture, err error) {
	// Build the DELETE request.
	req, err := client.DeletePreparer(ctx, resourceGroupName, serviceEndpointPolicyName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Delete", nil, "Failure preparing request")
		return
	}
	// Send it; the sender wraps the initial response in a pollable future.
	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Delete", result.Response(), "Failure sending request")
		return
	}
	return
}
// DeletePreparer prepares the Delete request.
func (client ServiceEndpointPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string) (*http.Request, error) {
	// URL path segments identifying the policy to delete.
	pathParams := map[string]interface{}{
		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
		"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	p := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}", pathParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) DeleteSender(req *http.Request) (future ServiceEndpointPoliciesDeleteFuture, err error) {
	resp, sendErr := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if sendErr != nil {
		err = sendErr
		return
	}
	// Wrap the initial response in a pollable future for the long-running operation.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	// Deletion may complete synchronously (200/204) or be accepted for async processing (202).
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	return autorest.Response{Response: resp}, err
}
// Get gets the specified service Endpoint Policies in a specified resource group.
// This is a synchronous prepare -> send -> respond pipeline.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy.
// expand - expands referenced resources.
func (client ServiceEndpointPoliciesClient) Get(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, expand string) (result ServiceEndpointPolicy, err error) {
	// Build the GET request.
	req, err := client.GetPreparer(ctx, resourceGroupName, serviceEndpointPolicyName, expand)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Get", nil, "Failure preparing request")
		return
	}
	resp, err := client.GetSender(req)
	if err != nil {
		// Attach the (possibly nil) raw response so callers can inspect it on failure.
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Get", resp, "Failure sending request")
		return
	}
	// Unmarshal the body into the result.
	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Get", resp, "Failure responding to request")
	}
	return
}
// GetPreparer prepares the Get request.
func (client ServiceEndpointPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, expand string) (*http.Request, error) {
	// URL path segments identifying the policy to fetch.
	pathParams := map[string]interface{}{
		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
		"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	// $expand is optional; only add it when the caller supplied a value.
	if len(expand) > 0 {
		queryParams["$expand"] = autorest.Encode("query", expand)
	}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}", pathParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
	retry := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retry)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) GetResponder(resp *http.Response) (result ServiceEndpointPolicy, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// List gets all the service endpoint policies in a subscription.
// The returned page advances lazily: result.fn (listNextResults) is invoked
// by the page/iterator types to fetch subsequent pages.
func (client ServiceEndpointPoliciesClient) List(ctx context.Context) (result ServiceEndpointPolicyListResultPage, err error) {
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "List", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		// Attach the raw response to the first page so callers can inspect the failure.
		result.seplr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "List", resp, "Failure sending request")
		return
	}
	// Unmarshal the first page of results.
	result.seplr, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "List", resp, "Failure responding to request")
	}
	return
}
// ListPreparer prepares the List request.
func (client ServiceEndpointPoliciesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
	// Subscription-wide listing only needs the subscription id in the path.
	pathParams := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies", pathParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) ListSender(req *http.Request) (*http.Response, error) {
	retry := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retry)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) ListResponder(resp *http.Response) (result ServiceEndpointPolicyListResult, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// listNextResults retrieves the next set of results, if any.
func (client ServiceEndpointPoliciesClient) listNextResults(lastResults ServiceEndpointPolicyListResult) (result ServiceEndpointPolicyListResult, err error) {
	// Build a request from the next-link of the previous page.
	req, err := lastResults.serviceEndpointPolicyListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next link, i.e. no further pages.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ServiceEndpointPoliciesClient) ListComplete(ctx context.Context) (result ServiceEndpointPolicyListResultIterator, err error) {
	var firstPage ServiceEndpointPolicyListResultPage
	firstPage, err = client.List(ctx)
	result.page = firstPage
	return
}
// ListByResourceGroup gets all service endpoint Policies in a resource group.
// The returned page advances lazily via listByResourceGroupNextResults.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ServiceEndpointPoliciesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ServiceEndpointPolicyListResultPage, err error) {
	result.fn = client.listByResourceGroupNextResults
	req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "ListByResourceGroup", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		// Attach the raw response to the first page so callers can inspect the failure.
		result.seplr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "ListByResourceGroup", resp, "Failure sending request")
		return
	}
	// Unmarshal the first page of results.
	result.seplr, err = client.ListByResourceGroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "ListByResourceGroup", resp, "Failure responding to request")
	}
	return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client ServiceEndpointPoliciesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
	// Resource-group-scoped listing: subscription id plus resource group in the path.
	pathParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies", pathParams),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
	retry := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retry)
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) ListByResourceGroupResponder(resp *http.Response) (result ServiceEndpointPolicyListResult, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client ServiceEndpointPoliciesClient) listByResourceGroupNextResults(lastResults ServiceEndpointPolicyListResult) (result ServiceEndpointPolicyListResult, err error) {
	// Build a request from the next-link of the previous page.
	req, err := lastResults.serviceEndpointPolicyListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next link, i.e. no further pages.
	if req == nil {
		return
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListByResourceGroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client ServiceEndpointPoliciesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ServiceEndpointPolicyListResultIterator, err error) {
	var firstPage ServiceEndpointPolicyListResultPage
	firstPage, err = client.ListByResourceGroup(ctx, resourceGroupName)
	result.page = firstPage
	return
}
// Update updates service Endpoint Policies.
// This is a long-running PATCH that only updates the resource tags; the
// returned future must be polled for completion.
// Parameters:
// resourceGroupName - the name of the resource group.
// serviceEndpointPolicyName - the name of the service endpoint policy.
// parameters - parameters supplied to update service endpoint policy tags.
func (client ServiceEndpointPoliciesClient) Update(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters TagsObject) (result ServiceEndpointPoliciesUpdateFuture, err error) {
	// Build the PATCH request carrying the new tags.
	req, err := client.UpdatePreparer(ctx, resourceGroupName, serviceEndpointPolicyName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Update", nil, "Failure preparing request")
		return
	}
	// Send it; the sender wraps the initial response in a pollable future.
	result, err = client.UpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesClient", "Update", result.Response(), "Failure sending request")
		return
	}
	return
}
// UpdatePreparer prepares the Update request.
func (client ServiceEndpointPoliciesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters TagsObject) (*http.Request, error) {
	// URL path segments identifying the policy whose tags are updated.
	pathParams := map[string]interface{}{
		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
		"serviceEndpointPolicyName": autorest.Encode("path", serviceEndpointPolicyName),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}
	const APIVersion = "2018-08-01"
	queryParams := map[string]interface{}{
		"api-version": APIVersion,
	}
	// PATCH with a JSON body containing only the tags object.
	p := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}", pathParams),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParams))
	return p.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ServiceEndpointPoliciesClient) UpdateSender(req *http.Request) (future ServiceEndpointPoliciesUpdateFuture, err error) {
	resp, sendErr := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if sendErr != nil {
		err = sendErr
		return
	}
	// Wrap the initial response in a pollable future for the long-running operation.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client ServiceEndpointPoliciesClient) UpdateResponder(resp *http.Response) (result ServiceEndpointPolicy, err error) {
	decorators := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, decorators...)
	result.Response = autorest.Response{Response: resp}
	return result, err
}
| wangxing1517/kubernetes | vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/serviceendpointpolicies.go | GO | apache-2.0 | 21,674 |
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.yahoo.ycsb.db.solr6;
import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.Status;
import com.yahoo.ycsb.StringByteIterator;
import org.apache.solr.client.solrj.embedded.JettyConfig;
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.common.util.NamedList;
import org.junit.*;
import java.io.File;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
 * Base class for Solr YCSB client binding tests.
 * <p/>
 * Starts an embedded single-node {@link MiniSolrCloudCluster} once for the whole
 * test class, re-creates the test collection before every test and deletes it
 * afterwards, then exercises the CRUD and scan operations of the {@link DB}
 * implementation supplied by subclasses through {@link #getDB(Properties)}.
 * <p/>
 * Fix: the {@code assertEquals} calls previously passed (actual, expected) in
 * reversed order; JUnit's contract is (expected, actual), so failures produced
 * misleading messages.
 */
public abstract class SolrClientBaseTest {
  protected static MiniSolrCloudCluster miniSolrCloudCluster;
  private DB instance;
  // NUM_RECORDS field/value pairs: field0=value0 .. field9=value9.
  private final static HashMap<String, ByteIterator> MOCK_DATA;
  protected final static String MOCK_TABLE = "ycsb";
  private final static String MOCK_KEY0 = "0";
  private final static String MOCK_KEY1 = "1";
  private final static int NUM_RECORDS = 10;
  static {
    MOCK_DATA = new HashMap<>(NUM_RECORDS);
    for (int i = 0; i < NUM_RECORDS; i++) {
      MOCK_DATA.put("field" + i, new StringByteIterator("value" + i));
    }
  }
  @BeforeClass
  public static void onlyOnce() throws Exception {
    Path miniSolrCloudClusterTempDirectory = Files.createTempDirectory("miniSolrCloudCluster");
    miniSolrCloudClusterTempDirectory.toFile().deleteOnExit();
    miniSolrCloudCluster = new MiniSolrCloudCluster(1, miniSolrCloudClusterTempDirectory, JettyConfig.builder().build());
    // Upload Solr configuration
    URL configDir = SolrClientBaseTest.class.getClassLoader().getResource("solr_config");
    assertNotNull(configDir);
    miniSolrCloudCluster.uploadConfigDir(new File(configDir.toURI()), MOCK_TABLE);
  }
  @AfterClass
  public static void destroy() throws Exception {
    if(miniSolrCloudCluster != null) {
      miniSolrCloudCluster.shutdown();
    }
  }
  @Before
  public void setup() throws Exception {
    NamedList<Object> namedList = miniSolrCloudCluster.createCollection(MOCK_TABLE, 1, 1, MOCK_TABLE, null);
    // assertEquals(expected, actual) - collection creation must report "success"
    assertEquals(1, namedList.indexOf("success", 0));
    // give the cluster a moment to propagate the newly created collection
    Thread.sleep(1000);
    instance = getDB();
  }
  @After
  public void tearDown() throws Exception {
    if(miniSolrCloudCluster != null) {
      NamedList<Object> namedList = miniSolrCloudCluster.deleteCollection(MOCK_TABLE);
      assertEquals(1, namedList.indexOf("success", 0));
      Thread.sleep(1000);
    }
  }
  @Test
  public void testInsert() throws Exception {
    Status result = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA);
    assertEquals(Status.OK, result);
  }
  @Test
  public void testDelete() throws Exception {
    Status result = instance.delete(MOCK_TABLE, MOCK_KEY1);
    assertEquals(Status.OK, result);
  }
  @Test
  public void testRead() throws Exception {
    Set<String> fields = MOCK_DATA.keySet();
    HashMap<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
    Status result = instance.read(MOCK_TABLE, MOCK_KEY1, fields, resultParam);
    assertEquals(Status.OK, result);
  }
  @Test
  public void testUpdate() throws Exception {
    HashMap<String, ByteIterator> newValues = new HashMap<>(NUM_RECORDS);
    for (int i = 0; i < NUM_RECORDS; i++) {
      newValues.put("field" + i, new StringByteIterator("newvalue" + i));
    }
    Status result = instance.update(MOCK_TABLE, MOCK_KEY1, newValues);
    assertEquals(Status.OK, result);
    //validate that the values changed
    HashMap<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
    instance.read(MOCK_TABLE, MOCK_KEY1, MOCK_DATA.keySet(), resultParam);
    for (int i = 0; i < NUM_RECORDS; i++) {
      assertEquals("newvalue" + i, resultParam.get("field" + i).toString());
    }
  }
  @Test
  public void testScan() throws Exception {
    Set<String> fields = MOCK_DATA.keySet();
    Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(NUM_RECORDS);
    Status result = instance.scan(MOCK_TABLE, MOCK_KEY1, NUM_RECORDS, fields, resultParam);
    assertEquals(Status.OK, result);
  }
  /**
   * Gets the test DB with default (empty) properties.
   *
   * @return The test DB.
   */
  protected DB getDB() {
    return getDB(new Properties());
  }
  /**
   * Gets the test DB.
   *
   * @param props
   *          Properties to pass to the client.
   * @return The test DB.
   */
  protected abstract DB getDB(Properties props);
}
| leschekhomann/YCSB | solr6/src/test/java/com/yahoo/ycsb/db/solr6/SolrClientBaseTest.java | Java | apache-2.0 | 5,085 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.vision.v1p2beta1.model;
/**
* The Google Cloud Storage location where the input will be read from.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Vision API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class GoogleCloudVisionV1p3beta1GcsSource extends com.google.api.client.json.GenericJson {
  /**
   * Google Cloud Storage URI for the input file. This must only be a Google Cloud Storage object.
   * Wildcards are not currently supported.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String uri;
  /**
   * Google Cloud Storage URI for the input file. This must only be a Google Cloud Storage object.
   * Wildcards are not currently supported.
   * @return value or {@code null} for none
   */
  public java.lang.String getUri() {
    return uri;
  }
  /**
   * Google Cloud Storage URI for the input file. This must only be a Google Cloud Storage object.
   * Wildcards are not currently supported.
   * @param uri uri or {@code null} for none
   */
  public GoogleCloudVisionV1p3beta1GcsSource setUri(java.lang.String uri) {
    this.uri = uri;
    return this;
  }
  /**
   * Overridden to narrow the return type so fluent call chains keep the concrete model type.
   */
  @Override
  public GoogleCloudVisionV1p3beta1GcsSource set(String fieldName, Object value) {
    return (GoogleCloudVisionV1p3beta1GcsSource) super.set(fieldName, value);
  }
  /**
   * Overridden to narrow the return type of {@code clone()} to this model class.
   */
  @Override
  public GoogleCloudVisionV1p3beta1GcsSource clone() {
    return (GoogleCloudVisionV1p3beta1GcsSource) super.clone();
  }
}
| googleapis/google-api-java-client-services | clients/google-api-services-vision/v1p2beta1/1.31.0/com/google/api/services/vision/v1p2beta1/model/GoogleCloudVisionV1p3beta1GcsSource.java | Java | apache-2.0 | 2,520 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.impl.engine;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
import org.apache.camel.AsyncCallback;
import org.apache.camel.Exchange;
import org.apache.camel.ExtendedCamelContext;
import org.apache.camel.ExtendedExchange;
import org.apache.camel.Message;
import org.apache.camel.Processor;
import org.apache.camel.Route;
import org.apache.camel.Service;
import org.apache.camel.spi.InflightRepository;
import org.apache.camel.spi.Synchronization;
import org.apache.camel.spi.SynchronizationVetoable;
import org.apache.camel.spi.UnitOfWork;
import org.apache.camel.support.DefaultMessage;
import org.apache.camel.support.EventHelper;
import org.apache.camel.support.MessageSupport;
import org.apache.camel.support.UnitOfWorkHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The default implementation of {@link org.apache.camel.spi.UnitOfWork}
*/
public class DefaultUnitOfWork implements UnitOfWork, Service {
    private static final Logger LOG = LoggerFactory.getLogger(DefaultUnitOfWork.class);
    // services/flags captured from the CamelContext at construction time
    final InflightRepository inflightRepository;
    final boolean allowUseOriginalMessage;
    final boolean useBreadcrumb;
    // TODO: This implementation seems to have transformed itself into a to broad concern
    // where unit of work is doing a bit more work than the transactional aspect that ties
    // to its name. Maybe this implementation should be named ExchangeContext and we can
    // introduce a simpler UnitOfWork concept. This would also allow us to refactor the
    // SubUnitOfWork into a general parent/child unit of work concept. However this
    // requires API changes and thus is best kept for future Camel work
    private final Deque<Route> routes = new ArrayDeque<>(8);
    private final Exchange exchange;
    private final ExtendedCamelContext context;
    // logger used for trace output; subclasses may supply their own via the protected constructor
    private Logger log;
    // completion callbacks registered on this exchange; created lazily (see addSynchronization)
    private List<Synchronization> synchronizations;
    // copy of the incoming message, only kept when allowUseOriginalMessage is enabled
    private Message originalInMessage;
    private Set<Object> transactedBy;
    /**
     * Creates a new unit of work for the given exchange, pulling the inflight
     * repository and the original-message/breadcrumb flags from the exchange's
     * CamelContext.
     */
    public DefaultUnitOfWork(Exchange exchange) {
        this(exchange, exchange.getContext().getInflightRepository(), exchange.getContext().isAllowUseOriginalMessage(),
                exchange.getContext().isUseBreadcrumb());
    }
    /**
     * Constructor allowing subclasses to supply their own logger for the
     * trace statements emitted by this unit of work.
     */
    protected DefaultUnitOfWork(Exchange exchange, Logger logger, InflightRepository inflightRepository,
                                boolean allowUseOriginalMessage, boolean useBreadcrumb) {
        this(exchange, inflightRepository, allowUseOriginalMessage, useBreadcrumb);
        this.log = logger;
    }
    /**
     * Main constructor. Optionally snapshots the original incoming message,
     * injects a breadcrumb id header, fires the exchange-created event and
     * registers the exchange in the inflight repository.
     */
    public DefaultUnitOfWork(Exchange exchange, InflightRepository inflightRepository, boolean allowUseOriginalMessage,
                             boolean useBreadcrumb) {
        this.exchange = exchange;
        this.log = LOG;
        this.allowUseOriginalMessage = allowUseOriginalMessage;
        this.useBreadcrumb = useBreadcrumb;
        this.context = (ExtendedCamelContext) exchange.getContext();
        this.inflightRepository = inflightRepository;
        if (allowUseOriginalMessage) {
            // special for JmsMessage as it can cause it to loose headers later.
            // copy body + headers manually instead of copy() for that message type
            if (exchange.getIn().getClass().getName().equals("org.apache.camel.component.jms.JmsMessage")) {
                this.originalInMessage = new DefaultMessage(context);
                this.originalInMessage.setBody(exchange.getIn().getBody());
                this.originalInMessage.getHeaders().putAll(exchange.getIn().getHeaders());
            } else {
                this.originalInMessage = exchange.getIn().copy();
            }
            // must preserve exchange on the original in message
            if (this.originalInMessage instanceof MessageSupport) {
                ((MessageSupport) this.originalInMessage).setExchange(exchange);
            }
        }
        // inject breadcrumb header if enabled
        if (useBreadcrumb) {
            // create or use existing breadcrumb
            String breadcrumbId = exchange.getIn().getHeader(Exchange.BREADCRUMB_ID, String.class);
            if (breadcrumbId == null) {
                // no existing breadcrumb, so create a new one based on the exchange id
                breadcrumbId = exchange.getExchangeId();
                exchange.getIn().setHeader(Exchange.BREADCRUMB_ID, breadcrumbId);
            }
        }
        // fire event
        if (context.isEventNotificationApplicable()) {
            try {
                EventHelper.notifyExchangeCreated(context, exchange);
            } catch (Throwable e) {
                // must catch exceptions to ensure the exchange is not failing due to notification event failed
                log.warn("Exception occurred during event notification. This exception will be ignored.", e);
            }
        }
        // register to inflight registry
        inflightRepository.add(exchange);
    }
    /**
     * Creates a new unit of work for the given exchange, reusing this
     * instance's inflight repository and configuration flags.
     */
    UnitOfWork newInstance(Exchange exchange) {
        return new DefaultUnitOfWork(exchange, inflightRepository, allowUseOriginalMessage, useBreadcrumb);
    }
    @Override
    public void setParentUnitOfWork(UnitOfWork parentUnitOfWork) {
        // noop: this implementation does not keep a reference to its parent unit of work
    }
@Override
public UnitOfWork createChildUnitOfWork(Exchange childExchange) {
// create a new child unit of work, and mark me as its parent
UnitOfWork answer = newInstance(childExchange);
answer.setParentUnitOfWork(this);
return answer;
}
    @Override
    public void start() {
        // noop - nothing to initialize; all setup happens in the constructor
    }
    @Override
    public void stop() {
        // noop - cleanup happens in done(Exchange) instead
    }
@Override
public synchronized void addSynchronization(Synchronization synchronization) {
if (synchronizations == null) {
synchronizations = new ArrayList<>(8);
}
log.trace("Adding synchronization {}", synchronization);
synchronizations.add(synchronization);
}
@Override
public synchronized void removeSynchronization(Synchronization synchronization) {
if (synchronizations != null) {
synchronizations.remove(synchronization);
}
}
@Override
public synchronized boolean containsSynchronization(Synchronization synchronization) {
return synchronizations != null && synchronizations.contains(synchronization);
}
    @Override
    public void handoverSynchronization(Exchange target) {
        // delegate with no filter, i.e. attempt to hand over every synchronization
        handoverSynchronization(target, null);
    }
    /**
     * Hands over registered synchronizations to the target exchange.
     * <p>
     * A synchronization is moved only when (a) it does not veto the handover via
     * {@link SynchronizationVetoable} and (b) it matches the optional filter.
     * Moved synchronizations are removed from this unit of work.
     *
     * @param target the exchange taking ownership of the synchronizations
     * @param filter optional predicate; {@code null} means hand over all allowed synchronizations
     */
    @Override
    public void handoverSynchronization(Exchange target, Predicate<Synchronization> filter) {
        if (synchronizations == null || synchronizations.isEmpty()) {
            return;
        }
        // iterate via Iterator so entries can be removed safely while looping
        Iterator<Synchronization> it = synchronizations.iterator();
        while (it.hasNext()) {
            Synchronization synchronization = it.next();
            boolean handover = true;
            if (synchronization instanceof SynchronizationVetoable) {
                // the synchronization may veto being handed over
                SynchronizationVetoable veto = (SynchronizationVetoable) synchronization;
                handover = veto.allowHandover();
            }
            if (handover && (filter == null || filter.test(synchronization))) {
                log.trace("Handover synchronization {} to: {}", synchronization, target);
                target.adapt(ExtendedExchange.class).addOnCompletion(synchronization);
                // remove it if it is handed over
                it.remove();
            } else {
                log.trace("Handover not allow for synchronization {}", synchronization);
            }
        }
    }
    /**
     * Completes this unit of work for the given exchange.
     * <p>
     * Ordering matters: first run the registered synchronizations, then unregister
     * from the inflight repository, and only then fire the done/failed event.
     */
    @Override
    public void done(Exchange exchange) {
        if (log.isTraceEnabled()) {
            log.trace("UnitOfWork done for ExchangeId: {} with {}", exchange.getExchangeId(), exchange);
        }
        // capture failed state before callbacks run, as they may modify the exchange
        boolean failed = exchange.isFailed();
        // at first done the synchronizations
        UnitOfWorkHelper.doneSynchronizations(exchange, synchronizations, log);
        // unregister from inflight registry, before signalling we are done
        inflightRepository.remove(exchange);
        if (context.isEventNotificationApplicable()) {
            // then fire event to signal the exchange is done
            try {
                if (failed) {
                    EventHelper.notifyExchangeFailed(exchange.getContext(), exchange);
                } else {
                    EventHelper.notifyExchangeDone(exchange.getContext(), exchange);
                }
            } catch (Throwable e) {
                // must catch exceptions to ensure synchronizations is also invoked
                log.warn("Exception occurred during event notification. This exception will be ignored.", e);
            }
        }
    }
@Override
public void beforeRoute(Exchange exchange, Route route) {
if (log.isTraceEnabled()) {
log.trace("UnitOfWork beforeRoute: {} for ExchangeId: {} with {}", route.getId(), exchange.getExchangeId(),
exchange);
}
if (synchronizations != null && !synchronizations.isEmpty()) {
UnitOfWorkHelper.beforeRouteSynchronizations(route, exchange, synchronizations, log);
}
}
@Override
public void afterRoute(Exchange exchange, Route route) {
if (log.isTraceEnabled()) {
log.trace("UnitOfWork afterRoute: {} for ExchangeId: {} with {}", route.getId(), exchange.getExchangeId(),
exchange);
}
if (synchronizations != null && !synchronizations.isEmpty()) {
UnitOfWorkHelper.afterRouteSynchronizations(route, exchange, synchronizations, log);
}
}
@Override
public Message getOriginalInMessage() {
if (originalInMessage == null && !context.isAllowUseOriginalMessage()) {
throw new IllegalStateException("AllowUseOriginalMessage is disabled. Cannot access the original message.");
}
return originalInMessage;
}
@Override
public boolean isTransacted() {
return transactedBy != null && !transactedBy.isEmpty();
}
@Override
public boolean isTransactedBy(Object key) {
return transactedBy != null && getTransactedBy().contains(key);
}
    @Override
    public void beginTransactedBy(Object key) {
        // mark the exchange as transacted and record which key owns this section
        exchange.adapt(ExtendedExchange.class).setTransacted(true);
        getTransactedBy().add(key);
    }
    @Override
    public void endTransactedBy(Object key) {
        getTransactedBy().remove(key);
        // we may still be transacted even if we end this section of transaction
        boolean transacted = isTransacted();
        exchange.adapt(ExtendedExchange.class).setTransacted(transacted);
    }
    @Override
    public Route getRoute() {
        // the route on top of the stack; peek() returns null when no route is active
        return routes.peek();
    }
    @Override
    public void pushRoute(Route route) {
        // track nested route entry; popped again in popRoute()
        routes.push(route);
    }
    @Override
    public Route popRoute() {
        // poll (not pop) so an empty stack yields null instead of throwing
        return routes.poll();
    }
    @Override
    public boolean isBeforeAfterProcess() {
        // this implementation does no per-processor bookkeeping, so callers can skip
        // invoking beforeProcess/afterProcess
        return false;
    }
    @Override
    public AsyncCallback beforeProcess(Processor processor, Exchange exchange, AsyncCallback callback) {
        // no wrapping needed - return the callback unchanged
        return callback;
    }
    @Override
    public void afterProcess(Processor processor, Exchange exchange, AsyncCallback callback, boolean doneSync) {
        // noop - nothing to clean up per processor
    }
    /**
     * Returns the set of keys owning active transaction sections, creating it lazily.
     */
    private Set<Object> getTransactedBy() {
        if (transactedBy == null) {
            // no need to take up so much space so use a little set
            transactedBy = new HashSet<>(4);
        }
        return transactedBy;
    }
    @Override
    public String toString() {
        // fixed name on purpose; the exchange itself is not included to keep logs short
        return "DefaultUnitOfWork";
    }
}
| adessaigne/camel | core/camel-base/src/main/java/org/apache/camel/impl/engine/DefaultUnitOfWork.java | Java | apache-2.0 | 12,564 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.thrift.integration;
import com.google.common.collect.ImmutableMap;
import io.trino.testing.AbstractTestIntegrationSmokeTest;
import io.trino.testing.MaterializedResult;
import io.trino.testing.QueryRunner;
import org.testng.annotations.Test;
import static io.trino.plugin.thrift.integration.ThriftQueryRunner.createThriftQueryRunner;
import static io.trino.spi.type.VarcharType.VARCHAR;
import static io.trino.testing.QueryAssertions.assertContains;
/**
 * Smoke test for the Thrift connector, run against an in-process query runner
 * backed by locally started thrift servers.
 */
public class TestThriftIntegrationSmokeTest
        // TODO extend BaseConnectorTest
        extends AbstractTestIntegrationSmokeTest
{
    @Override
    protected QueryRunner createQueryRunner()
            throws Exception
    {
        // two thrift servers, HTTP transport disabled, no extra connector properties
        return createThriftQueryRunner(2, false, ImmutableMap.of());
    }

    @Override
    @Test
    public void testShowSchemas()
    {
        // overridden because the thrift connector exposes its own schema set
        // (tiny/sf1) rather than the base class's expectations
        MaterializedResult actualSchemas = computeActual("SHOW SCHEMAS").toTestTypes();
        MaterializedResult.Builder resultBuilder = MaterializedResult.resultBuilder(getSession(), VARCHAR)
                .row("tiny")
                .row("sf1");
        // containment (not equality): additional schemas may be present
        assertContains(actualSchemas, resultBuilder.build());
    }
}
| electrum/presto | plugin/trino-thrift/src/test/java/io/trino/plugin/thrift/integration/TestThriftIntegrationSmokeTest.java | Java | apache-2.0 | 1,733 |
/** @file
EFI Firmware Volume routines which work on a Fv image in buffers.
Copyright (c) 1999 - 2014, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef FirmwareVolumeBuffer_h_INCLUDED
#define FirmwareVolumeBuffer_h_INCLUDED
#include "Common/UefiBaseTypes.h"
#include "Common/PiFirmwareFile.h"
#include "Common/PiFirmwareVolume.h"
//
// Add an FFS file to the FV image held in the buffer.
//
EFI_STATUS
FvBufAddFile (
  IN OUT VOID *Fv,
  IN VOID *File
  );

//
// Add an FFS file, reallocating (extending) the FV buffer when needed;
// *Fv may point to a new buffer on return.
//
EFI_STATUS
FvBufAddFileWithExtend (
  IN OUT VOID **Fv,
  IN VOID *File
  );

//
// Add a VTF (volume top) file to the FV buffer.
//
EFI_STATUS
FvBufAddVtfFile (
  IN OUT VOID *Fv,
  IN VOID *File
  );

//
// Recompute the checksum of an FFS file in place.
//
EFI_STATUS
FvBufChecksumFile (
  IN OUT VOID *FfsFile
  );

//
// Recompute the FV header checksum in place.
//
EFI_STATUS
FvBufChecksumHeader (
  IN OUT VOID *Fv
  );

//
// Remove all files from the FV buffer.
//
EFI_STATUS
FvBufClearAllFiles (
  IN OUT VOID *Fv
  );

//
// Store a 32-bit size into a 3-byte FFS size field.
//
VOID
FvBufCompact3ByteSize (
  OUT VOID* SizeDest,
  IN UINT32 Size
  );

//
// Count the sections contained in an FFS file.
//
EFI_STATUS
FvBufCountSections (
  IN VOID* FfsFile,
  IN UINTN* Count
  );

//
// Allocate and return a copy of the source FV buffer.
//
EFI_STATUS
FvBufDuplicate (
  IN VOID *SourceFv,
  IN OUT VOID **DestinationFv
  );

//
// Expand a 3-byte FFS size field into a 32-bit value.
//
UINT32
FvBufExpand3ByteSize (
  IN VOID* Size
  );

//
// Return the total size of an FFS file.
//
UINT32
FvBufGetFfsFileSize (
  IN EFI_FFS_FILE_HEADER *Ffs
  );

//
// Return the header size of an FFS file.
//
UINT32
FvBufGetFfsHeaderSize (
  IN EFI_FFS_FILE_HEADER *Ffs
  );

//
// Grow the FV buffer by the given size; *Fv may point to a new buffer on return.
//
EFI_STATUS
FvBufExtend (
  IN VOID **Fv,
  IN UINTN Size
  );

//
// Locate a file in the FV buffer by its GUID name.
//
EFI_STATUS
FvBufFindFileByName (
  IN VOID *Fv,
  IN EFI_GUID *Name,
  OUT VOID **File
  );

//
// Locate a file in the FV buffer by its FFS file type.
//
EFI_STATUS
FvBufFindFileByType (
  IN VOID *Fv,
  IN EFI_FV_FILETYPE Type,
  OUT VOID **File
  );

//
// Iterate over files in the FV buffer; Key is the iteration cookie
// (start with 0 and pass it back on subsequent calls).
//
EFI_STATUS
FvBufFindNextFile (
  IN VOID *Fv,
  IN OUT UINTN *Key,
  OUT VOID **File
  );

//
// Iterate over sections in a section stream; Key is the iteration cookie.
//
EFI_STATUS
FvBufFindNextSection (
  IN VOID *SectionsStart,
  IN UINTN TotalSectionsSize,
  IN OUT UINTN *Key,
  OUT VOID **Section
  );

//
// Locate the first section of the given type within an FFS file.
//
EFI_STATUS
FvBufFindSectionByType (
  IN VOID *FfsFile,
  IN UINT8 Type,
  OUT VOID **Section
  );

//
// Return a pointer to (and size of) the raw data of an FFS file.
//
EFI_STATUS
FvBufGetFileRawData (
  IN VOID* FfsFile,
  OUT VOID** RawData,
  OUT UINTN* RawDataSize
  );

//
// Return the total size of the FV buffer.
//
EFI_STATUS
FvBufGetSize (
  IN VOID *Fv,
  OUT UINTN *Size
  );

//
// Wrap raw data into a newly allocated freeform FFS file.
//
EFI_STATUS
FvBufPackageFreeformRawFile (
  IN EFI_GUID* Filename,
  IN VOID* RawData,
  IN UINTN RawDataSize,
  OUT VOID** FfsFile
  );

//
// Remove the file with the given GUID name from the FV buffer.
//
EFI_STATUS
FvBufRemoveFile (
  IN OUT VOID *Fv,
  IN EFI_GUID *Name
  );

//
// Rewrite the FV block map to use a single uniform block size.
//
EFI_STATUS
FvBufUnifyBlockSizes (
  IN OUT VOID *Fv,
  IN UINTN BlockSize
  );

//
// Shrink the FV to the minimal size holding its current contents.
//
EFI_STATUS
FvBufShrinkWrap (
  IN VOID *Fv
  );
#endif // #ifndef FirmwareVolumeBuffer_h_INCLUDED
| google/google-ctf | third_party/edk2/BaseTools/Source/C/Common/FirmwareVolumeBufferLib.h | C | apache-2.0 | 2,894 |
# Makefile for the Staff DAS MySQL provider.
# Builds a versioned shared library and deploys/installs it as a DAS component.
include ../../Makefile.env

# Detect whether the MySQL development package is available.
# On non-Windows builds, grep `whereis mysql` output for an include directory;
# a zero count means the headers are missing and the build is skipped.
ifneq ($(OS),WINSDK)
MYSQL_DEV_INSTALLED := $(shell $(WHEREISCMD) mysql 2>/dev/null | grep -c include)
else
MYSQL_DEV_INSTALLED := 0
endif

# Component identity and output names: the versioned library plus an
# unversioned symlink created at deploy/install time.
COMPONENT = staff.das/providers/staff.das.MySql
TARGET = mysql
LIBTARGETVER = $(LIBPREFIX)staffdasprov-$(TARGET)$(LIBVEREXT)
LIBTARGET = $(LIBPREFIX)staffdasprov-$(TARGET)$(LIBEXT)

# Compiler/linker flags: deployed headers + system MySQL headers and client lib.
CXXFLAGS += -I$(DEPLOYDIR)include -I/usr/include/mysql
LDFLAGS += -lmysqlclient -lstaffutils -lstaffxml -lstaffcommon -lstaffdascommon
LDFLAGS += $(LDLIB)$(LIBTARGETVER)

SOURCES = $(wildcard $(SRCDIR)*.cpp)
OBJECTS = $(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(SOURCES)))

# == make ===========================================
# Build only when not cross-compiling and the MySQL headers are present.
ifeq ($(CROSSCOMPILE),0)
ifneq ($(MYSQL_DEV_INSTALLED),0)
make: check "$(OBJDIR)" "$(OUTDIR)" $(OUTDIR)$(LIBTARGETVER) deploy
else
make:
	@$(ECHO) "\n\033[33;1mSkipping MySQL provider compilation because MySQL development package is not installed.\033[31;0m\n"
endif
else
MYSQL_DEV_INSTALLED=0
make:
	@$(ECHO) "Crosscompilation of MySQL provider is not supported now"
endif

# link
$(OUTDIR)$(LIBTARGETVER): $(OBJECTS)
	$(CXX) $(OBJECTS) -o $(OUTDIR)$(LIBTARGETVER) $(LDFLAGS)

# compile
$(OBJDIR)%.o: $(SRCDIR)%.cpp
	$(CXX) $(CXXFLAGS) -c $< -o $@

# == deploy ========================================
# Copy the versioned library into the deploy tree and refresh the symlink.
deploy: "$(DEPLOYDIR)$(COMDIR)$(COMPONENT)/" $(OUTDIR)$(LIBTARGETVER)
	cp -f $(OUTDIR)$(LIBTARGETVER) "$(DEPLOYDIR)$(COMDIR)$(COMPONENT)/"
	$(LN) $(LIBTARGETVER) $(DEPLOYDIR)$(COMDIR)$(COMPONENT)/$(LIBTARGET)

# == distrib =========================================
distrib:;

# == install ========================================
ifneq ($(MYSQL_DEV_INSTALLED),0)
install: check "$(INSTALLDIR)$(COMDIR)$(COMPONENT)/"
	cp -f $(OUTDIR)$(LIBTARGETVER) $(INSTALLDIR)$(COMDIR)$(COMPONENT)/
	$(LN) $(LIBTARGETVER) $(INSTALLDIR)$(COMDIR)$(COMPONENT)/$(LIBTARGET)
else
install:;
endif

# == uninstall ======================================
ifneq ($(MYSQL_DEV_INSTALLED),0)
uninstall: check
	rm -f $(INSTALLDIR)$(COMDIR)$(COMPONENT)/$(LIBTARGETVER) $(INSTALLDIR)$(COMDIR)$(COMPONENT)/$(LIBTARGET)
	-rmdir $(INSTALLDIR)$(COMDIR)$(COMPONENT)
else
uninstall:;
endif

# == clean ==========================================
clean:
	rm -Rf $(OBJDIR) $(OUTDIR)

# == mkdir ==========================================
# Pattern rule: create any directory-like prerequisite (quoted, trailing slash).
"%/":
	@[ -z "$@" -o -d "$@" ] || mkdir -p $@ && chmod g+w $@
| loentar/staff | das/providers/mysql/Makefile | Makefile | apache-2.0 | 2,339 |
package ca.uhn.fhir.util;
/*
* #%L
* HAPI FHIR - Core Library
* %%
* Copyright (C) 2014 - 2015 University Health Network
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
public class ObjectUtil {

	/**
	 * Null-safe equality check: returns {@code true} when both references are the
	 * same object (including both being {@code null}); otherwise delegates to
	 * {@code object1.equals(object2)}.
	 *
	 * @param object1 the first object, may be {@code null}
	 * @param object2 the second object, may be {@code null}
	 * @return {@code true} if both are {@code null} or equal per {@code equals(Object)}
	 */
	public static boolean equals(Object object1, Object object2) {
		// Same semantics as the original hand-rolled implementation; delegate to the
		// JDK utility (Java 7+) instead of duplicating it. Kept as a public method so
		// existing callers of this API remain source and binary compatible.
		return java.util.Objects.equals(object1, object2);
	}
}
| dhf0820/hapi-fhir-1.2 | hapi-fhir-base/src/main/java/ca/uhn/fhir/util/ObjectUtil.java | Java | apache-2.0 | 951 |
/**
* HAL
*
* Copyright (c) 2014 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Apache Public License.
* Please see the LICENSE included with this distribution for details.
*/
#ifndef _HAL_DETAIL_JSLOGGERPOLICYFILE_HPP_
#define _HAL_DETAIL_JSLOGGERPOLICYFILE_HPP_
#include "HAL/detail/JSLoggerPolicyInterface.hpp"
#include <fstream>
namespace HAL { namespace detail {
// Logging policy that appends each message as a line to a file.
class HAL_EXPORT JSLoggerPolicyFile final : public JSLoggerPolicyInterface {
 public:

  // Opens the named file in binary output mode.
  // Throws std::runtime_error when the stream cannot be opened.
  JSLoggerPolicyFile(const std::string& name) {
    ofstream__.open(name, std::ios_base::binary | std::ios_base::out);

    if(!ofstream__.is_open() ) {
      throw(std::runtime_error("JSLoggerPolicyFile: Unable to open an output stream"));
    }
  }

  // Closes the stream explicitly (the std::ofstream destructor would also
  // close it; kept for clarity).
  ~JSLoggerPolicyFile() {
    ofstream__.close();
  }

  JSLoggerPolicyFile() = delete;
  // NOTE(review): std::ofstream is not copyable, so these defaulted copy
  // operations are implicitly deleted rather than usable -- confirm that
  // copyability is not actually expected by callers.
  JSLoggerPolicyFile(const JSLoggerPolicyFile&) = default;
  JSLoggerPolicyFile& operator=(const JSLoggerPolicyFile&) = default;

#ifdef HAL_MOVE_CTOR_AND_ASSIGN_DEFAULT_ENABLE
  JSLoggerPolicyFile(JSLoggerPolicyFile&&) = default;
  JSLoggerPolicyFile& operator=(JSLoggerPolicyFile&&) = default;
#endif

  // Writes one log message followed by a newline; std::endl also flushes the
  // stream, so every message reaches the file immediately.
  virtual void Write(const std::string& log_message) override final {
    ofstream__ << log_message << std::endl;
  }

 private:

  // Silence 4251 on Windows since private member variables do not
  // need to be exported from a DLL.
#pragma warning(push)
#pragma warning(disable: 4251)
  std::ofstream ofstream__;
#pragma warning(pop)
};
}} // namespace HAL { namespace detail {
#endif // _HAL_DETAIL_JSLOGGERPOLICYFILE_HPP_
| formalin14/HAL | include/HAL/detail/JSLoggerPolicyFile.hpp | C++ | apache-2.0 | 1,732 |
' Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Imports System.Runtime.InteropServices
Imports System.Xml.Linq
Imports Microsoft.CodeAnalysis
Imports Microsoft.CodeAnalysis.CodeStyle
Imports Microsoft.CodeAnalysis.Editing
Imports Microsoft.CodeAnalysis.Editor.Shared.Options
Imports Microsoft.CodeAnalysis.ExtractMethod
Imports Microsoft.CodeAnalysis.Options
Imports Microsoft.CodeAnalysis.Shared.Options
Imports Microsoft.CodeAnalysis.Simplification
Imports Microsoft.CodeAnalysis.SymbolSearch
Namespace Microsoft.VisualStudio.LanguageServices.VisualBasic.Options
<ComVisible(True)>
Public Class AutomationObject
Private ReadOnly _workspace As Workspace
Friend Sub New(workspace As Workspace)
_workspace = workspace
End Sub
Public Property AutoComment As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.AutoXmlDocCommentGeneration)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.AutoXmlDocCommentGeneration, value)
End Set
End Property
Public Property AutoEndInsert As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.EndConstruct)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.EndConstruct, value)
End Set
End Property
Public Property AutoRequiredMemberInsert As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.AutomaticInsertionOfAbstractOrInterfaceMembers)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.AutomaticInsertionOfAbstractOrInterfaceMembers, value)
End Set
End Property
<Obsolete("This SettingStore option has now been deprecated in favor of BasicClosedFileDiagnostics")>
Public Property ClosedFileDiagnostics As Boolean
Get
Return ServiceFeatureOnOffOptions.IsClosedFileDiagnosticsEnabled(_workspace.Options, LanguageNames.VisualBasic)
End Get
Set(value As Boolean)
' Even though this option has been deprecated, we want to respect the setting if the user has explicitly turned off closed file diagnostics (which is the non-default value for 'ClosedFileDiagnostics').
' So, we invoke the setter only for value = False.
If Not value Then
SetBooleanOption(ServiceFeatureOnOffOptions.ClosedFileDiagnostic, value:=0)
End If
End Set
End Property
Public Property BasicClosedFileDiagnostics As Integer
Get
Return GetBooleanOption(ServiceFeatureOnOffOptions.ClosedFileDiagnostic)
End Get
Set(value As Integer)
SetBooleanOption(ServiceFeatureOnOffOptions.ClosedFileDiagnostic, value)
End Set
End Property
Public Property RenameTrackingPreview As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.RenameTrackingPreview)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.RenameTrackingPreview, value)
End Set
End Property
Public Property DisplayLineSeparators As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.LineSeparator)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.LineSeparator, value)
End Set
End Property
Public Property EnableHighlightReferences As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.ReferenceHighlighting)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.ReferenceHighlighting, value)
End Set
End Property
Public Property EnableHighlightRelatedKeywords As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.KeywordHighlighting)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.KeywordHighlighting, value)
End Set
End Property
Public Property ExtractMethod_DoNotPutOutOrRefOnStruct As Boolean
Get
Return GetBooleanOption(ExtractMethodOptions.DontPutOutOrRefOnStruct)
End Get
Set(value As Boolean)
SetBooleanOption(ExtractMethodOptions.DontPutOutOrRefOnStruct, value)
End Set
End Property
Public Property ExtractMethod_AllowMovingDeclaration As Boolean
Get
Return GetBooleanOption(ExtractMethodOptions.AllowMovingDeclaration)
End Get
Set(value As Boolean)
SetBooleanOption(ExtractMethodOptions.AllowMovingDeclaration, value)
End Set
End Property
Public Property Outlining As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.Outlining)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.Outlining, value)
End Set
End Property
Public Property PrettyListing As Boolean
Get
Return GetBooleanOption(FeatureOnOffOptions.PrettyListing)
End Get
Set(value As Boolean)
SetBooleanOption(FeatureOnOffOptions.PrettyListing, value)
End Set
End Property
Public Property Style_PreferIntrinsicPredefinedTypeKeywordInDeclaration As String
Get
Return GetXmlOption(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInDeclaration)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInDeclaration, value)
End Set
End Property
Public Property Style_PreferIntrinsicPredefinedTypeKeywordInMemberAccess As String
Get
Return GetXmlOption(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInMemberAccess)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInMemberAccess, value)
End Set
End Property
Public Property Style_QualifyFieldAccess As String
Get
Return GetXmlOption(CodeStyleOptions.QualifyFieldAccess)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.QualifyFieldAccess, value)
End Set
End Property
Public Property Style_QualifyPropertyAccess As String
Get
Return GetXmlOption(CodeStyleOptions.QualifyPropertyAccess)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.QualifyPropertyAccess, value)
End Set
End Property
Public Property Style_QualifyMethodAccess As String
Get
Return GetXmlOption(CodeStyleOptions.QualifyMethodAccess)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.QualifyMethodAccess, value)
End Set
End Property
Public Property Style_QualifyEventAccess As String
Get
Return GetXmlOption(CodeStyleOptions.QualifyEventAccess)
End Get
Set(value As String)
SetXmlOption(CodeStyleOptions.QualifyEventAccess, value)
End Set
End Property
Public Property Option_PlaceSystemNamespaceFirst As Boolean
Get
Return GetBooleanOption(GenerationOptions.PlaceSystemNamespaceFirst)
End Get
Set(value As Boolean)
SetBooleanOption(GenerationOptions.PlaceSystemNamespaceFirst, value)
End Set
End Property
Public Property Option_SuggestImportsForTypesInReferenceAssemblies As Boolean
Get
Return GetBooleanOption(SymbolSearchOptions.SuggestForTypesInReferenceAssemblies)
End Get
Set(value As Boolean)
SetBooleanOption(SymbolSearchOptions.SuggestForTypesInReferenceAssemblies, value)
End Set
End Property
Public Property Option_SuggestImportsForTypesInNuGetPackages As Boolean
Get
Return GetBooleanOption(SymbolSearchOptions.SuggestForTypesInNuGetPackages)
End Get
Set(value As Boolean)
SetBooleanOption(SymbolSearchOptions.SuggestForTypesInNuGetPackages, value)
End Set
End Property
Private Function GetBooleanOption(key As [PerLanguageOption](Of Boolean)) As Boolean
Return _workspace.Options.GetOption(key, LanguageNames.VisualBasic)
End Function
Private Function GetXmlOption(key As PerLanguageOption(Of CodeStyleOption(Of Boolean))) As String
Return _workspace.Options.GetOption(key, LanguageNames.VisualBasic).ToXElement().ToString()
End Function
Private Sub SetBooleanOption(key As [PerLanguageOption](Of Boolean), value As Boolean)
_workspace.Options = _workspace.Options.WithChangedOption(key, LanguageNames.VisualBasic, value)
End Sub
Private Function GetBooleanOption(key As PerLanguageOption(Of Boolean?)) As Integer
Dim [option] = _workspace.Options.GetOption(key, LanguageNames.VisualBasic)
If Not [option].HasValue Then
Return -1
End If
Return If([option].Value, 1, 0)
End Function
Private Sub SetBooleanOption(key As PerLanguageOption(Of Boolean?), value As Integer)
Dim boolValue As Boolean? = If(value < 0, Nothing, value > 0)
_workspace.Options = _workspace.Options.WithChangedOption(key, LanguageNames.VisualBasic, boolValue)
End Sub
Private Sub SetXmlOption(key As PerLanguageOption(Of CodeStyleOption(Of Boolean)), value As String)
Dim convertedValue = CodeStyleOption(Of Boolean).FromXElement(XElement.Parse(value))
_workspace.Options = _workspace.Options.WithChangedOption(key, LanguageNames.VisualBasic, convertedValue)
End Sub
End Class
End Namespace
| vslsnap/roslyn | src/VisualStudio/VisualBasic/Impl/Options/AutomationObject.vb | Visual Basic | apache-2.0 | 10,636 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.sparql.resultset;
import static java.lang.String.format ;
import java.io.BufferedReader ;
import java.io.IOException ;
import java.util.List ;
import java.util.NoSuchElementException ;
import org.apache.jena.atlas.io.IO ;
import org.apache.jena.atlas.io.IndentedWriter ;
import org.apache.jena.graph.Node ;
import org.apache.jena.riot.RiotException ;
import org.apache.jena.sparql.core.Var ;
import org.apache.jena.sparql.engine.binding.Binding ;
import org.apache.jena.sparql.engine.binding.BindingFactory ;
import org.apache.jena.sparql.engine.binding.BindingMap ;
import org.apache.jena.sparql.engine.iterator.QueryIteratorBase ;
import org.apache.jena.sparql.serializer.SerializationContext ;
import org.apache.jena.sparql.util.NodeFactoryExtra ;
/**
* Class used to do streaming parsing of actual result rows from the TSV
*/
/**
 * Class used to do streaming parsing of actual result rows from the TSV
 */
public class TSVInputIterator extends QueryIteratorBase
{
    // Input reader positioned after the header row; set to null once closed.
    private BufferedReader reader;
    // The next parsed binding, or null when not yet parsed/consumed.
    private BindingMap binding;
    // Number of columns each result row must have (size of the header).
    private int expectedItems;
    // Variables declared in the header, in column order.
    private List<Var> vars;
    // Current line number (1 = header), used in error messages.
    private long lineNum = 1;

    /**
     * Creates a new TSV Input Iterator
     * <p>
     * Assumes the Header Row has already been read and that the next row to be read from the reader will be a Result Row
     * </p>
     */
    public TSVInputIterator(BufferedReader reader, List<Var> vars)
    {
        this.reader = reader;
        this.expectedItems = vars.size();
        this.vars = vars;
    }

    @Override
    public void output(IndentedWriter out, SerializationContext sCxt) {
        // Not needed - only called as part of printing/debugging query plans.
        out.println("TSVInputIterator") ;
    }

    @Override
    protected boolean hasNextBinding() {
        if (this.reader != null)
        {
            // parse lazily: only read the next line when the previous binding was consumed
            if (this.binding == null)
                return this.parseNextBinding();
            else
                return true;
        }
        else
        {
            // iterator has been closed
            return false;
        }
    }

    /**
     * Reads and parses the next result row into {@link #binding}.
     *
     * @return false at end of input, true when a binding was parsed
     * @throws ResultSetException on I/O errors, wrong column counts or malformed RDF terms
     */
    private boolean parseNextBinding()
    {
        String line;
        try
        {
            line = this.reader.readLine();
            //Once EOF has been reached we'll see null for this call so we can return false because there are no further bindings
            if (line == null) return false;
            this.lineNum++;
        }
        catch (IOException e)
        { throw new ResultSetException("Error parsing TSV results - " + e.getMessage()); }

        if ( line.isEmpty() )
        {
            // Empty input line - no bindings.
            // Only valid when we expect zero/one values as otherwise we should get a sequence of tab characters
            // which means a non-empty string which we handle normally
            if (expectedItems > 1) throw new ResultSetException(format("Error Parsing TSV results at Line %d - The result row had 0/1 values when %d were expected", this.lineNum, expectedItems));
            this.binding = BindingFactory.create() ;
            return true ;
        }

        // split with limit -1 so trailing empty columns (unbound values) are preserved
        String[] tokens = TSVInput.pattern.split(line, -1);

        if (tokens.length != expectedItems)
            throw new ResultSetException(format("Error Parsing TSV results at Line %d - The result row '%s' has %d values instead of the expected %d.", this.lineNum, line, tokens.length, expectedItems));
        this.binding = BindingFactory.create();

        for ( int i = 0; i < tokens.length; i++ )
        {
            String token = tokens[i];

            //If we see an empty string this denotes an unbound value
            if (token.equals("")) continue;

            //Bound value so parse it and add to the binding
            try {
                Node node = NodeFactoryExtra.parseNode(token) ;
                if ( !node.isConcrete() )
                    throw new ResultSetException(format("Line %d: Not a concrete RDF term: %s",lineNum, token)) ;
                this.binding.add(this.vars.get(i), node);
            } catch (RiotException ex)
            {
                throw new ResultSetException(format("Line %d: Data %s contains error: %s", lineNum, token, ex.getMessage()));
            }
        }

        return true;
    }

    @Override
    protected Binding moveToNextBinding() {
        if (!hasNext()) throw new NoSuchElementException() ;
        // hand out the parsed binding and clear it so the next call parses a fresh row
        Binding b = this.binding;
        this.binding = null ;
        return b;
    }

    @Override
    protected void closeIterator() {
        IO.close(reader) ;
        reader = null;
    }

    @Override
    protected void requestCancel() {
        //Don't need to do anything special to cancel
        //Superclass should take care of that and call closeIterator() where we do our actual clean up
    }
}
| adrapereira/jena | jena-arq/src/main/java/org/apache/jena/sparql/resultset/TSVInputIterator.java | Java | apache-2.0 | 5,220 |
/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkOtsuThresholdImageFilter_h
#define itkOtsuThresholdImageFilter_h
#include "itkHistogramThresholdImageFilter.h"
#include "itkOtsuThresholdCalculator.h"
namespace itk
{
/**
*\class OtsuThresholdImageFilter
* \brief Threshold an image using the Otsu Threshold
*
* This filter creates a binary thresholded image that separates an
* image into foreground and background components. The filter
* computes the threshold using the OtsuThresholdCalculator and
* applies that threshold to the input image using the
* BinaryThresholdImageFilter.
*
* \author Richard Beare
* \author Gaetan Lehmann. Biologie du Developpement et de la Reproduction, INRA de Jouy-en-Josas, France.
*
* This implementation was taken from the Insight Journal paper:
* https://www.insight-journal.org/browse/publication/811
*
* \sa HistogramThresholdImageFilter
*
* \ingroup Multithreaded
* \ingroup ITKThresholding
*
* \sphinx
* \sphinxexample{Filtering/Thresholding/SeparateGroundUsingOtsu,Separate Foreround And Background Using Otsu Method}
* \endsphinx
*/
template <typename TInputImage, typename TOutputImage, typename TMaskImage = TOutputImage>
class ITK_TEMPLATE_EXPORT OtsuThresholdImageFilter
: public HistogramThresholdImageFilter<TInputImage, TOutputImage, TMaskImage>
{
public:
ITK_DISALLOW_COPY_AND_MOVE(OtsuThresholdImageFilter);
/** Standard Self type alias */
using Self = OtsuThresholdImageFilter;
using Superclass = HistogramThresholdImageFilter<TInputImage, TOutputImage, TMaskImage>;
using Pointer = SmartPointer<Self>;
using ConstPointer = SmartPointer<const Self>;
/** Method for creation through the object factory. */
itkNewMacro(Self);
/** Runtime information support. */
itkTypeMacro(OtsuThresholdImageFilter, HistogramThresholdImageFilter);
using InputImageType = TInputImage;
using OutputImageType = TOutputImage;
using MaskImageType = TMaskImage;
/** Image pixel value type alias. */
using InputPixelType = typename InputImageType::PixelType;
using OutputPixelType = typename OutputImageType::PixelType;
using MaskPixelType = typename MaskImageType::PixelType;
/** Image related type alias. */
using InputImagePointer = typename InputImageType::Pointer;
using OutputImagePointer = typename OutputImageType::Pointer;
using InputSizeType = typename InputImageType::SizeType;
using InputIndexType = typename InputImageType::IndexType;
using InputImageRegionType = typename InputImageType::RegionType;
using OutputSizeType = typename OutputImageType::SizeType;
using OutputIndexType = typename OutputImageType::IndexType;
using OutputImageRegionType = typename OutputImageType::RegionType;
using MaskSizeType = typename MaskImageType::SizeType;
using MaskIndexType = typename MaskImageType::IndexType;
using MaskImageRegionType = typename MaskImageType::RegionType;
using typename Superclass::HistogramType;
using CalculatorType = OtsuThresholdCalculator<HistogramType, InputPixelType>;
/** Image related type alias. */
static constexpr unsigned int InputImageDimension = InputImageType::ImageDimension;
static constexpr unsigned int OutputImageDimension = OutputImageType::ImageDimension;
/** Should the threshold value be mid-point of the bin or the maximum?
* Default is to return bin maximum. */
itkSetMacro(ReturnBinMidpoint, bool);
itkGetConstReferenceMacro(ReturnBinMidpoint, bool);
itkBooleanMacro(ReturnBinMidpoint);
protected:
OtsuThresholdImageFilter() { this->SetCalculator(CalculatorType::New()); }
~OtsuThresholdImageFilter() override = default;
  void
  GenerateData() override
  {
    // The cast is unchecked here; VerifyPreconditions() has already confirmed
    // the installed calculator is an OtsuThresholdCalculator.
    auto calc = static_cast<CalculatorType *>(this->GetModifiableCalculator());
    // Forward the bin-midpoint/bin-maximum preference to the calculator before
    // delegating the actual threshold computation to the superclass pipeline.
    calc->SetReturnBinMidpoint(m_ReturnBinMidpoint);
    this->Superclass::GenerateData();
  }
  void
  VerifyPreconditions() ITKv5_CONST override
  {
    Superclass::VerifyPreconditions();
    // Guard against a calculator of the wrong type having been installed via
    // SetCalculator(): GenerateData() performs an unchecked static_cast, so
    // fail early with a descriptive exception instead.
    if (dynamic_cast<const CalculatorType *>(Superclass::GetCalculator()) == nullptr)
    {
      itkExceptionMacro(<< "Invalid OtsuThresholdCalculator.");
    }
  }
private:
#if defined(ITKV4_COMPATIBILITY)
bool m_ReturnBinMidpoint{ true };
#else
bool m_ReturnBinMidpoint{ false };
#endif
};
} // end namespace itk
#endif
| BRAINSia/ITK | Modules/Filtering/Thresholding/include/itkOtsuThresholdImageFilter.h | C | apache-2.0 | 4,985 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.cluster.infinispan;
import java.io.Serializable;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
import org.infinispan.client.hotrod.annotation.ClientListener;
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
import org.infinispan.client.hotrod.event.ClientEvent;
import org.infinispan.context.Flag;
import org.infinispan.marshall.core.MarshalledEntry;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.remote.RemoteStore;
import org.infinispan.remoting.transport.Transport;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterListener;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.common.util.HostUtils;
import org.keycloak.common.util.MultivaluedHashMap;
/**
* Impl for sending infinispan messages across cluster and listening to them
*
* @author <a href="mailto:[email protected]">Marek Posolda</a>
*/
public class InfinispanNotificationsManager {
    protected static final Logger logger = Logger.getLogger(InfinispanNotificationsManager.class);
    // Registered listeners per task key; a single key may map to several listeners.
    private final MultivaluedHashMap<String, ClusterListener> listeners = new MultivaluedHashMap<>();
    // Shared Infinispan cache used as the transport for cluster notifications.
    private final Cache<String, Serializable> workCache;
    // Identifier of this node; used to suppress self-notifications when requested.
    private final String myAddress;
    protected InfinispanNotificationsManager(Cache<String, Serializable> workCache, String myAddress) {
        this.workCache = workCache;
        this.myAddress = myAddress;
    }
    // Create and init manager including all listeners etc
    public static InfinispanNotificationsManager create(Cache<String, Serializable> workCache, String myAddress, Set<RemoteStore> remoteStores) {
        InfinispanNotificationsManager manager = new InfinispanNotificationsManager(workCache, myAddress);
        // We need CacheEntryListener just if we don't have remoteStore. With remoteStore will be all cluster nodes notified anyway from HotRod listener
        if (remoteStores.isEmpty()) {
            workCache.addListener(manager.new CacheEntryListener());
            logger.debugf("Added listener for infinispan cache: %s", workCache.getName());
        } else {
            for (RemoteStore remoteStore : remoteStores) {
                RemoteCache<Object, Object> remoteCache = remoteStore.getRemoteCache();
                remoteCache.addClientListener(manager.new HotRodListener(remoteCache));
                logger.debugf("Added listener for HotRod remoteStore cache: %s", remoteCache.getName());
            }
        }
        return manager;
    }
    /**
     * Registers a listener to be invoked whenever an event arrives for the given task key.
     */
    void registerListener(String taskKey, ClusterListener task) {
        listeners.add(taskKey, task);
    }
    /**
     * Broadcasts the given event to the cluster by writing a wrapped copy into the work cache.
     *
     * @param taskKey key that remote listeners are registered under
     * @param event event to deliver
     * @param ignoreSender when {@code true}, this node's own listeners skip the event
     */
    void notify(String taskKey, ClusterEvent event, boolean ignoreSender) {
        WrapperClusterEvent wrappedEvent = new WrapperClusterEvent();
        wrappedEvent.setDelegateEvent(event);
        wrappedEvent.setIgnoreSender(ignoreSender);
        wrappedEvent.setSender(myAddress);
        if (logger.isTraceEnabled()) {
            logger.tracef("Sending event %s: %s", taskKey, event);
        }
        // Put the value to the cache to notify listeners on all the nodes
        // NOTE(review): the 120s lifespan presumably just bounds how long the
        // event entry lingers in the cache — confirm expiry semantics.
        workCache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES)
                .put(taskKey, wrappedEvent, 120, TimeUnit.SECONDS);
    }
    // Local cache listener used when no remote store is configured;
    // fires after the entry has been stored (Observation.POST).
    @Listener(observation = Listener.Observation.POST)
    public class CacheEntryListener {
        @CacheEntryCreated
        public void cacheEntryCreated(CacheEntryCreatedEvent<String, Serializable> event) {
            eventReceived(event.getKey(), event.getValue());
        }
        @CacheEntryModified
        public void cacheEntryModified(CacheEntryModifiedEvent<String, Serializable> event) {
            eventReceived(event.getKey(), event.getValue());
        }
    }
    // HotRod client listener used with remote stores; HotRod events carry only
    // the key, so the value is re-read from the work cache.
    @ClientListener
    public class HotRodListener {
        private final RemoteCache<Object, Object> remoteCache;
        public HotRodListener(RemoteCache<Object, Object> remoteCache) {
            this.remoteCache = remoteCache;
        }
        @ClientCacheEntryCreated
        public void created(ClientCacheEntryCreatedEvent event) {
            String key = event.getKey().toString();
            hotrodEventReceived(key);
        }
        @ClientCacheEntryModified
        public void updated(ClientCacheEntryModifiedEvent event) {
            String key = event.getKey().toString();
            hotrodEventReceived(key);
        }
        private void hotrodEventReceived(String key) {
            // TODO: Look at CacheEventConverter stuff to possibly include value in the event and avoid additional remoteCache request
            Object value = workCache.get(key);
            eventReceived(key, (Serializable) value);
        }
    }
    /**
     * Dispatches a received event to listeners registered for its key and to
     * listeners registered under {@code ClusterProvider.ALL}.
     */
    private void eventReceived(String key, Serializable obj) {
        // Ignore cache entries that are not wrapped cluster events.
        if (!(obj instanceof WrapperClusterEvent)) {
            return;
        }
        WrapperClusterEvent event = (WrapperClusterEvent) obj;
        if (event.isIgnoreSender()) {
            // Skip events this node sent itself when the sender opted out.
            if (this.myAddress.equals(event.getSender())) {
                return;
            }
        }
        if (logger.isTraceEnabled()) {
            logger.tracef("Received event %s: %s", key, event);
        }
        ClusterEvent wrappedEvent = event.getDelegateEvent();
        List<ClusterListener> myListeners = listeners.get(key);
        if (myListeners != null) {
            for (ClusterListener listener : myListeners) {
                listener.eventReceived(wrappedEvent);
            }
        }
        // Listeners registered under the wildcard key receive every event.
        myListeners = listeners.get(ClusterProvider.ALL);
        if (myListeners != null) {
            for (ClusterListener listener : myListeners) {
                listener.eventReceived(wrappedEvent);
            }
        }
    }
}
| didiez/keycloak | model/infinispan/src/main/java/org/keycloak/cluster/infinispan/InfinispanNotificationsManager.java | Java | apache-2.0 | 7,165 |
<?php
// $Header: /cvsroot/html2ps/css.text-transform.inc.php,v 1.2 2006/07/09 09:07:46 Konstantin Exp $
define('CSS_TEXT_TRANSFORM_NONE' ,0);
define('CSS_TEXT_TRANSFORM_CAPITALIZE',1);
define('CSS_TEXT_TRANSFORM_UPPERCASE' ,2);
define('CSS_TEXT_TRANSFORM_LOWERCASE' ,3);
class CSSTextTransform extends CSSPropertyStringSet {
  /**
   * Registers the accepted keyword set for the CSS 'text-transform' property.
   * (PHP4-style constructor, matching the rest of the html2ps code base.)
   */
  function CSSTextTransform() {
    $keywords = array('inherit'    => CSS_PROPERTY_INHERIT,
                      'none'       => CSS_TEXT_TRANSFORM_NONE,
                      'capitalize' => CSS_TEXT_TRANSFORM_CAPITALIZE,
                      'uppercase'  => CSS_TEXT_TRANSFORM_UPPERCASE,
                      'lowercase'  => CSS_TEXT_TRANSFORM_LOWERCASE);
    $this->CSSPropertyStringSet(false, true, $keywords);
  }

  /**
   * Initial value used when no 'text-transform' is specified.
   */
  function default_value() {
    return CSS_TEXT_TRANSFORM_NONE;
  }

  /**
   * Numeric code identifying this property.
   */
  function getPropertyCode() {
    return CSS_TEXT_TRANSFORM;
  }

  /**
   * CSS name of this property.
   */
  function getPropertyName() {
    return 'text-transform';
  }
}
CSS::register_css_property(new CSSTextTransform);
?>
| justinkelly/apache-php | simpleinvoices/library/pdf/css.text-transform.inc.php | PHP | apache-2.0 | 1,155 |
// German (de) locale resource bundle for the LayerList widget.
// All values are user-visible translated strings; keep keys in sync with the
// root nls/strings.js bundle and change values only for translation fixes.
define(
   ({
    _widgetLabel: "Layer-Liste",
    titleBasemap: "Grundkarten",
    titleLayers: "Operationale Layer",
    labelLayer: "Layer-Name",
    itemZoomTo: "Zoomen auf",
    itemTransparency: "Transparenz",
    itemTransparent: "Transparent",
    itemOpaque: "Nicht transparent",
    itemMoveUp: "Nach oben verschieben",
    itemMoveDown: "Nach unten verschieben",
    itemDesc: "Beschreibung",
    itemDownload: "Herunterladen",
    itemToAttributeTable: "Attributtabelle öffnen",
    itemShowItemDetails: "Elementdetails anzeigen",
    empty: "leer",
    removePopup: "Pop-up deaktivieren",
    enablePopup: "Pop-up aktivieren"
  })
);
| cob222/CPG | widgets/LayerList/nls/de/strings.js | JavaScript | apache-2.0 | 651 |
/* See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Esri Inc. licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.esri.gpt.catalog.arcims;
import com.esri.gpt.framework.security.credentials.UsernamePasswordCredentials;
import com.esri.gpt.framework.util.Val;
import com.esri.gpt.framework.xml.DomUtil;
import java.io.IOException;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.xml.sax.SAXException;
/**
* Test connection harvest request.
*/
public class TestConnectionRequest extends HarvestRequest {
  // class variables =============================================================
  /** catalog service name */
  private static final String CATALOG_SERVICE_NAME = "catalog";
  // instance variables ==========================================================
  /** service name */
  private String _serviceName = "";
  // constructors ================================================================
  /**
   * Creates instance of the request.
   * @param url host url
   * @param timeout connection timeout
   * @param serviceName name of the ArcIMS service expected in the response
   */
  public TestConnectionRequest(
    String url, int timeout, String serviceName) {
    super();
    setService(url, timeout);
    setServiceName(serviceName);
  }
  /**
   * Creates instance of the request.
   * @param credentials credentials
   * @param url host url
   * @param timeout connection timeout
   * @param serviceName name of the ArcIMS service expected in the response
   */
  public TestConnectionRequest(
    UsernamePasswordCredentials credentials, String url, int timeout,
    String serviceName) {
    super(credentials);
    setService(url, timeout);
    setServiceName(serviceName);
  }
  // properties ==================================================================
  // methods =====================================================================
  /**
   * Tests connection.
   * <p>Sends a GETCLIENTSERVICES ArcXML request and checks whether the
   * configured service name appears among the services in the response.</p>
   * @return <code>true</code> if the service is listed by the server
   * @throws ImsServiceException if the request fails or the response cannot be parsed
   */
  public boolean testConnection()
    throws ImsServiceException {
    StringBuilder sb = new StringBuilder();
    sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
    sb.append("<GETCLIENTSERVICES/>");
    setAxlRequest(sb.toString());
    executeRequest();
    if (wasActionOK()) {
      try {
        Document document =
          DomUtil.makeDomFromString(getAxlResponse(), false);
        XPath xPath = XPathFactory.newInstance().newXPath();
        // NOTE(review): the service name is embedded directly inside the XPath
        // expression; a name containing a double quote would break the query.
        // Confirm that service names are validated upstream.
        Node nService = (Node) xPath.evaluate(
          "/ARCXML/RESPONSE/SERVICES/SERVICE[@NAME=\"" + getServiceName() + "\"]",
          document, XPathConstants.NODE);
        return nService != null;
      } catch (XPathExpressionException ex) {
        throw new ImsResponseException("Response parse error.", ex);
      } catch (ParserConfigurationException ex) {
        throw new ImsResponseException("Response parse error.", ex);
      } catch (SAXException ex) {
        throw new ImsResponseException("Response parse error.", ex);
      } catch (IOException ex) {
        throw new ImsResponseException("Response parse error.", ex);
      }
    } else {
      throw new ImsResponseException("Invalid response.");
    }
  }
  /**
   * Creates service.
   * <p>The request is always sent to the well-known "catalog" service;
   * the service under test is only matched against the response.</p>
   * @param url host url
   * @param timeout connection timeout
   */
  private void setService(String url, int timeout) {
    ImsService service = new ImsService();
    service.setServiceName(CATALOG_SERVICE_NAME);
    service.setServerUrl(Val.chkStr(url));
    service.setTimeoutMillisecs(Math.max(0, timeout));
    setService(service);
  }
  /**
   * Gets service name.
   * @return service name
   */
  private String getServiceName() {
    return _serviceName;
  }
  /**
   * Sets service name to look for in the response.
   * @param serviceName service name
   */
  private void setServiceName(String serviceName) {
    _serviceName = Val.chkStr(serviceName);
  }
}
| GeoinformationSystems/GeoprocessingAppstore | src/com/esri/gpt/catalog/arcims/TestConnectionRequest.java | Java | apache-2.0 | 4,303 |
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.financial.convention;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
/**
* Repository for rates and associated metadata - e.g. LIBOR/EURIBOR etc...
*/
public interface ConventionBundleMaster {
/**
* Search the master for matching convention bundles
* @param searchRequest a request object containing the query parameters
* @return a search result object containing the resulting matches plus metadata
*/
ConventionBundleSearchResult searchConventionBundle(ConventionBundleSearchRequest searchRequest);
/**
* Search the master for matching convention bundles in the history
* @param searchRequest a request object containing the historic query parameters
* @return a search result object containing the resulting matches plus metadata
*/
ConventionBundleSearchResult searchHistoricConventionBundle(ConventionBundleSearchHistoricRequest searchRequest);
/**
* A direct look-up of a convention bundle using a UniqueId
* @param uniqueId the unique identifier
* @return the matching convention bundle, wrapped in a metadata document
*/
ConventionBundleDocument getConventionBundle(UniqueId uniqueId);
/**
* Add a new convention bundle to the master
* @param bundle The id bundle
* @param convention The conventions
* @return the UniqueId of the convention bundle
*/
UniqueId add(ExternalIdBundle bundle, ConventionBundleImpl convention);
}
| jeorme/OG-Platform | projects/OG-Financial/src/main/java/com/opengamma/financial/convention/ConventionBundleMaster.java | Java | apache-2.0 | 1,603 |
//
// Encog(tm) Core v3.3 - .Net Version
// http://www.heatonresearch.com/encog/
//
// Copyright 2008-2014 Heaton Research, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// For more information on Heaton Research copyrights, licenses
// and trademarks visit:
// http://www.heatonresearch.com/copyright
//
using Encog.Engine.Network.Activation;
using Encog.ML.Data;
using Encog.Neural.Flat;
using Encog.Util;
namespace Encog.MathUtil.Matrices.Hessian
{
    /// <summary>
    /// A threaded worker that is used to calculate the first derivatives of the
    /// output of the neural network. These values are ultimately used to calculate
    /// the Hessian.
    /// </summary>
    public class ChainRuleWorker
    {
        /// <summary>
        /// The actual values from the neural network.
        /// </summary>
        private readonly double[] _actual;
        /// <summary>
        /// The flat network.
        /// </summary>
        private readonly FlatNetwork _flat;
        /// <summary>
        /// The gradients.
        /// </summary>
        private readonly double[] _gradients;
        /// <summary>
        /// The high range.
        /// </summary>
        private readonly int _high;
        /// <summary>
        /// The neuron counts, per layer.
        /// </summary>
        private readonly int[] _layerCounts;
        /// <summary>
        /// The deltas for each layer.
        /// </summary>
        private readonly double[] _layerDelta;
        /// <summary>
        /// The feed counts, per layer.
        /// </summary>
        private readonly int[] _layerFeedCounts;
        /// <summary>
        /// The layer indexes.
        /// </summary>
        private readonly int[] _layerIndex;
        /// <summary>
        /// The output from each layer.
        /// </summary>
        private readonly double[] _layerOutput;
        /// <summary>
        /// The sums.
        /// </summary>
        private readonly double[] _layerSums;
        /// <summary>
        /// The low range.
        /// </summary>
        private readonly int _low;
        /// <summary>
        /// The total first derivatives.
        /// </summary>
        private readonly double[] _totDeriv;
        /// <summary>
        /// The training data.
        /// </summary>
        private readonly IMLDataSet _training;
        /// <summary>
        /// The weight count.
        /// </summary>
        private readonly int _weightCount;
        /// <summary>
        /// The index to each layer's weights and thresholds.
        /// </summary>
        private readonly int[] _weightIndex;
        /// <summary>
        /// The weights and thresholds.
        /// </summary>
        private readonly double[] _weights;
        /// <summary>
        /// The error (sum of squared errors accumulated over this worker's range).
        /// </summary>
        private double _error;
        /// <summary>
        /// The hessian for this worker.
        /// </summary>
        private readonly double[][] _hessian;
        /// <summary>
        /// The output neuron to calculate for.
        /// </summary>
        private int _outputNeuron;
        /// <summary>
        /// The Hessian matrix.
        /// </summary>
        public double[][] Hessian
        {
            get { return _hessian; }
        }
        /// <summary>
        /// Construct the chain rule worker.
        /// </summary>
        /// <param name="theNetwork">The network to calculate a Hessian for.</param>
        /// <param name="theTraining">The training data.</param>
        /// <param name="theLow">The low range.</param>
        /// <param name="theHigh">The high range.</param>
        public ChainRuleWorker(FlatNetwork theNetwork, IMLDataSet theTraining, int theLow, int theHigh)
        {
            _weightCount = theNetwork.Weights.Length;
            _hessian = EngineArray.AllocateDouble2D(_weightCount,_weightCount);
            _training = theTraining;
            _flat = theNetwork;
            _layerDelta = new double[_flat.LayerOutput.Length];
            _actual = new double[_flat.OutputCount];
            _totDeriv = new double[_weightCount];
            _gradients = new double[_weightCount];
            // The following are aliases into the flat network's internal arrays,
            // not copies; _flat.Compute(...) updates them in place.
            _weights = _flat.Weights;
            _layerIndex = _flat.LayerIndex;
            _layerCounts = _flat.LayerCounts;
            _weightIndex = _flat.WeightIndex;
            _layerOutput = _flat.LayerOutput;
            _layerSums = _flat.LayerSums;
            _layerFeedCounts = _flat.LayerFeedCounts;
            _low = theLow;
            _high = theHigh;
        }
        /// <summary>
        /// The output neuron we are processing.
        /// </summary>
        public int OutputNeuron
        {
            get { return _outputNeuron; }
            set { _outputNeuron = value; }
        }
        /// <summary>
        /// The first derivatives, used to calculate the Hessian.
        /// </summary>
        public double[] Derivative
        {
            get { return _totDeriv; }
        }
        /// <summary>
        /// The gradients.
        /// </summary>
        public double[] Gradients
        {
            get { return _gradients; }
        }
        /// <summary>
        /// The SSE error.
        /// </summary>
        public double Error
        {
            get { return _error; }
        }
        /// <summary>
        /// The flat network.
        /// </summary>
        public FlatNetwork Network
        {
            get { return _flat; }
        }
        #region IEngineTask Members
        /// <inheritdoc />
        public void Run()
        {
            // Reset accumulators: this worker may be re-run once per output neuron.
            _error = 0;
            EngineArray.Fill(_hessian, 0);
            EngineArray.Fill(_totDeriv, 0);
            EngineArray.Fill(_gradients, 0);
            var derivative = new double[_weightCount];
            // Loop over every training element
            for (int i = _low; i <= _high; i++)
            {
                IMLDataPair pair = _training[i];
                // Derivatives are computed per element, then accumulated in Process().
                EngineArray.Fill(derivative, 0);
                Process(_outputNeuron, derivative, pair);
            }
        }
        #endregion
        /// <summary>
        /// Process one training set element.
        /// </summary>
        /// <param name="outputNeuron">The output neuron.</param>
        /// <param name="derivative">The derivatives.</param>
        /// <param name="pair">The training pair.</param>
        private void Process(int outputNeuron, double[] derivative, IMLDataPair pair)
        {
            _flat.Compute(pair.Input, _actual);
            double e = pair.Ideal[outputNeuron] - _actual[outputNeuron];
            _error += e*e;
            // Seed the layer deltas: only the single output neuron being
            // differentiated contributes; all other outputs are zeroed.
            for (int i = 0; i < _actual.Length; i++)
            {
                if (i == outputNeuron)
                {
                    _layerDelta[i] = _flat.ActivationFunctions[0]
                        .DerivativeFunction(_layerSums[i],
                                            _layerOutput[i]);
                }
                else
                {
                    _layerDelta[i] = 0;
                }
            }
            // Back-propagate the deltas through each trainable level.
            for (int i = _flat.BeginTraining; i < _flat.EndTraining; i++)
            {
                ProcessLevel(i, derivative);
            }
            // calculate gradients
            for (int j = 0; j < _weights.Length; j++)
            {
                _gradients[j] += e*derivative[j];
                _totDeriv[j] += derivative[j];
            }
            // update hessian
            for (int i = 0; i < _weightCount; i++)
            {
                for (int j = 0; j < _weightCount; j++)
                {
                    _hessian[i][j] += derivative[i] * derivative[j];
                }
            }
        }
        /// <summary>
        /// Process one level.
        /// </summary>
        /// <param name="currentLevel">The level.</param>
        /// <param name="derivative">The derivatives.</param>
        private void ProcessLevel(int currentLevel, double[] derivative)
        {
            int fromLayerIndex = _layerIndex[currentLevel + 1];
            int toLayerIndex = _layerIndex[currentLevel];
            int fromLayerSize = _layerCounts[currentLevel + 1];
            int toLayerSize = _layerFeedCounts[currentLevel];
            int index = _weightIndex[currentLevel];
            IActivationFunction activation = _flat
                .ActivationFunctions[currentLevel + 1];
            // handle weights
            for each source neuron: accumulate the weight derivatives and
            int yi = fromLayerIndex;
            for (int y = 0; y < fromLayerSize; y++)
            {
                double output = _layerOutput[yi];
                double sum = 0;
                int xi = toLayerIndex;
                int wi = index + y;
                for (int x = 0; x < toLayerSize; x++)
                {
                    derivative[wi] += output*_layerDelta[xi];
                    sum += _weights[wi]*_layerDelta[xi];
                    wi += fromLayerSize;
                    xi++;
                }
                // Propagate the delta to this source neuron via the chain rule.
                _layerDelta[yi] = sum
                                  *(activation.DerivativeFunction(_layerSums[yi], _layerOutput[yi]));
                yi++;
            }
        }
    }
| yonglehou/encog-dotnet-core | encog-core-cs/MathUtil/Matrices/Hessian/ChainRuleWorker.cs | C# | apache-2.0 | 9,788 |
using System;
using System.Collections.Generic;
using System.Text;
using NUnit.Framework;
using OpenQA.Selenium.Environment;
namespace OpenQA.Selenium
{
    [TestFixture]
    public class TextPagesTest : DriverTestFixture
    {
        // URL of a plain-text (non-HTML) resource served by the test web server.
        private string textPage = EnvironmentManager.Instance.UrlBuilder.WhereIs("plain.txt");
        // Verifies that the page source of a plain-text page is the raw text.
        // Ignored on browsers that wrap plain text in an HTML "pre" scaffold.
        [Test]
        [IgnoreBrowser(Browser.IE, "IE renders plain text pages as HTML with <pre> tags.")]
        [IgnoreBrowser(Browser.Firefox, "Firefox renders plain text pages as HTML with <pre> tags.")]
        [IgnoreBrowser(Browser.Chrome, "Chrome renders plain text pages as HTML with <pre> tags.")]
        [IgnoreBrowser(Browser.PhantomJS, "PhantomJS renders plain text pages as HTML with <pre> tags.")]
        public void ShouldBeAbleToLoadASimplePageOfText()
        {
            driver.Url = textPage;
            string source = driver.PageSource;
            Assert.AreEqual("Test", source);
        }
        // Element lookup is only meaningful for HTML documents, so a plain-text
        // page must always raise NoSuchElementException.
        [Test]
        [ExpectedException(typeof(NoSuchElementException))]
        public void FindingAnElementOnAPlainTextPageWillNeverWork()
        {
            driver.Url = textPage;
            driver.FindElement(By.Id("foo"));
        }
        // Adding cookies to non-HTML documents should fail; browsers known to
        // allow it anyway are excluded from this test.
        [Test]
        [IgnoreBrowser(Browser.IE, "IE allows addition of cookie on text pages")]
        [IgnoreBrowser(Browser.Chrome, "Chrome allows addition of cookie on text pages")]
        [IgnoreBrowser(Browser.PhantomJS, "PhantomJS allows addition of cookie on text pages")]
        [ExpectedException(typeof(WebDriverException))]
        public void ShouldThrowExceptionWhenAddingCookieToAPageThatIsNotHtml()
        {
            driver.Url = textPage;
            Cookie cookie = new Cookie("hello", "goodbye");
            driver.Manage().Cookies.AddCookie(cookie);
        }
    }
}
| jmt4/Selenium2 | dotnet/test/WebDriver.Common.Tests/TextPagesTest.cs | C# | apache-2.0 | 1,835 |
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8"/>
<title>Release notes: GEE 5.1.2</title>
<link rel="stylesheet" href="../css/styles.css" type="text/css" media="all" />
<link rel="stylesheet" href="../css/containers.css" type="text/css" media="all" />
</head>
<body>
<!-- 6259403.html -->
<div class="container">
<div class="sidebar1">
<p class="sidebar-toc"><a href="../answer/7160001.html">Open GEE 5.2.1</a></p>
<p class="sidebar-toc"><a href="../answer/7160000.html">Open GEE 5.2.0</a></p>
<p class="sidebar-toc"><a href="../answer/7159245.html">GEE 5.1.3</a></p>
<p class="sidebar-toc"><a href="../answer/6259403.html" class="current-file">GEE 5.1.2</a></p>
<p class="sidebar-toc"><a href="../answer/6159443.html">GEE 5.1.1</a></p>
<p class="sidebar-toc"><a href="../answer/6078762.html">GEE 5.1.0</a></p>
<p class="sidebar-toc"><a href="../answer/6053170.html">GEE 5.0.2</a></p>
<p class="sidebar-toc"><a href="../answer/4556704.html">GEE 5.0.1</a></p>
<p class="sidebar-toc"><a href="../answer/3424202.html">GEE 5.0</a></p>
</div>
<div class="content">
<a name="top_of_file"></a>
<p><img src="../art/common/googlelogo_color_260x88dp.png" width="130" height="44" alt="Google logo" /></p>
<h1><a href="../index.html">Google Earth Enterprise Documentation Home</a> | Release notes</h1>
<h2>Release notes: GEE 5.1.2</h2>
<p>GEE 5.1.2 is an incremental release of <a href="../answer/6078762.html">GEE 5.1</a>. It contains several bug fixes in Fusion and GEE Server, and updates to third-party libraries.</p>
<h5>
<p id="supported_5.1.2">Supported Platforms</p>
</h5>
<p>The Google Earth Enterprise 5.1.2 release is supported on 64-bit versions of the following operating systems:</p>
<ul>
<li>Red Hat Enterprise Linux versions 6.0 to 7.1, including the most recent security patches</li>
<li>CentOS 6.0 to 7.1</li>
<li>Ubuntu 10.04 and 12.04 LTS</li>
</ul>
<p>Google Earth Enterprise 5.1.2 is compatible with Google Earth Enterprise Client (EC) version 7.1.5 and Google Earth Plug-in versions 7.0.1 - 7.1.5.1557 for Windows and Mac.</p>
<h5>
<p id="library_updates_5.1.2">Library updates</p>
</h5>
<table class="nice-table">
<tbody>
<tr>
<th width="50%">Library</th>
<th width="50%">Updated version</th>
</tr>
<tr>
<td>Google Maps JS API V3</td>
<td>3.20</td>
</tr>
<tr>
<td>Apache httpd</td>
<td>2.2.31</td>
</tr>
<tr>
<td>OpenSSL</td>
<td>0.9.8zg</td>
</tr>
<tr>
<td>libcurl</td>
<td>7.45</td>
</tr>
</tbody>
</table>
<h5>
<p id="resolved_issues_5.1.2">Resolved Issues</p>
</h5>
<div>
<table class="nice-table">
<tbody>
<tr>
<th>Number</th>
<th class="narrow">Description</th>
<th>Resolution</th>
</tr>
<tr>
<td>19946027</td>
<td>Support additional search option.</td>
                  <td>A new <b>SearchGoogle</b> search tab is available for use with 3D databases, which queries Google's location database. Search suggestions are also offered as you type.</td>
</tr>
<tr>
<td>20225923, 19871110, 11179114</td>
<td>gesystemmanager crashes when processing very large projects</td>
<td>Fixed in Fusion: Optimized management of listeners of CombinedRPAsset versions in asset root. Also reduced the size of asset configuration files.</td>
</tr>
<tr>
<td>4431033</td>
<td>Extra data added in imagery projects for imagery that's not visible</td>
<td>Fixed in Fusion: skip identical assets with the same bounds.</td>
</tr>
<tr>
<td>21126307</td>
<td>Remove option for Disconnected Add-on from GEE Server installer to simplify installation.</td>
<td>Fixed in GEE Server Installer: Disconnected Add-on is always installed.</td>
</tr>
<tr>
<td>20185775</td>
<td>Add a tool to check the consistency of pushed databases in the publish root.</td>
<td>Run <code>gecheckpublishroot.py</code>.</td>
</tr>
<tr>
<td>10280645</td>
<td><code>gemodifyimageryresource</code> incorrectly reports bad images.</td>
<td>Fixed in Fusion/gevirtualraster.</td>
</tr>
<tr>
<td>16135553</td>
<td><code>gemaskgen</code> reports 'Process terminated by signal 8'.</td>
<td>Fixed in Fusion: check for invalid image sizes; improve logging.</td>
</tr>
<tr>
<td>22007798</td>
                  <td>Missing Apache directives when adding an SSL virtual host with <code>geserveradmin --ssl</code>.</td>
<td>Fixed in GEE Server: properly configure virtual host when 'vh_url' parameter is not specified.</td>
</tr>
<tr>
<td>21852939</td>
<td>Support opacity on 2D map layers.</td>
<td>Fixed in GEE Server: 2D Map Viewer includes sliders for controlling layer opacity</td>
</tr>
<tr>
<td>19499802</td>
<td><code>gebuild</code> does not check whether source file has changed.</td>
<td>Fixed in Fusion: 'Refresh' functionality added to <code>gemodifyvectorresource</code>, <code>gemodifyimageryresource</code>, and <code>gemodifyterrainresource</code> to detect changes to source files, and update assets if necessary.</td>
</tr>
<tr>
<td>4171577</td>
<td>Disconnected delta publishes not possible if previous database versions are cleaned.</td>
<td>Fixed in GEE Fusion: generate and store database manifest files within versions of a database.</td>
</tr>
<tr>
<td>19962321</td>
<td>Report the number of cached Assets and Asset Versions in <code>getop</code></td>
<td>Fixed in Fusion.</td>
</tr>
<tr>
<td>20859774</td>
<td>GLC Assembly fails with missing file: <code>/opt/google/gehttpd/htdocs/cutter/template/earth</code>.</td>
<td>Fixed in GEE Server/Cutter.</td>
</tr>
<tr>
<td>19995336, 21301170, 21301504</td>
<td>Selecting "Save As" creates duplicate IDs in Fusion.</td>
<td>Fixed in Fusion for Map Layer, Raster projects and Vector assets.</td>
</tr>
<tr>
<td>18280076</td>
<td>Implement dbroot conversion</td>
<td>Fixed in GEE Server: enable support for disconnected publishing of GEE 4.x database to GEE 5.x Server. </td>
</tr>
<tr>
<td>6820671</td>
<td>Fatal error reported while registering disconnected database: unifiedindex files not found</td>
<td>Fixed in Fusion. </td>
</tr>
<tr>
<td>17303394</td>
<td>Implement automatic detection of old glbs for globe cutting or assembly</td>
<td>Fixed in GEE Server.</td>
</tr>
<tr>
<td>19991743</td>
<td>Add support for database short names when pushing</td>
<td>Fixed in GEE Server: e.g. <code>geserveradmin --adddb Databases/BlueMarble</code>. </td>
</tr>
<tr>
<td>18917848</td>
<td>Include layer icons in 2D Maps</td>
<td>Fixed in GEE Server. </td>
</tr>
<tr>
<td>18922625</td>
<td>Coordinate Search triggers Internal Server error (HTTP 500) for bad query strings</td>
<td>Fixed in GEE Server: return HTTP 400 for incorrect search requests.</td>
</tr>
<tr>
<td>22097637</td>
<td>Support search history in client</td>
<td>Fixed in GEE Server: clicking on 'History' in EC shows previous successful search requests.</td>
</tr>
<tr>
<td>22318180</td>
<td>Security vulnerabilities in Cutter.</td>
<td>Fixed in GEE Server/Cutter: patched <code>gecutter</code> and validated inputs to minimize risk of remote-code execution and SQL-injection when Cutter is enabled.</td>
</tr>
<tr>
<td>24132440</td>
                  <td>Uncaught exception when serving 3D glbs: GET /query?request=Json&amp;var=geeServerDefs&amp;is2d=t</td>
<td>Fixed in Portable Server.</td>
</tr>
<tr>
<td>24103836</td>
<td>Support 'Satellite' mapTypeControl for 2D databases built with Google Basemap</td>
<td>Fixed in GEE Server.</td>
</tr>
<tr>
<td>23937667</td>
<td>Search bar is not available when viewing 3D portable globe</td>
<td>Fixed in Portable Server: POI and Places search available.</td>
</tr>
<tr>
<td>23557041</td>
<td>Missing icons in search results</td>
<td>Fixed in Portable Server.</td>
</tr>
<tr>
<td>22097637</td>
<td>Provide search tabs for 2D Map portable</td>
<td>Fixed in GEE Server/Cutter.</td>
</tr>
<tr>
<td>23569399</td>
<td>'db_id' parameter missing when POI search is not present in a published database's search services</td>
<td>Fixed in GEE Server.</td>
</tr>
<tr>
<td>23496088</td>
<td>Exception thrown when Places search returns no results</td>
<td>Fixed in Portable Server.</td>
</tr>
<tr>
<td>20068112</td>
<td><code>geserveradmin --addvh --ssl</code> creates a virtual host with an invalid port number</td>
<td>Fixed in GEE Server: use the '--vhurl' option for non-default SSL ports. See <code>geserveradmin</code> help for usage & syntax.</td>
</tr>
<tr>
<td>22958187</td>
<td>Include Google Geocoder in default search services</td>
<td>Fixed in GEE Server: 'SearchGoogle' tab is available as a default search service for both 2D and 3D databases; queries Google's geocoders and requires Internet access (client-side)</td>
</tr>
<tr>
<td>23399349</td>
<td>Incorrect handling of POI search queries like "Paris, France"</td>
<td>Fixed in GEE Server: search queries like "Paris, France" are parsed as a single search token.</td>
</tr>
<tr>
<td>1826725</td>
<td><code>gepackgen</code> fails with 'Specified data products have different coverage'.</td>
<td>Fixed in Fusion: implement Cluster Analyzer for virtual rasters (*.khvr files). It analyzes inset clustering and area ratios to suggest optimal splits of a virtual raster. Check <code>gerasterimport</code> log, and see <code>gevirtualraster</code> for usage and syntax.</td>
</tr>
<tr>
<td>22414308</td>
<td>Support snippet for 'View in Google Maps' in EC</td>
<td>Fixed in GEE Server: enable 'View in Google Maps' in EC, publishing 3D database with 'google_maps_url' snippet set to 'http://maps.google.com/'.</td>
</tr>
<tr>
<td>22958590</td>
<td>Places queries can makes server unresponsive for large number of search results</td>
<td>Fixed in GEE Server.</td>
</tr>
<tr>
<td>22879773</td>
<td>Federated Search returns HTTP 500 error</td>
<td>Fixed in GEE Server: if Coordinate search fails, proceed with Places search.</td>
</tr>
<tr>
<td>22954617</td>
<td>Viewport for displaying multiple POI search results is incorrectly calculated.</td>
<td>Fixed in GEE Server (2D Map Viewer).</td>
</tr>
<tr>
<td>21165472</td>
<td>GLC assembly fails to copy final glc to globes directory, for large glc files</td>
<td>Fixed in GEE Server/Cutter.</td>
</tr>
<tr>
<td>25422176</td>
<td>Fusion fails to push databases with very large POI files</td>
<td>Fixed in Fusion: updated internal data structures to support POI files > 4 GB; improved logging in POI parser to report exceptions when ingesting POI data into postgres.</td>
</tr>
<tr>
<td>11254639</td>
<td>EC makes calls to google.com when rendering search results</td>
<td>Fixed in GEE Server: localized all KML rendering; expose dbroot snippets in 'search_config' group: <code>kml_render_url, kml_search_url, error_page_url</code>.</td>
</tr>
<tr>
<td>25430798</td>
<td><code>SearchGoogle</code> search tab returns Server Error</td>
<td>Fixed in GEE Server: updated User-Agent header in search handler.</td>
</tr>
<tr>
<td>24407861</td>
<td>Support database pushes over HTTPS/SSL</td>
<td>Fixed in Fusion: Server Association Manager includes 'CA certificate' path and 'Insecure SSL connection' checkbox for self-signed certificates.</td>
</tr>
</tbody>
</table>
<div class="footer">
<p class="BackToTop"><a href="#top_of_file">Back to top</a>
<hr />
</p>
<p class="copyright">©2015 Google</p>
</div>
</div>
</body>
</html>
| google/earthenterprise | docs/geedocs/5.2.1/answer/6259403.html | HTML | apache-2.0 | 13,313 |
/**
* @fileoverview Rule to check for the usage of var.
* @author Jamund Ferguson
*/
"use strict";
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
const astUtils = require("../ast-utils");
//------------------------------------------------------------------------------
// Helpers
//------------------------------------------------------------------------------
/**
 * Determine whether a variable belongs to the global scope.
 * @param {eslint-scope.Variable} variable The variable to inspect.
 * @returns {boolean} `true` when the variable lives in the global scope.
 */
function isGlobal(variable) {
    const scope = variable.scope;

    return !!scope && scope.type === "global";
}
/**
 * Walk up the scope chain until a function or global scope is reached.
 *
 * @param {eslint-scope.Scope} scope - The starting scope.
 * @returns {eslint-scope.Scope} The nearest enclosing function scope, or the
 *      global scope if no function scope encloses the given one.
 */
function getEnclosingFunctionScope(scope) {
    let current = scope;

    while (current.type !== "function" && current.type !== "global") {
        current = current.upper;
    }
    return current;
}
/**
 * Report whether any reference to the variable comes from a different
 * (more deeply nested) function than the one the variable is declared in,
 * i.e. the variable is captured by a closure.
 *
 * @param {eslint-scope.Variable} variable - A variable to check.
 * @returns {boolean} `true` if the variable is used from a closure.
 */
function isReferencedInClosure(variable) {
    const resolveFunctionScope = scope => {
        while (scope.type !== "function" && scope.type !== "global") {
            scope = scope.upper;
        }
        return scope;
    };
    const declaringFunctionScope = resolveFunctionScope(variable.scope);

    return variable.references.some(
        reference => resolveFunctionScope(reference.from) !== declaringFunctionScope
    );
}
/**
 * Report whether the node is the left-hand side of a `for-in`/`for-of` loop,
 * i.e. it is assigned on each iteration of the loop.
 *
 * @param {ASTNode} node - A VariableDeclaration node to check.
 * @returns {boolean} `true` if the declaration is a loop assignee.
 */
function isLoopAssignee(node) {
    const parent = node.parent;
    const isForEachLoop = parent.type === "ForOfStatement" || parent.type === "ForInStatement";

    return isForEachLoop && parent.left === node;
}
/**
 * Report whether every declarator in the declaration carries an initializer.
 *
 * @param {ASTNode} node - A VariableDeclaration node to check.
 * @returns {boolean} `true` if all declarators are initialized.
 */
function isDeclarationInitialized(node) {
    for (const declarator of node.declarations) {
        if (declarator.init === null) {
            return false;
        }
    }
    return true;
}
const SCOPE_NODE_TYPE = /^(?:Program|BlockStatement|SwitchStatement|ForStatement|ForInStatement|ForOfStatement)$/;

/**
 * Find the nearest enclosing AST node that introduces a scope.
 *
 * @param {ASTNode} node - A node to start from (`VariableDeclaration` or
 *      `Identifier`).
 * @returns {ASTNode} One of `Program`, `BlockStatement`, `SwitchStatement`,
 *      `ForStatement`, `ForInStatement`, or `ForOfStatement`.
 */
function getScopeNode(node) {
    for (let current = node; current; current = current.parent) {
        if (SCOPE_NODE_TYPE.test(current.type)) {
            return current;
        }
    }

    /* istanbul ignore next : unreachable */
    return null;
}
/**
 * Report whether the variable has more than one definition site.
 *
 * @param {eslint-scope.Variable} variable - A variable to check.
 * @returns {boolean} `true` if the variable is redeclared.
 */
function isRedeclared(variable) {
    return variable.defs.length > 1;
}
/**
 * Build a predicate that reports whether a variable has any reference whose
 * source range falls outside the given scope node's range.
 *
 * @param {ASTNode} scopeNode - The scope node that bounds legal references.
 * @returns {Function} Predicate taking an eslint-scope Variable.
 */
function isUsedFromOutsideOf(scopeNode) {
    const [scopeStart, scopeEnd] = scopeNode.range;

    return variable => variable.references.some(reference => {
        const [refStart, refEnd] = reference.identifier.range;

        // A reference escapes the scope when it begins before the scope
        // opens or ends after the scope closes.
        return refStart < scopeStart || refEnd > scopeEnd;
    });
}
/**
 * Build a predicate that reports whether a variable is referenced inside its
 * own temporal dead zone.
 *
 * The predicate returns `true` when a (non-initializing) reference is:
 *
 * - before the declarator, e.g. `var a = b, b = 1;`
 * - inside the variable's default-value expression, e.g. `var {a = a} = {};`
 * - inside the declarator's initializer, e.g. `var a = a;`
 *
 * @param {ASTNode} node - The initializer node of a VariableDeclarator.
 * @returns {Function} Predicate taking an eslint-scope Variable.
 * @private
 */
function hasReferenceInTDZ(node) {
    const [initStart, initEnd] = node.range;

    return variable => {
        const id = variable.defs[0].name;
        const idStart = id.range[0];
        const defaultValue = id.parent.type === "AssignmentPattern" ? id.parent.right : null;
        const defaultRange = defaultValue ? defaultValue.range : null;

        return variable.references.some(reference => {
            if (reference.init) {
                return false;
            }
            const [refStart, refEnd] = reference.identifier.range;

            return refStart < idStart ||
                (defaultRange !== null && refStart >= defaultRange[0] && refEnd <= defaultRange[1]) ||
                (refStart >= initStart && refEnd <= initEnd);
        });
    };
}
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
module.exports = {
    meta: {
        docs: {
            description: "require `let` or `const` instead of `var`",
            category: "ECMAScript 6",
            recommended: false,
            url: "https://eslint.org/docs/rules/no-var"
        },
        schema: [],
        fixable: "code"
    },
    create(context) {
        // Needed to locate the `var` keyword token when building the autofix.
        const sourceCode = context.getSourceCode();
        /**
         * Checks whether the variables which are defined by the given declarator node have their references in TDZ.
         *
         * @param {ASTNode} declarator - The VariableDeclarator node to check.
         * @returns {boolean} `true` if one of the variables which are defined by the given declarator node have their references in TDZ.
         */
        function hasSelfReferenceInTDZ(declarator) {
            if (!declarator.init) {
                return false;
            }
            const variables = context.getDeclaredVariables(declarator);
            return variables.some(hasReferenceInTDZ(declarator.init));
        }
        /**
         * Checks whether it can fix a given variable declaration or not.
         * It cannot fix in the following cases:
         *
         * - A variable is a global variable.
         * - A variable is declared on a SwitchCase node.
         * - A variable is redeclared.
         * - A variable is used from outside the scope.
         * - A variable is used from a closure within a loop.
         * - A variable might be used before it is assigned within a loop.
         * - A variable might be used in TDZ.
         * - A variable is declared in statement position (e.g. a single-line `IfStatement`)
         *
         * ## A variable is declared on a SwitchCase node.
         *
         * If this rule modifies 'var' declarations on a SwitchCase node, it
         * would generate the warnings of 'no-case-declarations' rule. And the
         * 'eslint:recommended' preset includes 'no-case-declarations' rule, so
         * this rule doesn't modify those declarations.
         *
         * ## A variable is redeclared.
         *
         * The language spec disallows redeclarations of `let` declarations.
         * Those variables would cause syntax errors.
         *
         * ## A variable is used from outside the scope.
         *
         * The language spec disallows accesses from outside of the scope for
         * `let` declarations. Those variables would cause reference errors.
         *
         * ## A variable is used from a closure within a loop.
         *
         * A `var` declaration within a loop shares the same variable instance
         * across all loop iterations, while a `let` declaration creates a new
         * instance for each iteration. This means if a variable in a loop is
         * referenced by any closure, changing it from `var` to `let` would
         * change the behavior in a way that is generally unsafe.
         *
         * ## A variable might be used before it is assigned within a loop.
         *
         * Within a loop, a `let` declaration without an initializer will be
         * initialized to null, while a `var` declaration will retain its value
         * from the previous iteration, so it is only safe to change `var` to
         * `let` if we can statically determine that the variable is always
         * assigned a value before its first access in the loop body. To keep
         * the implementation simple, we only convert `var` to `let` within
         * loops when the variable is a loop assignee or the declaration has an
         * initializer.
         *
         * @param {ASTNode} node - A variable declaration node to check.
         * @returns {boolean} `true` if it can fix the node.
         */
        function canFix(node) {
            const variables = context.getDeclaredVariables(node);
            const scopeNode = getScopeNode(node);
            if (node.parent.type === "SwitchCase" ||
                node.declarations.some(hasSelfReferenceInTDZ) ||
                variables.some(isGlobal) ||
                variables.some(isRedeclared) ||
                variables.some(isUsedFromOutsideOf(scopeNode))
            ) {
                return false;
            }
            if (astUtils.isInLoop(node)) {
                if (variables.some(isReferencedInClosure)) {
                    return false;
                }
                if (!isLoopAssignee(node) && !isDeclarationInitialized(node)) {
                    return false;
                }
            }
            if (
                !isLoopAssignee(node) &&
                !(node.parent.type === "ForStatement" && node.parent.init === node) &&
                !astUtils.STATEMENT_LIST_PARENTS.has(node.parent.type)
            ) {
                // If the declaration is not in a block, e.g. `if (foo) var bar = 1;`, then it can't be fixed.
                return false;
            }
            return true;
        }
        /**
         * Reports a given variable declaration node.
         *
         * @param {ASTNode} node - A variable declaration node to report.
         * @returns {void}
         */
        function report(node) {
            const varToken = sourceCode.getFirstToken(node);
            context.report({
                node,
                message: "Unexpected var, use let or const instead.",
                fix(fixer) {
                    if (canFix(node)) {
                        return fixer.replaceText(varToken, "let");
                    }
                    // Returning null reports the problem without autofixing,
                    // because converting this declaration could change behavior.
                    return null;
                }
            });
        }
        return {
            // Checked on :exit so all child declarators have been visited.
            "VariableDeclaration:exit"(node) {
                if (node.kind === "var") {
                    report(node);
                }
            }
        };
    }
};
| ian-donaldson/world.exposed | node_modules/eslint/lib/rules/no-var.js | JavaScript | apache-2.0 | 11,740 |
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type A = for<'b, 'a: 'b> fn(); //~ ERROR lifetime bounds cannot be used in this context
type B = for<'b, 'a: 'b,> fn(); //~ ERROR lifetime bounds cannot be used in this context
type C = for<'b, 'a: 'b +> fn(); //~ ERROR lifetime bounds cannot be used in this context
type D = for<'a, T> fn(); //~ ERROR only lifetime parameters can be used in this context
type E = for<T> Fn(); //~ ERROR only lifetime parameters can be used in this context
// Compile-fail UI test: each alias above must emit the diagnostic noted on its
// own line (matched by the `//~ ERROR` annotations). Comments are placed after
// the aliases so their line numbers — recorded in the expected stderr — do not
// shift. `main` is deliberately empty; nothing in this file is meant to run.
fn main() {}
| GBGamer/rust | src/test/ui/bounds-lifetime.rs | Rust | apache-2.0 | 922 |
<?php
// +----------------------------------------------------------------------
// | OneThink [ WE CAN DO IT JUST THINK IT ]
// +----------------------------------------------------------------------
// | Copyright (c) 2013 http://www.onethink.cn All rights reserved.
// +----------------------------------------------------------------------
// | Author: 凡星
// +----------------------------------------------------------------------
namespace Admin\Controller;
/**
 * Controller that manages credit configuration records ('credit_config' model).
 *
 * @author 凡星
 */
class CreditController extends AdminController {
	/**
	 * Show the list of credit configuration records.
	 * Filters to token '0' and stores the query condition in the session
	 * before delegating to the shared model-list helper.
	 */
	public function lists() {
		$model = $this->getModel ( 'credit_config' );
		$map ['token'] = '0';
		session ( 'common_condition', $map );
		$list_data = $this->_get_model_list ( $model );
		$this->assign ( $list_data );
		$this->meta_title = $model ['title'] . '列表';
		$this->display ( 'Think:lists' );
	}
	/**
	 * Edit a credit configuration record and clear the credit cache.
	 *
	 * Fix: the fallback model key was the misspelled 'credit_confit', which is
	 * inconsistent with lists(); it now resolves 'credit_config'.
	 *
	 * @param array|null $model model definition; resolved when not supplied
	 * @param int $id id of the record to edit
	 */
	public function edit($model = null, $id = 0) {
		D ( 'Common/Credit' )->clear ();
		is_array ( $model ) || $model = $this->getModel ( 'credit_config' );
		$this->meta_title = '编辑' . $model ['title'];
		parent::common_edit ( $model, $id, 'Think:edit' );
	}
	/**
	 * Create a new credit configuration record.
	 *
	 * @param array|null $model model definition; resolved when not supplied
	 */
	public function add($model = null) {
		is_array ( $model ) || $model = $this->getModel ( 'credit_config' );
		$this->meta_title = '新增' . $model ['title'];
		parent::common_add ( $model, 'Think:add' );
	}
	/**
	 * Delete credit configuration records and clear the credit cache.
	 *
	 * @param array|null $model model definition; resolved when not supplied
	 * @param mixed $ids ids of the records to delete
	 */
	public function del($model = null, $ids = null) {
		D ( 'Common/Credit' )->clear ();
		is_array ( $model ) || $model = $this->getModel ( 'credit_config' );
		parent::common_del ( $model, $ids );
	}
} | lovebull/wild_weiphp | Application/Admin/Controller/CreditController.class.php | PHP | apache-2.0 | 1,690 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package macromedia.asc.parser;
import macromedia.asc.util.*;
import macromedia.asc.semantics.*;
/**
 * AST node representing a rest expression.
 */
public class RestExpressionNode extends Node
{
	public Node expr;

	public RestExpressionNode(Node expr)
	{
		this.expr = expr;
	}

	public Value evaluate(Context cx, Evaluator evaluator)
	{
		// Delegate to the evaluator only when its feature check accepts
		// this node; otherwise no value is produced.
		return evaluator.checkFeature(cx, this) ? evaluator.evaluate(cx, this) : null;
	}

	public String toString()
	{
		return "RestExpression";
	}
}
| adufilie/flex-sdk | modules/asc/src/java/macromedia/asc/parser/RestExpressionNode.java | Java | apache-2.0 | 1,292 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.editor;
import com.intellij.openapi.editor.colors.TextAttributesKey;
public interface HighlighterColors {
  // Text attribute key registered under the id "TEXT" — the default
  // attributes for plain editor text.
  TextAttributesKey TEXT = TextAttributesKey.createTextAttributesKey("TEXT");
  // Text attribute key registered under the id "BAD_CHARACTER" —
  // presumably used to highlight invalid characters (name-based; confirm at call sites).
  TextAttributesKey BAD_CHARACTER = TextAttributesKey.createTextAttributesKey("BAD_CHARACTER");
}
| android-ia/platform_tools_idea | platform/platform-api/src/com/intellij/openapi/editor/HighlighterColors.java | Java | apache-2.0 | 914 |
// Generated by xsd compiler for android/java
// DO NOT CHANGE!
package com.ebay.trading.api;
import java.io.Serializable;
import com.leansoft.nano.annotation.*;
import java.util.List;
/**
 *
 * Indicates whether the category supports the use of email to contact the
 * seller for Classified Ad format listings. Added for EbayMotors Pro users.
 *
 * NOTE: this class is generated by the xsd compiler (see file header) — avoid
 * hand-editing the field or annotations.
 */
public class EBayMotorsProContactByEmailEnabledDefinitionType implements Serializable {
	private static final long serialVersionUID = -1L;
	// Catch-all list for arbitrary child elements, bound via the Nano
	// framework's @AnyElement mapping.
	@AnyElement
	@Order(value=0)
	public List<Object> any;
} | uaraven/nano | sample/webservice/eBayDemoApp/src/com/ebay/trading/api/EBayMotorsProContactByEmailEnabledDefinitionType.java | Java | apache-2.0 | 563 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.launchers;
import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;
import org.apache.geode.internal.AvailablePortHelper;
import org.apache.geode.test.junit.rules.gfsh.GfshRule;
/**
 * Verifies that a restarted server hosting persistent regions logs
 * "startup completed" only after it has logged value recovery for every
 * disk store — i.e. completion is announced last among the three lines.
 */
public class ServerStartupValueRecoveryNotificationTest {
  private static final String SERVER_1_NAME = "server1";
  private static final String LOCATOR_NAME = "locator";
  private static final String DISKSTORE_1 = "diskstore1";
  private static final String DISKSTORE_2 = "diskstore2";
  @Rule
  public GfshRule gfshRule = new GfshRule();
  @Rule
  public TemporaryFolder temporaryFolder = new TemporaryFolder();
  @Rule
  public TestName testName = new TestName();
  private Path locatorFolder;
  private Path server1Folder;
  private int locatorPort;
  private String startServer1Command;
  /**
   * Fixture: starts a locator and a server, creates two persistent regions
   * (one per disk store), puts one entry in each so there are values to
   * recover, then stops the server so the test can restart it.
   */
  @Before
  public void persistentRegionThatRequiresValueRecovery() throws IOException {
    locatorFolder = temporaryFolder.newFolder(LOCATOR_NAME).toPath().toAbsolutePath();
    server1Folder = temporaryFolder.newFolder(SERVER_1_NAME).toPath().toAbsolutePath();
    Path diskStore1Folder = temporaryFolder.newFolder(DISKSTORE_1).toPath().toAbsolutePath();
    Path diskStore2Folder = temporaryFolder.newFolder(DISKSTORE_2).toPath().toAbsolutePath();
    int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
    locatorPort = ports[0];
    String startLocatorCommand = String.join(" ",
        "start locator",
        "--name=" + LOCATOR_NAME,
        "--dir=" + locatorFolder,
        "--port=" + locatorPort,
        "--locators=localhost[" + locatorPort + "]");
    startServer1Command = String.join(" ",
        "start server",
        "--name=" + SERVER_1_NAME,
        "--dir=" + server1Folder,
        "--locators=localhost[" + locatorPort + "]",
        "--disable-default-server");
    String createDiskStore1 = String.join(" ",
        "create disk-store",
        "--name=" + DISKSTORE_1,
        "--dir=" + diskStore1Folder);
    String regionName = "myRegion";
    String createRegionCommand = String.join(" ",
        "create region",
        "--name=" + regionName,
        "--type=REPLICATE_PERSISTENT",
        "--disk-store=" + DISKSTORE_1);
    String createDiskStore2 = String.join(" ",
        "create disk-store",
        "--name=" + DISKSTORE_2,
        "--dir=" + diskStore2Folder);
    String regionNameTwo = "mySecondRegion";
    String createRegionTwoCommand = String.join(" ",
        "create region",
        "--name=" + regionNameTwo,
        "--type=REPLICATE_PERSISTENT",
        "--disk-store=" + DISKSTORE_2);
    String putCommand = String.join(" ",
        "put",
        "--region=" + regionName,
        "--key=James",
        "--value=Bond");
    String putCommandInRegionTwo = String.join(" ",
        "put",
        "--region=" + regionNameTwo,
        "--key=Derrick",
        "--value=Flint");
    gfshRule.execute(startLocatorCommand, startServer1Command, createDiskStore1,
        createRegionCommand, createDiskStore2, createRegionTwoCommand, putCommand,
        putCommandInRegionTwo);
    // Stop the server so each test starts it again and triggers recovery.
    String stopServer1Command = "stop server --dir=" + server1Folder;
    gfshRule.execute(stopServer1Command);
  }
  /** Stops the server and locator started by the fixture/test. */
  @After
  public void stopAllMembers() {
    String stopServer1Command = "stop server --dir=" + server1Folder;
    String stopLocatorCommand = "stop locator --dir=" + locatorFolder;
    gfshRule.execute(stopServer1Command, stopLocatorCommand);
  }
  /**
   * Restarts the server in a fresh working directory and asserts, from the
   * server log, that both "Recovered values for disk store ..." lines appear
   * and that the "startup completed" line comes after both of them.
   */
  @Test
  public void startupReportsOnlineOnlyAfterRedundancyRestored() throws IOException {
    String connectCommand = "connect --locator=localhost[" + locatorPort + "]";
    server1Folder =
        temporaryFolder.newFolder(SERVER_1_NAME + "secondfolder").toPath().toAbsolutePath();
    startServer1Command = String.join(" ",
        "start server",
        "--name=" + SERVER_1_NAME,
        "--dir=" + server1Folder,
        "--locators=localhost[" + locatorPort + "]");
    gfshRule.execute(connectCommand, startServer1Command);
    // The three log lines of interest: two value recoveries plus the startup
    // completion message; completion must be the last of the three.
    Pattern serverOnlinePattern =
        Pattern.compile("^\\[info .*].*Server " + SERVER_1_NAME + " startup completed in \\d+ ms");
    Pattern valuesRecoveredPattern =
        Pattern.compile(
            "^\\[info .*].* Recovered values for disk store " + DISKSTORE_1 + " with unique id .*");
    Pattern valuesRecoveredSecondRegionPattern =
        Pattern.compile(
            "^\\[info .*].* Recovered values for disk store " + DISKSTORE_2 + " with unique id .*");
    Path logFile = server1Folder.resolve(SERVER_1_NAME + ".log");
    await()
        .untilAsserted(() -> {
          final Predicate<String> isRelevantLine = valuesRecoveredPattern.asPredicate()
              .or(valuesRecoveredSecondRegionPattern.asPredicate())
              .or(serverOnlinePattern.asPredicate());
          final List<String> foundPatterns =
              Files.lines(logFile).filter(isRelevantLine)
                  .collect(Collectors.toList());
          assertThat(foundPatterns)
              .as("Log file " + logFile + " includes one line matching each of "
                  + valuesRecoveredPattern + ", " + valuesRecoveredSecondRegionPattern
                  + ", and "
                  + serverOnlinePattern)
              .hasSize(3);
          assertThat(foundPatterns)
              .as("lines in the log file")
              .withFailMessage("%n Expect line matching %s %n but was %s",
                  valuesRecoveredPattern.pattern(), foundPatterns)
              .anyMatch(valuesRecoveredPattern.asPredicate())
              .withFailMessage("%n Expect line matching %s %n but was %s",
                  valuesRecoveredSecondRegionPattern.pattern(), foundPatterns)
              .anyMatch(valuesRecoveredSecondRegionPattern.asPredicate())ehm;
          assertThat(foundPatterns.get(2))
              .as("Third matching Log line of " + foundPatterns)
              .matches(serverOnlinePattern.asPredicate(), serverOnlinePattern.pattern());
        });
  }
}
| smgoller/geode | geode-assembly/src/acceptanceTest/java/org/apache/geode/launchers/ServerStartupValueRecoveryNotificationTest.java | Java | apache-2.0 | 7,188 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using SuperSocket.SocketBase.Command;
using SuperWebSocket.Protocol;
namespace SuperWebSocket.Command
{
    /// <summary>
    /// The command that handles text messages sent as plain text in the
    /// hybi00 draft of the WebSocket protocol.
    /// </summary>
    /// <typeparam name="TWebSocketSession">The type of the web socket session.</typeparam>
    class Plain<TWebSocketSession> : CommandBase<TWebSocketSession, IWebSocketFragment>
        where TWebSocketSession : WebSocketSession<TWebSocketSession>, new()
    {
        /// <summary>
        /// Gets the name of the command (the plain-text opcode tag).
        /// </summary>
        public override string Name
        {
            get
            {
                return OpCode.PlainTag;
            }
        }
        /// <summary>
        /// Executes the command: forwards the fragment's text to the app server.
        /// </summary>
        /// <param name="session">The session.</param>
        /// <param name="requestInfo">The request info; expected to be a PlainFragment.</param>
        public override void ExecuteCommand(TWebSocketSession session, IWebSocketFragment requestInfo)
        {
            // 'as' yields null for a non-PlainFragment argument, so the access
            // below would throw — assumes the framework only routes plain
            // fragments to this command (dispatch by Name; confirm in router).
            var plainFragment = requestInfo as PlainFragment;
            session.AppServer.OnNewMessageReceived(session, plainFragment.Message);
        }
    }
}
| v-rawang/SuperWebSocket | SuperWebSocket/Command/Plain.cs | C# | apache-2.0 | 1,252 |
class Regldg < Formula
  desc "Regular expression grammar language dictionary generator"
  homepage "https://regldg.com/"
  url "https://regldg.com/regldg-1.0.0.tar.gz"
  sha256 "cd550592cc7a2f29f5882dcd9cf892875dd4e84840d8fe87133df9814c8003f1"
  bottle do
    sha256 cellar: :any_skip_relocation, arm64_big_sur: "d6456b6abf558106d2cae0459145d3070b07dc37d3757f84d325f11aaa7badf4"
    sha256 cellar: :any_skip_relocation, big_sur: "30966f99bf5fa0f3af539ce5b8eaca2666db734ac561d2b3a261532636b2a54c"
    sha256 cellar: :any_skip_relocation, catalina: "6c69006dc5eb93be0eb6b39cb396e59e8c09aa5d65f56a216cd19753a7f28232"
    sha256 cellar: :any_skip_relocation, mojave: "15f7e95f3d84d091a942e836ab9a27b3df2594e3f378da26f10371e7ba01be5c"
    sha256 cellar: :any_skip_relocation, high_sierra: "45950c0b432b227711570e3b9ea79fe9bf96b3239a062c5a736f9a3fdf294fb5"
    sha256 cellar: :any_skip_relocation, sierra: "26f12ca7e41b36a167d94f403b97557490fd1ad0ed1a2d4d0b30c86164ae9d39"
    sha256 cellar: :any_skip_relocation, el_capitan: "52c64d6766b68a1ed602d3878368109d3ac3e5e60d6fc14a4606518d14f6e678"
    sha256 cellar: :any_skip_relocation, yosemite: "c4157a77e2620b868b2dbbb3ebf126193b238c6a69d2a895420950d4203d7a17"
  end
  # Upstream ships a bare Makefile (no configure step); build in place and
  # install the single resulting binary.
  def install
    system "make"
    bin.install "regldg"
  end
  # Smoke test: run the binary on a simple pattern; `system` fails the test
  # if the command exits non-zero.
  test do
    system "#{bin}/regldg", "test"
  end
end
| JCount/homebrew-core | Formula/regldg.rb | Ruby | bsd-2-clause | 1,368 |
cask :v1 => 'cartao-de-cidadao' do
  version '1.26.2-1389'
  sha256 '28b2adf148451590dc73c991251798f3676ae784b176651f4f911ecf46a61e4f'
  url "http://www.cartaodecidadao.pt/ccsoftware/Cartao_de_Cidadao_#{version}_LION_MOUNTAIN_LION_MAVERICKS.dmg"
  name 'Cartão de Cidadão'
  homepage 'http://www.cartaodecidadao.pt/'
  # TODO: determine the real license; ':unknown' is a machine-generated placeholder.
  license :unknown
  pkg 'Cartao_de_Cidadao.pkg'
  # The vendor pkg installs its own uninstall script; delegate removal to it.
  uninstall :script => '/usr/local/bin/pteid_uninstall.sh'
end
| a1russell/homebrew-cask | Casks/cartao-de-cidadao.rb | Ruby | bsd-2-clause | 531 |
class Renameutils < Formula
  desc "Tools for file renaming"
  homepage "https://www.nongnu.org/renameutils/"
  url "https://download.savannah.gnu.org/releases/renameutils/renameutils-0.12.0.tar.gz"
  sha256 "cbd2f002027ccf5a923135c3f529c6d17fabbca7d85506a394ca37694a9eb4a3"
  revision 3
  bottle do
    cellar :any
    sha256 "2ec48c66fea9f53acf2b2ba3b726e6f7a9ff35778a3fb574fc59e7c6d01f681a" => :catalina
    sha256 "4f360267cba9842ef85e9cfbb1baaf73e9576dccfb924aade7f0ad6bbf0bf605" => :mojave
    sha256 "d25dc64bcc5d30e7695c65a93f7285849b57fdbdb18bf7d5e7bc22f0786cb14c" => :high_sierra
  end
  depends_on "coreutils"
  depends_on "readline" # Use instead of system libedit
  conflicts_with "ipmiutil", :because => "both install `icmd` binaries"
  # Use the GNU versions of certain system utilities. See:
  # https://trac.macports.org/ticket/24525
  # Patches rewritten at version 0.12.0 to handle file changes.
  # The fourth patch is new and fixes a Makefile syntax error that causes
  # make install to fail. Reported upstream via email and fixed in HEAD.
  # Remove patch #4 at version > 0.12.0. The first three should persist.
  patch do
    url "https://raw.githubusercontent.com/Homebrew/formula-patches/85fa66a9/renameutils/0.12.0.patch"
    sha256 "ed964edbaf388db40a787ffd5ca34d525b24c23d3589c68dc9aedd8b45160cd9"
  end
  def install
    system "./configure", "--disable-dependency-tracking",
                          "--prefix=#{prefix}",
                          "--with-packager=Homebrew"
    system "make"
    ENV.deparallelize # parallel install fails
    system "make", "install"
  end
  # Drive `icp` non-interactively: pipe the new-name suffix (".2") to stdin
  # and verify the copied file matches the original.
  test do
    (testpath/"test.txt").write "Hello World!"
    pipe_output("#{bin}/icp test.txt", ".2\n")
    assert_equal File.read("test.txt"), File.read("test.txt.2")
  end
end
| BrewTestBot/homebrew-core | Formula/renameutils.rb | Ruby | bsd-2-clause | 1,793 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PlatformEventDispatcher_h
#define PlatformEventDispatcher_h
#include "core/CoreExport.h"
#include "platform/heap/Handle.h"
#include "wtf/Vector.h"
namespace blink {
class PlatformEventController;
// Dispatches platform events to a set of registered controllers.
// Subclasses implement startListening()/stopListening() for their underlying
// event source (presumably toggled as controllers are added/removed — confirm
// in the .cpp).
class CORE_EXPORT PlatformEventDispatcher : public GarbageCollectedMixin {
public:
// Registers/unregisters a controller to be notified of events.
void addController(PlatformEventController*);
void removeController(PlatformEventController*);
DECLARE_VIRTUAL_TRACE();
protected:
PlatformEventDispatcher();
// Notifies all currently registered controllers.
void notifyControllers();
virtual void startListening() = 0;
virtual void stopListening() = 0;
private:
// NOTE(review): presumably compacts dead weak members after dispatch —
// confirm against the implementation.
void purgeControllers();
HeapHashSet<WeakMember<PlatformEventController>> m_controllers;
bool m_isDispatching;
bool m_isListening;
};
} // namespace blink
#endif // PlatformEventDispatcher_h
| ssaroha/node-webrtc | third_party/webrtc/include/chromium/src/third_party/WebKit/Source/core/frame/PlatformEventDispatcher.h | C | bsd-2-clause | 944 |
cask 'capsee' do
  version '1.2'
  sha256 'e78cdfe435cca259e0111a2b2131ad3be7d5ba6160cf69c8e7cbcc033eac2fc4'
  # The download file name embeds the version without dots (e.g. "12").
  url "http://www.threemagination.com/CapSee#{version.gsub('.','')}.zip"
  name 'CapSee'
  homepage 'http://www.threemagination.com/capsee/'
  license :gratis
  # The zip wraps a dmg which contains the actual app bundle.
  container :nested => "CapSee#{version.gsub('.','')}.dmg"
  app 'CapSee.app'
end
| mgryszko/homebrew-cask | Casks/capsee.rb | Ruby | bsd-2-clause | 352 |
// Copyright (c) 2003-present, Jodd Team (http://jodd.org)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package jodd.vtor.constraint;
import jodd.bean.BeanException;
import jodd.bean.BeanUtil;
import jodd.vtor.ValidationConstraint;
import jodd.vtor.ValidationConstraintContext;
import jodd.vtor.VtorException;
/**
 * Validation constraint: the checked value must equal the value of another
 * declared field of the same target bean (e.g. password confirmation).
 */
public class EqualToDeclaredFieldConstraint implements ValidationConstraint<EqualToDeclaredField> {

	public EqualToDeclaredFieldConstraint() {
	}

	public EqualToDeclaredFieldConstraint(String fieldName) {
		this.fieldName = fieldName;
	}

	// ---------------------------------------------------------------- properties

	// Name of the declared field whose value the validated value must equal.
	protected String fieldName;

	public String getFieldName() {
		return fieldName;
	}

	public void setFieldName(String fieldName) {
		this.fieldName = fieldName;
	}

	// ---------------------------------------------------------------- configure

	/**
	 * Reads the target field name from the annotation.
	 */
	@Override
	public void configure(EqualToDeclaredField annotation) {
		this.fieldName = annotation.value();
	}

	// ---------------------------------------------------------------- valid

	/**
	 * Validates the value against the configured field of the context target.
	 */
	@Override
	public boolean isValid(ValidationConstraintContext vcc, Object value) {
		return validate(vcc.getTarget(), value, fieldName);
	}

	/**
	 * Returns {@code true} when {@code value} equals the declared property
	 * {@code fieldName} of {@code target}. A {@code null} value is considered
	 * valid; a {@code null} target property fails validation.
	 *
	 * @throws VtorException if the declared property cannot be read
	 */
	public static boolean validate(Object target, Object value, String fieldName) {
		if (value == null) {
			return true;
		}
		Object valueToCompare;
		try {
			valueToCompare = BeanUtil.getDeclaredProperty(target, fieldName);
		} catch (BeanException bex) {
			throw new VtorException("Invalid value: " + fieldName, bex);
		}
		if (valueToCompare == null) {
			return false;
		}
		return value.equals(valueToCompare);
	}
} | mohanaraosv/jodd | jodd-vtor/src/main/java/jodd/vtor/constraint/EqualToDeclaredFieldConstraint.java | Java | bsd-2-clause | 2,970 |
cask :v1 => 'codekit' do
  version '2.3.7-18917'
  sha256 '5958b170026f37bb78b31a9251cd1ccafb2239d9a85e9729593948b9d00255fc'
  # The download URL uses only the build number (the part after the dash).
  url "http://incident57.com/codekit/files/codekit-#{version.sub(%r{.*-},'')}.zip"
  appcast 'https://incident57.com/codekit/appcast/ck2appcast.xml',
          :sha256 => 'fba4e9552ebabca2b700f6bdcdbb83132856d6c467f536250fc34beed9a8f104'
  name 'CodeKit'
  homepage 'http://incident57.com/codekit/'
  license :commercial
  app 'CodeKit.app'
end
| wmorin/homebrew-cask | Casks/codekit.rb | Ruby | bsd-2-clause | 472 |
/*
* Copyright (c) 2001-2003
* Fraunhofer Institute for Open Communication Systems (FhG Fokus).
* All rights reserved.
*
* Author: Harti Brandt <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Begemot: bsnmp/lib/snmppriv.h,v 1.9 2004/08/06 08:46:58 brandt Exp $
*
* Private functions.
*/
#include <sys/cdefs.h>
/* Encode a single variable binding (OID + value) into the ASN.1 buffer. */
enum asn_err snmp_binding_encode(struct asn_buf *, const struct snmp_value *);
/* Write the PDU header fields into the buffer ahead of the bindings. */
enum snmp_code snmp_pdu_encode_header(struct asn_buf *, struct snmp_pdu *);
/* Presumably back-patches the length octets once the PDU body has been
 * encoded and its final size is known -- confirm against snmp.c. */
enum snmp_code snmp_fix_encoding(struct asn_buf *, struct snmp_pdu *);
/* Parse a PDU header from the buffer; the remaining payload length is
 * returned through *lenp. */
enum asn_err snmp_parse_pdus_hdr(struct asn_buf *b, struct snmp_pdu *pdu,
    asn_len_t *lenp);
/* Compute the PDU's authentication digest into the caller-supplied buffer
 * (SNMPv3 USM authentication, presumably -- verify against callers). */
enum snmp_code snmp_pdu_calc_digest(const struct snmp_pdu *, uint8_t *);
/* Apply privacy encryption to the PDU's scoped payload. */
enum snmp_code snmp_pdu_encrypt(const struct snmp_pdu *);
/* Reverse of snmp_pdu_encrypt(): decrypt the PDU's scoped payload. */
enum snmp_code snmp_pdu_decrypt(const struct snmp_pdu *);
/* Fallbacks used when the caller specifies no agent address. */
#define DEFAULT_HOST "localhost"
#define DEFAULT_PORT "snmp"
#define DEFAULT_LOCAL "/var/run/snmp.sock"
| dplbsd/zcaplib | head/contrib/bsnmp/lib/snmppriv.h | C | bsd-2-clause | 2,196 |
# Copyright 2009-2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.ec2 import EC2Request
from requestbuilder import Arg, Filter, GenericTagFilter
class DescribeVolumes(EC2Request):
    """EC2 DescribeVolumes command: display information about EBS volumes.

    Positional arguments restrict output to specific volume IDs; the
    declared FILTERS map onto the service's server-side filter names.
    """

    DESCRIPTION = 'Display information about volumes'
    ARGS = [Arg('VolumeId', metavar='VOLUME', nargs='*',
                help='limit results to specific volumes')]
    FILTERS = [Filter('attachment.attach-time', help='attachment start time'),
               Filter('attachment.delete-on-termination', help='''whether the
                      volume will be deleted upon instance termination'''),
               Filter('attachment.device',
                      help='device node exposed to the instance'),
               Filter('attachment.instance-id',
                      help='ID of the instance the volume is attached to'),
               Filter('attachment.status', help='attachment state'),
               Filter('availability-zone'),
               Filter('create-time', help='creation time'),
               Filter('size', type=int, help='size in GiB'),
               Filter('snapshot-id',
                      help='snapshot from which the volume was created'),
               Filter('status'),
               Filter('tag-key', help='key of a tag assigned to the volume'),
               Filter('tag-value',
                      help='value of a tag assigned to the volume'),
               GenericTagFilter('tag:KEY',
                                help='specific tag key/value combination'),
               Filter(name='volume-id'),
               Filter(name='volume-type')]
    # Response elements that should be parsed as lists even when the
    # service returns a single item.
    LIST_TAGS = ['volumeSet', 'attachmentSet', 'tagSet']

    def print_result(self, result):
        """Print each volume in the parsed response.

        ``result`` is the parsed response dict; a response with no
        matching volumes may omit the ``volumeSet`` element entirely,
        in which case ``dict.get`` returns ``None`` -- fall back to an
        empty list so we do not try to iterate over ``None``.
        """
        for volume in result.get('volumeSet') or []:
            self.print_volume(volume)
| vasiliykochergin/euca2ools | euca2ools/commands/ec2/describevolumes.py | Python | bsd-2-clause | 3,066 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.