code (stringlengths 3-1.05M) | repo_name (stringlengths 4-116) | path (stringlengths 3-942) | language (stringclasses, 30 values) | license (stringclasses, 15 values) | size (int32, 3-1.05M) |
---|---|---|---|---|---|
using System;
using System.Collections.Generic;
using System.Data.Services.Common;
using System.IO;
using System.Linq;
using NuGet.Resources;
namespace NuGet {
[DataServiceKey("Id", "Version")]
[EntityPropertyMapping("LastUpdated", SyndicationItemProperty.Updated, SyndicationTextContentKind.Plaintext, keepInContent: false)]
[EntityPropertyMapping("Id", SyndicationItemProperty.Title, SyndicationTextContentKind.Plaintext, keepInContent: false)]
[EntityPropertyMapping("Authors", SyndicationItemProperty.AuthorName, SyndicationTextContentKind.Plaintext, keepInContent: false)]
[EntityPropertyMapping("Summary", SyndicationItemProperty.Summary, SyndicationTextContentKind.Plaintext, keepInContent: false)]
[CLSCompliant(false)]
public class DataServicePackage : IPackage {
private readonly LazyWithRecreate<IPackage> _package;
public DataServicePackage() {
_package = new LazyWithRecreate<IPackage>(DownloadAndVerifyPackage, ShouldUpdatePackage);
}
public string Id {
get;
set;
}
public string Version {
get;
set;
}
public string Title {
get;
set;
}
public string Authors {
get;
set;
}
public string Owners {
get;
set;
}
public Uri IconUrl {
get;
set;
}
public Uri LicenseUrl {
get;
set;
}
public Uri ProjectUrl {
get;
set;
}
public Uri ReportAbuseUrl {
get;
set;
}
public Uri GalleryDetailsUrl {
get;
set;
}
public Uri DownloadUrl {
get {
return Context.GetReadStreamUri(this);
}
}
public DateTimeOffset Published {
get;
set;
}
public DateTimeOffset LastUpdated {
get;
set;
}
public int DownloadCount {
get;
set;
}
public double Rating {
get;
set;
}
public int RatingsCount {
get;
set;
}
public bool RequireLicenseAcceptance {
get;
set;
}
public string Description {
get;
set;
}
public string Summary {
get;
set;
}
public string Language {
get;
set;
}
public string Tags {
get;
set;
}
public string Dependencies {
get;
set;
}
public string PackageHash {
get;
set;
}
internal string OldHash {
get;
set;
}
internal IDataServiceContext Context {
get;
set;
}
internal PackageDownloader Downloader { get; set; }
IEnumerable<string> IPackageMetadata.Authors {
get {
if (String.IsNullOrEmpty(Authors)) {
return Enumerable.Empty<string>();
}
return Authors.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);
}
}
IEnumerable<string> IPackageMetadata.Owners {
get {
if (String.IsNullOrEmpty(Owners)) {
return Enumerable.Empty<string>();
}
return Owners.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);
}
}
IEnumerable<PackageDependency> IPackageMetadata.Dependencies {
get {
if (String.IsNullOrEmpty(Dependencies)) {
return Enumerable.Empty<PackageDependency>();
}
return from d in Dependencies.Split('|')
let dependency = ParseDependency(d)
where dependency != null
select dependency;
}
}
Version IPackageMetadata.Version {
get {
if (Version != null) {
return new Version(Version);
}
return null;
}
}
public IEnumerable<IPackageAssemblyReference> AssemblyReferences {
get {
return _package.Value.AssemblyReferences;
}
}
public IEnumerable<FrameworkAssemblyReference> FrameworkAssemblies {
get {
return _package.Value.FrameworkAssemblies;
}
}
public IEnumerable<IPackageFile> GetFiles() {
return _package.Value.GetFiles();
}
public Stream GetStream() {
return _package.Value.GetStream();
}
public override string ToString() {
return this.GetFullName();
}
private bool ShouldUpdatePackage() {
return ShouldUpdatePackage(MachineCache.Default);
}
internal bool ShouldUpdatePackage(IPackageRepository repository) {
// If the hash changed re-download the package.
if (OldHash != PackageHash) {
return true;
}
// If the package hasn't been cached, then re-download the package.
IPackage package = GetPackage(repository);
if (package == null) {
return true;
}
// If the cached package hash isn't the same as incoming package hash
// then re-download the package.
string cachedHash = package.GetHash();
if (cachedHash != PackageHash) {
return true;
}
return false;
}
private IPackage DownloadAndVerifyPackage() {
return DownloadAndVerifyPackage(MachineCache.Default);
}
internal IPackage DownloadAndVerifyPackage(IPackageRepository repository) {
if (String.IsNullOrEmpty(PackageHash)) {
throw new InvalidOperationException(NuGetResources.PackageContentsVerifyError);
}
IPackage package = null;
// If OldHash is null, we're looking at a new instance of the data service package.
// The package might be stored in the cache, so we're going to try looking there before attempting a download.
if (OldHash == null) {
package = GetPackage(repository);
}
if (package == null) {
byte[] hashBytes = Convert.FromBase64String(PackageHash);
package = Downloader.DownloadPackage(DownloadUrl, hashBytes, this);
// Add the package to the cache
repository.AddPackage(package);
// Clear the cache for this package
ZipPackage.ClearCache(package);
}
// Update the hash
OldHash = PackageHash;
return package;
}
/// <summary>
/// Parses a dependency from the feed in the format:
/// id:versionSpec or id
/// </summary>
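/// <example>
/// Illustrative inputs (assumed, not taken from the feed spec itself): "A:[1.0,2.0)" yields
/// id "A" with the parsed version spec; "B" (no ':') yields id "B" with a null version spec.
/// </example>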
private static PackageDependency ParseDependency(string value) {
if (String.IsNullOrWhiteSpace(value)) {
return null;
}
string[] tokens = value.Trim().Split(new[] { ':' }, StringSplitOptions.RemoveEmptyEntries);
if (tokens.Length == 0) {
return null;
}
// Trim the id
string id = tokens[0].Trim();
IVersionSpec versionSpec = null;
if (tokens.Length > 1) {
// Attempt to parse the version
VersionUtility.TryParseVersionSpec(tokens[1], out versionSpec);
}
return new PackageDependency(id, versionSpec);
}
private IPackage GetPackage(IPackageRepository repository) {
return repository.FindPackage(Id, ((IPackageMetadata)this).Version);
}
/// <summary>
/// We can't use the built-in Lazy for 2 reasons:
/// 1. It caches the exception if any is thrown from the creator func (this means it won't retry calling the function).
/// 2. There's no way to force a retry or expiration of the cache.
/// </summary>
private class LazyWithRecreate<T> {
private readonly Func<T> _creator;
private readonly Func<bool> _shouldRecreate;
private T _value;
private bool _isValueCreated;
public LazyWithRecreate(Func<T> creator, Func<bool> shouldRecreate) {
_creator = creator;
_shouldRecreate = shouldRecreate;
}
public T Value {
get {
if (_shouldRecreate() || !_isValueCreated) {
_value = _creator();
_isValueCreated = true;
}
return _value;
}
}
}
}
}
| grendello/nuget | src/Core/Packages/DataServicePackage.cs | C# | apache-2.0 | 9,582 |
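The private LazyWithRecreate<T> helper in the file above is what lets DataServicePackage re-download a package when its hash changes. A minimal sketch of the same idea in Java (illustrative only; the class name RecreatableLazy is made up and, like the original, it is not thread-safe):

```java
import java.util.function.BooleanSupplier;
import java.util.function.Supplier;

final class RecreatableLazy<T> {
    private final Supplier<T> creator;
    private final BooleanSupplier shouldRecreate;
    private T value;
    private boolean created;

    RecreatableLazy(Supplier<T> creator, BooleanSupplier shouldRecreate) {
        this.creator = creator;
        this.shouldRecreate = shouldRecreate;
    }

    T get() {
        // Re-run the factory when the value is stale or was never produced.
        // If creator.get() throws, nothing is cached, so the next call simply retries,
        // unlike a memoizing lazy that would cache and rethrow the first failure.
        if (shouldRecreate.getAsBoolean() || !created) {
            value = creator.get();
            created = true;
        }
        return value;
    }
}
```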
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.tools.common;
import javax.xml.namespace.QName;
public final class ToolConstants {
//public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/common/toolspec/toolspecs/";
public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/";
public static final String SCHEMA_URI = "http://www.w3.org/2001/XMLSchema";
public static final String XML_NAMESPACE_URI = "http://www.w3.org/XML/1998/namespace";
public static final String WSDL_NAMESPACE_URI = "http://schemas.xmlsoap.org/wsdl/";
public static final String WSA_NAMESPACE_URI = "http://www.w3.org/2005/08/addressing";
/**
* Tools permit caller to pass in additional bean definitions.
*/
public static final String CFG_BEAN_CONFIG = "beans";
public static final String DEFAULT_TEMP_DIR = "gen_tmp";
public static final String CFG_OUTPUTDIR = "outputdir";
public static final String CFG_OUTPUTFILE = "outputfile";
public static final String CFG_WSDLURL = "wsdlurl";
public static final String CFG_WSDLLOCATION = "wsdlLocation";
public static final String CFG_WSDLLIST = "wsdlList";
public static final String CFG_NAMESPACE = "namespace";
public static final String CFG_VERBOSE = "verbose";
public static final String CFG_PORT = "port";
public static final String CFG_BINDING = "binding";
public static final String CFG_AUTORESOLVE = "autoNameResolution";
public static final String CFG_WEBSERVICE = "webservice";
public static final String CFG_SERVER = "server";
public static final String CFG_CLIENT = "client";
public static final String CFG_ALL = "all";
public static final String CFG_IMPL = "impl";
public static final String CFG_PACKAGENAME = "packagename";
public static final String CFG_JSPACKAGEPREFIX = "jspackageprefix";
public static final String CFG_NINCLUDE = "ninclude";
public static final String CFG_NEXCLUDE = "nexclude";
public static final String CFG_CMD_ARG = "args";
public static final String CFG_INSTALL_DIR = "install.dir";
public static final String CFG_PLATFORM_VERSION = "platform.version";
public static final String CFG_COMPILE = "compile";
public static final String CFG_CLASSDIR = "classdir";
public static final String CFG_EXTRA_SOAPHEADER = "exsoapheader";
public static final String CFG_DEFAULT_NS = "defaultns";
public static final String CFG_DEFAULT_EX = "defaultex";
public static final String CFG_NO_TYPES = "notypes";
public static final String CFG_XJC_ARGS = "xjc";
public static final String CFG_CATALOG = "catalog";
public static final String CFG_BAREMETHODS = "bareMethods";
public static final String CFG_ASYNCMETHODS = "asyncMethods";
public static final String CFG_MIMEMETHODS = "mimeMethods";
public static final String CFG_DEFAULT_VALUES = "defaultValues";
public static final String CFG_JAVASCRIPT_UTILS = "javascriptUtils";
public static final String CFG_VALIDATE_WSDL = "validate";
public static final String CFG_CREATE_XSD_IMPORTS = "createxsdimports";
/**
* Front-end selection command-line option to java2ws.
*/
public static final String CFG_FRONTEND = "frontend";
public static final String CFG_DATABINDING = "databinding";
public static final String DEFAULT_ADDRESS = "http://localhost:9090";
// WSDL2Java Constants
public static final String CFG_TYPES = "types";
public static final String CFG_INTERFACE = "interface";
public static final String CFG_NIGNOREEXCLUDE = "nignoreexclude";
public static final String CFG_ANT = "ant";
public static final String CFG_LIB_REF = "library.references";
public static final String CFG_ANT_PROP = "ant.prop";
public static final String CFG_NO_ADDRESS_BINDING = "noAddressBinding";
public static final String CFG_ALLOW_ELEMENT_REFS = "allowElementReferences";
public static final String CFG_RESERVE_NAME = "reserveClass";
public static final String CFG_FAULT_SERIAL_VERSION_UID = "faultSerialVersionUID";
public static final String CFG_EXCEPTION_SUPER = "exceptionSuper";
public static final String CFG_MARK_GENERATED = "mark-generated";
//Internal Flag to generate
public static final String CFG_IMPL_CLASS = "implClass";
public static final String CFG_GEN_CLIENT = "genClient";
public static final String CFG_GEN_SERVER = "genServer";
public static final String CFG_GEN_IMPL = "genImpl";
public static final String CFG_GEN_TYPES = "genTypes";
public static final String CFG_GEN_SEI = "genSEI";
public static final String CFG_GEN_ANT = "genAnt";
public static final String CFG_GEN_SERVICE = "genService";
public static final String CFG_GEN_OVERWRITE = "overwrite";
public static final String CFG_GEN_FAULT = "genFault";
public static final String CFG_GEN_NEW_ONLY = "newonly";
// Java2WSDL Constants
public static final String CFG_CLASSPATH = "classpath";
public static final String CFG_TNS = "tns";
public static final String CFG_SERVICENAME = "servicename";
public static final String CFG_SCHEMANS = "schemans";
public static final String CFG_USETYPES = "usetypes";
public static final String CFG_CLASSNAME = "classname";
public static final String CFG_PORTTYPE = "porttype";
public static final String CFG_SOURCEDIR = "sourcedir";
public static final String CFG_WSDL = "wsdl";
public static final String CFG_WRAPPERBEAN = "wrapperbean";
// WSDL2Service Constants
public static final String CFG_ADDRESS = "address";
public static final String CFG_TRANSPORT = "transport";
public static final String CFG_SERVICE = "service";
public static final String CFG_BINDING_ATTR = "attrbinding";
public static final String CFG_SOAP12 = "soap12";
// WSDL2Soap Constants
public static final String CFG_STYLE = "style";
public static final String CFG_USE = "use";
// XSD2WSDL Constants
public static final String CFG_XSDURL = "xsdurl";
public static final String CFG_NAME = "name";
// WsdlValidator
public static final String CFG_DEEP = "deep";
public static final String CFG_SCHEMA_DIR = "schemaDir";
public static final String CFG_SCHEMA_URL = "schemaURL";
public static final String CXF_SCHEMA_DIR = "cxf_schema_dir";
public static final String CXF_SCHEMAS_DIR_INJAR = "schemas/wsdl/";
public static final String CFG_SUPPRESS_WARNINGS = "suppressWarnings";
// WSDL2Java Processor Constants
public static final String SEI_GENERATOR = "sei.generator";
public static final String FAULT_GENERATOR = "fault.generator";
public static final String TYPE_GENERATOR = "type.generator";
public static final String IMPL_GENERATOR = "impl.generator";
public static final String SVR_GENERATOR = "svr.generator";
public static final String CLT_GENERATOR = "clt.generator";
public static final String SERVICE_GENERATOR = "service.generator";
public static final String ANT_GENERATOR = "ant.generator";
public static final String HANDLER_GENERATOR = "handler.generator";
// Binding namespace
public static final String NS_JAXWS_BINDINGS = "http://java.sun.com/xml/ns/jaxws";
public static final String NS_JAXB_BINDINGS = "http://java.sun.com/xml/ns/jaxb";
public static final QName JAXWS_BINDINGS = new QName(NS_JAXWS_BINDINGS, "bindings");
public static final QName JAXB_BINDINGS = new QName(NS_JAXB_BINDINGS, "bindings");
public static final String JAXWS_BINDINGS_WSDL_LOCATION = "wsdlLocation";
public static final String JAXWS_BINDING_NODE = "node";
public static final String JAXWS_BINDING_VERSION = "version";
public static final String ASYNC_METHOD_SUFFIX = "Async";
public static final String HANDLER_CHAINS_URI = "http://java.sun.com/xml/ns/javaee";
public static final String HANDLER_CHAIN = "handler-chain";
public static final String HANDLER_CHAINS = "handler-chains";
//public static final String RAW_JAXB_MODEL = "rawjaxbmodel";
// JMS address
public static final String NS_JMS_ADDRESS = "http://cxf.apache.org/transports/jms";
public static final QName JMS_ADDRESS = new QName(NS_JMS_ADDRESS, "address");
public static final String JMS_ADDR_DEST_STYLE = "destinationStyle";
public static final String JMS_ADDR_JNDI_URL = "jndiProviderURL";
public static final String JMS_ADDR_JNDI_FAC = "jndiConnectionFactoryName";
public static final String JMS_ADDR_JNDI_DEST = "jndiDestinationName";
public static final String JMS_ADDR_MSG_TYPE = "messageType";
public static final String JMS_ADDR_INIT_CTX = "initialContextFactory";
public static final String JMS_ADDR_SUBSCRIBER_NAME = "durableSubscriberName";
public static final String JMS_ADDR_MSGID_TO_CORRID = "useMessageIDAsCorrelationID";
// XML Binding
public static final String XMLBINDING_ROOTNODE = "rootNode";
public static final String XMLBINDING_HTTP_LOCATION = "location";
public static final String NS_XML_FORMAT = "http://cxf.apache.org/bindings/xformat";
public static final String XML_FORMAT_PREFIX = "xformat";
public static final String NS_XML_HTTP = "http://schemas.xmlsoap.org/wsdl/http/";
public static final String XML_HTTP_PREFIX = "http";
public static final QName XML_HTTP_ADDRESS = new QName(NS_XML_HTTP, "address");
public static final QName XML_FORMAT = new QName(NS_XML_FORMAT, "body");
public static final QName XML_BINDING_FORMAT = new QName(NS_XML_FORMAT, "binding");
public static final String XML_SCHEMA_COLLECTION = "xmlSchemaCollection";
public static final String PORTTYPE_MAP = "portTypeMap";
public static final String SCHEMA_TARGET_NAMESPACES = "schemaTargetNameSpaces";
public static final String WSDL_DEFINITION = "wsdlDefinition";
public static final String IMPORTED_DEFINITION = "importedDefinition";
public static final String IMPORTED_PORTTYPE = "importedPortType";
public static final String IMPORTED_SERVICE = "importedService";
public static final String BINDING_GENERATOR = "BindingGenerator";
// Tools framework
public static final String FRONTEND_PLUGIN = "frontend";
public static final String DATABINDING_PLUGIN = "databinding";
public static final String RUNTIME_DATABINDING_CLASS = "databinding-class";
public static final String CFG_WSDL_VERSION = "wsdlversion";
// Suppress the code generation; in this case you can just get the generated code model
public static final String CFG_SUPPRESS_GEN = "suppress";
public static final String DEFAULT_PACKAGE_NAME = "defaultnamespace";
//For java2ws tool
public static final String SERVICE_LIST = "serviceList";
public static final String GEN_FROM_SEI = "genFromSEI";
public static final String JAXWS_FRONTEND = "jaxws";
public static final String SIMPLE_FRONTEND = "simple";
public static final String JAXB_DATABINDING = "jaxb";
public static final String AEGIS_DATABINDING = "aegis";
//For Simple FrontEnd
public static final String SEI_CLASS = "seiClass";
public static final String IMPL_CLASS = "implClass";
public static final String SERVICE_NAME = "serviceName";
public static final String PORT_NAME = "portName";
public static final String DEFAULT_DATA_BINDING_NAME = "jaxb";
public static final String DATABIND_BEAN_NAME_SUFFIX = "DatabindingBean";
public static final String CLIENT_CLASS = "clientClass";
public static final String SERVER_CLASS = "serverClass";
public static final String CFG_JSPREFIXMAP = "javascriptPrefixMap";
private ToolConstants() {
//utility class
}
}
| zzsoszz/webservice_gzdx | opensource_cxf/org/apache/cxf/tools/common/ToolConstants.java | Java | apache-2.0 | 12,534 |
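The QName constants in ToolConstants (JAXWS_BINDINGS, JAXB_BINDINGS, JMS_ADDRESS, XML_HTTP_ADDRESS, ...) pair a namespace URI with a local element name. A small, CXF-independent sketch of how such a constant is built and matched, using only the JDK's javax.xml.namespace.QName (the class name QNameExample is invented for illustration):

```java
import javax.xml.namespace.QName;

public class QNameExample {
    // Mirrors ToolConstants.JAXWS_BINDINGS; the real constant lives in CXF.
    static final QName JAXWS_BINDINGS =
            new QName("http://java.sun.com/xml/ns/jaxws", "bindings");

    public static void main(String[] args) {
        // QName equality compares namespace URI and local part, so an element name
        // read from a WSDL or binding file can be matched directly against the constant.
        QName parsed = new QName("http://java.sun.com/xml/ns/jaxws", "bindings");
        System.out.println(JAXWS_BINDINGS.equals(parsed));  // true
        System.out.println(JAXWS_BINDINGS.getLocalPart());  // bindings
    }
}
```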
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_75) on Tue May 19 17:15:49 PDT 2015 -->
<title>Uses of Class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter (Hadoop 2.6.0-mr1-cdh5.4.2 API)</title>
<meta name="date" content="2015-05-19">
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter (Hadoop 2.6.0-mr1-cdh5.4.2 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/hadoop/mapred/lib/db/DBOutputFormat.DBRecordWriter.html" title="class in org.apache.hadoop.mapred.lib.db">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/hadoop/mapred/lib/db/class-use/DBOutputFormat.DBRecordWriter.html" target="_top">Frames</a></li>
<li><a href="DBOutputFormat.DBRecordWriter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter" class="title">Uses of Class<br>org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter</h2>
</div>
<div class="classUseContainer">No usage of org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/hadoop/mapred/lib/db/DBOutputFormat.DBRecordWriter.html" title="class in org.apache.hadoop.mapred.lib.db">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/hadoop/mapred/lib/db/class-use/DBOutputFormat.DBRecordWriter.html" target="_top">Frames</a></li>
<li><a href="DBOutputFormat.DBRecordWriter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2009 The Apache Software Foundation</small></p>
</body>
</html>
| ZhangXFeng/hadoop | share/doc/hadoop-mapreduce1/api/org/apache/hadoop/mapred/lib/db/class-use/DBOutputFormat.DBRecordWriter.html | HTML | apache-2.0 | 4,643 |
<!doctype html>
<html class="default no-js">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>PriceComparable | GDAX Trading Toolkit API Reference</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="../assets/css/main.css">
</head>
<body>
<header>
<div class="tsd-page-toolbar">
<div class="container">
<div class="table-wrap">
<div class="table-cell" id="tsd-search" data-index="../assets/js/search.js" data-base="..">
<div class="field">
<label for="tsd-search-field" class="tsd-widget search no-caption">Search</label>
<input id="tsd-search-field" type="text" />
</div>
<ul class="results">
<li class="state loading">Preparing search index...</li>
<li class="state failure">The search index is not available</li>
</ul>
<a href="../index.html" class="title">GDAX Trading Toolkit API Reference</a>
</div>
<div class="table-cell" id="tsd-widgets">
<div id="tsd-filter">
<a href="#" class="tsd-widget options no-caption" data-toggle="options">Options</a>
<div class="tsd-filter-group">
<div class="tsd-select" id="tsd-filter-visibility">
<span class="tsd-select-label">All</span>
<ul class="tsd-select-list">
<li data-value="public">Public</li>
<li data-value="protected">Public/Protected</li>
<li data-value="private" class="selected">All</li>
</ul>
</div>
<input type="checkbox" id="tsd-filter-inherited" checked />
<label class="tsd-widget" for="tsd-filter-inherited">Inherited</label>
<input type="checkbox" id="tsd-filter-externals" checked />
<label class="tsd-widget" for="tsd-filter-externals">Externals</label>
<input type="checkbox" id="tsd-filter-only-exported" />
<label class="tsd-widget" for="tsd-filter-only-exported">Only exported</label>
</div>
</div>
<a href="#" class="tsd-widget menu no-caption" data-toggle="menu">Menu</a>
</div>
</div>
</div>
</div>
<div class="tsd-page-title">
<div class="container">
<ul class="tsd-breadcrumb">
<li>
<a href="../globals.html">Globals</a>
</li>
<li>
<a href="../modules/_src_lib_orderbook_.html">"src/lib/Orderbook"</a>
</li>
<li>
<a href="_src_lib_orderbook_.pricecomparable.html">PriceComparable</a>
</li>
</ul>
<h1>Interface PriceComparable</h1>
</div>
</div>
</header>
<div class="container container-main">
<div class="row">
<div class="col-8 col-content">
<section class="tsd-panel tsd-hierarchy">
<h3>Hierarchy</h3>
<ul class="tsd-hierarchy">
<li>
<span class="target">PriceComparable</span>
<ul class="tsd-hierarchy">
<li>
<a href="_src_lib_orderbook_.pricelevel.html" class="tsd-signature-type">PriceLevel</a>
</li>
</ul>
</li>
</ul>
</section>
<section class="tsd-panel-group tsd-index-group">
<h2>Index</h2>
<section class="tsd-panel tsd-index-panel">
<div class="tsd-index-content">
<section class="tsd-index-section ">
<h3>Properties</h3>
<ul class="tsd-index-list">
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="_src_lib_orderbook_.pricecomparable.html#price" class="tsd-kind-icon">price</a></li>
</ul>
</section>
</div>
</section>
</section>
<section class="tsd-panel-group tsd-member-group ">
<h2>Properties</h2>
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="price" class="tsd-anchor"></a>
<h3>price</h3>
<div class="tsd-signature tsd-kind-icon">price<span class="tsd-signature-symbol">:</span> <a href="../modules/_src_lib_types_.html#bigjs" class="tsd-signature-type">BigJS</a></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/coinbase/gdax-tt/blob/a3d18bb/src/lib/Orderbook.ts#L29">src/lib/Orderbook.ts:29</a></li>
</ul>
</aside>
</section>
</section>
</div>
<div class="col-4 col-menu menu-sticky-wrap menu-highlight">
<nav class="tsd-navigation primary">
<ul>
<li class="globals ">
<a href="../globals.html"><em>Globals</em></a>
</li>
<li class="current tsd-kind-external-module">
<a href="../modules/_src_lib_orderbook_.html">"src/lib/<wbr>Orderbook"</a>
</li>
</ul>
</nav>
<nav class="tsd-navigation secondary menu-sticky">
<ul class="before-current">
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.basicorder.html" class="tsd-kind-icon">Basic<wbr>Order</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.cumulativepricelevel.html" class="tsd-kind-icon">Cumulative<wbr>Price<wbr>Level</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.level3order.html" class="tsd-kind-icon">Level3<wbr>Order</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.liveorder.html" class="tsd-kind-icon">Live<wbr>Order</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.orderbook.html" class="tsd-kind-icon">Orderbook</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.orderbookstate.html" class="tsd-kind-icon">Orderbook<wbr>State</a>
</li>
</ul>
<ul class="current">
<li class="current tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.pricecomparable.html" class="tsd-kind-icon">Price<wbr>Comparable</a>
<ul>
<li class=" tsd-kind-property tsd-parent-kind-interface">
<a href="_src_lib_orderbook_.pricecomparable.html#price" class="tsd-kind-icon">price</a>
</li>
</ul>
</li>
</ul>
<ul class="after-current">
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.pricelevel.html" class="tsd-kind-icon">Price<wbr>Level</a>
</li>
<li class=" tsd-kind-interface tsd-parent-kind-external-module">
<a href="_src_lib_orderbook_.pricelevelwithorders.html" class="tsd-kind-icon">Price<wbr>Level<wbr>With<wbr>Orders</a>
</li>
<li class=" tsd-kind-function tsd-parent-kind-external-module">
<a href="../modules/_src_lib_orderbook_.html#pricelevelfactory" class="tsd-kind-icon">Price<wbr>Level<wbr>Factory</a>
</li>
<li class=" tsd-kind-function tsd-parent-kind-external-module tsd-has-type-parameter">
<a href="../modules/_src_lib_orderbook_.html#pricetreefactory" class="tsd-kind-icon">Price<wbr>Tree<wbr>Factory</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<footer class="with-border-bottom">
<div class="container">
<h2>Legend</h2>
<div class="tsd-legend-group">
<ul class="tsd-legend">
<li class="tsd-kind-module"><span class="tsd-kind-icon">Module</span></li>
<li class="tsd-kind-object-literal"><span class="tsd-kind-icon">Object literal</span></li>
<li class="tsd-kind-variable"><span class="tsd-kind-icon">Variable</span></li>
<li class="tsd-kind-function"><span class="tsd-kind-icon">Function</span></li>
<li class="tsd-kind-function tsd-has-type-parameter"><span class="tsd-kind-icon">Function with type parameter</span></li>
<li class="tsd-kind-index-signature"><span class="tsd-kind-icon">Index signature</span></li>
<li class="tsd-kind-type-alias"><span class="tsd-kind-icon">Type alias</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-enum"><span class="tsd-kind-icon">Enumeration</span></li>
<li class="tsd-kind-enum-member"><span class="tsd-kind-icon">Enumeration member</span></li>
<li class="tsd-kind-property tsd-parent-kind-enum"><span class="tsd-kind-icon">Property</span></li>
<li class="tsd-kind-method tsd-parent-kind-enum"><span class="tsd-kind-icon">Method</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-interface"><span class="tsd-kind-icon">Interface</span></li>
<li class="tsd-kind-interface tsd-has-type-parameter"><span class="tsd-kind-icon">Interface with type parameter</span></li>
<li class="tsd-kind-constructor tsd-parent-kind-interface"><span class="tsd-kind-icon">Constructor</span></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><span class="tsd-kind-icon">Property</span></li>
<li class="tsd-kind-method tsd-parent-kind-interface"><span class="tsd-kind-icon">Method</span></li>
<li class="tsd-kind-index-signature tsd-parent-kind-interface"><span class="tsd-kind-icon">Index signature</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-class"><span class="tsd-kind-icon">Class</span></li>
<li class="tsd-kind-class tsd-has-type-parameter"><span class="tsd-kind-icon">Class with type parameter</span></li>
<li class="tsd-kind-constructor tsd-parent-kind-class"><span class="tsd-kind-icon">Constructor</span></li>
<li class="tsd-kind-property tsd-parent-kind-class"><span class="tsd-kind-icon">Property</span></li>
<li class="tsd-kind-method tsd-parent-kind-class"><span class="tsd-kind-icon">Method</span></li>
<li class="tsd-kind-accessor tsd-parent-kind-class"><span class="tsd-kind-icon">Accessor</span></li>
<li class="tsd-kind-index-signature tsd-parent-kind-class"><span class="tsd-kind-icon">Index signature</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-constructor tsd-parent-kind-class tsd-is-inherited"><span class="tsd-kind-icon">Inherited constructor</span></li>
<li class="tsd-kind-property tsd-parent-kind-class tsd-is-inherited"><span class="tsd-kind-icon">Inherited property</span></li>
<li class="tsd-kind-method tsd-parent-kind-class tsd-is-inherited"><span class="tsd-kind-icon">Inherited method</span></li>
<li class="tsd-kind-accessor tsd-parent-kind-class tsd-is-inherited"><span class="tsd-kind-icon">Inherited accessor</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-property tsd-parent-kind-class tsd-is-protected"><span class="tsd-kind-icon">Protected property</span></li>
<li class="tsd-kind-method tsd-parent-kind-class tsd-is-protected"><span class="tsd-kind-icon">Protected method</span></li>
<li class="tsd-kind-accessor tsd-parent-kind-class tsd-is-protected"><span class="tsd-kind-icon">Protected accessor</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-property tsd-parent-kind-class tsd-is-private"><span class="tsd-kind-icon">Private property</span></li>
<li class="tsd-kind-method tsd-parent-kind-class tsd-is-private"><span class="tsd-kind-icon">Private method</span></li>
<li class="tsd-kind-accessor tsd-parent-kind-class tsd-is-private"><span class="tsd-kind-icon">Private accessor</span></li>
</ul>
<ul class="tsd-legend">
<li class="tsd-kind-property tsd-parent-kind-class tsd-is-static"><span class="tsd-kind-icon">Static property</span></li>
<li class="tsd-kind-call-signature tsd-parent-kind-class tsd-is-static"><span class="tsd-kind-icon">Static method</span></li>
</ul>
</div>
</div>
</footer>
<div class="container tsd-generator">
<p>Generated using <a href="http://typedoc.org/" target="_blank">TypeDoc</a></p>
</div>
<div class="overlay"></div>
<script src="../assets/js/main.js"></script>
<script>if (location.protocol == 'file:') document.write('<script src="../assets/js/search.js"><' + '/script>');</script>
</body>
</html>
| tianyangj/gdax-tt | docs/apiref/interfaces/_src_lib_orderbook_.pricecomparable.html | HTML | apache-2.0 | 11,761 |
/**
* Copyright Pravega Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.pravega.client.connection.impl;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslHandler;
import io.pravega.client.ClientConfig;
import io.pravega.shared.protocol.netty.CommandDecoder;
import io.pravega.shared.protocol.netty.CommandEncoder;
import io.pravega.shared.protocol.netty.ConnectionFailedException;
import io.pravega.shared.protocol.netty.FailingReplyProcessor;
import io.pravega.shared.protocol.netty.PravegaNodeUri;
import io.pravega.shared.protocol.netty.WireCommands;
import io.pravega.test.common.AssertExtensions;
import io.pravega.test.common.SecurityConfigDefaults;
import io.pravega.test.common.TestUtils;
import java.io.File;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLParameters;
import lombok.Cleanup;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import static io.pravega.shared.metrics.MetricNotifier.NO_OP_METRIC_NOTIFIER;
import static io.pravega.shared.protocol.netty.WireCommands.MAX_WIRECOMMAND_SIZE;
import static io.pravega.test.common.AssertExtensions.assertThrows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
public class ConnectionPoolingTest {
@Rule
public Timeout globalTimeout = Timeout.seconds(1000);
boolean ssl = false;
private Channel serverChannel;
private int port;
private final String seg = "Segment-0";
private final long offset = 1234L;
private final int length = 1024;
private final String data = "data";
private final Function<Long, WireCommands.ReadSegment> readRequestGenerator = id ->
new WireCommands.ReadSegment(seg, offset, length, "", id);
private final Function<Long, WireCommands.SegmentRead> readResponseGenerator = id ->
new WireCommands.SegmentRead(seg, offset, true, false, Unpooled.wrappedBuffer(data.getBytes(StandardCharsets.UTF_8)), id);
private class EchoServerHandler extends ChannelInboundHandlerAdapter {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object message) {
if (message instanceof WireCommands.Hello) {
ctx.write(message);
ctx.flush();
} else if (message instanceof WireCommands.ReadSegment) {
WireCommands.ReadSegment msg = (WireCommands.ReadSegment) message;
ctx.write(readResponseGenerator.apply(msg.getRequestId()));
ctx.flush();
}
}
}
@Before
public void setUp() throws Exception {
// Configure SSL.
port = TestUtils.getAvailableListenPort();
final SslContext sslCtx;
if (ssl) {
try {
sslCtx = SslContextBuilder.forServer(
new File(SecurityConfigDefaults.TLS_SERVER_CERT_PATH),
new File(SecurityConfigDefaults.TLS_SERVER_PRIVATE_KEY_PATH))
.build();
} catch (SSLException e) {
throw new RuntimeException(e);
}
} else {
sslCtx = null;
}
boolean nio = false;
EventLoopGroup bossGroup;
EventLoopGroup workerGroup;
try {
bossGroup = new EpollEventLoopGroup(1);
workerGroup = new EpollEventLoopGroup();
} catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) {
nio = true;
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
}
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
.channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class)
.option(ChannelOption.SO_BACKLOG, 100)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
SslHandler handler = sslCtx.newHandler(ch.alloc());
SSLEngine sslEngine = handler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm("LDAPS");
sslEngine.setSSLParameters(sslParameters);
p.addLast(handler);
}
p.addLast(new CommandEncoder(null, NO_OP_METRIC_NOTIFIER),
new LengthFieldBasedFrameDecoder(MAX_WIRECOMMAND_SIZE, 4, 4),
new CommandDecoder(),
new EchoServerHandler());
}
});
// Start the server.
serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel();
}
@After
public void tearDown() throws Exception {
serverChannel.close();
serverChannel.closeFuture();
}
@Test
public void testNonPooling() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
// create a second connection; since no flow is used, this opens a separate socket instead of reusing the pooled one.
@Cleanup
ClientConnection connection2 = connectionPool.getClientConnection(new PravegaNodeUri("localhost", port), rp).join();
Flow flow2 = new Flow(2, 0);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
assertEquals(2, factory.getOpenSocketCount());
// send data over connection1 and verify.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
// close a client connection, this should not close the channel.
connection2.close();
assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong())));
// verify we are able to send data over connection1.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// close connection1
connection1.close();
assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong())));
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
@Test
public void testConnectionPooling() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
// create a second connection, since the max number of connections is 1 this should reuse the same connection.
Flow flow2 = new Flow(2, 0);
CompletableFuture<ClientConnection> cf = new CompletableFuture<>();
connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp, cf);
@Cleanup
ClientConnection connection2 = cf.join();
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
assertEquals(1, factory.getOpenSocketCount());
// send data over connection1 and verify.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
// close a client connection, this should not close the channel.
connection2.close();
assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong())));
// verify we are able to send data over connection1.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// close connection1
connection1.close();
assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong())));
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
@Test
public void testPoolBalancing() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(2)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, factory.getOpenSocketCount());
// create a second connection, since the max number of connections is 2 this should not reuse the same connection.
Flow flow2 = new Flow(2, 0);
@Cleanup
ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertNotEquals(((FlowClientConnection) connection1).getChannel(),
((FlowClientConnection) connection2).getChannel());
// create a third connection; since the max number of connections is 2, this should reuse one of the existing connections.
Flow flow3 = new Flow(3, 0);
@Cleanup
ClientConnection connection3 = connectionPool.getClientConnection(flow3, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection3 and verify.
connection3.send(readRequestGenerator.apply(flow3.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow3.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertEquals(((FlowClientConnection) connection1).getChannel(),
((FlowClientConnection) connection3).getChannel());
Flow flow4 = new Flow(3, 0);
@Cleanup
ClientConnection connection4 = connectionPool.getClientConnection(flow4, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection3 and verify.
connection3.send(readRequestGenerator.apply(flow4.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow4.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertEquals(2, connectionPool.getActiveChannels().size());
assertNotEquals(((FlowClientConnection) connection3).getChannel(),
((FlowClientConnection) connection4).getChannel());
assertEquals(((FlowClientConnection) connection2).getChannel(),
((FlowClientConnection) connection4).getChannel());
}
@Test
public void testConcurrentRequests() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
// create a second connection, since the max number of connections is 1 this should reuse the same connection.
Flow flow2 = new Flow(2, 0);
ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join();
assertEquals(1, factory.getOpenSocketCount());
assertEquals(1, connectionPool.getActiveChannels().size());
connection1.send(readRequestGenerator.apply(flow1.asLong()));
connection2.send(readRequestGenerator.apply(flow2.asLong()));
List<WireCommands.SegmentRead> msgs = new ArrayList<WireCommands.SegmentRead>();
msgs.add(msgRead.take());
msgs.add(msgRead.take());
assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong())));
assertTrue(msgs.contains(readResponseGenerator.apply(flow2.asLong())));
assertEquals(1, factory.getOpenSocketCount());
connection1.close();
connection2.close();
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
}
| pravega/pravega | client/src/test/java/io/pravega/client/connection/impl/ConnectionPoolingTest.java | Java | apache-2.0 | 21,376 |
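The tests above exercise the pooling contract: with maxConnectionsPerSegmentStore(1) two flows share one socket, with a cap of 2 they land on different sockets, and flow-less connections bypass the pool. A deliberately simplified sketch of that contract (invented names, not the Pravega ConnectionPoolImpl API):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model of flow-based connection pooling: physical sockets per endpoint are capped,
// and logical flows are multiplexed onto them.
final class ToyConnectionPool {
    static final class FlowHandle {
        final int flowId;
        final int socketId;
        FlowHandle(int flowId, int socketId) {
            this.flowId = flowId;
            this.socketId = socketId;
        }
    }

    private final int maxConnectionsPerEndpoint;
    private final Map<String, List<Integer>> socketsByEndpoint = new HashMap<>();
    private int nextSocketId;

    ToyConnectionPool(int maxConnectionsPerEndpoint) {
        this.maxConnectionsPerEndpoint = maxConnectionsPerEndpoint;
    }

    // Open a new socket only while under the per-endpoint cap; otherwise reuse an
    // existing one (simple modulo assignment stands in for real load balancing).
    synchronized FlowHandle getClientConnection(String endpoint, int flowId) {
        List<Integer> sockets =
                socketsByEndpoint.computeIfAbsent(endpoint, e -> new ArrayList<>());
        if (sockets.size() < maxConnectionsPerEndpoint) {
            sockets.add(nextSocketId++);
        }
        return new FlowHandle(flowId, sockets.get(Math.floorMod(flowId, sockets.size())));
    }

    public static void main(String[] args) {
        ToyConnectionPool pool = new ToyConnectionPool(1);
        FlowHandle f1 = pool.getClientConnection("localhost:9090", 1);
        FlowHandle f2 = pool.getClientConnection("localhost:9090", 2);
        // With a cap of 1 (as in testConnectionPooling above), both flows share a socket.
        System.out.println(f1.socketId == f2.socketId); // true
    }
}
```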
#define DEBUG_TYPE "sil-simplify-cfg"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h"
#include "swift/SILOptimizer/Utils/CFG.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "swift/SILOptimizer/Utils/SILInliner.h"
using namespace swift;
namespace {
/// This is a class implementing a dominator-based jump-threading
/// for checked_cast_br [exact].
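///
/// Illustrative sketch (simplified SIL-like pseudocode, not a real test case):
///
///   bb0:
///     checked_cast_br [exact] %obj : $Base to $Derived, bb_succ, bb_fail
///   ...
///   bbN:   // reachable only through bb_succ and/or bb_fail
///     checked_cast_br [exact] %obj : $Base to $Derived, bbN_succ, bbN_fail
///
/// Along any path that went through bb_succ the second cast must succeed, and along
/// bb_fail it must fail, so predecessors of bbN with a known outcome can be redirected
/// straight to the corresponding target, skipping the redundant check.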
class CheckedCastBrJumpThreading {
// The checked_cast_br instruction, which
// we try to jump-thread
CheckedCastBranchInst *CCBI;
// Basic block of the current checked_cast_br instruction.
SILBasicBlock *BB;
// Condition used by the current checked_cast_br instruction.
SILValue Condition;
// Success branch of the current checked_cast_br instruction.
SILBasicBlock *SuccessBB;
// Failure branch of the current checked_cast_br instruction.
SILBasicBlock *FailureBB;
// Current dominating checked_cast_br instruction.
CheckedCastBranchInst *DomCCBI;
// Basic block of the dominating checked_cast_br instruction.
SILBasicBlock *DomBB;
// Condition used by the dominating checked_cast_br instruction.
SILValue DomCondition;
// Success branch of the dominating checked_cast_br instruction.
SILBasicBlock *DomSuccessBB;
// Failure branch of the dominating checked_cast_br instruction.
SILBasicBlock *DomFailureBB;
// Current dominator tree node where we look for a dominating
// checked_cast_br instruction.
llvm::DomTreeNodeBase<SILBasicBlock> *Node;
SILBasicBlock *ArgBB;
// Dominator information to be used.
DominanceInfo *DT;
// Basic block created as a landing BB for all failure predecessors.
SILBasicBlock *TargetFailureBB;
// Basic block created as a landing BB for all success predecessors.
SILBasicBlock *TargetSuccessBB;
// Cloner used to clone the BB to FailureSuccessBB.
Optional<BasicBlockCloner> FailureBBCloner;
// Cloner used to clone the BB to TargetSuccessBB.
Optional<BasicBlockCloner> SuccessBBCloner;
// Predecessors reached only via a path along the
// success branch of the dominating checked_cast_br.
SmallVector<SILBasicBlock *, 8> SuccessPreds;
// Predecessors reached only via a path along the
// failure branch of the dominating checked_cast_br.
SmallVector<SILBasicBlock *, 8> FailurePreds;
// All other predecessors, where the outcome of the
// checked_cast_br along the path is not known.
SmallVector<SILBasicBlock *, 8> UnknownPreds;
// Basic blocks to be added to for reprocessing
// after jump-threading is done.
SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist;
bool areEquivalentConditionsAlongPaths();
bool areEquivalentConditionsAlongSomePaths();
bool handleArgBBIsEntryBlock(SILBasicBlock *ArgBB);
bool checkCloningConstraints();
void modifyCFGForUnknownPreds();
void modifyCFGForFailurePreds();
void modifyCFGForSuccessPreds();
void updateDominatorTree();
void updateSSA();
void addBlockToSimplifyCFGWorklist(SILBasicBlock *BB);
void addBlocksToWorklist();
void classifyPredecessor(
SILBasicBlock *Pred,
SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
SmallVectorImpl<SILBasicBlock *> &FailurePreds,
SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
bool SuccessDominates,
bool FailureDominates);
SILValue isArgValueEquivalentToCondition(SILValue Value,
SILBasicBlock *DomBB,
SILValue DomValue,
DominanceInfo *DT);
public:
CheckedCastBrJumpThreading(DominanceInfo *DT,
SmallVectorImpl<SILBasicBlock *> &BBs)
: DT(DT), BlocksForWorklist(BBs) { }
bool trySimplify(TermInst *Term);
ArrayRef<SILBasicBlock*> getBlocksForWorklist() {
return BlocksForWorklist;
}
};
} // end anonymous namespace
/// Find a nearest common dominator for a given set of basic blocks.
static DominanceInfoNode *findCommonDominator(ArrayRef<SILBasicBlock *> BBs,
DominanceInfo *DT) {
DominanceInfoNode *CommonDom = nullptr;
for (auto *BB : BBs) {
if (!CommonDom) {
CommonDom = DT->getNode(BB);
} else {
CommonDom = DT->getNode(
DT->findNearestCommonDominator(CommonDom->getBlock(), BB));
}
}
return CommonDom;
}
/// Find a nearest common dominator for all predecessors of
/// a given basic block.
static DominanceInfoNode *findCommonDominator(SILBasicBlock *BB,
DominanceInfo *DT) {
SmallVector<SILBasicBlock *, 8> Preds;
for (auto *Pred: BB->getPreds())
Preds.push_back(Pred);
return findCommonDominator(Preds, DT);
}
/// Estimate the cost of inlining a given basic block.
static unsigned basicBlockInlineCost(SILBasicBlock *BB, unsigned Cutoff) {
unsigned Cost = 0;
for (auto &I : *BB) {
auto ICost = instructionInlineCost(I);
Cost += unsigned(ICost);
if (Cost > Cutoff)
return Cost;
}
return Cost;
}
/// We cannot duplicate blocks with AllocStack instructions (they need to be
/// FIFO). Other instructions can be duplicated.
static bool canDuplicateBlock(SILBasicBlock *BB) {
for (auto &I : *BB) {
if (!I.isTriviallyDuplicatable())
return false;
}
return true;
}
void CheckedCastBrJumpThreading::addBlockToSimplifyCFGWorklist(SILBasicBlock *BB) {
BlocksForWorklist.push_back(BB);
}
/// Add affected blocks for re-processing by simplifyCFG
void CheckedCastBrJumpThreading::addBlocksToWorklist() {
if (TargetFailureBB) {
if (!TargetFailureBB->pred_empty())
addBlockToSimplifyCFGWorklist(TargetFailureBB);
}
if (TargetSuccessBB) {
if (!TargetSuccessBB->pred_empty())
addBlockToSimplifyCFGWorklist(TargetSuccessBB);
}
if (!BB->pred_empty())
addBlockToSimplifyCFGWorklist(BB);
}
/// Classify a predecessor of a BB containing checked_cast_br as being
/// reachable via success or failure branches of a dominating checked_cast_br
/// or as unknown if it can be reached via both branches at the same time.
void CheckedCastBrJumpThreading::classifyPredecessor(
SILBasicBlock *Pred,
SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
SmallVectorImpl<SILBasicBlock *> &FailurePreds,
SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
bool SuccessDominates,
bool FailureDominates) {
if (SuccessDominates && FailureDominates) {
UnknownPreds.push_back(Pred);
return;
}
if (SuccessDominates) {
SuccessPreds.push_back(Pred);
return;
}
if (FailureDominates) {
FailurePreds.push_back(Pred);
return;
}
UnknownPreds.push_back(Pred);
}
/// Check if the root value of Value, as it comes
/// along the paths from DomBB, is equivalent to
/// the dominating condition DomValue.
SILValue CheckedCastBrJumpThreading::isArgValueEquivalentToCondition(
SILValue Value, SILBasicBlock *DomBB, SILValue DomValue,
DominanceInfo *DT) {
SmallPtrSet<ValueBase *, 16> SeenValues;
DomValue = DomValue.stripClassCasts();
while (true) {
Value = Value.stripClassCasts();
if (Value == DomValue)
return Value;
// We know how to propagate through BBArgs only.
auto *V = dyn_cast<SILArgument>(Value);
if (!V)
return SILValue();
// Have we visited this BB already?
if (!SeenValues.insert(Value.getDef()).second)
return SILValue();
if (SeenValues.size() > 10)
return SILValue();
SmallVector<SILValue, 4> IncomingValues;
if (!V->getIncomingValues(IncomingValues) || IncomingValues.empty())
return SILValue();
ValueBase *Def = nullptr;
for (auto IncomingValue : IncomingValues) {
      // Each incoming value should be either from a block
      // dominated by DomBB or it should be the value used in
      // the condition in DomBB.
Value = IncomingValue.stripClassCasts();
if (Value == DomValue)
continue;
// Values should be the same
if (!Def)
Def = Value.getDef();
if (Def != Value.getDef())
return SILValue();
if (!DT->dominates(DomBB, Value.getDef()->getParentBB()))
return SILValue();
// OK, this value is a potential candidate
}
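    // All incoming values are either DomValue or the same definition dominated
    // by DomBB; continue the backwards walk from the first incoming value.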
Value = IncomingValues[0];
}
}
/// Update the SSA form after all changes.
void CheckedCastBrJumpThreading::updateSSA() {
assert(!(SuccessBBCloner.hasValue() && FailureBBCloner.hasValue()) &&
"Both cloners cannot be used at the same time yet");
// Now update the SSA form.
if (!FailurePreds.empty() && FailureBBCloner.hasValue() &&
!SuccessBBCloner.hasValue())
updateSSAAfterCloning(*FailureBBCloner.getPointer(), TargetFailureBB, BB);
if (SuccessBBCloner.hasValue() && !FailureBBCloner.hasValue()) {
updateSSAAfterCloning(*SuccessBBCloner.getPointer(), TargetSuccessBB, BB);
}
}
/// Update the dominator tree after all changes.
void CheckedCastBrJumpThreading::updateDominatorTree() {
// Update the dominator tree.
  // If BB was the IDom of some block, then the dominating block (Node)
  // becomes its IDom after jump-threading.
auto *BBDomNode = DT->getNode(BB);
auto &Children = BBDomNode->getChildren();
if (Children.size() > 1) {
SmallVector<DominanceInfoNode *, 16> ChildrenCopy;
std::copy(Children.begin(), Children.end(),
std::back_inserter(ChildrenCopy));
for (auto *Child : ChildrenCopy) {
DT->changeImmediateDominator(Child, Node);
}
}
DominanceInfoNode *CommonDom;
// Find a common dominator for all unknown preds.
if (!UnknownPreds.empty()) {
// Find a new IDom for FailureBB
CommonDom = findCommonDominator(FailureBB, DT);
if (CommonDom)
DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());
CommonDom = findCommonDominator(UnknownPreds, DT);
// This common dominator dominates the BB now.
if (CommonDom) {
DT->changeImmediateDominator(BB, CommonDom->getBlock());
}
}
// Find a common dominator for all failure preds.
CommonDom = findCommonDominator(FailurePreds, DT);
// This common dominator dominates the TargetFailureBB now.
if (CommonDom) {
DT->addNewBlock(TargetFailureBB, CommonDom->getBlock());
// Find a new IDom for FailureBB
CommonDom = findCommonDominator(FailureBB, DT);
if (CommonDom)
DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());
}
// Find a common dominator for all success preds.
CommonDom = findCommonDominator(SuccessPreds, DT);
// This common dominator of all success preds dominates the BB now.
if (CommonDom) {
if (TargetSuccessBB) {
DT->addNewBlock(TargetSuccessBB, CommonDom->getBlock());
} else {
DT->changeImmediateDominator(BB, CommonDom->getBlock());
}
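    // Find a new IDom for SuccessBB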
CommonDom = findCommonDominator(SuccessBB, DT);
if (CommonDom)
DT->changeImmediateDominator(SuccessBB, CommonDom->getBlock());
}
// End of dominator tree update.
}
void CheckedCastBrJumpThreading::modifyCFGForUnknownPreds() {
if (UnknownPreds.empty())
return;
// Check the FailureBB if it is a BB that contains a class_method
// referring to the same value as a condition. This pattern is typical
// for method chaining code like obj.method1().method2().etc()
SILInstruction *Inst = &*FailureBB->begin();
if (ClassMethodInst *CMI = dyn_cast<ClassMethodInst>(Inst)) {
if (CMI->getOperand() == Condition) {
// Replace checked_cast_br by branch to FailureBB.
SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB);
CCBI->eraseFromParent();
}
}
}
/// Create a copy of the BB as a landing BB
/// for all FailurePreds.
void CheckedCastBrJumpThreading::modifyCFGForFailurePreds() {
if (FailurePreds.empty())
return;
FailureBBCloner.emplace(BasicBlockCloner(BB));
FailureBBCloner->clone();
TargetFailureBB = FailureBBCloner->getDestBB();
auto *TI = TargetFailureBB->getTerminator();
SILBuilderWithScope Builder(TI);
// This BB copy branches to a FailureBB.
Builder.createBranch(TI->getLoc(), FailureBB);
TI->eraseFromParent();
// Redirect all FailurePreds to the copy of BB.
for (auto *Pred : FailurePreds) {
TermInst *TI = Pred->getTerminator();
// Replace branch to BB by branch to TargetFailureBB.
replaceBranchTarget(TI, BB, TargetFailureBB, /*PreserveArgs=*/true);
Pred = nullptr;
}
}
/// Create a copy of the BB or reuse BB as
/// a landing basic block for all SuccessPreds.
void CheckedCastBrJumpThreading::modifyCFGForSuccessPreds() {
if (!UnknownPreds.empty()) {
if (!SuccessPreds.empty()) {
      // Create a copy of the BB as a landing BB
      // for all SuccessPreds.
SuccessBBCloner.emplace(BasicBlockCloner(BB));
SuccessBBCloner->clone();
TargetSuccessBB = SuccessBBCloner->getDestBB();
auto *TI = TargetSuccessBB->getTerminator();
SILBuilderWithScope Builder(TI);
SmallVector<SILValue, 8> SuccessBBArgs;
// Take argument value from the dominating BB.
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
// This BB copy branches to SuccessBB.
Builder.createBranch(TI->getLoc(), SuccessBB, SuccessBBArgs);
TI->eraseFromParent();
// Redirect all SuccessPreds to the copy of BB.
for (auto *Pred : SuccessPreds) {
TermInst *TI = Pred->getTerminator();
// Replace branch to BB by branch to TargetSuccessBB.
replaceBranchTarget(TI, BB, TargetSuccessBB, /*PreserveArgs=*/true);
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
Pred = nullptr;
}
}
} else {
// There are no predecessors where it is not clear
// if they are dominated by a success or failure branch
// of DomBB. Therefore, there is no need to clone
// the BB for SuccessPreds. Current BB can be re-used
// instead as their target.
// Add an unconditional jump at the end of the block.
SmallVector<SILValue, 1> SuccessBBArgs;
// Take argument value from the dominating BB
SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0));
SILBuilder(BB).createBranch(CCBI->getLoc(), SuccessBB, SuccessBBArgs);
CCBI->eraseFromParent();
}
}
/// Handle a special case, where ArgBB is the entry block.
bool CheckedCastBrJumpThreading::handleArgBBIsEntryBlock(SILBasicBlock *ArgBB) {
if (ArgBB->getPreds().begin() == ArgBB->getPreds().end()) {
// It must be the entry block
// See if it is reached over Success or Failure path.
bool SuccessDominates = DomSuccessBB == BB;
bool FailureDominates = DomFailureBB == BB;
classifyPredecessor(ArgBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
return true;
}
return false;
}
// Returns false if cloning required by jump threading cannot
// be performed, because some of the constraints are violated.
bool CheckedCastBrJumpThreading::checkCloningConstraints() {
// Check some cloning related constraints.
  // If this argument is from a different BB, then jump-threading
// may require too much code duplication.
if (ArgBB && ArgBB != BB)
return false;
// Bail out if current BB cannot be duplicated.
if (!canDuplicateBlock(BB))
return false;
// Check if code-bloat would be too big when this BB
// is jump-threaded.
// TODO: Make InlineCostCutoff parameter configurable?
// Dec 1, 2014:
// We looked at the inline costs of BBs from our benchmark suite
// and found that currently the highest inline cost for the
// whole benchmark suite is 12. In 95% of all cases it is <=3.
const unsigned InlineCostCutoff = 20;
if (basicBlockInlineCost(BB, InlineCostCutoff) >= InlineCostCutoff)
return false;
return true;
}
/// If conditions are not equivalent along all paths, try harder
/// to check if they are actually equivalent along a subset of paths.
/// To do it, try to propagate the Condition backwards and see if it
/// is actually equivalent to DomCondition along some of the paths.
bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongSomePaths() {
auto *Arg = dyn_cast<SILArgument>(Condition);
if (!Arg)
return false;
ArgBB = Arg->getParent();
if (!DT->dominates(DomBB, ArgBB))
return false;
// Incoming values for the BBArg.
SmallVector<SILValue, 4> IncomingValues;
if (ArgBB != ArgBB->getParent()->begin() &&
(!Arg->getIncomingValues(IncomingValues) || IncomingValues.empty()))
return false;
  // Check for each predecessor whether the incoming value coming from it
// is equivalent to the DomCondition. If this is the case, it is
// possible to try jump-threading along this path.
if (!handleArgBBIsEntryBlock(ArgBB)) {
// ArgBB is not the entry block and has predecessors.
unsigned idx = 0;
for (auto *PredBB : ArgBB->getPreds()) {
auto IncomingValue = IncomingValues[idx];
SILValue ReachingValue = isArgValueEquivalentToCondition(
IncomingValue, DomBB, DomCondition, DT);
if (ReachingValue == SILValue()) {
UnknownPreds.push_back(PredBB);
idx++;
continue;
}
      // Condition is the same if BB is reached via a path through Pred.
DEBUG(llvm::dbgs() << "Condition is the same if reached over ");
DEBUG(PredBB->print(llvm::dbgs()));
// See if it is reached over Success or Failure path.
bool SuccessDominates = DT->dominates(DomSuccessBB, PredBB) ||
DT->dominates(DomSuccessBB, BB) ||
DomSuccessBB == BB;
bool FailureDominates = DT->dominates(DomFailureBB, PredBB) ||
DT->dominates(DomFailureBB, BB) ||
DomFailureBB == BB;
classifyPredecessor(
PredBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
idx++;
}
} else {
    // ArgBB is the entry block. Check that conditions are equivalent in this
// case as well.
if (!isArgValueEquivalentToCondition(Condition, DomBB, DomCondition, DT))
return false;
}
  // At this point we know for each predecessor of ArgBB whether it is reached
// over the success, failure or unknown path from DomBB.
// Now we can generate a new BB for preds reaching BB over the success
// path and a new BB for preds reaching BB over the failure path.
// Then we redirect those preds to those new basic blocks.
return true;
}
/// Check if conditions of CCBI and DomCCBI are equivalent along
/// all or at least some paths.
bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongPaths() {
// Are conditions equivalent along all paths?
if (DomCondition == Condition) {
// Conditions are exactly the same, without any restrictions.
// They are equivalent along all paths.
// Figure out for each predecessor which branch of
// the dominating checked_cast_br is used to reach it.
for (auto *PredBB : BB->getPreds()) {
// All predecessors should either unconditionally branch
// to the current BB or be another checked_cast_br instruction.
      if (!isa<CheckedCastBranchInst>(PredBB->getTerminator()) &&
          !isa<BranchInst>(PredBB->getTerminator()))
return false;
bool SuccessDominates =
DT->dominates(DomSuccessBB, PredBB) || DomSuccessBB == BB;
bool FailureDominates =
DT->dominates(DomFailureBB, PredBB) || DomFailureBB == BB;
classifyPredecessor(PredBB, SuccessPreds, FailurePreds, UnknownPreds,
SuccessDominates, FailureDominates);
}
return true;
}
// Check if conditions are equivalent along a subset of reaching paths.
return areEquivalentConditionsAlongSomePaths();
}
/// Try performing a dominator-based jump-threading for
/// checked_cast_br instructions.
bool CheckedCastBrJumpThreading::trySimplify(TermInst *Term) {
  CCBI = dyn_cast<CheckedCastBranchInst>(Term);
if (!CCBI)
return false;
// Init information about the checked_cast_br we try to
// jump-thread.
BB = Term->getParent();
Condition = Term->getOperand(0).stripClassCasts();
SuccessBB = CCBI->getSuccessBB();
FailureBB = CCBI->getFailureBB();
// Find a dominating checked_cast_br, which performs the same check.
for (Node = DT->getNode(BB)->getIDom(); Node; Node = Node->getIDom()) {
// Get current dominating block.
DomBB = Node->getBlock();
auto *DomTerm = DomBB->getTerminator();
if (!DomTerm->getNumOperands())
continue;
// Check that it is a dominating checked_cast_br.
DomCCBI = dyn_cast<CheckedCastBranchInst>(DomTerm);
if (!DomCCBI)
continue;
// We need to verify that the result type is the same in the
// dominating checked_cast_br, but only for non-exact casts.
// For exact casts, we are interested only in the
// fact that the source operand is the same for
// both instructions.
if (!CCBI->isExact() && !DomCCBI->isExact()) {
if (DomCCBI->getCastType() != CCBI->getCastType())
continue;
}
// Conservatively check that both checked_cast_br instructions
// are either exact or non-exact. This is very conservative,
// but safe.
//
// TODO:
// If the dominating checked_cast_br is non-exact, then
// it is in general not safe to assume that current exact cast
// would have the same outcome. But if the dominating non-exact
// checked_cast_br fails, then the current exact cast would
// always fail as well.
//
    // If the dominating checked_cast_br is exact, then
// it is in general not safe to assume that the current non-exact
// cast would have the same outcome. But if the dominating exact
// checked_cast_br succeeds, then the current non-exact cast
// would always succeed as well.
//
// TODO: In some specific cases, it is possible to prove that
// success or failure of the dominating cast is equivalent to
// the success or failure of the current cast, even if one
// of them is exact and the other not. This is the case
// e.g. if the class has no subclasses.
if (DomCCBI->isExact() != CCBI->isExact())
continue;
// Initialize state variables for the current round of checks
// based on the found dominating checked_cast_br.
DomSuccessBB = DomCCBI->getSuccessBB();
DomFailureBB = DomCCBI->getFailureBB();
DomCondition = DomTerm->getOperand(0).stripClassCasts();
// Init state variables for paths analysis
SuccessPreds.clear();
FailurePreds.clear();
UnknownPreds.clear();
ArgBB = nullptr;
// Init state variables for jump-threading transformation.
TargetFailureBB = nullptr;
TargetSuccessBB = nullptr;
// Are conditions of CCBI and DomCCBI equivalent along (some) paths?
// If this is the case, classify all incoming paths into SuccessPreds,
// FailurePreds or UnknownPreds depending on how they reach CCBI.
if (!areEquivalentConditionsAlongPaths())
continue;
// Check if any jump-threading is required and possible.
if (SuccessPreds.empty() && FailurePreds.empty())
return false;
// If this check is reachable via success, failure and unknown
// at the same time, then we don't know the outcome of the
// dominating check. No jump-threading is possible in this case.
if (!SuccessPreds.empty() && !FailurePreds.empty() &&
!UnknownPreds.empty()) {
return false;
}
unsigned TotalPreds =
SuccessPreds.size() + FailurePreds.size() + UnknownPreds.size();
// We only need to clone the BB if not all of its
// predecessors are in the same group.
if (TotalPreds != SuccessPreds.size() &&
TotalPreds != UnknownPreds.size()) {
// Check some cloning related constraints.
if (!checkCloningConstraints())
return false;
}
bool InvertSuccess = false;
if (DomCCBI->isExact() && CCBI->isExact() &&
DomCCBI->getCastType() != CCBI->getCastType()) {
if (TotalPreds == SuccessPreds.size()) {
// The dominating exact cast was successful, but it casted to a
// different type. Therefore, the current cast fails for sure.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
InvertSuccess = true;
} else {
// Otherwise, we don't know if the current cast will succeed or
// fail.
return false;
}
}
    // If we have predecessors where it is not known whether they are reached
    // over the success or failure path, we cannot eliminate a checked_cast_br.
// We have to generate new dedicated BBs as landing BBs for all
// FailurePreds and all SuccessPreds.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
for (auto *B : BB->getPreds()) {
addBlockToSimplifyCFGWorklist(B);
}
for (auto *B : BB->getSuccessorBlocks()) {
addBlockToSimplifyCFGWorklist(B);
}
// Create a copy of the BB as a landing BB
// for all FailurePreds.
modifyCFGForFailurePreds();
if (InvertSuccess) {
SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB);
CCBI->eraseFromParent();
SuccessPreds.clear();
} else {
// Create a copy of the BB or reuse BB as
// a landing basic block for all SuccessPreds.
modifyCFGForSuccessPreds();
}
// Handle unknown preds.
modifyCFGForUnknownPreds();
// Update the dominator tree after all changes.
updateDominatorTree();
// Update the SSA form after all changes.
updateSSA();
// Since a few BBs were changed now, add them for re-processing.
addBlocksToWorklist();
return true;
}
// Jump-threading was not possible.
return false;
}
namespace swift {
bool tryCheckedCastBrJumpThreading(TermInst *Term, DominanceInfo *DT,
SmallVectorImpl<SILBasicBlock *> &BBs) {
CheckedCastBrJumpThreading CCBJumpThreading(DT, BBs);
return CCBJumpThreading.trySimplify(Term);
}
} // end namespace swift
| adrfer/swift | lib/SILOptimizer/Utils/CheckedCastBrJumpThreading.cpp | C++ | apache-2.0 | 26,094 |
package jp.co.omana.action;
import org.seasar.struts.annotation.Execute;
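/**
 * SAStruts action that forwards service-related requests to their JSP views.
 */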
public class ServiceAction {
@Execute(validator = false)
public String index() {
return "board.jsp";
}
@Execute(validator = false)
public String confirm() {
return "index.jsp";
}
@Execute(validator = false)
public String finish() {
return "index.jsp";
}
}
| ikraikra/bunsekiya | src/main/java/jp/co/omana/action/ServiceAction.java | Java | apache-2.0 | 387 |
from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
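# Two ways to trigger store mutations: functions decorated with register_dispatch
# dispatch the named mutation when called, while decrease() calls dispatch() directly.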
@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
pass
def decrease(rank):
return dispatch('user', mutation_types.DECREASE, rank)
@register_dispatch('metric', 'inc_qps')
def inc_qps():
pass | tao12345666333/app-turbo | demos/helloworld/store/actions.py | Python | apache-2.0 | 322 |
/**
*
*/
package com.sivalabs.demo.orders.repositories;
import org.springframework.data.jpa.repository.JpaRepository;
import com.sivalabs.demo.orders.entities.Order;
/**
* @author Siva
*
*/
public interface OrderRepository extends JpaRepository<Order, Integer>{
}
| sivaprasadreddy/springboot-learn-by-example | chapter-09/springboot-multiple-datasources-demo/src/main/java/com/sivalabs/demo/orders/repositories/OrderRepository.java | Java | apache-2.0 | 274 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
"""Test migration operations supported by admin user"""
@classmethod
def setup_clients(cls):
super(MigrationsAdminTest, cls).setup_clients()
cls.client = cls.os_admin.migrations_client
@decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
def test_list_migrations(self):
"""Test admin user can get the migrations list"""
self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
"""Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.resize_server(server_id, self.flavor_ref_alt)
body = self.client.list_migrations()['migrations']
instance_uuids = [x['instance_uuid'] for x in body]
self.assertIn(server_id, instance_uuids)
def _flavor_clean_up(self, flavor_id):
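        # Best-effort cleanup: delete the flavor and wait for it;
        # ignore NotFound if it is already gone.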
try:
self.admin_flavors_client.delete_flavor(flavor_id)
self.admin_flavors_client.wait_for_resource_deletion(flavor_id)
except exceptions.NotFound:
pass
@decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
"""Test reverting resized server with original flavor deleted
Tests that we can revert the resize on an instance whose original
flavor has been deleted.
"""
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
flavor = self.admin_flavors_client.show_flavor(
self.flavor_ref)['flavor']
flavor = self.admin_flavors_client.create_flavor(
name=data_utils.rand_name('test_resize_flavor_'),
ram=flavor['ram'],
disk=flavor['disk'],
vcpus=flavor['vcpus']
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
# Set extra specs same as self.flavor_ref for the created flavor,
# because the environment may need some special extra specs to
# create server which should have been contained in
# self.flavor_ref.
extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor_ref)['extra_specs']
if extra_spec_keys:
self.admin_flavors_client.set_flavor_extra_spec(
flavor['id'], **extra_spec_keys)
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
server = self.servers_client.show_server(server['id'])['server']
# If 'id' not in server['flavor'], we can only compare the flavor
# details, so here we should save the to-be-deleted flavor's details,
# for the flavor comparison after the server resizing.
if not server['flavor'].get('id'):
pre_flavor = {}
body = self.flavors_client.show_flavor(flavor['id'])['flavor']
for key in ['name', 'ram', 'vcpus', 'disk']:
pre_flavor[key] = body[key]
# Delete the flavor we used to boot the instance.
self._flavor_clean_up(flavor['id'])
# Now resize the server and wait for it to go into verify state.
self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
# Now revert the resize, it should be OK even though the original
# flavor used to boot the server was deleted.
self.servers_client.revert_resize_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
server = self.servers_client.show_server(server['id'])['server']
if server['flavor'].get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor['id'], server['flavor']['id'], msg)
else:
self.assertEqual(pre_flavor['name'],
server['flavor']['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
def _test_cold_migrate_server(self, revert=False):
if CONF.compute.min_compute_nodes < 2:
msg = "Less than 2 compute nodes, skipping multinode tests."
raise self.skipException(msg)
server = self.create_test_server(wait_until="ACTIVE")
src_host = self.get_host_for_server(server['id'])
self.admin_servers_client.migrate_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'], 'VERIFY_RESIZE')
if revert:
self.servers_client.revert_resize_server(server['id'])
assert_func = self.assertEqual
else:
self.servers_client.confirm_resize_server(server['id'])
assert_func = self.assertNotEqual
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
dst_host = self.get_host_for_server(server['id'])
assert_func(src_host, dst_host)
@decorators.idempotent_id('4bf0be52-3b6f-4746-9a27-3143636fe30d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
"""Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
"""Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
| openstack/tempest | tempest/api/compute/admin/test_migrations.py | Python | apache-2.0 | 7,577 |
var structCO__config__t =
[
[ "CNT_NMT", "structCO__config__t.html#aeef814580eb5ece5156e63bfc1b490c9", null ],
[ "ENTRY_H1017", "structCO__config__t.html#ad17f77b55de3d90ec983fcac49eeab6d", null ],
[ "CNT_HB_CONS", "structCO__config__t.html#a0031fc8f80e95f8480c918dbf8289671", null ],
[ "ENTRY_H1016", "structCO__config__t.html#a0af4cf7d0355861e7f60206d794d6a91", null ],
[ "CNT_EM", "structCO__config__t.html#a515e08f68835f71a6f145be8f27b510a", null ],
[ "ENTRY_H1001", "structCO__config__t.html#a6a6c19e816fb76882e85b2c07c0d8f42", null ],
[ "ENTRY_H1014", "structCO__config__t.html#a4827d94f6152cc12d86bd21312ae86e4", null ],
[ "ENTRY_H1015", "structCO__config__t.html#a141f21b4d1730206d1af823fd6b13a01", null ],
[ "ENTRY_H1003", "structCO__config__t.html#a7e320b309714b7f623c2006d45fee929", null ],
[ "CNT_SDO_SRV", "structCO__config__t.html#aac83faf556924515cc2aa8003753ab58", null ],
[ "ENTRY_H1200", "structCO__config__t.html#a05ab8adad4517850e31e5542895f7cc5", null ],
[ "CNT_SDO_CLI", "structCO__config__t.html#a2fc9606643a7fb4d4237f01812d3a6d2", null ],
[ "ENTRY_H1280", "structCO__config__t.html#a9f871c4ec753e8414cdb47eb78c3e09d", null ],
[ "CNT_TIME", "structCO__config__t.html#ada2a43384a544fa2f235de24a874b1e6", null ],
[ "ENTRY_H1012", "structCO__config__t.html#abac6be7122af1a8a4f9ae3ff5912d490", null ],
[ "CNT_SYNC", "structCO__config__t.html#af6dbc7d9f31b4cb050e23af8cff3df33", null ],
[ "ENTRY_H1005", "structCO__config__t.html#a02a4992f47db72816753ff2aa1964318", null ],
[ "ENTRY_H1006", "structCO__config__t.html#aa9befdebbaaa22f309b9a1b115612071", null ],
[ "ENTRY_H1007", "structCO__config__t.html#ad51ab63ca8b5836bf0dd8543f02db544", null ],
[ "ENTRY_H1019", "structCO__config__t.html#a468c82f6a0afd757a6b78ce33532c0d2", null ],
[ "CNT_RPDO", "structCO__config__t.html#a7a75302ac077462b67d767b0a11c9f56", null ],
[ "ENTRY_H1400", "structCO__config__t.html#a5e0984d93183493d587523888465eaa7", null ],
[ "ENTRY_H1600", "structCO__config__t.html#ab2ddc9943fd8c89f3b852d7ac9508d21", null ],
[ "CNT_TPDO", "structCO__config__t.html#a1d830617f50e3235de35a403a1513693", null ],
[ "ENTRY_H1800", "structCO__config__t.html#a29b98c08edfe0fba2e46c7af7a9edf6f", null ],
[ "ENTRY_H1A00", "structCO__config__t.html#a43fd6a448c91910c603f2c7756610432", null ],
[ "CNT_LEDS", "structCO__config__t.html#a642809cc681792bca855906241d891cc", null ],
[ "CNT_GFC", "structCO__config__t.html#ae282bab830810b61c0b0c3223654d674", null ],
[ "ENTRY_H1300", "structCO__config__t.html#a91c9f3ddb67231854af39224a9597e20", null ],
[ "CNT_SRDO", "structCO__config__t.html#ae58a44be57069709af3f6acbd10953e1", null ],
[ "ENTRY_H1301", "structCO__config__t.html#a87076cb1f9282d9720c21d395ff4e541", null ],
[ "ENTRY_H1381", "structCO__config__t.html#a7b3172b29ce8751adcab9e4351dcc31e", null ],
[ "ENTRY_H13FE", "structCO__config__t.html#a03fcaca5a8e0e71b86086908cae75f3d", null ],
[ "ENTRY_H13FF", "structCO__config__t.html#aa4cb9674209b83e7f0e48b01feaa04ef", null ],
[ "CNT_LSS_SLV", "structCO__config__t.html#a00a7a598b946ed13e3af7696e9f92dcc", null ],
[ "CNT_LSS_MST", "structCO__config__t.html#ac253cae7039090a6c04bc1e385f3ec21", null ],
[ "CNT_GTWA", "structCO__config__t.html#a64725014ecce342843f14ffc4b57e2a2", null ],
[ "CNT_TRACE", "structCO__config__t.html#aaafb8ffff236b51cd6d4ab16426d460f", null ]
]; | CANopenNode/CANopenSocket | docs/structCO__config__t.js | JavaScript | apache-2.0 | 3,451 |
/*
Copyright 2017 Processwall Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Company: Processwall Limited
Address: The Winnowing House, Mill Lane, Askham Richard, York, YO23 3NW, United Kingdom
Tel: +44 113 815 3440
Web: http://www.processwall.com
Email: [email protected]
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Aras.ViewModel.Design.Applications
{
[Aras.ViewModel.Attributes.Application("Parts", "PartFamily", "Design", false)]
public class Parts : Aras.ViewModel.Containers.Application
{
public Model.Design.Queries.Searches.Part SearchQuery { get; private set; }
public Aras.ViewModel.Grids.Search Search { get; private set; }
public Model.Design.Queries.Forms.Part FormQuery { get; private set; }
public Forms.Part Form { get; private set; }
private void Search_ItemsSelected(object sender, Aras.ViewModel.Grids.Search.ItemsSelectedEventArgs e)
{
if (this.Search.Selected.Count() > 0)
{
this.Form.Binding = this.Form.Store.Get(this.Search.Selected.First().ID);
}
else
{
this.Form.Binding = null;
}
}
public Parts(Aras.ViewModel.Manager.Session Session)
: base(Session)
{
this.Children.NotifyListChanged = false;
// Create Search Query
this.SearchQuery = new Model.Design.Queries.Searches.Part(this.Session.Model);
// Create Search
this.Search = new Aras.ViewModel.Grids.Search(this.Session);
this.Search.Width = 300;
this.Children.Add(this.Search);
this.Search.Region = Aras.ViewModel.Regions.Left;
this.Search.Binding = this.SearchQuery.Store;
this.Search.Splitter = true;
this.Search.ItemsSelected += Search_ItemsSelected;
// Create Form Query
this.FormQuery = new Model.Design.Queries.Forms.Part(this.Session.Model);
// Create Form
this.Form = new Forms.Part(this.Session, this.FormQuery.Store);
this.Children.Add(this.Form);
this.Children.NotifyListChanged = true;
// Select First Part
if (this.SearchQuery.Store.Count() > 0)
{
this.Search.Select(this.SearchQuery.Store.First());
}
}
}
}
| ArasExtensions/Aras.ViewModel | Aras.ViewModel.Design/Applications/Parts.cs | C# | apache-2.0 | 3,016 |
<!--
@license
Copyright (C) 2016 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<link rel="import" href="../../../bower_components/polymer/polymer.html">
<link rel="import" href="../../../behaviors/docs-url-behavior/docs-url-behavior.html">
<link rel="import" href="../../../behaviors/base-url-behavior/base-url-behavior.html">
<link rel="import" href="../../../behaviors/gr-admin-nav-behavior/gr-admin-nav-behavior.html">
<link rel="import" href="../../plugins/gr-endpoint-decorator/gr-endpoint-decorator.html">
<link rel="import" href="../../shared/gr-dropdown/gr-dropdown.html">
<link rel="import" href="../../shared/gr-icons/gr-icons.html">
<link rel="import" href="../../shared/gr-js-api-interface/gr-js-api-interface.html">
<link rel="import" href="../../shared/gr-rest-api-interface/gr-rest-api-interface.html">
<link rel="import" href="../gr-account-dropdown/gr-account-dropdown.html">
<link rel="import" href="../gr-smart-search/gr-smart-search.html">
<dom-module id="gr-main-header">
<template>
<style include="shared-styles">
:host {
display: block;
}
nav {
align-items: center;
display: flex;
}
.bigTitle {
color: var(--header-text-color);
font-size: var(--header-title-font-size);
text-decoration: none;
}
.bigTitle:hover {
text-decoration: underline;
}
/* TODO (viktard): Clean-up after chromium-style migrates to component. */
.titleText::before {
background-image: var(--header-icon);
background-size: var(--header-icon-size) var(--header-icon-size);
background-repeat: no-repeat;
content: "";
display: inline-block;
height: var(--header-icon-size);
margin-right: calc(var(--header-icon-size) / 4);
vertical-align: text-bottom;
width: var(--header-icon-size);
}
.titleText::after {
content: var(--header-title-content);
}
ul {
list-style: none;
padding-left: 1em;
}
.links > li {
cursor: default;
display: inline-block;
padding: 0;
position: relative;
}
.linksTitle {
display: inline-block;
font-weight: var(--font-weight-bold);
position: relative;
text-transform: uppercase;
}
.linksTitle:hover {
opacity: .75;
}
.rightItems {
align-items: center;
display: flex;
flex: 1;
justify-content: flex-end;
}
.rightItems gr-endpoint-decorator:not(:empty) {
margin-left: 1em;
}
gr-smart-search {
flex-grow: 1;
margin-left: .5em;
max-width: 500px;
}
gr-dropdown,
.browse {
padding: .6em .5em;
}
gr-dropdown {
--gr-dropdown-item: {
color: var(--primary-text-color);
}
}
.settingsButton {
margin-left: .5em;
}
.browse {
color: var(--header-text-color);
/* Same as gr-button */
margin: 5px 4px;
text-decoration: none;
}
.invisible,
.settingsButton,
gr-account-dropdown {
display: none;
}
:host([loading]) .accountContainer,
:host([logged-in]) .loginButton,
:host([logged-in]) .registerButton {
display: none;
}
:host([logged-in]) .settingsButton,
:host([logged-in]) gr-account-dropdown {
display: inline;
}
.accountContainer {
align-items: center;
display: flex;
margin: 0 -.5em 0 .5em;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.loginButton, .registerButton {
padding: .5em 1em;
}
.dropdown-trigger {
text-decoration: none;
}
.dropdown-content {
background-color: var(--view-background-color);
box-shadow: 0 1px 5px rgba(0, 0, 0, .3);
}
/*
* We are not using :host to do this, because :host has a lowest css priority
* compared to others. This means that using :host to do this would break styles.
*/
.linksTitle,
.bigTitle,
.loginButton,
.registerButton,
iron-icon,
gr-account-dropdown {
color: var(--header-text-color);
}
#mobileSearch {
display: none;
}
@media screen and (max-width: 50em) {
.bigTitle {
font-size: var(--font-size-large);
font-weight: var(--font-weight-bold);
}
gr-smart-search,
.browse,
.rightItems .hideOnMobile,
.links > li.hideOnMobile {
display: none;
}
#mobileSearch {
display: inline-flex;
}
.accountContainer {
margin-left: .5em !important;
}
gr-dropdown {
padding: .5em 0 .5em .5em;
}
}
</style>
<nav>
<a href$="[[_computeRelativeURL('/')]]" class="bigTitle">
<gr-endpoint-decorator name="header-title">
<span class="titleText"></span>
</gr-endpoint-decorator>
</a>
<ul class="links">
<template is="dom-repeat" items="[[_links]]" as="linkGroup">
<li class$="[[linkGroup.class]]">
<gr-dropdown
link
down-arrow
items = [[linkGroup.links]]
horizontal-align="left">
<span class="linksTitle" id="[[linkGroup.title]]">
[[linkGroup.title]]
</span>
</gr-dropdown>
</li>
</template>
</ul>
<div class="rightItems">
<gr-endpoint-decorator
class="hideOnMobile"
name="header-small-banner"></gr-endpoint-decorator>
<gr-smart-search
id="search"
search-query="{{searchQuery}}"></gr-smart-search>
<gr-endpoint-decorator
class="hideOnMobile"
name="header-browse-source"></gr-endpoint-decorator>
<div class="accountContainer" id="accountContainer">
<iron-icon id="mobileSearch" icon="gr-icons:search" on-tap='_onMobileSearchTap'></iron-icon>
<div class$="[[_computeIsInvisible(_registerURL)]]">
<a
class="registerButton"
href$="[[_registerURL]]">
[[_registerText]]
</a>
</div>
<a class="loginButton" href$="[[_loginURL]]">Sign in</a>
<a
class="settingsButton"
href$="[[_generateSettingsLink()]]"
title="Settings">
<iron-icon icon="gr-icons:settings"></iron-icon>
</a>
<gr-account-dropdown account="[[_account]]"></gr-account-dropdown>
</div>
</div>
</nav>
<gr-js-api-interface id="jsAPI"></gr-js-api-interface>
<gr-rest-api-interface id="restAPI"></gr-rest-api-interface>
</template>
<script src="gr-main-header.js"></script>
</dom-module>
| qtproject/qtqa-gerrit | polygerrit-ui/app/elements/core/gr-main-header/gr-main-header.html | HTML | apache-2.0 | 7,508 |
#include <tuple>
#include <cmath>
#include "Vector2.h"
Vector2::Vector2(void)
{
}
Vector2::Vector2(float X, float Y)
{
this->X = X;
this->Y = Y;
}
// Returns the length of the vector
float Vector2::Magnitude()
{
return sqrt(X * X + Y * Y);
}
// Returns the length of the vector squared
// Used for length comparisons without needing roots
float Vector2::MagnitudeSquared()
{
return X * X + Y * Y;
}
// Returns a normalized copy of the vector
Vector2 Vector2::Normal()
{
float length = this->Magnitude();
if (length != 0)
return Vector2(X / length, Y / length);
return Vector2();
}
// Sets the magnitude of the vector
void Vector2::SetMagnitude(float mag)
{
Vector2 v = this->Normal();
X = v.X*mag;
Y = v.Y*mag;
}
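// Returns the dot product of this vector and another vector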
float Vector2::Dot(Vector2 other)
{
return X * other.X + Y * other.Y;
}
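// Returns the 2D cross product (the z-component of the 3D cross product)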
float Vector2::Cross(Vector2 other)
{
return X * other.Y - Y * other.X;
}
Vector2 Vector2::operator+(Vector2 other)
{
return Vector2(X + other.X, Y + other.Y);
}
Vector2 Vector2::operator-(Vector2 other)
{
return Vector2(X - other.X, Y - other.Y);
}
Vector2 Vector2::operator*(float scalar)
{
return Vector2(X * scalar, Y * scalar);
}
Vector2 Vector2::operator-()
{
return Vector2(-X, -Y);
}
Vector2& Vector2::operator+=(const Vector2& other)
{
X += other.X;
Y += other.Y;
return *this;
}
Vector2& Vector2::operator-=(const Vector2& other)
{
X -= other.X;
Y -= other.Y;
return *this;
}
Vector2& Vector2::operator*=(const Vector2& other)
{
X *= other.X;
Y *= other.Y;
return *this;
}
Vector2& Vector2::operator/=(const Vector2& other)
{
X /= other.X;
Y /= other.Y;
return *this;
}
bool operator==(const Vector2& L, const Vector2& R)
{
return std::tie(L.X, L.Y) == std::tie(R.X, R.Y);
} | Henrywald/crispy-waffle | Crispy-Waffle/Vector2.cpp | C++ | apache-2.0 | 1,685 |
/************************************************************
* * EaseMob CONFIDENTIAL
* __________________
* Copyright (C) 2013-2014 EaseMob Technologies. All rights reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of EaseMob Technologies.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from EaseMob Technologies.
*/
package com.easemob.chatuidemo.activity;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.graphics.Bitmap;
import android.graphics.PixelFormat;
import android.hardware.Camera;
import android.hardware.Camera.CameraInfo;
import android.hardware.Camera.Parameters;
import android.hardware.Camera.Size;
import android.media.MediaRecorder;
import android.media.MediaRecorder.OnErrorListener;
import android.media.MediaRecorder.OnInfoListener;
import android.media.MediaScannerConnection;
import android.media.MediaScannerConnection.MediaScannerConnectionClient;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.PowerManager;
import android.os.SystemClock;
import android.text.TextUtils;
import android.view.SurfaceHolder;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.Chronometer;
import android.widget.ImageView;
import android.widget.Toast;
import android.widget.VideoView;
import com.easemob.chatuidemo.utils.CommonUtils;
import com.easemob.chatuidemo.video.util.Utils;
import com.easemob.qixin.R;
import com.easemob.util.EMLog;
import com.easemob.util.PathUtil;
public class RecorderVideoActivity extends BaseActivity implements
OnClickListener, SurfaceHolder.Callback, OnErrorListener,
OnInfoListener {
private static final String TAG = "RecorderVideoActivity";
private final static String CLASS_LABEL = "RecordActivity";
private PowerManager.WakeLock mWakeLock;
    private ImageView btnStart;// start-recording button
    private ImageView btnStop;// stop-recording button
    private MediaRecorder mediaRecorder;// records the video
    private VideoView mVideoView;// view that displays the video
    String localPath = "";// path of the recorded video
    private Camera mCamera;
    // preview width and height
    private int previewWidth = 480;
    private int previewHeight = 480;
    private Chronometer chronometer;
    private int frontCamera = 0;// 0 is the back camera, 1 is the front camera
private Button btn_switch;
Parameters cameraParameters = null;
private SurfaceHolder mSurfaceHolder;
int defaultVideoFrameRate = -1;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);// remove the title bar
        getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN);// make the window fullscreen
        // use a translucent surface format; needed in activities with a SurfaceView
getWindow().setFormat(PixelFormat.TRANSLUCENT);
setContentView(R.layout.recorder_activity);
PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
CLASS_LABEL);
mWakeLock.acquire();
initViews();
}
private void initViews() {
btn_switch = (Button) findViewById(R.id.switch_btn);
btn_switch.setOnClickListener(this);
btn_switch.setVisibility(View.VISIBLE);
mVideoView = (VideoView) findViewById(R.id.mVideoView);
btnStart = (ImageView) findViewById(R.id.recorder_start);
btnStop = (ImageView) findViewById(R.id.recorder_stop);
btnStart.setOnClickListener(this);
btnStop.setOnClickListener(this);
mSurfaceHolder = mVideoView.getHolder();
mSurfaceHolder.addCallback(this);
mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
chronometer = (Chronometer) findViewById(R.id.chronometer);
}
public void back(View view) {
releaseRecorder();
releaseCamera();
finish();
}
@Override
protected void onResume() {
super.onResume();
if (mWakeLock == null) {
            // acquire a wake lock to keep the screen on
PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
CLASS_LABEL);
mWakeLock.acquire();
}
// if (!initCamera()) {
// showFailDialog();
// }
}
@SuppressLint("NewApi")
private boolean initCamera() {
try {
if (frontCamera == 0) {
mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
} else {
mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
}
Camera.Parameters camParams = mCamera.getParameters();
mCamera.lock();
mSurfaceHolder = mVideoView.getHolder();
mSurfaceHolder.addCallback(this);
mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
mCamera.setDisplayOrientation(90);
} catch (RuntimeException ex) {
EMLog.e("video", "init Camera fail " + ex.getMessage());
return false;
}
return true;
}
private void handleSurfaceChanged() {
if (mCamera == null) {
finish();
return;
}
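        // pick the preview frame rate: prefer 15 fps if supported,
        // otherwise fall back to the lowest supported rate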
boolean hasSupportRate = false;
List<Integer> supportedPreviewFrameRates = mCamera.getParameters()
.getSupportedPreviewFrameRates();
if (supportedPreviewFrameRates != null
&& supportedPreviewFrameRates.size() > 0) {
Collections.sort(supportedPreviewFrameRates);
for (int i = 0; i < supportedPreviewFrameRates.size(); i++) {
int supportRate = supportedPreviewFrameRates.get(i);
if (supportRate == 15) {
hasSupportRate = true;
}
}
if (hasSupportRate) {
defaultVideoFrameRate = 15;
} else {
defaultVideoFrameRate = supportedPreviewFrameRates.get(0);
}
}
        // get all preview resolutions supported by the camera
List<Camera.Size> resolutionList = Utils.getResolutionList(mCamera);
if (resolutionList != null && resolutionList.size() > 0) {
Collections.sort(resolutionList, new Utils.ResolutionComparator());
Camera.Size previewSize = null;
boolean hasSize = false;
            // if the camera supports 640*480, force the preview size to 640*480
for (int i = 0; i < resolutionList.size(); i++) {
Size size = resolutionList.get(i);
if (size != null && size.width == 640 && size.height == 480) {
previewSize = size;
previewWidth = previewSize.width;
previewHeight = previewSize.height;
hasSize = true;
break;
}
}
            // otherwise fall back to the middle resolution
if (!hasSize) {
int mediumResolution = resolutionList.size() / 2;
if (mediumResolution >= resolutionList.size())
mediumResolution = resolutionList.size() - 1;
previewSize = resolutionList.get(mediumResolution);
previewWidth = previewSize.width;
previewHeight = previewSize.height;
}
}
}
@Override
protected void onPause() {
super.onPause();
if (mWakeLock != null) {
mWakeLock.release();
mWakeLock = null;
}
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.switch_btn:
switchCamera();
break;
case R.id.recorder_start:
// start recording
if(!startRecording())
return;
Toast.makeText(this, R.string.The_video_to_start, Toast.LENGTH_SHORT).show();
btn_switch.setVisibility(View.INVISIBLE);
btnStart.setVisibility(View.INVISIBLE);
btnStart.setEnabled(false);
btnStop.setVisibility(View.VISIBLE);
            // reset the chronometer
chronometer.setBase(SystemClock.elapsedRealtime());
chronometer.start();
break;
case R.id.recorder_stop:
btnStop.setEnabled(false);
            // stop recording
stopRecording();
btn_switch.setVisibility(View.VISIBLE);
chronometer.stop();
btnStart.setVisibility(View.VISIBLE);
btnStop.setVisibility(View.INVISIBLE);
new AlertDialog.Builder(this)
.setMessage(R.string.Whether_to_send)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
dialog.dismiss();
sendVideo(null);
}
})
.setNegativeButton(R.string.cancel,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
if(localPath != null){
File file = new File(localPath);
if(file.exists())
file.delete();
}
finish();
}
}).setCancelable(false).show();
break;
default:
break;
}
}
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width,
int height) {
        // store the holder obtained in onCreate into mSurfaceHolder
mSurfaceHolder = holder;
}
@Override
public void surfaceCreated(SurfaceHolder holder) {
if (mCamera == null){
if(!initCamera()){
showFailDialog();
return;
}
}
try {
mCamera.setPreviewDisplay(mSurfaceHolder);
mCamera.startPreview();
handleSurfaceChanged();
} catch (Exception e1) {
EMLog.e("video", "start preview fail " + e1.getMessage());
showFailDialog();
}
}
@Override
public void surfaceDestroyed(SurfaceHolder arg0) {
EMLog.v("video", "surfaceDestroyed");
}
public boolean startRecording(){
if (mediaRecorder == null){
if(!initRecorder())
return false;
}
mediaRecorder.setOnInfoListener(this);
mediaRecorder.setOnErrorListener(this);
mediaRecorder.start();
return true;
}
@SuppressLint("NewApi")
private boolean initRecorder(){
if(!CommonUtils.isExitsSdcard()){
showNoSDCardDialog();
return false;
}
if (mCamera == null) {
if(!initCamera()){
showFailDialog();
return false;
}
}
mVideoView.setVisibility(View.VISIBLE);
// TODO init button
mCamera.stopPreview();
mediaRecorder = new MediaRecorder();
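        // unlock the camera so it can be used by MediaRecorder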
mCamera.unlock();
mediaRecorder.setCamera(mCamera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
        // set the video source to the camera
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
if (frontCamera == 1) {
mediaRecorder.setOrientationHint(270);
} else {
mediaRecorder.setOrientationHint(90);
}
        // set the output container format: THREE_GPP is 3gp, MPEG_4 is mp4
mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
        // set the video encoder (h263 or h264)
mediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        // set the recording resolution; must come after encoder and format, otherwise it throws
mediaRecorder.setVideoSize(previewWidth, previewHeight);
        // set the video bit rate
mediaRecorder.setVideoEncodingBitRate(384 * 1024);
        // set the video frame rate; must come after encoder and format, otherwise it throws
if (defaultVideoFrameRate != -1) {
mediaRecorder.setVideoFrameRate(defaultVideoFrameRate);
}
        // set the output path of the video file
localPath = PathUtil.getInstance().getVideoPath() + "/"
+ System.currentTimeMillis() + ".mp4";
mediaRecorder.setOutputFile(localPath);
mediaRecorder.setMaxDuration(30000);
mediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface());
try {
mediaRecorder.prepare();
} catch (IllegalStateException e) {
e.printStackTrace();
return false;
} catch (IOException e) {
e.printStackTrace();
return false;
}
return true;
}
public void stopRecording() {
if (mediaRecorder != null) {
mediaRecorder.setOnErrorListener(null);
mediaRecorder.setOnInfoListener(null);
try {
mediaRecorder.stop();
} catch (IllegalStateException e) {
EMLog.e("video", "stopRecording error:" + e.getMessage());
}
}
releaseRecorder();
if (mCamera != null) {
mCamera.stopPreview();
releaseCamera();
}
}
private void releaseRecorder() {
if (mediaRecorder != null) {
mediaRecorder.release();
mediaRecorder = null;
}
}
protected void releaseCamera() {
try {
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
} catch (Exception e) {
}
}
@SuppressLint("NewApi")
public void switchCamera() {
if (mCamera == null) {
return;
}
if (Camera.getNumberOfCameras() >= 2) {
btn_switch.setEnabled(false);
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
switch (frontCamera) {
case 0:
mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
frontCamera = 1;
break;
case 1:
mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
frontCamera = 0;
break;
}
try {
mCamera.lock();
mCamera.setDisplayOrientation(90);
mCamera.setPreviewDisplay(mVideoView.getHolder());
mCamera.startPreview();
} catch (IOException e) {
mCamera.release();
mCamera = null;
}
btn_switch.setEnabled(true);
}
}
MediaScannerConnection msc = null;
ProgressDialog progressDialog = null;
public void sendVideo(View view) {
if (TextUtils.isEmpty(localPath)) {
EMLog.e("Recorder", "recorder fail please try again!");
return;
}
if(msc == null)
msc = new MediaScannerConnection(this,
new MediaScannerConnectionClient() {
@Override
public void onScanCompleted(String path, Uri uri) {
EMLog.d(TAG, "scanner completed");
msc.disconnect();
progressDialog.dismiss();
setResult(RESULT_OK, getIntent().putExtra("uri", uri));
finish();
}
@Override
public void onMediaScannerConnected() {
msc.scanFile(localPath, "video/*");
}
});
if(progressDialog == null){
progressDialog = new ProgressDialog(this);
progressDialog.setMessage("processing...");
progressDialog.setCancelable(false);
}
progressDialog.show();
msc.connect();
}
@Override
public void onInfo(MediaRecorder mr, int what, int extra) {
EMLog.v("video", "onInfo");
if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_DURATION_REACHED) {
EMLog.v("video", "max duration reached");
stopRecording();
btn_switch.setVisibility(View.VISIBLE);
chronometer.stop();
btnStart.setVisibility(View.VISIBLE);
btnStop.setVisibility(View.INVISIBLE);
chronometer.stop();
if (localPath == null) {
return;
}
String st3 = getResources().getString(R.string.Whether_to_send);
new AlertDialog.Builder(this)
.setMessage(st3)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface arg0,
int arg1) {
arg0.dismiss();
sendVideo(null);
}
}).setNegativeButton(R.string.cancel, null)
.setCancelable(false).show();
}
}
@Override
public void onError(MediaRecorder mr, int what, int extra) {
EMLog.e("video", "recording onError:");
stopRecording();
Toast.makeText(this,
"Recording error has occurred. Stopping the recording",
Toast.LENGTH_SHORT).show();
}
public void saveBitmapFile(Bitmap bitmap) {
File file = new File(Environment.getExternalStorageDirectory(), "a.jpg");
try {
BufferedOutputStream bos = new BufferedOutputStream(
new FileOutputStream(file));
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, bos);
bos.flush();
bos.close();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
protected void onDestroy() {
super.onDestroy();
releaseCamera();
if (mWakeLock != null) {
mWakeLock.release();
mWakeLock = null;
}
}
@Override
public void onBackPressed() {
back(null);
}
private void showFailDialog() {
new AlertDialog.Builder(this)
.setTitle(R.string.prompt)
.setMessage(R.string.Open_the_equipment_failure)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
finish();
}
}).setCancelable(false).show();
}
private void showNoSDCardDialog() {
new AlertDialog.Builder(this)
.setTitle(R.string.prompt)
.setMessage("No sd card!")
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
finish();
}
}).setCancelable(false).show();
}
}
| liyuzhao/enterpriseChat-android | src/com/easemob/chatuidemo/activity/RecorderVideoActivity.java | Java | apache-2.0 | 16,993 |
// Copyright © 2017 Chocolatey Software, Inc
// Copyright © 2011 - 2017 RealDimensions Software, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
namespace chocolatey.tests.integration.scenarios
{
using System.Collections.Generic;
using System.Linq;
using bdddoc.core;
using chocolatey.infrastructure.app;
using chocolatey.infrastructure.app.commands;
using chocolatey.infrastructure.app.configuration;
using chocolatey.infrastructure.app.services;
using chocolatey.infrastructure.results;
using NuGet;
using Should;
public class ListScenarios
{
public abstract class ScenariosBase : TinySpec
{
protected IList<PackageResult> Results;
protected ChocolateyConfiguration Configuration;
protected IChocolateyPackageService Service;
public override void Context()
{
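                // Seed the package source with the test packages and install two of them locally.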
Configuration = Scenario.list();
Scenario.reset(Configuration);
Scenario.add_packages_to_source_location(Configuration, Configuration.Input + "*" + Constants.PackageExtension);
Scenario.add_packages_to_source_location(Configuration, "installpackage*" + Constants.PackageExtension);
Scenario.install_package(Configuration, "installpackage", "1.0.0");
Scenario.install_package(Configuration, "upgradepackage", "1.0.0");
Service = NUnitSetup.Container.GetInstance<IChocolateyPackageService>();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_searching_packages_with_no_filter_happy_path : ScenariosBase
{
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_list_available_packages_only_once()
{
MockLogger.contains_message_count("upgradepackage").ShouldEqual(1);
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.1.0").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_and_versions_with_a_pipe_between_them()
{
MockLogger.contains_message("upgradepackage|1.1.0").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages found").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_searching_for_a_particular_package : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.Input = Configuration.PackageNames = "upgradepackage";
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.1.0").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_that_do_not_match()
{
MockLogger.contains_message("installpackage").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages found").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_searching_all_available_packages : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.AllVersions = true;
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_list_available_packages_as_many_times_as_they_show_on_the_feed()
{
MockLogger.contains_message_count("upgradepackage").ShouldNotEqual(0);
MockLogger.contains_message_count("upgradepackage").ShouldNotEqual(1);
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.1.0").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_and_versions_with_a_pipe_between_them()
{
MockLogger.contains_message("upgradepackage|1.1.0").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages found").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_searching_packages_with_verbose : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.Verbose = true;
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.1.0").ShouldBeTrue();
}
[Fact]
public void should_contain_description()
{
MockLogger.contains_message("Description: ").ShouldBeTrue();
}
[Fact]
public void should_contain_tags()
{
MockLogger.contains_message("Tags: ").ShouldBeTrue();
}
[Fact]
public void should_contain_download_counts()
{
MockLogger.contains_message("Number of Downloads: ").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_and_versions_with_a_pipe_between_them()
{
MockLogger.contains_message("upgradepackage|1.1.0").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages found").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_listing_local_packages : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.ListCommand.LocalOnly = true;
Configuration.Sources = ApplicationParameters.PackagesLocation;
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.0.0").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_and_versions_with_a_pipe_between_them()
{
MockLogger.contains_message("upgradepackage|1.0.0").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages installed").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_listing_local_packages_limiting_output : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.ListCommand.LocalOnly = true;
Configuration.Sources = ApplicationParameters.PackagesLocation;
Configuration.RegularOutput = false;
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_contain_packages_and_versions_with_a_pipe_between_them()
{
MockLogger.contains_message("upgradepackage|1.0.0").ShouldBeTrue();
}
[Fact]
public void should_only_have_messages_related_to_package_information()
{
var count = MockLogger.Messages.SelectMany(messageLevel => messageLevel.Value.or_empty_list_if_null()).Count();
count.ShouldEqual(2);
}
[Fact]
public void should_not_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("upgradepackage 1.0.0").ShouldBeFalse();
}
[Fact]
public void should_not_contain_a_summary()
{
MockLogger.contains_message("packages installed").ShouldBeFalse();
}
[Fact]
public void should_not_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeFalse();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeFalse();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeFalse();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeFalse();
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_listing_packages_with_no_sources_enabled : ScenariosBase
{
public override void Context()
{
base.Context();
Configuration.Sources = null;
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_have_no_sources_enabled_result()
{
MockLogger.contains_message("Unable to search for packages when there are no sources enabled for", LogLevel.Error).ShouldBeTrue();
}
[Fact]
public void should_not_list_any_packages()
{
Results.Count().ShouldEqual(0);
}
}
[Concern(typeof(ChocolateyListCommand))]
public class when_searching_for_an_exact_package : ScenariosBase
{
public override void Context()
{
Configuration = Scenario.list();
Scenario.reset(Configuration);
Scenario.add_packages_to_source_location(Configuration, "exactpackage*" + Constants.PackageExtension);
Service = NUnitSetup.Container.GetInstance<IChocolateyPackageService>();
Configuration.ListCommand.Exact = true;
Configuration.Input = Configuration.PackageNames = "exactpackage";
}
public override void Because()
{
MockLogger.reset();
Results = Service.list_run(Configuration).ToList();
}
[Fact]
public void should_contain_packages_and_versions_with_a_space_between_them()
{
MockLogger.contains_message("exactpackage 1.0.0").ShouldBeTrue();
}
[Fact]
public void should_not_contain_packages_that_do_not_match()
{
MockLogger.contains_message("exactpackage.dontfind").ShouldBeFalse();
}
[Fact]
public void should_contain_a_summary()
{
MockLogger.contains_message("packages found").ShouldBeTrue();
}
[Fact]
public void should_contain_debugging_messages()
{
MockLogger.contains_message("Searching for package information", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Running list with the following filter", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("Start of List", LogLevel.Debug).ShouldBeTrue();
MockLogger.contains_message("End of List", LogLevel.Debug).ShouldBeTrue();
}
}
}
}
| Apteryx0/choco | src/chocolatey.tests.integration/scenarios/ListScenarios.cs | C# | apache-2.0 | 16,496 |
---
layout: post
Sol">
title: "Solution Domain Architecture"
date: 2007-05-19 21:10:00.000000000 +02:00
categories:
- SDA SOA
tags: []
status: publish
type: post
published: true
meta:
blogger_blog: andreasohlund.blogspot.com
blogger_permalink: "/2007/05/solution-domain-architecture.html"
author:
login: andreas.ohlund
email: [email protected]
display_name: Andreas Öhlund
first_name: Andreas
last_name: "Öhlund"
---
<p><a href="http://blogs.msdn.com/nickmalik/default.aspx">Nick Malik</a> has a very interesting post about <a href="http://blogs.msdn.com/nickmalik/archive/2007/05/04/mining-for-services-with-solution-domain-architecture.aspx">Solution Domain Architecture</a> (SDA), which I believe is a great addition to the endless list of methods for <a href="http://www.squidoo.com/serviceengineering/">Service Engineering</a>. I especially like his theory that service reuse is most likely to happen with <em>"'top down' services that we know, in advance, that we are going to need"</em> and that any reuse of bottom-up services <em>"is a happy accident"</em>. This really highlights the need for SOA governance in order to achieve a high degree of service reuse in your service-oriented architecture.</p>
| andreasohlund/andreasohlund.github.io | _posts/2007-05-19-solution-domain-architecture.html | HTML | apache-2.0 | 1,233 |
@font-face {
font-family: 'Braille';
src: url(http://tjb0607.me/i3_tumblr_theme/font/Braille.woff);
}
html {
background-color: #7C8491;
background-image: url(http://static.tumblr.com/bwey4ra/Gzwno4oq5/sunset.jpg);
background-position: bottom;
background-attachment: fixed;
height: 100%;
margin: 0px;
-webkit-background-size: cover;
background-size: cover;
font-family: Inconsolata, monospace;
font-size: 11px;
color: #fff;
}
#tumblr_controls {
margin-top: 18px;
}
p, code {
line-height: 11px;
margin: 0px;
}
a, #sidebar .blogdescription a, .jslink {
color: #5FD7FF;
text-decoration: underline;
cursor: pointer;
}
ul {
padding-left: 11px;
}
#i3bar {
overflow: hidden;
color: #dedede;
position: fixed;
z-index: 1;
top: 0px;
left: 0px;
right: 0px;
height: 18px;
background-color: #2d2d2d;
}
#i3bar p {
margin: 3px 8px;
}
#sidebar {
position: fixed;
top: 47px;
left: 35px;
bottom: 35px;
width: 450px;
}
.short-info .separator {
color: #808080;
}
.window {
overflow: auto;
position: relative;
margin: 10px;
border: 2px solid #2d2d2d;
box-shadow: 0px 0px 12px rgba(0, 0, 0, 0.75);
}
.window:hover, html:hover #active-window {
border-color: #D64937;
}
.window:hover .cursor, html:hover #active-window .cursor {
background-color: #fff;
}
.urxvt {
background-color: rgba(28, 28, 28, 0.9);
cursor: text;
padding: 2px;
max-width: 100%;
min-height: 100%;
}
#i3-gaps-tumblr a .blog-title {
cursor: pointer;
}
#i3-gaps-tumblr .blog-title p {
text-align: center;
}
#i3-gaps-tumblr .blog-title p.small, #i3-gaps-tumblr .blog-title a {
font-weight: 700;
background-color: #107bcc;
color: #fff;
text-decoration: none;
}
#content {
position: absolute;
top: 47px;
right: 35px;
left: 475px;
padding-bottom: 35px;
}
.post-header, .post-footer {
overflow: hidden;
position: relative;
width: 100%;
background-color: #303030;
}
.left {
float: left;
}
.tags {
padding-right: 60px;
}
.right {
float: right;
}
.post-body {
padding: 1em;
max-width: 100%;
}
.post-body .title, .post-body .title a {
text-decoration: none;
color: #fff;
font-size: 1.5em;
}
.post-body .link {
font-size: 1.5em;
}
.post-body img {
max-width: 100%;
width: auto;
height: auto;
max-height: 95vh;
}
.post-body figure {
margin: 0px;
}
.post-body p, .post-body blockquote {
margin-top: 4px;
margin-bottom: 4px;
}
.post-body blockquote {
margin-left: 4px;
border-left: 2px solid #5f5f5f;
padding-left: 6px;
margin-right: 0px;
}
.post-header, .post-header a, .post-footer, .post-footer a {
color: #808080;
}
.footer-left {
display: inline-block;
}
.post-footer {
min-height: 22px;
}
.buttons {
position: absolute;
width: 50px;
text-align: right;
display: inline-block;
right: 0px;
padding: 2px 4px 2px 0px;
}
.buttons .reblogbutton {
margin-bottom: -20px;
}
#i3bar .left .page-button {
padding: 3px 5px 2px 5px;
margin-bottom: 1px;
color: #808080;
float: left;
}
#i3bar .left .page-button-active {
color: #dedede;
background-color: #D64937;
}
#i3status {
padding: 3px 5px 2px 5px;
}
#i3status > span a, #sidebar a {
color: inherit;
text-decoration: none;
}
#i3status span.i3status-separator {
color: #808080;
}
#nav {
text-align: center;
overflow: hidden;
width: 100%;
margin-top: -10px;
}
#nav .prev-page-window {
float: left;
}
#nav .next-page-window {
float: right;
}
#nav .current-page-window {
display: inline-block;
margin-left: auto;
margin-right: auto;
}
#nav .prev-page-window .urxvt, #nav .next-page-window .urxvt {
cursor: pointer;
}
#nav a, #sidebar a, #sidebar .jslink {
text-decoration: inherit;
color: inherit;
}
#nav .prev-page-window .urxvt p, #nav .next-page-window .urxvt p {
text-decoration: none;
background-color: #107BCC;
padding: 0px 5px;
}
.mobile-info {
display: none;
}
#confirm {
display: none;
}
#confirm-window {
z-index: 2;
position: fixed;
left: 50%;
top: 50%;
width: 200px;
margin-left: -100px;
transform: translateY(-50%);
border-color: #fff;
text-align: center;
}
#confirm-bg {
position: fixed;
top: 0px;
bottom: 0px;
left: 0px;
right: 0px;
background-color: rgba(0, 0, 0, 0.5);
z-index: 1;
} | camerond594/camerond594.github.io | css/linux-styles.css | CSS | apache-2.0 | 4,117 |
# This code was automatically generated using xdrgen
# DO NOT EDIT or your changes may be overwritten
require 'xdr'
# === xdr source ============================================================
#
# enum PublicKeyType
# {
# PUBLIC_KEY_TYPE_ED25519 = KEY_TYPE_ED25519
# };
#
# ===========================================================================
module Stellar
class PublicKeyType < XDR::Enum
member :public_key_type_ed25519, 0
seal
end
end
| stellar/ruby-stellar-base | generated/stellar/public_key_type.rb | Ruby | apache-2.0 | 473 |
@charset "UTF-8";
/*MAIN*/
body{
margin:0px;
padding:0;
font-family:verdana;
font-size: 10px;
height:100%;
width: 100%;
}
img {
border:0;
}
em {
color: #b1b1b1;
font-style: italic;
}
a {
color:#333333;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
h1, h2, h3, h4, h5, h6 {
font-family: Arial;
color:#333;
}
h1 {
font-size:2em;
}
h2 {
clear:both;
margin-top: 22px;
}
#principal {
background-image:url(../imagens/bg-topo.png);
background-repeat:repeat-x;
background-position:top;
}
#topo{
height: 110px; /*Height of top section*/
background-image:url("../imagens/topo.png");
background-position:bottom left;
background-repeat:no-repeat;
}
#logo {
width:150px;
height:70px;
float:left;
}
#logon {
width:254px;
height:25px;
float:right;
padding-left:10px;
margin: 10px 30px 0 30px;
}
#logon img{
margin-top:5px;
}
#logo-sistema {
height: 30px;
position: absolute;
right: 82px;
top: 43px;
width: 127px;
}
#topo h1{
margin: 0;
padding-top: 15px;
}
#barra-1 {
width:100%;
background-color: #cc0001;
}
#barra-2 {
width:100%;
background-color: #c8fc98;
}
#conteudo-col{
margin-left: 193px; /*Set left margin to LeftColumnWidth*/
}
#cabecalho-div-1 {
background-image:url(../imagens/bg-cabecalho-1.png);
width:394px;
width /*\**/: 393px; /* IE8 */
height:29px;
float:left;
background-repeat:repeat-x;
background-position:left bottom;
margin-bottom:5px;
}
:root #cabecalho-div-1 {
width:375px\0/IE9; /* IE9 */
}
#cabecalho-div-2 {
background-image:url(../imagens/bg-cabecalho-2.png);
width:165px;
width /*\**/: 166px; /* IE8 */
height:29px;
float:left;
background-repeat:repeat-x;
background-position:left bottom;
margin-bottom:5px;
}
:root #cabecalho-div-2 {
width:158px\0/IE9; /* IE9 */
}
#miolo{
float: left;
width: 100%;
}
.principal {
background: none repeat scroll 0 0 #FFFFFF;
border-bottom: 4px solid #B31419;
clear: both;
float: left;
overflow: hidden;
padding-bottom: 20px;
width: 100%;
}
#col-esquerda{
float: left;
width: 175px; /*Width of left column*/
height: 100%;
margin-left: -100%;
background: #efefef;
margin-top:3px;
}
#titulo-pag{
margin-left:193px;
width:57%;
float:left;
}
#ico-internos{
float:right;
text-align:right;
margin:10px 20px 0 0;
}
#ico-internos img {
}
.alerta {
margin-left:193px;
width:100%;
float:left;
}
#parametros {
width:100%;
margin-bottom:20px;
}
#menu {
}
#smoothmenu2 {
height:400px;
}
#rodape{
clear: left;
width: 100%;
background: #d9d9d9;
color: #333333;
text-align: center;
padding: 4px 0;
}
#rodape a{
color: #FFFF80;
}
/*FORM*/
.label-inter { /* Reduces the form label size so intermediate fields can be laid out */
position:absolute;
top:0;
left:5px;
margin-right: 3px;
text-align: right;
width:80px;
font-size:11px;
z-index:1;
}
.obs {
color: #b1b1b1;
font-size: 11px;
margin-left:5px;
float:left;
}
div.form-divisoria {
margin:-10px 0 5px 0px;
width:100%;
height:1px;
background-color:#fff;
clear:both;
}
div.field {
}
.field-busca {
margin-bottom: 7px;
margin-top: 7px;
position: relative;
text-align: right;
}
.buscar {
height: 27px;
width: 32px;
float: left;
}
.buttonSubmit {
clear:both;
}
#bt-submit {
border: none;
text-indent:-9999px;
}
.bt-salvar {
background-image: url("../imagens/bt-salvar.png");
height: 30px;
width: 63px;
}
.bt-cancelar {
background-image:url(../imagens/bt-cancelar.png);
width:30px;
height:25px;
}
#bt-submit:hover{
border: none;
}
.botoes-internos {
width:98.5%;
text-align:right;
margin-top:17px;
}
#voltar, #buscar-modal, #alteraplano-modal, #alteraextrato-modal, #desativaplano-modal, #novo-plano-modal, #pesquisar, #adiciona-linha-tabela-modal {
display: inline;
float: left;
margin-left: 2px;
}
div.field input.error, div.field select.error, tr.errorRow div.field input, tr.errorRow div.field select {
border: 1px solid #b31419;
background-color: #FAFFD6;
color: #b31419;
}
div.field div.formError {
display: none;
color: #FF0000;
}
div.field div.formError {
font-weight: normal;
}
div.error {
margin: 5px 0 23px 0;
color: #b31419 !important;
width:400px;
padding:10px 0px 5px 3px;
border:1px solid #b31419;
}
div.error a {
color: #336699;
font-size: 12px;
text-decoration: underline;
}
label.error {
color: #b31419;
}
.error span {
margin-top:10px;
}
.botoes-externos {
float:right;
margin: -14px 30px 10px 0;
}
#caixa-pesquisa { /*Este não configura o MODAL */
background-color: #F5F5F5;
background-image: url("../imagens/bg-pesquisa-lupa.gif");
background-position: right top;
background-repeat: no-repeat;
border: 1px solid #999999;
display: block;
min-width: 575px;
padding: 0 15px 15px;
position: relative;
width: 65%;
}
/* START - Flexible form test */
.clear {
clear:both;
}
.linha-form {
line-height: 1.5em;
}
div.field-form-1 {
width:48%;
float:left;
margin-right:2%;
position:relative;
}
div.field-form-2 {
float: left;
margin-right: 2%;
position: relative;
width: 23%;
}
div.field-form-3 {
width:14.65%;
float:left;
margin-right:2%;
position:relative;
}
.textarea-form-1 {
width:100%;
}
.textarea-form-2 {
width:100%;
}
div.tabela-form-2 {
height:91px;
overflow:auto;
background-color:#f5f5f5;
border: 1px solid #999999;
width:48%;
float:left;
}
div.tabela-form-1 {
height:99px;
overflow:auto;
background-color:#f5f5f5;
border: 1px solid #999999;
width:100%;
float:left;
min-width:200px;
}
div.tabela-form-1-menor {
height:68px;
overflow:auto;
background-color:#f5f5f5;
border: 1px solid #999999;
width:100%;
float:left;
}
div.ico-planos{
height:68px;
width:100%;
float:left;
}
div.ico-reneg{
height:49px;
width:100%;
float:left;
}
div.tabela-form-2 a{
font-weight:900;
}
div.add-contrato-tabela {
float:left;
color:#b31419;
margin-left:7px;
}
div.field-com-busca {
width:23%;
float:left;
margin-right:2%;
position:relative;
}
div.ico-busca {
position:absolute;
z-index:500;
right:0;
}
.textarea-form-input-1 textarea {
width:98%;
min-width:136px;
border:1px solid #999999;
height: 62px;
}
.textarea-form-input-2 textarea {
width:100%;
min-width:136px;
border:1px solid #999999;
}
div.label-form {
color:#666;
margin-top:7px;
}
div.input-form {
width:100%;
}
div.input-form input {
width:100%;
min-width:100px;
border:1px solid #999999;
}
div.input-form select {
width:100%;
min-width:100px;
border:1px solid #999999;
}
div.input-form-calendario input {
width:100%;
min-width:100px;
border:1px solid #999999;
background-image:url(../imagens/bt-calendar-18x17.png);
background-repeat:no-repeat;
background-position:center right;
cursor:pointer;
}
div.input-form-busca input {
width:87%;
min-width:100px;
border:1px solid #999999;
position:relative;
}
div.input-form-menor input {
width:100%;
min-width:50px;
border:1px solid #999999;
}
div.input-form-menor select {
width:100%;
min-width:50px;
border:1px solid #999999;
}
/* END - Flexible form test */
/* TABS */
ul.tabs {
margin: 0;
padding: 0;
float: left;
list-style: none;
height: 32px; /*--Set height of tabs--*/
border-bottom: 1px solid #999;
border-left: 1px solid #999;
width: 98%;
}
ul.tabs li {
float: left;
margin: 0;
padding: 0;
height: 31px; /*--Subtract 1px from the height of the unordered list--*/
line-height: 31px; /*--Vertically aligns the text within the tab--*/
border: 1px solid #999;
border-left: none;
margin-bottom: -1px; /*--Pull the list item down 1px--*/
overflow: hidden;
position: relative;
background: #e0e0e0;
}
ul.tabs li a {
text-decoration: none;
color: #000;
display: block;
font-size: 1.2em;
padding: 0 10px;
border: 1px solid #fff; /*--Gives the bevel look with a 1px white border inside the list item--*/
outline: none;
}
ul.tabs li a:hover {
background: #ccc;
}
html ul.tabs li.active, html ul.tabs li.active a:hover { /*--Makes sure that the active tab does not listen to the hover properties--*/
background: #fff;
border-bottom: 1px solid #fff; /*--Makes the active tab look like it's connected with its content--*/
}
ul.tabs li .reneg-tab {
background-color:#b2b2b2;
}
ul.tabs li .reneg-tab:hover {
background-color:#949494;
}
html ul.tabs li.active .reneg-tab, html ul.tabs li.active .reneg-tab:hover { /*--Makes sure that the active tab does not listen to the hover properties--*/
background: #fafce8;
border-bottom: 1px solid #fafce8; /*--Makes the active tab look like it's connected with its content--*/
}
.reneg-tb-bg {
background-color:#fafce8;
}
/************************************/
.tab_container {
border: 1px solid #999;
border-top: none;
border-bottom: 4px solid #ab1f23;
overflow: hidden;
clear: both;
float: left;
width: 98%;
background: #fff;
}
.tab_content {
padding: 10px;
overflow:auto;
}
/* START - TABLE */
.grid td{
border-bottom:1px solid #e1e1e1;
font-size:10px;
line-height:2em;
}
table.nivel-1 {
border-collapse:collapse;
}
table.nivel-1 tr {
border-bottom:1px solid #E1E1E1;
}
.detalhes-contrato{
background-color:#efefef;
width:100%;
margin-top: -1px;
padding-top: 11px;
margin-bottom:20px;
}
.detalhes-contrato table {
width:100%;
background-color:#efefef;
}
/* END - TABLE */
/************************************/
/* INTERNAL PAGES */
.principal td {
border-bottom:1px solid #e1e1e1;
font-size:10px;
}
.principal td a {
font-weight: bold;
}
/*TEMPORARY WORKAROUNDS*/
.link-pesquisa a{
border: 1px solid #999999;
padding: 4px;
background-color: #efefef;
text-decoration:none;
}
.link-pesquisa-contrato a {
background-color: #EFEFEF;
border: 1px solid #999999;
padding: 4px;
position: absolute;
right: 34px;
text-decoration: none;
top: 216px;
}
#nav-planos {
height: 30px;
margin: 20px 0;
width: 100%;
}
#nav-icons-tabela {
height: 30px;
margin: 0;
width: 100%;
}
#nav-planos h3{
float:left;
margin-top: 4px;
}
#paginacao {
float:left;
}
#paginacao ul{
padding:0px;
margin-top: 5px;
float: left;
list-style:none;
}
#paginacao ul li {
display: inline;
}
#paginacao ul li a{
background-image: url("../imagens/nav-bg.png");
background-repeat: no-repeat;
height: 22px;
padding: 5px 8px;
margin-top:3px;
}
#paginacao ul li a:hover{
background-image: url("../imagens/nav-bg-hover.png");
text-decoration:none;
}
.seta-navega {
float:left;
}
.ico-plano img{
margin-left:5px;
}
.ico-plano img{
margin-left:1px;
}
| topazio/varjaomidia | WebContent/css/geral.css | CSS | apache-2.0 | 11,612 |
import { Component, OnInit } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { Location } from '@angular/common';
import { Merch } from '../data/merch';
import { MerchService } from '../data/merch.service';
@Component({
selector: 'app-merch-display',
templateUrl: './merch-display.component.html',
styleUrls: ['./merch-display.component.scss'],
})
export class MerchDisplayComponent implements OnInit {
merch: Merch[] = [];
private _serviceWorker: ServiceWorker|null = null;
constructor(
private route: ActivatedRoute,
private merchService: MerchService,
private location: Location
) {}
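  // On init we capture the active service worker, then reload the merch list on
  // every route change and post the current category to the worker (presumably
  // so it can decide which assets to prefetch for that page).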
ngOnInit(): void {
navigator.serviceWorker.ready.then( registration => {
this._serviceWorker = registration.active;
});
this.route.params.subscribe((routeParams) => {
this.getMerch(routeParams.category);
if (this._serviceWorker) {
this._serviceWorker.postMessage({ page: routeParams.category });
}
});
}
getMerch(category: string): void {
this.merchService
.getMerchList(category)
.then((merch) => (this.merch = merch));
}
goBack(): void {
this.location.back();
}
}
| tensorflow/tfjs-examples | angular-predictive-prefetching/client/src/app/merch-display/merch-display.component.ts | TypeScript | apache-2.0 | 1,198 |
package com.github.sergejsamsonow.codegenerator.producer.pojo.renderer;
import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCMethodCodeConcatenator;
import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCNewLineAndIndentationFormat;
import com.github.sergejsamsonow.codegenerator.producer.pojo.model.PojoProperty;
import com.github.sergejsamsonow.codegenerator.producer.pojo.renderer.javalang.BeanModifier;
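/**
 * Renders a toString() implementation for the generated POJO: the emitted method
 * builds "ClassName (field1: value1, field2: value2, ...)", formatting each
 * property value with Objects.toString applied to its getter result.
 */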
public class JavaLangToString extends BeanModifier {
public JavaLangToString(SCNewLineAndIndentationFormat format) {
super(format);
}
@Override
protected void writeBeforePropertiesIteration() {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
writer.annotation("@Override");
writer.start("public String toString() {");
writer.code("StringBuilder builder = new StringBuilder();");
writer.code("builder.append(\"%s (\");", getData().getClassName());
}
@Override
protected void writePropertyCode(PojoProperty property) {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
String end = isLast() ? ");" : " + \", \");";
writer.code("builder.append(\"%s: \" + Objects.toString(%s())%s",
property.getFieldName(), property.getGetterName(), end);
}
@Override
protected void writeAfterPropertiesIteration() {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
writer.code("builder.append(\")\");");
writer.code("return builder.toString();");
writer.end();
writer.emptyNewLine();
}
}
| sergej-samsonow/code-generator | producer/pojo/src/main/java/com/github/sergejsamsonow/codegenerator/producer/pojo/renderer/JavaLangToString.java | Java | apache-2.0 | 1,597 |
/*
* Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.autoscaling.model;
/**
* <p>
* The output for the TerminateInstanceInAutoScalingGroup action.
* </p>
*/
public class TerminateInstanceInAutoScalingGroupResult {
/**
* A Scaling Activity.
*/
private Activity activity;
/**
* A Scaling Activity.
*
* @return A Scaling Activity.
*/
public Activity getActivity() {
return activity;
}
/**
* A Scaling Activity.
*
* @param activity A Scaling Activity.
*/
public void setActivity(Activity activity) {
this.activity = activity;
}
/**
* A Scaling Activity.
* <p>
* Returns a reference to this object so that method calls can be chained together.
*
* @param activity A Scaling Activity.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public TerminateInstanceInAutoScalingGroupResult withActivity(Activity activity) {
this.activity = activity;
return this;
}
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
sb.append("Activity: " + activity + ", ");
sb.append("}");
return sb.toString();
}
}
| apetresc/aws-sdk-for-java-on-gae | src/main/java/com/amazonaws/services/autoscaling/model/TerminateInstanceInAutoScalingGroupResult.java | Java | apache-2.0 | 2,135 |
/**
* Copyright 2013-present NightWorld.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var error = require('./error'),
runner = require('./runner'),
Client = require('./client');
module.exports = Authorise;
/**
* This is the function order used by the runner
*
* @type {Array}
*/
var fns = [
checkAuthoriseType,
checkScope
];
/**
* Authorise
*
* @param {Object} config Instance of OAuth object
* @param {Object} req
* @param {Object} res
* @param {Object} options
* @param {Function} next
*/
function Authorise (config, req, res, options, next) {
options = options || {};
this.config = config;
this.model = config.model;
this.req = req;
this.res = res;
this.options = options;
runner(fns, this, next);
}
function checkAuthoriseType(done) {
var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);
if (this.options.implicit) {
if (this.req.body.response_type === 'token') {
if (client.clientId) {
this.redirectUri = this.req.body.redirect_uri || this.req.query.redirect_uri;
this.clientId = client.clientId;
this.req.auth_type = 'implicit';
return checkImplicitClient.call(this, done);
}
}
}
if (this.options.client_credentials) {
if (client.clientId && client.clientSecret) {
this.client = client;
this.req.auth_type = 'client_credentials';
return getUserFromClient.call(this, done);
}
}
getBearerToken.call(this, done);
}
function getUserFromClient(done) {
var self = this;
this.model.getClient(this.client.clientId, this.client.clientSecret,
function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Client credentials are invalid'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
function checkImplicitClient (done) {
var self = this;
this.model.getClient(this.clientId, null, function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Invalid client credentials'));
} else if (self.redirectUri && Array.isArray(client.redirectUri)) {
if (client.redirectUri.indexOf(self.redirectUri) === -1) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
client.redirectUri = self.redirectUri;
} else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
// The request contains valid params so any errors after this point
// are redirected to the redirect_uri
self.res.redirectUri = client.redirectUri;
self.res.oauthRedirect = true;
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
/**
* Get bearer token
*
* Extract token from request according to RFC6750
*
* @param {Function} done
* @this OAuth
*/
function getBearerToken (done) {
var headerToken = this.req.get('Authorization'),
getToken = this.req.query.access_token,
postToken = this.req.body ? this.req.body.access_token : undefined;
// Check exactly one method was used
var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
(postToken !== undefined);
if (methodsUsed > 1) {
return done(error('invalid_request',
'Only one method may be used to authenticate at a time (Auth header, ' +
'GET or POST).'));
} else if (methodsUsed === 0) {
return done(error('invalid_request', 'The access token was not found'));
}
// Header: http://tools.ietf.org/html/rfc6750#section-2.1
if (headerToken) {
var matches = headerToken.match(/Bearer\s(\S+)/);
if (!matches) {
return done(error('invalid_request', 'Malformed auth header'));
}
headerToken = matches[1];
}
// POST: http://tools.ietf.org/html/rfc6750#section-2.2
if (postToken) {
if (this.req.method === 'GET') {
return done(error('invalid_request',
'Method cannot be GET When putting the token in the body.'));
}
if (!this.req.is('application/x-www-form-urlencoded')) {
return done(error('invalid_request', 'When putting the token in the ' +
'body, content type must be application/x-www-form-urlencoded.'));
}
}
this.bearerToken = headerToken || postToken || getToken;
checkToken.call(this, done);
}
/**
* Check token
*
* Check it against model, ensure it's not expired
* @param {Function} done
* @this OAuth
*/
function checkToken (done) {
var self = this;
this.model.getAccessToken(this.bearerToken, function (err, token) {
if (err) return done(error('server_error', false, err));
if (!token) {
return done(error('invalid_token',
'The access token provided is invalid.'));
}
if (token.expires !== null &&
(!token.expires || token.expires < new Date())) {
return done(error('invalid_token',
'The access token provided has expired.'));
}
// Expose params
self.req.oauth = { bearerToken: token };
self.req.user = token.user ? token.user : { id: token.userId };
done();
});
}
/**
* Check scope
*
* @param {Function} done
* @this OAuth
*/
function checkScope (done) {
if (!this.model.authoriseScope) return done();
this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
function (err, invalid) {
if (err) return done(error('server_error', false, err));
if (invalid) return done(error('invalid_scope', invalid));
done();
});
}
| zoltangbereczky/node-oauth2-server | lib/authorise.js | JavaScript | apache-2.0 | 6,699 |
from functools import wraps
import json
import os
import traceback
import validators
from jinja2 import Environment, PackageLoader
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
import requests
from requests.auth import HTTPBasicAuth
env = Environment(
loader=PackageLoader('saagie', 'jinja2'),
)
SAAGIE_ROOT_URL = os.environ.get("SAAGIE_ROOT_URL", None)
SAAGIE_USERNAME = None
PLATFORMS_URL = None
SAAGIE_BASIC_AUTH_TOKEN = None
JOBS_URL_PATTERN = None
JOB_URL_PATTERN = None
JOB_UPGRADE_URL_PATTERN = None
SCRIPT_UPLOAD_URL_PATTERN = None
def get_absolute_saagie_url(saagie_url):
if saagie_url.startswith('/'):
return SAAGIE_ROOT_URL + saagie_url
return saagie_url
class ResponseError(Exception):
def __init__(self, status_code):
self.status_code = status_code
super(ResponseError, self).__init__(status_code)
class SaagieHandler(IPythonHandler):
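    """Handler for the /saagie route registered in load_jupyter_server_extension.

    GET and POST both delegate to handle_request, which pops the requested
    'view' name from the request arguments, wraps the optional notebook
    payload in a Notebook object, renders the matching Jinja2 template from
    the views collection, and maps ResponseError or unexpected exceptions to
    HTTP error responses.
    """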
def handle_request(self, method):
data = {k: v[0].decode() for k, v in self.request.arguments.items()}
if 'view' not in data:
self.send_error(404)
return
view_name = data.pop('view')
notebook_path = data.pop('notebook_path', None)
notebook_json = data.pop('notebook_json', None)
notebook = Notebook(notebook_path, notebook_json)
try:
template_name, template_data = views.render(
view_name, notebook=notebook, data=data, method=method)
except ResponseError as e:
self.send_error(e.status_code)
return
except:
template_name = 'internal_error.html'
template_data = {'error': traceback.format_exc()}
self.set_status(500)
template_data.update(
notebook=notebook,
)
template = env.get_template(template_name)
self.finish(template.render(template_data))
def get(self):
self.handle_request('GET')
def post(self):
self.handle_request('POST')
def check_xsrf_cookie(self):
return
class SaagieCheckHandler(IPythonHandler):
def get(self):
self.finish()
class SaagieJobRun:
def __init__(self, job, run_data):
self.job = job
self.id = run_data['id']
self.status = run_data['status']
self.stderr = run_data.get('logs_err', '')
self.stdout = run_data.get('logs_out', '')
class SaagieJob:
@classmethod
def from_id(cls, notebook, platform_id, job_id):
return SaagieJob(
notebook,
requests.get(JOB_URL_PATTERN % (platform_id, job_id), auth=SAAGIE_BASIC_AUTH_TOKEN).json())
def __init__(self, notebook, job_data):
self.notebook = notebook
self.data = job_data
self.platform_id = job_data['platform_id']
self.capsule_type = job_data['capsule_code']
self.id = job_data['id']
self.name = job_data['name']
self.last_run = None
def set_as_current(self):
self.notebook.current_job = self
@property
def url(self):
return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id)
@property
def admin_url(self):
return get_absolute_saagie_url('/#/manager/%s/job/%s'
% (self.platform_id, self.id))
@property
def logs_url(self):
return self.admin_url + '/logs'
@property
def is_started(self):
return self.last_run is not None
def fetch_logs(self):
job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
run_data = job_data.get('last_instance')
if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'):
return
run_data = requests.get(
get_absolute_saagie_url('/api/v1/jobtask/%s'
% run_data['id']), auth=SAAGIE_BASIC_AUTH_TOKEN).json()
self.last_run = SaagieJobRun(self, run_data)
@property
def details_template_name(self):
return 'include/python_job_details.html'
def __str__(self):
return self.name
def __eq__(self, other):
if other is None:
return False
return self.platform_id == other.platform_id and self.id == other.id
def __lt__(self, other):
if other is None:
return False
return self.id < other.id
class SaagiePlatform:
SUPPORTED_CAPSULE_TYPES = {'python'}
def __init__(self, notebook, platform_data):
self.notebook = notebook
self.id = platform_data['id']
self.name = platform_data['name']
self.capsule_types = {c['code'] for c in platform_data['capsules']}
@property
def is_supported(self):
return not self.capsule_types.isdisjoint(self.SUPPORTED_CAPSULE_TYPES)
def get_jobs(self):
if not self.is_supported:
return []
jobs_data = requests.get(JOBS_URL_PATTERN % self.id, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
return [SaagieJob(self.notebook, job_data) for job_data in jobs_data
if job_data['category'] == 'processing' and
job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES]
def __eq__(self, other):
return self.id == other.id
class Notebook:
CACHE = {}
def __new__(cls, path, json):
if path in cls.CACHE:
return cls.CACHE[path]
cls.CACHE[path] = new = super(Notebook, cls).__new__(cls)
return new
def __init__(self, path, json_data):
if path is None:
path = 'Untitled.ipynb'
if json_data is None:
json_data = json.dumps({
'cells': [],
'metadata': {'kernelspec': {'name': 'python3'}}})
self.path = path
self.json = json.loads(json_data)
# In cached instances, current_job is already defined.
if not hasattr(self, 'current_job'):
self.current_job = None
@property
def name(self):
return os.path.splitext(os.path.basename(self.path))[0]
@property
def kernel_name(self):
return self.json['metadata']['kernelspec']['name']
@property
def kernel_display_name(self):
return self.json['metadata']['kernelspec']['display_name']
def get_code_cells(self):
return [cell['source'] for cell in self.json['cells']
if cell['cell_type'] == 'code']
def get_code(self, indices=None):
cells = self.get_code_cells()
if indices is None:
indices = list(range(len(cells)))
return '\n\n\n'.join([cells[i] for i in indices])
def get_platforms(self):
return [SaagiePlatform(self, platform_data)
for platform_data in requests.get(PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()]
class ViewsCollection(dict):
def add(self, func):
self[func.__name__] = func
return func
def render(self, view_name, notebook, data=None, method='GET', **kwargs):
if data is None:
data = {}
try:
view = views[view_name]
except KeyError:
raise ResponseError(404)
template_data = view(method, notebook, data, **kwargs)
if isinstance(template_data, tuple):
template_name, template_data = template_data
else:
template_name = view.__name__ + '.html'
return template_name, template_data
views = ViewsCollection()
@views.add
def modal(method, notebook, data):
return {}
def clear_basic_auth_token():
global SAAGIE_BASIC_AUTH_TOKEN
SAAGIE_BASIC_AUTH_TOKEN = None
# Init an empty Basic Auth token on first launch
clear_basic_auth_token()
def is_logged():
if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None:
return False
else:
# Check if Basic token is still valid
is_logged_in = False
try:
response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=SAAGIE_BASIC_AUTH_TOKEN, allow_redirects=False)
is_logged_in = response.ok
except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err:
print ('Error while trying to connect to Saagie: ', err)
if is_logged_in is not True:
# Remove Basic Auth token from globals. It will force a new login phase.
clear_basic_auth_token()
return is_logged_in
def define_globals(saagie_root_url, saagie_username):
if saagie_root_url is not None:
global SAAGIE_ROOT_URL
global SAAGIE_USERNAME
global PLATFORMS_URL
global JOBS_URL_PATTERN
global JOB_URL_PATTERN
global JOB_UPGRADE_URL_PATTERN
global SCRIPT_UPLOAD_URL_PATTERN
SAAGIE_USERNAME = saagie_username
SAAGIE_ROOT_URL = saagie_root_url.strip("/")
PLATFORMS_URL = SAAGIE_ROOT_URL + '/api/v1/platform'
JOBS_URL_PATTERN = PLATFORMS_URL + '/%s/job'
JOB_URL_PATTERN = JOBS_URL_PATTERN + '/%s'
JOB_UPGRADE_URL_PATTERN = JOBS_URL_PATTERN + '/%s/version'
SCRIPT_UPLOAD_URL_PATTERN = JOBS_URL_PATTERN + '/upload'
@views.add
def login_form(method, notebook, data):
if method == 'POST':
# check if the given Saagie URL is well formed
if not validators.url(data['saagie_root_url']):
return {'error': 'Invalid URL', 'saagie_root_url': data['saagie_root_url'] or '', 'username': data['username'] or ''}
define_globals(data['saagie_root_url'], data['username'])
try:
basic_token = HTTPBasicAuth(data['username'], data['password'])
current_user_response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=basic_token, allow_redirects=False)
if current_user_response.ok:
# Login succeeded, keep the basic token for future API calls
global SAAGIE_BASIC_AUTH_TOKEN
SAAGIE_BASIC_AUTH_TOKEN = basic_token
except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err:
print ('Error while trying to connect to Saagie: ', err)
return {'error': 'Connection error', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}
if SAAGIE_BASIC_AUTH_TOKEN is not None:
return views.render('capsule_type_chooser', notebook)
return {'error': 'Invalid URL, username or password.', 'saagie_root_url': SAAGIE_ROOT_URL, 'username': SAAGIE_USERNAME or ''}
if is_logged():
return views.render('capsule_type_chooser', notebook)
return {'error': None, 'saagie_root_url': SAAGIE_ROOT_URL or '', 'username': SAAGIE_USERNAME or ''}
def login_required(view):
@wraps(view)
def inner(method, notebook, data, *args, **kwargs):
if not is_logged():
return views.render('login_form', notebook)
return view(method, notebook, data, *args, **kwargs)
return inner
@views.add
@login_required
def capsule_type_chooser(method, notebook, data):
return {'username': SAAGIE_USERNAME}
def get_job_form(method, notebook, data):
context = {'platforms': notebook.get_platforms()}
context['values'] = ({'current': {'options': {}}} if notebook.current_job is None
else notebook.current_job.data)
return context
def create_job_base_data(data):
return {
'platform_id': data['saagie-platform'],
'category': 'processing',
'name': data['job-name'],
'description': data['description'],
'current': {
'cpu': data['cpu'],
'disk': data['disk'],
'memory': data['ram'],
'isInternalSubDomain': False,
'isInternalPort': False,
'options': {}
}
}
def upload_python_script(notebook, data):
code = notebook.get_code(map(int, data.get('code-lines', '').split('|')))
files = {'file': (data['job-name'] + '.py', code)}
return requests.post(
SCRIPT_UPLOAD_URL_PATTERN % data['saagie-platform'],
files=files, auth=SAAGIE_BASIC_AUTH_TOKEN).json()['fileName']
@views.add
@login_required
def python_job_form(method, notebook, data):
if method == 'POST':
platform_id = data['saagie-platform']
job_data = create_job_base_data(data)
job_data['capsule_code'] = 'python'
job_data['always_email'] = False
job_data['manual'] = True
job_data['retry'] = ''
current = job_data['current']
current['options']['language_version'] = data['language-version']
current['releaseNote'] = data['release-note']
current['template'] = data['shell-command']
current['file'] = upload_python_script(notebook, data)
new_job_data = requests.post(JOBS_URL_PATTERN % platform_id,
json=job_data, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
job = SaagieJob(notebook, new_job_data)
job.set_as_current()
return views.render('starting_job', notebook, {'job': job})
context = get_job_form(method, notebook, data)
context['action'] = '/saagie?view=python_job_form'
context['username'] = SAAGIE_USERNAME
return context
@views.add
@login_required
def update_python_job(method, notebook, data):
if method == 'POST':
job = notebook.current_job
platform_id = job.platform_id
data['saagie-platform'] = platform_id
data['job-name'] = job.name
data['description'] = ''
current = create_job_base_data(data)['current']
current['options']['language_version'] = data['language-version']
current['releaseNote'] = data['release-note']
current['template'] = data['shell-command']
current['file'] = upload_python_script(notebook, data)
requests.post(JOB_UPGRADE_URL_PATTERN % (platform_id, job.id),
json={'current': current}, auth=SAAGIE_BASIC_AUTH_TOKEN)
job.last_run = None
return views.render('starting_job', notebook, {'job': job})
context = get_job_form(method, notebook, data)
context['action'] = '/saagie?view=update_python_job'
context['username'] = SAAGIE_USERNAME
return context
@views.add
@login_required
def select_python_job(method, notebook, data):
if method == 'POST':
platform_id, job_id = data['job'].split('-')
notebook.current_job = SaagieJob.from_id(notebook, platform_id, job_id)
return views.render('update_python_job', notebook, data)
jobs_by_platform = []
for platform in notebook.get_platforms():
jobs = platform.get_jobs()
if jobs:
jobs_by_platform.append((platform,
list(sorted(jobs, reverse=True))))
return {'jobs_by_platform': jobs_by_platform,
'action': '/saagie?view=select_python_job', 'username': SAAGIE_USERNAME}
@views.add
@login_required
def unsupported_kernel(method, notebook, data):
return {'username': SAAGIE_USERNAME}
@views.add
@login_required
def starting_job(method, notebook, data):
job = notebook.current_job
job.fetch_logs()
if job.is_started:
return views.render('started_job', notebook, {'job': job})
return {'job': job, 'username': SAAGIE_USERNAME}
@views.add
@login_required
def started_job(method, notebook, data):
return {'job': notebook.current_job, 'username': SAAGIE_USERNAME}
@views.add
def logout(method, notebook, data):
global SAAGIE_BASIC_AUTH_TOKEN
global SAAGIE_ROOT_URL
global SAAGIE_USERNAME
SAAGIE_BASIC_AUTH_TOKEN = None
SAAGIE_ROOT_URL = None
SAAGIE_USERNAME = None
return {}
def load_jupyter_server_extension(nb_app):
web_app = nb_app.web_app
base_url = web_app.settings['base_url']
route_pattern = url_path_join(base_url, '/saagie')
web_app.add_handlers('.*$', [(route_pattern, SaagieHandler)])
route_pattern = url_path_join(base_url, '/saagie/check')
web_app.add_handlers('.*$', [(route_pattern, SaagieCheckHandler)])
| saagie/jupyter-saagie-plugin | saagie/server_extension.py | Python | apache-2.0 | 16,090 |
//// [contextualTypeWithUnionTypeMembers.ts]
//When used as a contextual type, a union type U has those members that are present in any of
// its constituent types, with types that are unions of the respective members in the constituent types.
interface I1<T> {
commonMethodType(a: string): string;
commonPropertyType: string;
commonMethodWithTypeParameter(a: T): T;
methodOnlyInI1(a: string): string;
propertyOnlyInI1: string;
}
interface I2<T> {
commonMethodType(a: string): string;
commonPropertyType: string;
commonMethodWithTypeParameter(a: T): T;
methodOnlyInI2(a: string): string;
propertyOnlyInI2: string;
}
// Let S be the set of types in U that has a property P.
// If S is not empty, U has a property P of a union type of the types of P from each type in S.
var i1: I1<number>;
var i2: I2<number>;
var i1Ori2: I1<number> | I2<number> = i1;
var i1Ori2: I1<number> | I2<number> = i2;
var i1Ori2: I1<number> | I2<number> = { // Like i1
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI1: a => a,
propertyOnlyInI1: "Hello",
};
var i1Ori2: I1<number> | I2<number> = { // Like i2
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI2: a => a,
propertyOnlyInI2: "Hello",
};
var i1Ori2: I1<number> | I2<number> = { // Like i1 and i2 both
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI1: a => a,
propertyOnlyInI1: "Hello",
methodOnlyInI2: a => a,
propertyOnlyInI2: "Hello",
};
var arrayI1OrI2: Array<I1<number> | I2<number>> = [i1, i2, { // Like i1
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI1: a => a,
propertyOnlyInI1: "Hello",
},
{ // Like i2
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI2: a => a,
propertyOnlyInI2: "Hello",
}, { // Like i1 and i2 both
commonPropertyType: "hello",
commonMethodType: a=> a,
commonMethodWithTypeParameter: a => a,
methodOnlyInI1: a => a,
propertyOnlyInI1: "Hello",
methodOnlyInI2: a => a,
propertyOnlyInI2: "Hello",
}];
interface I11 {
commonMethodDifferentReturnType(a: string, b: number): string;
commonPropertyDifferentType: string;
}
interface I21 {
commonMethodDifferentReturnType(a: string, b: number): number;
commonPropertyDifferentType: number;
}
var i11: I11;
var i21: I21;
var i11Ori21: I11 | I21 = i11;
var i11Ori21: I11 | I21 = i21;
var i11Ori21: I11 | I21 = {
// Like i1
commonMethodDifferentReturnType: (a, b) => {
var z = a.charAt(b);
return z;
},
commonPropertyDifferentType: "hello",
};
var i11Ori21: I11 | I21 = {
// Like i2
commonMethodDifferentReturnType: (a, b) => {
var z = a.charCodeAt(b);
return z;
},
commonPropertyDifferentType: 10,
};
var arrayOrI11OrI21: Array<I11 | I21> = [i11, i21, i11 || i21, {
// Like i1
commonMethodDifferentReturnType: (a, b) => {
var z = a.charAt(b);
return z;
},
commonPropertyDifferentType: "hello",
}, {
// Like i2
commonMethodDifferentReturnType: (a, b) => {
var z = a.charCodeAt(b);
return z;
},
commonPropertyDifferentType: 10,
}];
//// [contextualTypeWithUnionTypeMembers.js]
// Let S be the set of types in U that has a property P.
// If S is not empty, U has a property P of a union type of the types of P from each type in S.
var i1;
var i2;
var i1Ori2 = i1;
var i1Ori2 = i2;
var i1Ori2 = {
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI1: function (a) { return a; },
propertyOnlyInI1: "Hello"
};
var i1Ori2 = {
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI2: function (a) { return a; },
propertyOnlyInI2: "Hello"
};
var i1Ori2 = {
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI1: function (a) { return a; },
propertyOnlyInI1: "Hello",
methodOnlyInI2: function (a) { return a; },
propertyOnlyInI2: "Hello"
};
var arrayI1OrI2 = [i1, i2, {
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI1: function (a) { return a; },
propertyOnlyInI1: "Hello"
},
{
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI2: function (a) { return a; },
propertyOnlyInI2: "Hello"
}, {
commonPropertyType: "hello",
commonMethodType: function (a) { return a; },
commonMethodWithTypeParameter: function (a) { return a; },
methodOnlyInI1: function (a) { return a; },
propertyOnlyInI1: "Hello",
methodOnlyInI2: function (a) { return a; },
propertyOnlyInI2: "Hello"
}];
var i11;
var i21;
var i11Ori21 = i11;
var i11Ori21 = i21;
var i11Ori21 = {
// Like i1
commonMethodDifferentReturnType: function (a, b) {
var z = a.charAt(b);
return z;
},
commonPropertyDifferentType: "hello"
};
var i11Ori21 = {
// Like i2
commonMethodDifferentReturnType: function (a, b) {
var z = a.charCodeAt(b);
return z;
},
commonPropertyDifferentType: 10
};
var arrayOrI11OrI21 = [i11, i21, i11 || i21, {
// Like i1
commonMethodDifferentReturnType: function (a, b) {
var z = a.charAt(b);
return z;
},
commonPropertyDifferentType: "hello"
}, {
// Like i2
commonMethodDifferentReturnType: function (a, b) {
var z = a.charCodeAt(b);
return z;
},
commonPropertyDifferentType: 10
}];
| freedot/tstolua | tests/baselines/reference/contextualTypeWithUnionTypeMembers.js | JavaScript | apache-2.0 | 6,404 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using SuperMap.WinRT.Core;
using SuperMap.WinRT.Utilities;
using Windows.Data.Json;
using Windows.UI;
namespace SuperMap.WinRT.REST
{
/// <summary>
/// <para>${REST_ServerLayer_Title}</para>
/// <para>${REST_ServerLayer_Description}</para>
/// </summary>
public class ServerLayer
{
/// <summary>${REST_ServerLayer_constructor_D}</summary>
public ServerLayer()
{ }
//Property
/// <summary>${REST_ServerLayer_attribute_Bounds_D}</summary>
public Rectangle2D Bounds
{
get;
internal set;
}
        //Corresponds to the Layer properties on the server side
/// <summary>${REST_ServerLayer_attribute_caption_D}</summary>
public string Caption { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_Description_D}</summary>
public string Description { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_Name_D}</summary>
public string Name { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_IsQueryable_D}</summary>
public bool IsQueryable { get; internal set; }
        //Sub-layers of this layer are not handled for now
//public System.Collections.Generic.List<LayerInfo> SubLayers { get; internal set; }
        //Defaults to UGC here, so it is not exposed to users
//private string LayerType = "UGC";
/// <summary>${REST_ServerLayer_attribute_IsVisible_D}</summary>
public bool IsVisible { get; internal set; }
        //Corresponds to the server-side UGCMapLayer properties
/// <summary>${REST_ServerLayer_attribute_IsCompleteLineSymbolDisplayed_D}</summary>
public bool IsCompleteLineSymbolDisplayed { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_MaxScale_D}</summary>
public double MaxScale { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_minScale_D}</summary>
public double MinScale { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_MinVisibleGeometrySize_D}</summary>
public double MinVisibleGeometrySize { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_OpaqueRate_D}</summary>
public int OpaqueRate { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_IsSymbolScalable_D}</summary>
public bool IsSymbolScalable { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_SymbolScale_D}</summary>
public double SymbolScale { get; internal set; }
        // Properties corresponding to the server-side UGCLayer
/// <summary>${REST_ServerLayer_attribute_DatasetInfo_D}</summary>
public DatasetInfo DatasetInfo { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_DisplayFilter_D}</summary>
public string DisplayFilter { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_JoinItems_D}</summary>
public System.Collections.Generic.List<JoinItem> JoinItems { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_RepresentationField_D}</summary>
public string RepresentationField { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_UGCLayerType_D}</summary>
public SuperMapLayerType UGCLayerType { get; internal set; }
/// <summary>${REST_ServerLayer_attribute_UGCLayer_D}</summary>
public UGCLayer UGCLayer { get; internal set; }
/// <summary>${REST_ServerLayer_method_FromJson_D}</summary>
/// <returns>${REST_ServerLayer_method_FromJson_return}</returns>
/// <param name="json">${REST_ServerLayer_method_FromJson_param_jsonObject}</param>
internal static ServerLayer FromJson(JsonObject json)
{
var serverLayer = new ServerLayer();
if (json["bounds"].ValueType != JsonValueType.Null)
{
serverLayer.Bounds = JsonHelper.ToRectangle2D(json["bounds"].GetObjectEx());
}
else
{
                // bounds is null; leave Bounds unset
}
serverLayer.Caption = json["caption"].GetStringEx();
serverLayer.Description = json["description"].GetStringEx();
serverLayer.Name = json["name"].GetStringEx();
serverLayer.IsQueryable = json["queryable"].GetBooleanEx();
serverLayer.IsVisible = json["visible"].GetBooleanEx();
serverLayer.IsCompleteLineSymbolDisplayed = json["completeLineSymbolDisplayed"].GetBooleanEx();
serverLayer.MaxScale = json["maxScale"].GetNumberEx();
serverLayer.MinScale = json["minScale"].GetNumberEx();
serverLayer.MinVisibleGeometrySize = json["minVisibleGeometrySize"].GetNumberEx();
serverLayer.OpaqueRate = (int)json["opaqueRate"].GetNumberEx();
serverLayer.IsSymbolScalable = json["symbolScalable"].GetBooleanEx();
serverLayer.SymbolScale = json["symbolScale"].GetNumberEx();
serverLayer.DatasetInfo = DatasetInfo.FromJson(json["datasetInfo"].GetObjectEx());
serverLayer.DisplayFilter = json["displayFilter"].GetStringEx();
if (json["joinItems"].ValueType != JsonValueType.Null)
{
List<JoinItem> joinItems = new List<JoinItem>();
foreach (JsonValue item in json["joinItems"].GetArray())
{
joinItems.Add(JoinItem.FromJson(item.GetObjectEx()));
}
serverLayer.JoinItems = joinItems;
}
serverLayer.RepresentationField = json["representationField"].GetStringEx();
if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.GRID.ToString())
{
UGCGridLayer ugcGridLayer = new UGCGridLayer();
List<Color> colors = new List<Color>();
foreach (JsonValue colorItem in json["colors"].GetArray())
{
colors.Add(ServerColor.FromJson(colorItem.GetObjectEx()).ToColor());
}
ugcGridLayer.Colors = colors;
if (json["dashStyle"].ValueType != JsonValueType.Null)
{
ugcGridLayer.DashStyle = ServerStyle.FromJson(json["dashStyle"].GetObjectEx());
}
if (json["gridType"].ValueType != JsonValueType.Null)
{
ugcGridLayer.GridType = (GridType)Enum.Parse(typeof(GridType), json["gridType"].GetStringEx(), true);
}
else
{
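                    // gridType is null; keep the default GridType value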
}
ugcGridLayer.HorizontalSpacing = json["horizontalSpacing"].GetNumberEx();
ugcGridLayer.SizeFixed = json["sizeFixed"].GetBooleanEx();
if (json["solidStyle"].ValueType != JsonValueType.Null)
{
ugcGridLayer.SolidStyle = ServerStyle.FromJson(json["solidStyle"].GetObjectEx());
}
if (json["specialColor"].ValueType != JsonValueType.Null)
{
ugcGridLayer.SpecialColor = ServerColor.FromJson(json["specialColor"].GetObjectEx()).ToColor();
}
ugcGridLayer.SpecialValue = json["specialValue"].GetNumberEx();
ugcGridLayer.VerticalSpacing = json["verticalSpacing"].GetNumberEx();
serverLayer.UGCLayer = ugcGridLayer;
}
else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.IMAGE.ToString())
{
UGCImageLayer ugcImageLayer = new UGCImageLayer();
ugcImageLayer.Brightness = (int)json["brightness"].GetNumberEx();
if (json["colorSpaceType"].ValueType != JsonValueType.Null)
{
ugcImageLayer.ColorSpaceType = (ColorSpaceType)Enum.Parse(typeof(ColorSpaceType), json["colorSpaceType"].GetStringEx(), true);
}
else
{
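                    // colorSpaceType is null; keep the default ColorSpaceType value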
}
ugcImageLayer.Contrast = (int)json["contrast"].GetNumberEx();
List<int> bandIndexes = new List<int>();
if (json["displayBandIndexes"].ValueType != JsonValueType.Null && (json["displayBandIndexes"].GetArray()).Count > 0)
{
foreach (JsonObject item in json["displayBandIndexes"].GetArray())
{
bandIndexes.Add((int)item.GetNumber());
}
ugcImageLayer.DisplayBandIndexes = bandIndexes;
}
ugcImageLayer.Transparent = json["transparent"].GetBooleanEx();
ugcImageLayer.TransparentColor = ServerColor.FromJson(json["transparentColor"].GetObjectEx()).ToColor();
serverLayer.UGCLayer = ugcImageLayer;
}
else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.THEME.ToString())
{
UGCThemeLayer ugcThemeLayer = new UGCThemeLayer();
if (json["theme"].ValueType != JsonValueType.Null)
{
if (json["theme"].GetObjectEx()["type"].GetStringEx() == "UNIQUE")
{
ugcThemeLayer.Theme = ThemeUnique.FromJson(json["theme"].GetObjectEx());
}
else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "RANGE")
{
ugcThemeLayer.Theme = ThemeRange.FromJson(json["theme"].GetObjectEx());
}
else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "LABEL")
{
ugcThemeLayer.Theme = ThemeLabel.FromJson(json["theme"].GetObjectEx());
}
else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "GRAPH")
{
ugcThemeLayer.Theme = ThemeGraph.FromJson(json["theme"].GetObjectEx());
}
else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "DOTDENSITY")
{
ugcThemeLayer.Theme = ThemeDotDensity.FromJson(json["theme"].GetObjectEx());
}
else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "GRADUATEDSYMBOL")
{
ugcThemeLayer.Theme = ThemeGraduatedSymbol.FromJson(json["theme"].GetObjectEx());
}
else
{
                        // Add more theme types here if needed; six theme types are handled for now.
}
}
if (json["theme"].GetObjectEx()["type"].ValueType != JsonValueType.Null)
{
ugcThemeLayer.ThemeType = (ThemeType)Enum.Parse(typeof(ThemeType), json["theme"].GetObjectEx()["type"].GetStringEx(), true);
}
serverLayer.UGCLayer = ugcThemeLayer;
//ugcThemeLayer.Theme
}
else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.VECTOR.ToString() && json.ContainsKey("style"))
{
serverLayer.UGCLayer = UGCVectorLayer.FromJson(json["style"].GetObjectEx());
}
else
{
serverLayer.UGCLayer = new UGCLayer();
}
if (json["ugcLayerType"].ValueType != JsonValueType.Null)
{
serverLayer.UGCLayerType = (SuperMapLayerType)Enum.Parse(typeof(SuperMapLayerType), json["ugcLayerType"].GetStringEx(), true);
}
else
{
                // ugcLayerType is null; nothing to do
}
            // WMS and WFS layers are not handled here.
            //else if (json["ugcLayerType"] == SuperMapLayerType.WMS.ToString())
            //{
            //}
            // Additional properties are added according to the layer type.
return serverLayer;
}
}
}
| SuperMap/iClient-for-Win8 | iClient60ForWinRT/SuperMap.WinRT.REST/Map/GetMapStatusAndLayerInfo/ServerLayer.cs | C# | apache-2.0 | 12,064 |
// /*
// Copyright The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// */
//
// Code generated by MockGen. DO NOT EDIT.
// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
// Package mockdiskclient is a generated GoMock package.
package mockdiskclient
import (
context "context"
reflect "reflect"
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
gomock "github.com/golang/mock/gomock"
retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
// MockInterface is a mock of Interface interface.
type MockInterface struct {
ctrl *gomock.Controller
recorder *MockInterfaceMockRecorder
}
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
mock *MockInterface
}
// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
mock := &MockInterface{ctrl: ctrl}
mock.recorder = &MockInterfaceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
return m.recorder
}
// Get mocks base method.
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, diskName string) (compute.Disk, *retry.Error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, diskName)
ret0, _ := ret[0].(compute.Disk)
ret1, _ := ret[1].(*retry.Error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, diskName)
}
// CreateOrUpdate mocks base method.
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, diskName, diskParameter)
ret0, _ := ret[0].(*retry.Error)
return ret0
}
// CreateOrUpdate indicates an expected call of CreateOrUpdate.
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter)
}
// Update mocks base method.
func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter)
ret0, _ := ret[0].(*retry.Error)
return ret0
}
// Update indicates an expected call of Update.
func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter)
}
// Delete mocks base method.
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, diskName)
ret0, _ := ret[0].(*retry.Error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
}
// ListByResourceGroup mocks base method.
func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
ret0, _ := ret[0].([]compute.Disk)
ret1, _ := ret[1].(*retry.Error)
return ret0, ret1
}
// ListByResourceGroup indicates an expected call of ListByResourceGroup.
func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
}
| kubernetes/autoscaler | cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go | GO | apache-2.0 | 5,056 |
.tobox{
float: left;
padding: 10px;
background-color: #eee;
width:120px;
margin:0 10px 10px 0px;
text-align:center;
border-radius: 3px;
}
.tobox.two{
width: 180px;
}
.tobox.fixed-box{
position:fixed;
right:50px;
top:50px;
}
.dark-tooltip{ display:none; position:absolute; z-index:99; text-decoration:none; font-weight:normal; height:auto; top:0; left:0;}
.dark-tooltip.small{ padding:4px; font-size:12px; max-width:150px; -webkit-border-radius: 2px; -moz-border-radius: 2px; border-radius: 2px; }
.dark-tooltip.medium{ padding:10px; font-size:14px; max-width:200px; -webkit-border-radius: 4px; -moz-border-radius: 4px; border-radius: 4px;}
.dark-tooltip.large{ padding:16px; font-size:16px; max-width:250px; -webkit-border-radius: 6px; -moz-border-radius: 6px; border-radius: 6px; }
/* Tips */
.dark-tooltip .tip{ -webkit-transform: scale(1.01); transform: scale(1.01); content: ""; position: absolute; width:0; height:0; border-style: solid; line-height: 0px; }
.dark-tooltip.south .tip{ left:50%; top:100%;}
.dark-tooltip.west .tip{ left:0; top:50%;}
.dark-tooltip.north .tip{ left:50%; top:0; }
.dark-tooltip.east .tip{ left:100%; top:50%;}
.dark-tooltip.south.small .tip{ border-width: 7px 5px 0 5px; margin-left:-5px;}
.dark-tooltip.south.medium .tip{ border-width: 8px 6px 0 6px; margin-left:-6px;}
.dark-tooltip.south.large .tip{ border-width: 14px 12px 0 12px; margin-left:-12px;}
.dark-tooltip.west.small .tip{ border-width: 5px 7px 5px 0; margin-left:-7px; margin-top:-5px;}
.dark-tooltip.west.medium .tip{ border-width: 6px 8px 6px 0; margin-left:-8px; margin-top:-6px;}
.dark-tooltip.west.large .tip{ border-width: 12px 14px 12px 0; margin-left:-14px; margin-top:-12px;}
.dark-tooltip.north.small .tip{ border-width: 0 5px 7px 5px; margin-left:-5px; margin-top:-7px;}
.dark-tooltip.north.medium .tip{ border-width: 0 6px 8px 6px; margin-left:-6px; margin-top:-8px;}
.dark-tooltip.north.large .tip{ border-width: 0 12px 14px 12px; margin-left:-12px; margin-top:-14px;}
.dark-tooltip.east.small .tip{ border-width: 5px 0 5px 7px; margin-top:-5px;}
.dark-tooltip.east.medium .tip{ border-width: 6px 0 6px 8px; margin-top:-6px;}
.dark-tooltip.east.large .tip{ border-width: 12px 0 12px 14px; margin-top:-12px;}
/* confirm */
.dark-tooltip ul.confirm{ list-style-type:none;margin-top:5px;display:inline-block;margin:0 auto; }
.dark-tooltip ul.confirm li{ padding:10px;float:left;margin:5px;min-width:25px;-webkit-border-radius:3px;-moz-border-radius:3px;-o-border-radius:3px;border-radius:3px;}
/* themes */
.dark-tooltip.dark{ background-color:#1e1e1e; color:#fff; }
.dark-tooltip.light{ background-color:#ebedf3; color:#1e1e1e; }
.dark-tooltip.dark.south .tip{ border-color: #1e1e1e transparent transparent transparent; _border-color: #1e1e1e #000000 #000000 #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.dark.west .tip{ border-color: transparent #1e1e1e transparent transparent; _border-color: #000000 #1e1e1e #000000 #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.dark.north .tip{ border-color: transparent transparent #1e1e1e transparent; _border-color: #000000 #000000 #1e1e1e #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.dark.east .tip{ border-color: transparent transparent transparent #1e1e1e; _border-color: #000000 #000000 #000000 #1e1e1e; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.light.south .tip{ border-color: #ebedf3 transparent transparent transparent; _border-color: #ebedf3 #000000 #000000 #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.light.west .tip{ border-color: transparent #ebedf3 transparent transparent; _border-color: #000000 #ebedf3 #000000 #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.light.north .tip{ border-color: transparent transparent #ebedf3 transparent; _border-color: #000000 #000000 #ebedf3 #000000; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.light.east .tip{ border-color: transparent transparent transparent #ebedf3; _border-color:#000000 #000000 #000000 #ebedf3 ; _filter: progid:DXImageTransform.Microsoft.Chroma(color='#000000'); }
.dark-tooltip.dark ul.confirm li{ background-color:#416E85;}
.dark-tooltip.dark ul.confirm li:hover{ background-color:#417E85;}
.dark-tooltip.light ul.confirm li{ background-color:#C1DBDB;}
.dark-tooltip.light ul.confirm li:hover{ background-color:#DCE8E8;}
/* Animations */
.animated{
-webkit-animation-fill-mode:both;-moz-animation-fill-mode:both;-ms-animation-fill-mode:both;-o-animation-fill-mode:both;animation-fill-mode:both;
-webkit-animation-duration:.5s;-moz-animation-duration:.5s;-ms-animation-duration:.5s;-o-animation-duration:.5s;animation-duration:.5s;
}
@-webkit-keyframes flipInUp {
0% { -webkit-transform: perspective(400px) rotateX(-90deg); opacity: 0;}
40% { -webkit-transform: perspective(400px) rotateX(5deg);}
70% { -webkit-transform: perspective(400px) rotateX(-5deg);}
100% { -webkit-transform: perspective(400px) rotateX(0deg); opacity: 1;}
}
@-moz-keyframes flipInUp {
0% {transform: perspective(400px) rotateX(-90deg);opacity: 0;}
40% {transform: perspective(400px) rotateX(5deg);}
70% {transform: perspective(400px) rotateX(-5deg);}
100% {transform: perspective(400px) rotateX(0deg);opacity: 1;}
}
@-o-keyframes flipInUp {
0% {-o-transform: perspective(400px) rotateX(-90deg);opacity: 0;}
40% {-o-transform: perspective(400px) rotateX(5deg);}
70% {-o-transform: perspective(400px) rotateX(-5deg);}
100% {-o-transform: perspective(400px) rotateX(0deg);opacity: 1;}
}
@keyframes flipInUp {
0% {transform: perspective(400px) rotateX(-90deg);opacity: 0;}
40% {transform: perspective(400px) rotateX(5deg);}
70% {transform: perspective(400px) rotateX(-5deg);}
100% {transform: perspective(400px) rotateX(0deg);opacity: 1;}
}
@-webkit-keyframes flipInRight {
0% { -webkit-transform: perspective(400px) rotateY(-90deg); opacity: 0;}
40% { -webkit-transform: perspective(400px) rotateY(5deg);}
70% { -webkit-transform: perspective(400px) rotateY(-5deg);}
100% { -webkit-transform: perspective(400px) rotateY(0deg); opacity: 1;}
}
@-moz-keyframes flipInRight {
0% {transform: perspective(400px) rotateY(-90deg);opacity: 0;}
40% {transform: perspective(400px) rotateY(5deg);}
70% {transform: perspective(400px) rotateY(-5deg);}
100% {transform: perspective(400px) rotateY(0deg);opacity: 1;}
}
@-o-keyframes flipInRight {
0% {-o-transform: perspective(400px) rotateY(-90deg);opacity: 0;}
40% {-o-transform: perspective(400px) rotateY(5deg);}
70% {-o-transform: perspective(400px) rotateY(-5deg);}
100% {-o-transform: perspective(400px) rotateY(0deg);opacity: 1;}
}
@keyframes flipInRight {
0% {transform: perspective(400px) rotateY(-90deg);opacity: 0;}
40% {transform: perspective(400px) rotateY(5deg);}
70% {transform: perspective(400px) rotateY(-5deg);}
100% {transform: perspective(400px) rotateY(0deg);opacity: 1;}
}
.flipIn { -webkit-backface-visibility: visible !important; -moz-backface-visibility: visible !important; -o-backface-visibility: visible !important; backface-visibility: visible !important}
.flipIn.south, .flipIn.north { -webkit-animation-name: flipInUp; -moz-animation-name: flipInUp; -o-animation-name: flipInUp; animation-name: flipInUp; }
.flipIn.west, .flipIn.east { -webkit-animation-name: flipInRight; -moz-animation-name: flipInRight; -o-animation-name: flipInRight; animation-name: flipInRight; }
@-webkit-keyframes fadeIn { 0% {opacity: 0;} 100% {opacity: 1;}}
@-moz-keyframes fadeIn { 0% {opacity: 0;} 100% {opacity: 1;}}
@-o-keyframes fadeIn {0% {opacity: 0;}100% {opacity: 1;}}
@keyframes fadeIn {0% {opacity: 0;}100% {opacity: 1;}}
.fadeIn{-webkit-animation-name: fadeIn; -moz-animation-name: fadeIn; -o-animation-name: fadeIn; animation-name: fadeIn;}
/* Modal */
.darktooltip-modal-layer{
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background-image: url('http://gsrthemes.com/aaika/fullwidth/js/img/modal-bg.png');
opacity: .7;
display: none;
}
| RSUP/TENJUTA | js/third-party/tooltips/darktooltip.css | CSS | apache-2.0 | 8,220 |
<!doctype html public "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html>
<head>
<title>PHPXRef 0.7.1 : Unnamed Project : Variable Reference: $object1</title>
<link rel="stylesheet" href="../sample.css" type="text/css">
<link rel="stylesheet" href="../sample-print.css" type="text/css" media="print">
<style id="hilight" type="text/css"></style>
<meta http-equiv="content-type" content="text/html;charset=iso-8859-1">
</head>
<body bgcolor="#ffffff" text="#000000" link="#801800" vlink="#300540" alink="#ffffff">
<table class="pagetitle" width="100%">
<tr>
<td valign="top" class="pagetitle">
[ <a href="../index.html">Index</a> ]
</td>
<td align="right" class="pagetitle">
<h2 style="margin-bottom: 0px">PHP Cross Reference of Unnamed Project</h2>
</td>
</tr>
</table>
<!-- Generated by PHPXref 0.7.1 at Thu Oct 23 18:57:41 2014 -->
<!-- PHPXref (c) 2000-2010 Gareth Watts - [email protected] -->
<!-- http://phpxref.sourceforge.net/ -->
<script src="../phpxref.js" type="text/javascript"></script>
<script language="JavaScript" type="text/javascript">
<!--
ext='.html';
relbase='../';
subdir='_variables';
filename='index.html';
cookiekey='phpxref';
handleNavFrame(relbase, subdir, filename);
logVariable('object1');
// -->
</script>
<script language="JavaScript" type="text/javascript">
if (gwGetCookie('xrefnav')=='off')
document.write('<p class="navlinks">[ <a href="javascript:navOn()">Show Explorer<\/a> ]<\/p>');
else
document.write('<p class="navlinks">[ <a href="javascript:navOff()">Hide Explorer<\/a> ]<\/p>');
</script>
<noscript>
<p class="navlinks">
[ <a href="../nav.html" target="_top">Show Explorer</a> ]
[ <a href="index.html" target="_top">Hide Navbar</a> ]
</p>
</noscript>
[<a href="../index.html">Top level directory</a>]<br>
<script language="JavaScript" type="text/javascript">
<!--
document.writeln('<table align="right" class="searchbox-link"><tr><td><a class="searchbox-link" href="javascript:void(0)" onMouseOver="showSearchBox()">Search</a><br>');
document.writeln('<table border="0" cellspacing="0" cellpadding="0" class="searchbox" id="searchbox">');
document.writeln('<tr><td class="searchbox-title">');
document.writeln('<a class="searchbox-title" href="javascript:showSearchPopup()">Search History +</a>');
document.writeln('<\/td><\/tr>');
document.writeln('<tr><td class="searchbox-body" id="searchbox-body">');
document.writeln('<form name="search" style="margin:0px; padding:0px" onSubmit=\'return jump()\'>');
document.writeln('<a class="searchbox-body" href="../_classes/index.html">Class<\/a>: ');
document.writeln('<input type="text" size=10 value="" name="classname"><br>');
document.writeln('<a id="funcsearchlink" class="searchbox-body" href="../_functions/index.html">Function<\/a>: ');
document.writeln('<input type="text" size=10 value="" name="funcname"><br>');
document.writeln('<a class="searchbox-body" href="../_variables/index.html">Variable<\/a>: ');
document.writeln('<input type="text" size=10 value="" name="varname"><br>');
document.writeln('<a class="searchbox-body" href="../_constants/index.html">Constant<\/a>: ');
document.writeln('<input type="text" size=10 value="" name="constname"><br>');
document.writeln('<a class="searchbox-body" href="../_tables/index.html">Table<\/a>: ');
document.writeln('<input type="text" size=10 value="" name="tablename"><br>');
document.writeln('<input type="submit" class="searchbox-button" value="Search">');
document.writeln('<\/form>');
document.writeln('<\/td><\/tr><\/table>');
document.writeln('<\/td><\/tr><\/table>');
// -->
</script>
<div id="search-popup" class="searchpopup"><p id="searchpopup-title" class="searchpopup-title">title</p><div id="searchpopup-body" class="searchpopup-body">Body</div><p class="searchpopup-close"><a href="javascript:gwCloseActive()">[close]</a></p></div>
<h3>Variable Cross Reference</h3>
<h2><a href="index.html#object1">$object1</a></h2>
<b>Defined at:</b><ul>
<li><a href="../tests/simpletest/test/compatibility_test.php.html">/tests/simpletest/test/compatibility_test.php</A> -> <a href="../tests/simpletest/test/compatibility_test.php.source.html#l32"> line 32</A></li>
</ul>
<br><b>Referenced 2 times:</b><ul>
<li><a href="../tests/simpletest/test/compatibility_test.php.html">/tests/simpletest/test/compatibility_test.php</a> -> <a href="../tests/simpletest/test/compatibility_test.php.source.html#l32"> line 32</a></li>
<li><a href="../tests/simpletest/test/compatibility_test.php.html">/tests/simpletest/test/compatibility_test.php</a> -> <a href="../tests/simpletest/test/compatibility_test.php.source.html#l34"> line 34</a></li>
</ul>
<!-- A link to the phpxref site in your customized footer file is appreciated ;-) -->
<br><hr>
<table width="100%">
<tr><td>Generated: Thu Oct 23 18:57:41 2014</td>
<td align="right"><i>Cross-referenced by <a href="http://phpxref.sourceforge.net/">PHPXref 0.7.1</a></i></td>
</tr>
</table>
</body></html>
| inputx/code-ref-doc | bonfire/_variables/object1.html | HTML | apache-2.0 | 5,024 |
/*
* Copyright 2014 Alexey Andreev.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teavm.classlib.java.util;
import org.teavm.classlib.java.io.TSerializable;
import org.teavm.classlib.java.lang.TMath;
import org.teavm.classlib.java.lang.TObject;
import org.teavm.javascript.spi.GeneratedBy;
/**
*
* @author Alexey Andreev
*/
public class TRandom extends TObject implements TSerializable {
public TRandom() {
}
public TRandom(@SuppressWarnings("unused") long seed) {
}
public void setSeed(@SuppressWarnings("unused") long seed) {
}
protected int next(int bits) {
        // Cast through long so values >= 2^31 wrap into the negative int range
        // instead of saturating at Integer.MAX_VALUE when narrowing from double.
        return (int) (long) (random() * (1L << TMath.min(32, bits)));
}
public void nextBytes(byte[] bytes) {
for (int i = 0; i < bytes.length; ++i) {
bytes[i] = (byte)next(8);
}
}
public int nextInt() {
return next(32);
}
public int nextInt(int n) {
return (int)(random() * n);
}
public long nextLong() {
        // Mask the low word so sign extension does not clobber the high 32 bits.
        return ((long) nextInt() << 32) | (nextInt() & 0xFFFFFFFFL);
}
public boolean nextBoolean() {
return nextInt() % 2 == 0;
}
public float nextFloat() {
return (float)random();
}
public double nextDouble() {
return random();
}
@GeneratedBy(RandomNativeGenerator.class)
private static native double random();
}
| mpoindexter/teavm | teavm-classlib/src/main/java/org/teavm/classlib/java/util/TRandom.java | Java | apache-2.0 | 1,877 |
#include <iostream>
#include "SParser.hpp"
#pragma once
namespace Silent
{
/*static class SymTablePrinter
{
public:
static void Out(std::string str, uint64_t currentTab)
{
std::string tabs = "";
for (uint64_t i = 0; i < currentTab; i++) tabs += "\t";
std::cout << tabs << str << std::endl;
}
SymTablePrinter()
{
}
static void PrintSymTable(SymbolTable* symTable)
{
PrintNode(symTable->self, 0);
}
static void PrintNode(TableNode node, uint64_t currentTab)
{
switch (node.nodeType)
{
case TableNode::Type::Program:
{
Program* p = (Program*)node.GetNode();
Out("Program", currentTab);
currentTab++;
for(TableNode node : p->table->GetItems())
PrintNode(node, currentTab);
currentTab--;
}
break;
case TableNode::Type::Namespace:
{
Namespace* n = (Namespace*)node.GetNode();
Out("Namespace " + n->GetId(), currentTab);
currentTab++;
for (TableNode node : n->GetTable()->GetItems())
PrintNode(node, currentTab);
currentTab--;
}
break;
case TableNode::Type::Subroutine:
{
Subroutine* s = (Subroutine*)node.GetNode();
Out("Subroutine " + s->GetId(), currentTab);
currentTab++;
for (TableNode node : s->GetTable()->GetItems())
PrintNode(node, currentTab);
currentTab--;
}
break;
case TableNode::Type::Variable:
{
Variable* v = (Variable*)node.GetNode();
Out("Variable " + v->GetId(), currentTab);
}
break;
case TableNode::Type::Structure:
{
Type* t = (Type*)node.GetNode();
Out("Type " + t->GetId(), currentTab);
currentTab++;
for (TableNode node : t->GetTable()->GetItems())
PrintNode(node, currentTab);
currentTab--;
}
break;
}
}
};*/
} | EdwinRybarczyk/SilentProgrammingLanguage | SilentCompiler/include/SHelper.hpp | C++ | apache-2.0 | 2,595 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.util.io.base;
import net.sf.mmm.util.exception.api.NlsNullPointerException;
/**
* This class is similar to {@link java.nio.ByteBuffer} but a lot simpler.
*
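 * A minimal usage sketch (values are illustrative):
 *
 * <pre>
 * byte[] data = new byte[] { 1, 2, 3 };
 * ByteArrayImpl array = new ByteArrayImpl(data); // wraps the array without copying
 * int available = array.getBytesAvailable(); // 3 for this buffer
 * </pre>
 *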
* @see java.nio.ByteBuffer#wrap(byte[], int, int)
*
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.1.0
*/
public class ByteArrayImpl extends AbstractByteArray {
private final byte[] buffer;
private int minimumIndex;
private int maximumIndex;
/**
* The constructor.
*
* @param capacity is the {@code length} of the internal {@link #getBytes() buffer}.
*/
public ByteArrayImpl(int capacity) {
this(new byte[capacity], 0, -1);
}
/**
* The constructor.
*
* @param buffer is the internal {@link #getBytes() buffer}.
*/
public ByteArrayImpl(byte[] buffer) {
this(buffer, 0, buffer.length - 1);
}
/**
* The constructor.
*
* @param buffer is the internal {@link #getBytes() buffer}.
* @param startIndex is the {@link #getCurrentIndex() current index} as well as the {@link #getMinimumIndex() minimum
* index}.
* @param maximumIndex is the {@link #getMaximumIndex() maximum index}.
*/
public ByteArrayImpl(byte[] buffer, int startIndex, int maximumIndex) {
super();
if (buffer == null) {
throw new NlsNullPointerException("buffer");
}
this.buffer = buffer;
this.minimumIndex = startIndex;
this.maximumIndex = maximumIndex;
}
@Override
public byte[] getBytes() {
return this.buffer;
}
@Override
public int getCurrentIndex() {
return this.minimumIndex;
}
@Override
public int getMinimumIndex() {
return this.minimumIndex;
}
@Override
public int getMaximumIndex() {
return this.maximumIndex;
}
/**
* This method sets the {@link #getMaximumIndex() maximumIndex}. This may be useful if the buffer should be reused.
* <br>
* <b>ATTENTION:</b><br>
* Be very careful and only use this method if you know what you are doing!
*
* @param maximumIndex is the {@link #getMaximumIndex() maximumIndex} to set. It has to be in the range from {@code 0}
* ( <code>{@link #getCurrentIndex() currentIndex} - 1</code>) to <code>{@link #getBytes()}.length</code>.
*/
protected void setMaximumIndex(int maximumIndex) {
this.maximumIndex = maximumIndex;
}
@Override
public ByteArrayImpl createSubArray(int minimum, int maximum) {
checkSubArray(minimum, maximum);
return new ByteArrayImpl(this.buffer, minimum, maximum);
}
@Override
public String toString() {
return new String(this.buffer, this.minimumIndex, getBytesAvailable());
}
}
| m-m-m/util | io/src/main/java/net/sf/mmm/util/io/base/ByteArrayImpl.java | Java | apache-2.0 | 2,897 |
/*
* Licensed to Crate under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership. Crate licenses this file
* to you under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial
* agreement.
*/
package io.crate.execution.engine.collect;
import io.crate.breaker.RamAccounting;
import io.crate.data.BatchIterator;
import io.crate.data.Row;
import io.crate.execution.engine.aggregation.impl.SumAggregation;
import io.crate.expression.reference.doc.lucene.BytesRefColumnReference;
import io.crate.expression.reference.doc.lucene.CollectorContext;
import io.crate.expression.reference.doc.lucene.LongColumnReference;
import io.crate.expression.reference.doc.lucene.LuceneCollectorExpression;
import io.crate.metadata.Functions;
import io.crate.metadata.Reference;
import io.crate.metadata.ReferenceIdent;
import io.crate.metadata.RelationName;
import io.crate.metadata.RowGranularity;
import io.crate.metadata.functions.Signature;
import io.crate.test.integration.CrateDummyClusterServiceUnitTest;
import io.crate.testing.TestingRowConsumer;
import io.crate.types.DataTypes;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import static io.crate.testing.TestingHelpers.createNodeContext;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.instanceOf;
public class DocValuesGroupByOptimizedIteratorTest extends CrateDummyClusterServiceUnitTest {
private Functions functions;
private IndexSearcher indexSearcher;
private List<Object[]> rows = List.of(
new Object[]{"1", 1L, 1L},
new Object[]{"0", 0L, 2L},
new Object[]{"1", 1L, 3L},
new Object[]{"0", 0L, 4L}
);
@Before
public void setup() throws IOException {
var nodeContext = createNodeContext();
functions = nodeContext.functions();
var indexWriter = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig());
for (var row : rows) {
Document doc = new Document();
doc.add(new SortedSetDocValuesField("x", BytesRefs.toBytesRef(row[0])));
doc.add(new NumericDocValuesField("y", (Long) row[1]));
doc.add(new NumericDocValuesField("z", (Long) row[2]));
indexWriter.addDocument(doc);
}
indexWriter.commit();
indexSearcher = new IndexSearcher(DirectoryReader.open(indexWriter));
}
@Test
public void test_group_by_doc_values_optimized_iterator_for_single_numeric_key() throws Exception {
SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified(
Signature.aggregate(
SumAggregation.NAME,
DataTypes.LONG.getTypeSignature(),
DataTypes.LONG.getTypeSignature()
),
List.of(DataTypes.LONG),
DataTypes.LONG
);
var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
aggregationField.setName("z");
var sumDocValuesAggregator = sumAggregation.getDocValueAggregator(
List.of(DataTypes.LONG),
List.of(aggregationField)
);
var keyExpressions = List.of(new LongColumnReference("y"));
var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forSingleKey(
List.of(sumDocValuesAggregator),
indexSearcher,
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "y"),
RowGranularity.DOC,
DataTypes.LONG,
null,
null
),
keyExpressions,
RamAccounting.NO_ACCOUNTING,
new MatchAllDocsQuery(),
new CollectorContext()
);
var rowConsumer = new TestingRowConsumer();
rowConsumer.accept(it, null);
assertThat(
rowConsumer.getResult(),
containsInAnyOrder(new Object[]{0L, 6L}, new Object[]{1L, 4L}));
}
@Test
public void test_group_by_doc_values_optimized_iterator_for_many_keys() throws Exception {
SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified(
Signature.aggregate(
SumAggregation.NAME,
DataTypes.LONG.getTypeSignature(),
DataTypes.LONG.getTypeSignature()
),
List.of(DataTypes.LONG),
DataTypes.LONG
);
var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
aggregationField.setName("z");
var sumDocValuesAggregator = sumAggregation.getDocValueAggregator(
List.of(DataTypes.LONG),
List.of(aggregationField)
);
var keyExpressions = List.of(new BytesRefColumnReference("x"), new LongColumnReference("y"));
var keyRefs = List.of(
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "x"),
RowGranularity.DOC,
DataTypes.STRING,
null,
null
),
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "y"),
RowGranularity.DOC,
DataTypes.LONG,
null,
null
)
);
var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forManyKeys(
List.of(sumDocValuesAggregator),
indexSearcher,
keyRefs,
keyExpressions,
RamAccounting.NO_ACCOUNTING,
new MatchAllDocsQuery(),
new CollectorContext()
);
var rowConsumer = new TestingRowConsumer();
rowConsumer.accept(it, null);
assertThat(
rowConsumer.getResult(),
containsInAnyOrder(new Object[]{"0", 0L, 6L}, new Object[]{"1", 1L, 4L})
);
}
@Test
public void test_optimized_iterator_stop_processing_on_kill() throws Exception {
Throwable expectedException = stopOnInterrupting(it -> it.kill(new InterruptedException("killed")));
assertThat(expectedException, instanceOf(InterruptedException.class));
}
@Test
public void test_optimized_iterator_stop_processing_on_close() throws Exception {
Throwable expectedException = stopOnInterrupting(BatchIterator::close);
assertThat(expectedException, instanceOf(IllegalStateException.class));
}
private Throwable stopOnInterrupting(Consumer<BatchIterator<Row>> interrupt) throws Exception {
CountDownLatch waitForLoadNextBatch = new CountDownLatch(1);
CountDownLatch pauseOnDocumentCollecting = new CountDownLatch(1);
CountDownLatch batchLoadingCompleted = new CountDownLatch(1);
BatchIterator<Row> it = createBatchIterator(() -> {
waitForLoadNextBatch.countDown();
try {
pauseOnDocumentCollecting.await(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
AtomicReference<Throwable> exception = new AtomicReference<>();
Thread t = new Thread(() -> {
try {
it.loadNextBatch().whenComplete((r, e) -> {
if (e != null) {
exception.set(e.getCause());
}
batchLoadingCompleted.countDown();
});
} catch (Exception e) {
exception.set(e);
}
});
t.start();
waitForLoadNextBatch.await(5, TimeUnit.SECONDS);
interrupt.accept(it);
pauseOnDocumentCollecting.countDown();
batchLoadingCompleted.await(5, TimeUnit.SECONDS);
return exception.get();
}
private BatchIterator<Row> createBatchIterator(Runnable onNextReader) {
return DocValuesGroupByOptimizedIterator.GroupByIterator.getIterator(
List.of(),
indexSearcher,
List.of(new LuceneCollectorExpression<>() {
@Override
public void setNextReader(LeafReaderContext context) {
onNextReader.run();
}
@Override
public Object value() {
return null;
}
}),
RamAccounting.NO_ACCOUNTING,
(states, key) -> {
},
(expressions) -> expressions.get(0).value(),
(key, cells) -> cells[0] = key,
new MatchAllDocsQuery(),
new CollectorContext()
);
}
}
| EvilMcJerkface/crate | server/src/test/java/io/crate/execution/engine/collect/DocValuesGroupByOptimizedIteratorTest.java | Java | apache-2.0 | 10,272 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespaclient;
public class ClusterDef {
private final String name;
public ClusterDef(String name) { this.name = name; }
public String getName() { return name; }
public String getRoute() { return "[Content:cluster=" + name + "]"; }
}
| vespa-engine/vespa | vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterDef.java | Java | apache-2.0 | 372 |
/*
* Copyright (C) 2018 the original author or authors.
*
* This file is part of jBB Application Project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*/
package org.jbb.security.rest.oauth.client;
import io.swagger.annotations.ApiModel;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
@Getter
@Setter
@Builder
@ApiModel("OAuthClientSecret")
@NoArgsConstructor(access = AccessLevel.PUBLIC)
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class ClientSecretDto {
private String clientSecret;
}
| jbb-project/jbb | domain-rest/jbb-security-rest/src/main/java/org/jbb/security/rest/oauth/client/ClientSecretDto.java | Java | apache-2.0 | 751 |
#
# Author:: Steven Danna (<[email protected]>)
# Author:: Tyler Cloke (<[email protected]>)
# Copyright:: Copyright (c) Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require_relative "../knife"
require_relative "../dist"
class Chef
class Knife
class UserCreate < Knife
attr_accessor :user_field
deps do
require_relative "../user_v1"
end
option :file,
short: "-f FILE",
long: "--file FILE",
description: "Write the private key to a file if the server generated one."
option :user_key,
long: "--user-key FILENAME",
description: "Set the initial default key for the user from a file on disk (cannot pass with --prevent-keygen)."
option :prevent_keygen,
short: "-k",
long: "--prevent-keygen",
description: "API V1 (#{Chef::Dist::SERVER_PRODUCT} 12.1+) only. Prevent server from generating a default key pair for you. Cannot be passed with --user-key.",
boolean: true
banner "knife user create USERNAME DISPLAY_NAME FIRST_NAME LAST_NAME EMAIL PASSWORD (options)"
def user
@user_field ||= Chef::UserV1.new
end
def create_user_from_hash(hash)
Chef::UserV1.from_hash(hash).create
end
def run
test_mandatory_field(@name_args[0], "username")
user.username @name_args[0]
test_mandatory_field(@name_args[1], "display name")
user.display_name @name_args[1]
test_mandatory_field(@name_args[2], "first name")
user.first_name @name_args[2]
test_mandatory_field(@name_args[3], "last name")
user.last_name @name_args[3]
test_mandatory_field(@name_args[4], "email")
user.email @name_args[4]
test_mandatory_field(@name_args[5], "password")
user.password @name_args[5]
if config[:user_key] && config[:prevent_keygen]
show_usage
ui.fatal("You cannot pass --user-key and --prevent-keygen")
exit 1
end
if !config[:prevent_keygen] && !config[:user_key]
user.create_key(true)
end
if config[:user_key]
user.public_key File.read(File.expand_path(config[:user_key]))
end
output = edit_hash(user)
final_user = create_user_from_hash(output)
ui.info("Created #{user}")
if final_user.private_key
if config[:file]
File.open(config[:file], "w") do |f|
f.print(final_user.private_key)
end
else
ui.msg final_user.private_key
end
end
end
end
end
end
| jaymzh/chef | lib/chef/knife/user_create.rb | Ruby | apache-2.0 | 3,187 |
import numpy as np
class WordClusters(object):
def __init__(self, vocab, clusters):
self.vocab = vocab
self.clusters = clusters
def ix(self, word):
"""
Returns the index on self.vocab and self.clusters for 'word'
"""
temp = np.where(self.vocab == word)[0]
if temp.size == 0:
raise KeyError("Word not in vocabulary")
else:
return temp[0]
def __getitem__(self, word):
return self.get_cluster(word)
def get_cluster(self, word):
"""
Returns the cluster number for a word in the vocabulary
"""
idx = self.ix(word)
return self.clusters[idx]
def get_words_on_cluster(self, cluster):
return self.vocab[self.clusters == cluster]
@classmethod
def from_text(cls, fname):
vocab = np.genfromtxt(fname, dtype=str, delimiter=" ", usecols=0)
clusters = np.genfromtxt(fname, dtype=int, delimiter=" ", usecols=1)
return cls(vocab=vocab, clusters=clusters)
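

# Minimal usage sketch (the file name below is hypothetical); expects the
# plain-text output of word2vec clustering: one "word cluster_id" pair per line.
if __name__ == "__main__":
    clusters = WordClusters.from_text("clusters.txt")
    print(clusters["dog"])                                  # cluster id for "dog"
    print(clusters.get_words_on_cluster(clusters["dog"]))   # all words in that cluster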
| danielfrg/word2vec | word2vec/wordclusters.py | Python | apache-2.0 | 1,041 |
package weixin.popular.bean.scan.crud;
import weixin.popular.bean.scan.base.ProductGet;
import weixin.popular.bean.scan.info.BrandInfo;
public class ProductCreate extends ProductGet {
private BrandInfo brand_info;
public BrandInfo getBrand_info() {
return brand_info;
}
public void setBrand_info(BrandInfo brand_info) {
this.brand_info = brand_info;
}
}
| liyiorg/weixin-popular | src/main/java/weixin/popular/bean/scan/crud/ProductCreate.java | Java | apache-2.0 | 395 |
<!doctype html>
<html ng-app="frontend">
<head>
<meta charset="utf-8">
<title>frontend</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width">
<!-- Place favicon.ico and apple-touch-icon.png in the root directory -->
<!-- build:css({.tmp/serve,src}) styles/vendor.css -->
<!-- bower:css -->
<!-- run `gulp inject` to automatically populate bower styles dependencies -->
<!-- endbower -->
<!-- endbuild -->
<!-- build:css({.tmp/serve,src}) styles/app.css -->
<!-- inject:css -->
<!-- css files will be automatically insert here -->
<!-- endinject -->
<!-- endbuild -->
</head>
<body>
<!--[if lt IE 10]>
<p class="browsehappy">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<div ui-view></div>
<!-- build:js(src) scripts/vendor.js -->
<!-- bower:js -->
<!-- run `gulp inject` to automatically populate bower script dependencies -->
<!-- endbower -->
<!-- endbuild -->
<!-- build:js({.tmp/serve,.tmp/partials}) scripts/app.js -->
<!-- inject:js -->
<!-- js files will be automatically insert here -->
<!-- endinject -->
<script src="https://api-maps.yandex.ru/2.1/?lang=ru_RU" type="text/javascript"></script>
<!-- inject:partials -->
<!-- angular templates will be automatically converted in js and inserted here -->
<!-- endinject -->
<!-- endbuild -->
</body>
</html>
| lunatik-210/maps-test | frontend/src/index.html | HTML | apache-2.0 | 1,575 |
package Paws::CloudDirectory::BatchDetachFromIndex;
use Moose;
has IndexReference => (is => 'ro', isa => 'Paws::CloudDirectory::ObjectReference', required => 1);
has TargetReference => (is => 'ro', isa => 'Paws::CloudDirectory::ObjectReference', required => 1);
1;
### main pod documentation begin ###
=head1 NAME
Paws::CloudDirectory::BatchDetachFromIndex
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::CloudDirectory::BatchDetachFromIndex object:
$service_obj->Method(Att1 => { IndexReference => $value, ..., TargetReference => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::CloudDirectory::BatchDetachFromIndex object:
$result = $service_obj->Method(...);
$result->Att1->IndexReference
=head1 DESCRIPTION
Detaches the specified object from the specified index inside a
BatchRead operation. For more information, see DetachFromIndex and
BatchReadRequest$Operations.
=head1 ATTRIBUTES
=head2 B<REQUIRED> IndexReference => L<Paws::CloudDirectory::ObjectReference>
A reference to the index object.
=head2 B<REQUIRED> TargetReference => L<Paws::CloudDirectory::ObjectReference>
A reference to the object being detached from the index.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::CloudDirectory>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/CloudDirectory/BatchDetachFromIndex.pm | Perl | apache-2.0 | 1,824 |
# Dematium graminum Lib. SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
Pl. crypt. Arduenna (Liège), fasc. no. 284 (1830)
#### Original name
Dematium graminum Lib.
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Dematium/Dematium graminum/README.md | Markdown | apache-2.0 | 219 |
#!/usr/bin/env bash
systemctl stop fuseki
systemctl stop marple
#general vars
echo ">>>> Updating Fuseki"
export TC_USER=fuseki
export TC_GROUP=fuseki
# set erb vars
# endpoint name for fuseki
export EP_NAME=core
export SVC=fuseki
export SVC_DESC="Jena-Fuseki Tomcat container"
export MARPLE_SVC=marple
export MARPLE_SVC_DESC="Marple service for fuseki Lucene indexes"
export JAVA_HOME=`type -p javac|xargs readlink -f|xargs dirname|xargs dirname`
export LUCENE_BO_VER=1.5.0
export LUCENE_BO_JAR="lucene-bo-${LUCENE_BO_VER}.jar"
export LUCENE_BO_REL="https://github.com/buda-base/lucene-bo/releases/download/v${LUCENE_BO_VER}/${LUCENE_BO_JAR}"
export LUCENE_ZH_VER=0.4.1
export LUCENE_ZH_JAR="lucene-zh-${LUCENE_ZH_VER}.jar"
export LUCENE_ZH_REL="https://github.com/buda-base/lucene-zh/releases/download/v${LUCENE_ZH_VER}/${LUCENE_ZH_JAR}"
export LUCENE_SA_VER=1.1.0
export LUCENE_SA_JAR="lucene-sa-${LUCENE_SA_VER}.jar"
export LUCENE_SA_REL="https://github.com/buda-base/lucene-sa/releases/download/v${LUCENE_SA_VER}/${LUCENE_SA_JAR}"
export MARPLE_REL="https://github.com/flaxsearch/marple/releases/download/v1.0/marple-1.0.jar"
if [ -d /mnt/data ] ; then
export DATA_DIR=/mnt/data ;
else
export DATA_DIR=/usr/local ;
fi
echo ">>>> DATA_DIR: " $DATA_DIR
export DOWNLOADS=$DATA_DIR/downloads
export THE_HOME=$DATA_DIR/$SVC
export THE_BASE=$THE_HOME/base
export CAT_HOME=$THE_HOME/tomcat
echo ">>>>>>>> updating {$EP_NAME}.ttl to {$THE_BASE}/configuration/"
erb /vagrant/conf/fuseki/ttl.erb > $THE_BASE/configuration/$EP_NAME.ttl
echo ">>>>>>>> updating qonsole-config.js to {$CAT_HOME}/webapps/fuseki/js/app/"
cp /vagrant/conf/fuseki/qonsole-config.js $CAT_HOME/webapps/fuseki/js/app/
echo ">>>>>>>> updating analyzers to {$CAT_HOME}/webapps/fuseki/WEB-INF/lib/"
# the lucene-bo jar has to be added to fuseki/WEB-INF/lib/ otherwise
# tomcat class loading cannot find rest of Lucene classes
rm -f $CAT_HOME/webapps/fuseki/WEB-INF/lib/lucene-bo-*.jar
rm -f $CAT_HOME/webapps/fuseki/WEB-INF/lib/lucene-sa-*.jar
rm -f $CAT_HOME/webapps/fuseki/WEB-INF/lib/lucene-zh-*.jar
pushd $DOWNLOADS;
# wget -q -c $LUCENE_BO_REL
wget -q $LUCENE_BO_REL -O $LUCENE_BO_JAR
cp $LUCENE_BO_JAR $CAT_HOME/webapps/fuseki/WEB-INF/lib/
wget -q -c $LUCENE_ZH_REL
cp $LUCENE_ZH_JAR $CAT_HOME/webapps/fuseki/WEB-INF/lib/
wget -q -c $LUCENE_SA_REL
cp $LUCENE_SA_JAR $CAT_HOME/webapps/fuseki/WEB-INF/lib/
popd
echo ">>>> restarting ${SVC}"
systemctl start fuseki
systemctl start marple
echo ">>>> ${SVC} service listening on ${MAIN_PORT}"
echo ">>>> Fuseki updating complete"
| BuddhistDigitalResourceCenter/buda-base | conf/fuseki/update.sh | Shell | apache-2.0 | 2,560 |
package com.mattinsler.guiceymongo.data.query;
import org.bson.BSON;
/**
 * Enumeration of BSON value types, mapping each type name to the
 * corresponding type code defined in {@link org.bson.BSON}.
 *
 * User: mattinsler
 * Date: 12/29/10
 * Time: 3:28 AM
 */
public enum BSONType {
Double(BSON.NUMBER),
String(BSON.STRING),
Object(BSON.OBJECT),
Array(BSON.ARRAY),
BinaryData(BSON.BINARY),
ObjectId(BSON.OID),
Boolean(BSON.BOOLEAN),
Date(BSON.DATE),
Null(BSON.NULL),
RegularExpression(BSON.REGEX),
Code(BSON.CODE),
Symbol(BSON.SYMBOL),
CodeWithScope(BSON.CODE_W_SCOPE),
Integer(BSON.NUMBER_INT),
Timestamp(BSON.TIMESTAMP),
Long(BSON.NUMBER_LONG),
MinKey(BSON.MINKEY),
MaxKey(BSON.MAXKEY);
private final byte _typeCode;
BSONType(byte typeCode) {
_typeCode = typeCode;
}
byte getTypeCode() {
return _typeCode;
}
}
| mattinsler/guiceymongo | src/main/java/com/mattinsler/guiceymongo/data/query/BSONType.java | Java | apache-2.0 | 891 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2017 Serge Rider ([email protected])
* Copyright (C) 2011-2012 Eugene Fradkin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.oracle.views;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.*;
import org.jkiss.dbeaver.core.DBeaverCore;
import org.jkiss.dbeaver.ext.oracle.model.OracleConstants;
import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore;
import org.jkiss.dbeaver.model.DBPDataSourceContainer;
import org.jkiss.dbeaver.ui.UIUtils;
import org.jkiss.dbeaver.ui.preferences.PreferenceStoreDelegate;
import org.jkiss.dbeaver.ui.preferences.TargetPrefPage;
import org.jkiss.dbeaver.utils.PrefUtils;
/**
* PrefPageOracle
*/
public class PrefPageOracle extends TargetPrefPage
{
public static final String PAGE_ID = "org.jkiss.dbeaver.preferences.oracle.general"; //$NON-NLS-1$
private Text explainTableText;
private Button rowidSupportCheck;
private Button enableDbmsOuputCheck;
public PrefPageOracle()
{
super();
setPreferenceStore(new PreferenceStoreDelegate(DBeaverCore.getGlobalPreferenceStore()));
}
@Override
protected boolean hasDataSourceSpecificOptions(DBPDataSourceContainer dataSourceDescriptor)
{
DBPPreferenceStore store = dataSourceDescriptor.getPreferenceStore();
return
store.contains(OracleConstants.PREF_EXPLAIN_TABLE_NAME) ||
store.contains(OracleConstants.PREF_SUPPORT_ROWID) ||
store.contains(OracleConstants.PREF_DBMS_OUTPUT)
;
}
@Override
protected boolean supportsDataSourceSpecificOptions()
{
return true;
}
@Override
protected Control createPreferenceContent(Composite parent)
{
Composite composite = UIUtils.createPlaceholder(parent, 1);
{
Group planGroup = UIUtils.createControlGroup(composite, "Execution plan", 2, GridData.FILL_HORIZONTAL, 0);
Label descLabel = new Label(planGroup, SWT.WRAP);
descLabel.setText("By default plan table in current or SYS schema will be used.\nYou may set some particular fully qualified plan table name here.");
GridData gd = new GridData(GridData.HORIZONTAL_ALIGN_BEGINNING);
gd.horizontalSpan = 2;
descLabel.setLayoutData(gd);
explainTableText = UIUtils.createLabelText(planGroup, "Plan table", "", SWT.BORDER, new GridData(GridData.FILL_HORIZONTAL));
}
{
Group planGroup = UIUtils.createControlGroup(composite, "Misc", 2, GridData.FILL_HORIZONTAL, 0);
rowidSupportCheck = UIUtils.createLabelCheckbox(planGroup, "Use ROWID to identify rows", true);
enableDbmsOuputCheck = UIUtils.createLabelCheckbox(planGroup, "Enable DBMS Output", true);
}
return composite;
}
@Override
protected void loadPreferences(DBPPreferenceStore store)
{
explainTableText.setText(store.getString(OracleConstants.PREF_EXPLAIN_TABLE_NAME));
rowidSupportCheck.setSelection(store.getBoolean(OracleConstants.PREF_SUPPORT_ROWID));
enableDbmsOuputCheck.setSelection(store.getBoolean(OracleConstants.PREF_DBMS_OUTPUT));
}
@Override
protected void savePreferences(DBPPreferenceStore store)
{
store.setValue(OracleConstants.PREF_EXPLAIN_TABLE_NAME, explainTableText.getText());
store.setValue(OracleConstants.PREF_SUPPORT_ROWID, rowidSupportCheck.getSelection());
store.setValue(OracleConstants.PREF_DBMS_OUTPUT, enableDbmsOuputCheck.getSelection());
PrefUtils.savePreferenceStore(store);
}
@Override
protected void clearPreferences(DBPPreferenceStore store)
{
store.setToDefault(OracleConstants.PREF_EXPLAIN_TABLE_NAME);
store.setToDefault(OracleConstants.PREF_SUPPORT_ROWID);
store.setToDefault(OracleConstants.PREF_DBMS_OUTPUT);
}
@Override
protected String getPropertyPageID()
{
return PAGE_ID;
}
} | ruspl-afed/dbeaver | plugins/org.jkiss.dbeaver.ext.oracle/src/org/jkiss/dbeaver/ext/oracle/views/PrefPageOracle.java | Java | apache-2.0 | 4,753 |
package com.winsun.fruitmix.model;
/**
* Created by Administrator on 2016/7/6.
*/
public class Equipment {
private String serviceName;
private String host;
private int port;
public Equipment(String serviceName, String host, int port) {
this.serviceName = serviceName;
this.host = host;
this.port = port;
}
public Equipment() {
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
}
| andywu91/fruitMix-android | app/src/main/java/com/winsun/fruitmix/model/Equipment.java | Java | apache-2.0 | 815 |
---
title: End of August Blooms
date: 2009-08-31 00:00:00 -06:00
categories:
- whats-blooming
layout: post
blog-banner: whats-blooming-now-summer.jpg
post-date: August 31, 2009
post-time: 8:09 AM
blog-image: wbn-default.jpg
---
<div class = "text-center">
<p>Look for these beauties as you stroll through the garden.</p>
</div>
<div class="text-center">
<img src="/images/blogs/old-posts/Buddleja davidii 'Pink Delight'.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Caryopteris x clandonensis 'First Choice'.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Chasmanthium latifolium.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Cirsium undulatum.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class="text-center">
<img src="/images/blogs/old-posts/Linaria dalmatica.jpg" width="450" height="450" alt="" title="" />
</div>
<br>
<div class= "text-center">
Don't forget to visit the What's Blooming Blog every day for cool and interesting facts about each of these plants.
</div>
| redbuttegarden/redbuttegarden.github.io | whats-blooming/_posts/2009-08-31-end_of_august_blooms.html | HTML | apache-2.0 | 1,240 |
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"math"
"time"
"github.com/pingcap/tidb/util/collate"
)
// CompareInt64 returns an integer comparing the int64 x to y.
func CompareInt64(x, y int64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareUint64 returns an integer comparing the uint64 x to y.
func CompareUint64(x, y uint64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// VecCompareUU returns []int64 comparing the []uint64 x to []uint64 y
func VecCompareUU(x, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareII returns []int64 comparing the []int64 x to []int64 y
func VecCompareII(x, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < y[i] {
res[i] = -1
} else if x[i] == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
// VecCompareUI returns []int64 comparing the []uint64 x to []int64 y
func VecCompareUI(x []uint64, y, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if y[i] < 0 || x[i] > math.MaxInt64 {
res[i] = 1
} else if int64(x[i]) < y[i] {
res[i] = -1
} else if int64(x[i]) == y[i] {
res[i] = 0
} else {
res[i] = 1
}
}
}
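// For illustration, VecCompareUI above gives, for example:
//
//	x := []uint64{1, math.MaxInt64 + 1}
//	y := []int64{-1, 5}
//	res := make([]int64, 2)
//	VecCompareUI(x, y, res) // res == []int64{1, 1}: a negative y compares below any
//	                        // uint64 value, and any x greater than math.MaxInt64
//	                        // compares above any int64.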
// VecCompareIU returns []int64 comparing the []int64 x to []uint64 y
func VecCompareIU(x []int64, y []uint64, res []int64) {
n := len(x)
for i := 0; i < n; i++ {
if x[i] < 0 || uint64(y[i]) > math.MaxInt64 {
res[i] = -1
} else if x[i] < int64(y[i]) {
res[i] = -1
} else if x[i] == int64(y[i]) {
res[i] = 0
} else {
res[i] = 1
}
}
}
// CompareFloat64 returns an integer comparing the float64 x to y.
func CompareFloat64(x, y float64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
// CompareString returns an integer comparing the string x to y with the specified collation and length.
func CompareString(x, y, collation string, length int) int {
return collate.GetCollator(collation).Compare(x, y, collate.NewCollatorOption(length))
}
// CompareDuration returns an integer comparing the duration x to y.
func CompareDuration(x, y time.Duration) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
| Cofyc/tidb | types/compare.go | GO | apache-2.0 | 2,788 |
package com.wangshan.service.impl;
import com.wangshan.dao.UserDao;
import com.wangshan.models.User;
import com.wangshan.service.ValidateService;
import com.wangshan.utils.gabriel.EncryptUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* Created by Administrator on 2015/11/15.
*/
@Service
public class ValidateServiceImpl implements ValidateService{
@Autowired
private UserDao userDao;
@Override
public Boolean validatePassword(String email, String password){
User user = userDao.getUserByEmail(email);
if(user != null && new EncryptUtil().encrypt(password + "-" + user.getSalt(), "SHA-1").equals(user.getPassword())){
return true;
} else {
return false;
}
}
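    // Illustrative sketch of the check above (the setter names are assumptions,
    // the User class is not shown here): a credential stored as
    //     user.setSalt("abc123");
    //     user.setPassword(new EncryptUtil().encrypt("secret" + "-" + "abc123", "SHA-1"));
    // makes validatePassword(thatUsersEmail, "secret") return true, because the
    // same "<password>-<salt>" string is hashed and compared.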
@Override
public Boolean validateMobileRepeat(String mobile){
return false;
}
@Override
public Boolean validateEmailRepeat(String email){
return false;
}
}
| sanyiwangshan/my_space | backend/src/main/java/com/wangshan/service/impl/ValidateServiceImpl.java | Java | apache-2.0 | 1,047 |
# Mitozus scabridulus Miers SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Gentianales/Apocynaceae/Mitozus/Mitozus scabridulus/README.md | Markdown | apache-2.0 | 175 |
## Versioning & Releasing
Increments in the version should be done by one of the project owners.
The version should follow the standard `major.minor.patch` style; see http://semver.org/, which defines the specification. The examples below are a paraphrasing of that specification.
### Examples of changes that would warrant a `patch` version change
* Small changes that are completely backwards compatible, normally bug fixes.
* Changes in dependencies.
### Examples of changes that would warrant a `minor` version change
* Introduction of new functionality, without breaking backwards compatibility.
### Examples of changes that would warrant a `major` version change
* Any break in backwards compatibility must result in an increment of the `major` version.
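For example, under these rules a backwards compatible bug fix would take `1.2.3` to `1.2.4`, new backwards compatible functionality would take `1.2.3` to `1.3.0`, and a breaking change would take `1.2.3` to `2.0.0`.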
| bbc/process-helper | VERSIONING.md | Markdown | apache-2.0 | 767 |
<?php
/**
* Created by PhpStorm.
* User: Mohammad Eslahi Sani
* Date: 04/10/1394
* Time: 9:06 PM
*/
//dl('php_pdo_sqlsrv_55_ts.dll');
// phpinfo();
if(isset($_SESSION['login'])){
}
elseif(isset($_POST['username']) && isset($_POST['password'])){
$u = $_POST['username'];
$p = $_POST['password'];
// exec("echo username and password are: $u --- $p >> debug.txt");
$serverName = "MMDES"; //serverName\instanceName
// Since UID and PWD are not specified in the $connectionInfo array,
// The connection will be attempted using Windows Authentication.
$connectionInfo = array( "Database"=>"officeAutomation");
$conn = sqlsrv_connect( $serverName, $connectionInfo);
if( $conn ) {
// echo "Connection established.<br />";
}else{
// echo "Connection could not be established.<br />";
// die( print_r( sqlsrv_errors(), true));
exec("echo connection was not established >> debug.txt");
}
$query = "";
$query = "SELECT * FROM sysUser WHERE Username='".$u . "'";
$result = sqlsrv_query( $conn , $query);
if (!$result)
die( print_r( sqlsrv_errors(), true));
$row = sqlsrv_fetch_array($result);
if( $row['Password'] == $p ){
$query2 = "SELECT firstName,lastName,Gender FROM Person JOIN Employee on Person.NationalID=Employee.NationalID WHERE PersonalID='".$row['PersonalID'] . "'";
$result2 = sqlsrv_query( $conn , $query2);
if (!$result2)
die( print_r( sqlsrv_errors(), true));
$row2 = sqlsrv_fetch_array($result2);
// print_r($row2);
$tempAry=array('username'=>$row['Username'],'role'=>$row['Role'],'personalId'=>$row['PersonalID'],
'firstName'=>$row2['firstName'],'lastName'=>$row2['lastName'],'gender'=>$row2['Gender']);
$_SESSION['login'] = $tempAry;
header('location: ');
// print_r($_SESSION);
}
else{
header('location: ?invalid');
die();
}
}
elseif (isset($_GET['invalid'])){
?>
<body>
<div class="container sign-in-container">
<p class="invalid-text">Invalid username or password,<br> Try again!</p>
<form method="post" class="form-signin login-form">
<h2 class="form-signin-heading">Please sign in</h2>
<label for="inputEmail" class="sr-only">Username</label>
<input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
<label for="inputPassword" class="password-input sr-only">Password</label>
<input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
<button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
</form>
</div> <!-- /container -->
</body>
</html>
<?php
}
else{
?>
<body>
<div class="container sign-in-container">
<form method="post" class="form-signin login-form">
<h2 class="form-signin-heading">Please sign in</h2>
<label for="inputEmail" class="sr-only">Username</label>
<input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
<label for="inputPassword" class="password-input sr-only">Password</label>
<input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
<button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
</form>
</div> <!-- /container -->
</body>
</html>
<?php } ?> | mrhsce/officeAutomation | login.php | PHP | apache-2.0 | 3,700 |
# Croton touranensis Gagnep. SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Malpighiales/Euphorbiaceae/Croton/Croton touranensis/README.md | Markdown | apache-2.0 | 184 |
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.util.prop;
import ghidra.util.*;
import java.io.*;
/**
* Handles general storage and retrieval of saveable objects indexed by long
* keys.
*
*/
public class SaveableObjectPropertySet extends PropertySet {
private final static long serialVersionUID = 1;
/**
* Constructor for SaveableObjectPropertySet.
* @param name the name associated with this property set.
*/
public SaveableObjectPropertySet(String name, Class<?> objectClass) {
super(name, objectClass);
if (!Saveable.class.isAssignableFrom(objectClass)) {
throw new IllegalArgumentException("Class "+objectClass+
"does not implement the Saveable interface");
}
try {
objectClass.newInstance();
} catch(Exception e) {
throw new IllegalArgumentException("Class "+objectClass+
"must be public and have a public, no args, constructor");
}
}
/**
* @see PropertySet#getDataSize()
*/
@Override
public int getDataSize() {
return 20;
}
/**
* Stores a saveable object at the given index. Any object currently at
* that index will be replaced by the new object.
* @param index the index at which to store the saveable object.
* @param value the saveable object to store.
*/
public void putObject(long index, Saveable value) {
PropertyPage page = getOrCreatePage(getPageID(index));
int n = page.getSize();
page.addSaveableObject(getPageOffset(index), value);
numProperties += page.getSize() - n;
}
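	// Illustrative usage of the accessors above and below (the variable names are
	// assumptions only):
	//     propertySet.putObject(0x401000L, someSaveable);
	//     Saveable back = propertySet.getObject(0x401000L); // returns someSaveable
	//     propertySet.getObject(0x500000L);                 // null - nothing stored there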
/**
* Retrieves the saveable object stored at the given index.
* @param index the index at which to retrieve the saveable object.
* @return the saveable object stored at the given index or null if no
* object is stored at the index.
*/
public Saveable getObject(long index) {
PropertyPage page = getPage(getPageID(index));
if (page != null) {
return page.getSaveableObject(getPageOffset(index));
}
return null;
}
/* (non-Javadoc)
* @see ghidra.util.prop.PropertySet#moveIndex(long, long)
*/
@Override
protected void moveIndex(long from, long to) {
Saveable value = getObject(from);
remove(from);
putObject(to, value);
}
/**
* saves the property at the given index to the given output stream.
*/
@Override
protected void saveProperty(ObjectOutputStream oos, long index) throws IOException {
Saveable obj = getObject(index);
oos.writeObject(obj.getClass().getName());
obj.save(new ObjectStorageStreamAdapter(oos));
}
/**
* restores the property from the input stream to the given index.
*/
@Override
protected void restoreProperty(ObjectInputStream ois, long index)
throws IOException, ClassNotFoundException {
try {
String className = (String)ois.readObject();
Class<?> c = Class.forName(className);
Saveable obj = (Saveable)c.newInstance();
obj.restore(new ObjectStorageStreamAdapter(ois));
putObject(index, obj);
} catch (Exception e) {
Msg.showError(this, null, null, null, e);
}
}
/**
*
* @see ghidra.util.prop.PropertySet#applyValue(PropertyVisitor, long)
*/
@Override
public void applyValue(PropertyVisitor visitor, long addr) {
Saveable obj = getObject(addr);
if (obj != null) {
visitor.visit(obj);
}
}
}
| NationalSecurityAgency/ghidra | Ghidra/Framework/Generic/src/main/java/ghidra/util/prop/SaveableObjectPropertySet.java | Java | apache-2.0 | 3,858 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text.RegularExpressions;
using HtmlAgilityPack;
namespace Html2Markdown.Replacement
{
internal static class HtmlParser
{
private static readonly Regex NoChildren = new Regex(@"<(ul|ol)\b[^>]*>(?:(?!<ul|<ol)[\s\S])*?<\/\1>");
internal static string ReplaceLists(string html)
{
var finalHtml = html;
while (HasNoChildLists(finalHtml))
{
var listToReplace = NoChildren.Match(finalHtml).Value;
var formattedList = ReplaceList(listToReplace);
finalHtml = finalHtml.Replace(listToReplace, formattedList);
}
return finalHtml;
}
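		// For illustration: given "<ul><li>one</li><li>two</li></ul>", the loop above
		// rewrites it (it has no child lists) into roughly
		//
		//     * one
		//     * two
		//
		// and any nested <ul>/<ol> is converted first, so the outer list is processed
		// with already-converted markdown inside it.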
private static string ReplaceList(string html)
{
var list = Regex.Match(html, @"<(ul|ol)\b[^>]*>([\s\S]*?)<\/\1>");
var listType = list.Groups[1].Value;
var listItems = Regex.Split(list.Groups[2].Value, "<li[^>]*>");
if(listItems.All(string.IsNullOrEmpty))
{
return String.Empty;
}
listItems = listItems.Skip(1).ToArray();
var counter = 0;
var markdownList = new List<string>();
listItems.ToList().ForEach(listItem =>
{
var listPrefix = (listType.Equals("ol")) ? $"{++counter}. " : "* ";
var finalList = listItem.Replace(@"</li>", string.Empty);
if (finalList.Trim().Length == 0) {
return;
}
finalList = Regex.Replace(finalList, @"^\s+", string.Empty);
finalList = Regex.Replace(finalList, @"\n{2}", $"{Environment.NewLine}{Environment.NewLine} ");
// indent nested lists
finalList = Regex.Replace(finalList, @"\n([ ]*)+(\*|\d+\.)", "\n$1 $2");
markdownList.Add($"{listPrefix}{finalList}");
});
return Environment.NewLine + Environment.NewLine + markdownList.Aggregate((current, item) => current + Environment.NewLine + item);
}
private static bool HasNoChildLists(string html)
{
return NoChildren.Match(html).Success;
}
internal static string ReplacePre(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//pre");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var tagContents = node.InnerHtml;
var markdown = ConvertPre(tagContents);
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
private static string ConvertPre(string html)
{
var tag = TabsToSpaces(html);
tag = IndentNewLines(tag);
return Environment.NewLine + Environment.NewLine + tag + Environment.NewLine;
}
private static string IndentNewLines(string tag)
{
return tag.Replace(Environment.NewLine, Environment.NewLine + " ");
}
private static string TabsToSpaces(string tag)
{
return tag.Replace("\t", " ");
}
internal static string ReplaceImg(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//img");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var src = node.Attributes.GetAttributeOrEmpty("src");
var alt = node.Attributes.GetAttributeOrEmpty("alt");
var title = node.Attributes.GetAttributeOrEmpty("title");
var markdown = $@" ? $" \"{title}\"" : "")})";
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceAnchor(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//a");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var linkText = node.InnerHtml;
var href = node.Attributes.GetAttributeOrEmpty("href");
var title = node.Attributes.GetAttributeOrEmpty("title");
var markdown = "";
if (!IsEmptyLink(linkText, href))
{
markdown = $@"[{linkText}]({href}{((title.Length > 0) ? $" \"{title}\"" : "")})";
}
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceCode(string html)
{
var finalHtml = html;
var doc = GetHtmlDocument(finalHtml);
var nodes = doc.DocumentNode.SelectNodes("//code");
if (nodes == null) {
return finalHtml;
}
nodes.ToList().ForEach(node =>
{
var code = node.InnerHtml;
string markdown;
if(IsSingleLineCodeBlock(code))
{
markdown = "`" + code + "`";
}
else
{
markdown = ReplaceBreakTagsWithNewLines(code);
markdown = Regex.Replace(markdown, "^\r\n", "");
markdown = Regex.Replace(markdown, "\r\n$", "");
markdown = "```" + Environment.NewLine + markdown + Environment.NewLine + "```";
}
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
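		// For illustration: "<code>var x = 1;</code>" (no newlines in the inner HTML)
		// becomes `var x = 1;`, while a multi-line <code> block has its <br> tags
		// stripped and is wrapped in a fenced ``` block.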
private static string ReplaceBreakTagsWithNewLines(string code)
{
return Regex.Replace(code, "<\\s*?/?\\s*?br\\s*?>", "");
}
private static bool IsSingleLineCodeBlock(string code)
{
// single line code blocks do not have new line characters
return code.IndexOf(Environment.NewLine, StringComparison.Ordinal) == -1;
}
public static string ReplaceBlockquote(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//blockquote");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var quote = node.InnerHtml;
var lines = quote.TrimStart().Split(new[] { Environment.NewLine }, StringSplitOptions.None);
var markdown = "";
lines.ToList().ForEach(line =>
{
markdown += $"> {line.TrimEnd()}{Environment.NewLine}";
});
markdown = Regex.Replace(markdown, @"(>\s\r\n)+$", "");
markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine + Environment.NewLine;
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
public static string ReplaceEntites(string html)
{
return WebUtility.HtmlDecode(html);
}
public static string ReplaceParagraph(string html)
{
var doc = GetHtmlDocument(html);
var nodes = doc.DocumentNode.SelectNodes("//p");
if (nodes == null) {
return html;
}
nodes.ToList().ForEach(node =>
{
var text = node.InnerHtml;
var markdown = Regex.Replace(text, @"\s+", " ");
markdown = markdown.Replace(Environment.NewLine, " ");
markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine;
ReplaceNode(node, markdown);
});
return doc.DocumentNode.OuterHtml;
}
private static bool IsEmptyLink(string linkText, string href)
{
var length = linkText.Length + href.Length;
return length == 0;
}
private static HtmlDocument GetHtmlDocument(string html)
{
var doc = new HtmlDocument();
doc.LoadHtml(html);
return doc;
}
private static void ReplaceNode(HtmlNode node, string markdown)
{
if (string.IsNullOrEmpty(markdown))
{
node.ParentNode.RemoveChild(node);
}
else
{
node.ReplaceNodeWithString(markdown);
}
}
}
} | baynezy/Html2Markdown | src/Html2Markdown/Replacement/HtmlParser.cs | C# | apache-2.0 | 7,024 |
"""api_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
version = 'v1.0'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'api/%s/' % version, include('apis.urls'))
]
| AutohomeOps/Assets_Report | api_server/api_server/urls.py | Python | apache-2.0 | 846 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.seda;
import org.apache.camel.CamelExecutionException;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
/**
* @version
*/
public class SedaInOutWithErrorDeadLetterChannelTest extends ContextTestSupport {
public void testInOutWithErrorUsingDLC() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:dead").expectedMessageCount(1);
try {
template.requestBody("direct:start", "Hello World", String.class);
fail("Should have thrown an exception");
} catch (CamelExecutionException e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Damn I cannot do this", e.getCause().getMessage());
}
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
errorHandler(deadLetterChannel("mock:dead").maximumRedeliveries(2).redeliveryDelay(0).handled(false));
from("direct:start").to("seda:foo");
from("seda:foo").transform(constant("Bye World"))
.throwException(new IllegalArgumentException("Damn I cannot do this"))
.to("mock:result");
}
};
}
} | everttigchelaar/camel-svn | camel-core/src/test/java/org/apache/camel/component/seda/SedaInOutWithErrorDeadLetterChannelTest.java | Java | apache-2.0 | 2,341 |
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package coniks provides hashing for maps.
package coniks
import (
"bytes"
"crypto"
"encoding/binary"
"fmt"
"github.com/golang/glog"
"github.com/google/trillian"
"github.com/google/trillian/merkle/hashers"
)
func init() {
hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA512_256, Default)
hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA256, New(crypto.SHA256))
}
// Domain separation prefixes
var (
leafIdentifier = []byte("L")
emptyIdentifier = []byte("E")
// Default is the standard CONIKS hasher.
Default = New(crypto.SHA512_256)
// Some zeroes, to avoid allocating temporary slices.
zeroes = make([]byte, 32)
)
// hasher implements the sparse merkle tree hashing algorithm specified in the CONIKS paper.
type hasher struct {
crypto.Hash
}
// New creates a new hashers.TreeHasher using the passed in hash function.
func New(h crypto.Hash) hashers.MapHasher {
return &hasher{Hash: h}
}
// EmptyRoot returns the root of an empty tree.
func (m *hasher) EmptyRoot() []byte {
panic("EmptyRoot() not defined for coniks.Hasher")
}
// HashEmpty returns the hash of an empty branch at a given height.
// A height of 0 indicates the hash of an empty leaf.
// Empty branches within the tree are plain interior nodes e1 = H(e0, e0) etc.
func (m *hasher) HashEmpty(treeID int64, index []byte, height int) []byte {
depth := m.BitLen() - height
buf := bytes.NewBuffer(make([]byte, 0, 32))
h := m.New()
buf.Write(emptyIdentifier)
binary.Write(buf, binary.BigEndian, uint64(treeID))
m.writeMaskedIndex(buf, index, depth)
binary.Write(buf, binary.BigEndian, uint32(depth))
h.Write(buf.Bytes())
r := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashEmpty(%x, %d): %x", index, depth, r)
}
return r
}
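// For illustration, the preimage hashed by HashEmpty above is laid out as
//
//	"E" || uint64(treeID) || maskedIndex || uint32(depth)
//
// where maskedIndex is index with everything after the leading `depth` bits
// zeroed out (see writeMaskedIndex below), so empty subtrees at the same
// tree, position and depth always hash to the same value.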
// HashLeaf calculate the merkle tree leaf value:
// H(Identifier || treeID || depth || index || dataHash)
func (m *hasher) HashLeaf(treeID int64, index []byte, leaf []byte) []byte {
depth := m.BitLen()
buf := bytes.NewBuffer(make([]byte, 0, 32+len(leaf)))
h := m.New()
buf.Write(leafIdentifier)
binary.Write(buf, binary.BigEndian, uint64(treeID))
m.writeMaskedIndex(buf, index, depth)
binary.Write(buf, binary.BigEndian, uint32(depth))
buf.Write(leaf)
h.Write(buf.Bytes())
p := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashLeaf(%x, %d, %s): %x", index, depth, leaf, p)
}
return p
}
// HashChildren returns the internal Merkle tree node hash of the two child nodes l and r.
// The hashed structure is H(l || r).
func (m *hasher) HashChildren(l, r []byte) []byte {
buf := bytes.NewBuffer(make([]byte, 0, 32+len(l)+len(r)))
h := m.New()
buf.Write(l)
buf.Write(r)
h.Write(buf.Bytes())
p := h.Sum(nil)
if glog.V(5) {
glog.Infof("HashChildren(%x, %x): %x", l, r, p)
}
return p
}
// BitLen returns the number of bits in the hash function.
func (m *hasher) BitLen() int {
return m.Size() * 8
}
// leftmask contains bitmasks indexed such that the left x bits are set. It is
// indexed by bit position from 0-7; 0 is special cased to 0xFF since 8 mod 8
// is 0. leftmask is only used to mask the last byte.
var leftmask = [8]byte{0xFF, 0x80, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE}
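// For example, leftmask[1] == 0x80 (binary 1000 0000) keeps only the most
// significant bit of the final byte, while leftmask[3] == 0xE0 (binary 1110 0000)
// keeps its top three bits.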
// writeMaskedIndex writes the left depth bits of index directly to a Buffer (which never
// returns an error on writes). This is then padded with zero bits to the Size()
// of the index values in use by this hashes. This avoids the need to allocate
// space for and copy a value that will then be discarded immediately.
func (m *hasher) writeMaskedIndex(b *bytes.Buffer, index []byte, depth int) {
if got, want := len(index), m.Size(); got != want {
panic(fmt.Sprintf("index len: %d, want %d", got, want))
}
if got, want := depth, m.BitLen(); got < 0 || got > want {
panic(fmt.Sprintf("depth: %d, want <= %d && >= 0", got, want))
}
prevLen := b.Len()
if depth > 0 {
// Write the first depthBytes, if there are any complete bytes.
depthBytes := depth >> 3
if depthBytes > 0 {
b.Write(index[:depthBytes])
}
// Mask off unwanted bits in the last byte, if there is an incomplete one.
if depth%8 != 0 {
b.WriteByte(index[depthBytes] & leftmask[depth%8])
}
}
// Pad to the correct length with zeros. Allow for future hashers that
// might be > 256 bits.
needZeros := prevLen + len(index) - b.Len()
for needZeros > 0 {
chunkSize := needZeros
if chunkSize > 32 {
chunkSize = 32
}
b.Write(zeroes[:chunkSize])
needZeros -= chunkSize
}
}
| Martin2112/trillian | merkle/coniks/coniks.go | GO | apache-2.0 | 5,031 |
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the waf-2015-08-24.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Net;
using System.Text;
using System.Xml.Serialization;
using Amazon.WAF.Model;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using Amazon.Runtime.Internal.Util;
using ThirdParty.Json.LitJson;
namespace Amazon.WAF.Model.Internal.MarshallTransformations
{
/// <summary>
/// Response Unmarshaller for RuleSummary Object
/// </summary>
public class RuleSummaryUnmarshaller : IUnmarshaller<RuleSummary, XmlUnmarshallerContext>, IUnmarshaller<RuleSummary, JsonUnmarshallerContext>
{
/// <summary>
        /// Unmarshals the response from the service into the response class.
/// </summary>
/// <param name="context"></param>
/// <returns></returns>
RuleSummary IUnmarshaller<RuleSummary, XmlUnmarshallerContext>.Unmarshall(XmlUnmarshallerContext context)
{
throw new NotImplementedException();
}
/// <summary>
/// Unmarshaller the response from the service to the response class.
/// </summary>
/// <param name="context"></param>
/// <returns></returns>
public RuleSummary Unmarshall(JsonUnmarshallerContext context)
{
context.Read();
if (context.CurrentTokenType == JsonToken.Null)
return null;
RuleSummary unmarshalledObject = new RuleSummary();
int targetDepth = context.CurrentDepth;
while (context.ReadAtDepth(targetDepth))
{
if (context.TestExpression("Name", targetDepth))
{
var unmarshaller = StringUnmarshaller.Instance;
unmarshalledObject.Name = unmarshaller.Unmarshall(context);
continue;
}
if (context.TestExpression("RuleId", targetDepth))
{
var unmarshaller = StringUnmarshaller.Instance;
unmarshalledObject.RuleId = unmarshaller.Unmarshall(context);
continue;
}
}
return unmarshalledObject;
}
private static RuleSummaryUnmarshaller _instance = new RuleSummaryUnmarshaller();
/// <summary>
/// Gets the singleton.
/// </summary>
public static RuleSummaryUnmarshaller Instance
{
get
{
return _instance;
}
}
}
} | rafd123/aws-sdk-net | sdk/src/Services/WAF/Generated/Model/Internal/MarshallTransformations/RuleSummaryUnmarshaller.cs | C# | apache-2.0 | 3,306 |
# Contributing guidelines
## How to become a contributor and submit your own code
### Contributor License Agreements
We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles.
Please fill out either the individual or corporate Contributor License Agreement (CLA).
* If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
* If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests.
***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the main repository.
### Contributing code
If you have improvements to Copybara, send us your pull requests!
| google/copybara | CONTRIBUTING.md | Markdown | apache-2.0 | 1,101 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpoints
import (
"fmt"
"net/http"
gpath "path"
"reflect"
"sort"
"strings"
"time"
"unicode"
restful "github.com/emicklei/go-restful"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/apiserver/pkg/endpoints/handlers"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
genericfilters "k8s.io/apiserver/pkg/server/filters"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
const (
ROUTE_META_GVK = "x-kubernetes-group-version-kind"
ROUTE_META_ACTION = "x-kubernetes-action"
)
type APIInstaller struct {
group *APIGroupVersion
prefix string // Path prefix where API resources are to be registered.
minRequestTimeout time.Duration
enableAPIResponseCompression bool
}
// Struct capturing information about an action ("GET", "POST", "WATCH", "PROXY", etc).
type action struct {
Verb string // Verb identifying the action ("GET", "POST", "WATCH", "PROXY", etc).
Path string // The path of the action
Params []*restful.Parameter // List of parameters associated with the action.
Namer handlers.ScopeNamer
AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces
}
// An interface to see if one storage supports override its default verb for monitoring
type StorageMetricsOverride interface {
// OverrideMetricsVerb gives a storage object an opportunity to override the verb reported to the metrics endpoint
OverrideMetricsVerb(oldVerb string) (newVerb string)
}
// An interface to see if an object supports swagger documentation as a method
type documentable interface {
SwaggerDoc() map[string]string
}
// toDiscoveryKubeVerb maps an action.Verb to the logical kube verb, used for discovery
var toDiscoveryKubeVerb = map[string]string{
"CONNECT": "", // do not list in discovery.
"DELETE": "delete",
"DELETECOLLECTION": "deletecollection",
"GET": "get",
"LIST": "list",
"PATCH": "patch",
"POST": "create",
"PROXY": "proxy",
"PUT": "update",
"WATCH": "watch",
"WATCHLIST": "watch",
}
// Install handlers for API resources.
func (a *APIInstaller) Install() ([]metav1.APIResource, *restful.WebService, []error) {
var apiResources []metav1.APIResource
var errors []error
ws := a.newWebService()
// Register the paths in a deterministic (sorted) order to get a deterministic swagger spec.
paths := make([]string, len(a.group.Storage))
var i int = 0
for path := range a.group.Storage {
paths[i] = path
i++
}
sort.Strings(paths)
for _, path := range paths {
apiResource, err := a.registerResourceHandlers(path, a.group.Storage[path], ws)
if err != nil {
errors = append(errors, fmt.Errorf("error in registering resource: %s, %v", path, err))
}
if apiResource != nil {
apiResources = append(apiResources, *apiResource)
}
}
return apiResources, ws, errors
}
// newWebService creates a new restful webservice with the api installer's prefix and version.
func (a *APIInstaller) newWebService() *restful.WebService {
ws := new(restful.WebService)
ws.Path(a.prefix)
// a.prefix contains "prefix/group/version"
ws.Doc("API at " + a.prefix)
// Backwards compatibility, we accepted objects with empty content-type at V1.
// If we stop using go-restful, we can default empty content-type to application/json on an
// endpoint by endpoint basis
ws.Consumes("*/*")
mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer)
ws.Produces(append(mediaTypes, streamMediaTypes...)...)
ws.ApiVersion(a.group.GroupVersion.String())
return ws
}
// calculate the storage gvk, the gvk objects are converted to before persisted to the etcd.
func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) {
object := storage.New()
fqKinds, _, err := typer.ObjectKinds(object)
if err != nil {
return schema.GroupVersionKind{}, err
}
gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds)
if !ok {
return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
}
return gvk, nil
}
// GetResourceKind returns the external group version kind registered for the given storage
// object. If the storage object is a subresource and has an override supplied for it, it returns
// the group version kind supplied in the override.
func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) {
// Let the storage tell us exactly what GVK it has
if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok {
return gvkProvider.GroupVersionKind(groupVersion), nil
}
object := storage.New()
fqKinds, _, err := typer.ObjectKinds(object)
if err != nil {
return schema.GroupVersionKind{}, err
}
// a given go type can have multiple potential fully qualified kinds. Find the one that corresponds with the group
// we're trying to register here
fqKindToRegister := schema.GroupVersionKind{}
for _, fqKind := range fqKinds {
if fqKind.Group == groupVersion.Group {
fqKindToRegister = groupVersion.WithKind(fqKind.Kind)
break
}
}
if fqKindToRegister.Empty() {
return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion)
}
// group is guaranteed to match based on the check above
return fqKindToRegister, nil
}
func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storage, ws *restful.WebService) (*metav1.APIResource, error) {
admit := a.group.Admit
optionsExternalVersion := a.group.GroupVersion
if a.group.OptionsExternalVersion != nil {
optionsExternalVersion = *a.group.OptionsExternalVersion
}
resource, subresource, err := splitSubresource(path)
if err != nil {
return nil, err
}
group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version
fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer)
if err != nil {
return nil, err
}
versionedPtr, err := a.group.Creater.New(fqKindToRegister)
if err != nil {
return nil, err
}
defaultVersionedObject := indirectArbitraryPointer(versionedPtr)
kind := fqKindToRegister.Kind
isSubresource := len(subresource) > 0
// If there is a subresource, namespace scoping is defined by the parent resource
namespaceScoped := true
if isSubresource {
parentStorage, ok := a.group.Storage[resource]
if !ok {
return nil, fmt.Errorf("missing parent storage: %q", resource)
}
scoper, ok := parentStorage.(rest.Scoper)
if !ok {
return nil, fmt.Errorf("%q must implement scoper", resource)
}
namespaceScoped = scoper.NamespaceScoped()
} else {
scoper, ok := storage.(rest.Scoper)
if !ok {
return nil, fmt.Errorf("%q must implement scoper", resource)
}
namespaceScoped = scoper.NamespaceScoped()
}
// what verbs are supported by the storage, used to know what verbs we support per path
creater, isCreater := storage.(rest.Creater)
namedCreater, isNamedCreater := storage.(rest.NamedCreater)
lister, isLister := storage.(rest.Lister)
getter, isGetter := storage.(rest.Getter)
getterWithOptions, isGetterWithOptions := storage.(rest.GetterWithOptions)
gracefulDeleter, isGracefulDeleter := storage.(rest.GracefulDeleter)
collectionDeleter, isCollectionDeleter := storage.(rest.CollectionDeleter)
updater, isUpdater := storage.(rest.Updater)
patcher, isPatcher := storage.(rest.Patcher)
watcher, isWatcher := storage.(rest.Watcher)
connecter, isConnecter := storage.(rest.Connecter)
storageMeta, isMetadata := storage.(rest.StorageMetadata)
storageVersionProvider, isStorageVersionProvider := storage.(rest.StorageVersionProvider)
if !isMetadata {
storageMeta = defaultStorageMetadata{}
}
exporter, isExporter := storage.(rest.Exporter)
if !isExporter {
exporter = nil
}
versionedExportOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ExportOptions"))
if err != nil {
return nil, err
}
if isNamedCreater {
isCreater = true
}
var versionedList interface{}
if isLister {
list := lister.NewList()
listGVKs, _, err := a.group.Typer.ObjectKinds(list)
if err != nil {
return nil, err
}
versionedListPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind(listGVKs[0].Kind))
if err != nil {
return nil, err
}
versionedList = indirectArbitraryPointer(versionedListPtr)
}
versionedListOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ListOptions"))
if err != nil {
return nil, err
}
versionedCreateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("CreateOptions"))
if err != nil {
return nil, err
}
versionedPatchOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("PatchOptions"))
if err != nil {
return nil, err
}
versionedUpdateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("UpdateOptions"))
if err != nil {
return nil, err
}
var versionedDeleteOptions runtime.Object
var versionedDeleterObject interface{}
if isGracefulDeleter {
versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions"))
if err != nil {
return nil, err
}
versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions)
}
versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status"))
if err != nil {
return nil, err
}
versionedStatus := indirectArbitraryPointer(versionedStatusPtr)
var (
getOptions runtime.Object
versionedGetOptions runtime.Object
getOptionsInternalKind schema.GroupVersionKind
getSubpath bool
)
if isGetterWithOptions {
getOptions, getSubpath, _ = getterWithOptions.NewGetOptions()
getOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(getOptions)
if err != nil {
return nil, err
}
getOptionsInternalKind = getOptionsInternalKinds[0]
versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind))
if err != nil {
versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind))
if err != nil {
return nil, err
}
}
isGetter = true
}
var versionedWatchEvent interface{}
if isWatcher {
versionedWatchEventPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind("WatchEvent"))
if err != nil {
return nil, err
}
versionedWatchEvent = indirectArbitraryPointer(versionedWatchEventPtr)
}
var (
connectOptions runtime.Object
versionedConnectOptions runtime.Object
connectOptionsInternalKind schema.GroupVersionKind
connectSubpath bool
)
if isConnecter {
connectOptions, connectSubpath, _ = connecter.NewConnectOptions()
if connectOptions != nil {
connectOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(connectOptions)
if err != nil {
return nil, err
}
connectOptionsInternalKind = connectOptionsInternalKinds[0]
versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind))
if err != nil {
versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind))
if err != nil {
return nil, err
}
}
}
}
allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list.
nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string")
pathParam := ws.PathParameter("path", "path to the resource").DataType("string")
params := []*restful.Parameter{}
actions := []action{}
var resourceKind string
kindProvider, ok := storage.(rest.KindProvider)
if ok {
resourceKind = kindProvider.Kind()
} else {
resourceKind = kind
}
tableProvider, _ := storage.(rest.TableConvertor)
var apiResource metav1.APIResource
if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) &&
isStorageVersionProvider &&
storageVersionProvider.StorageVersion() != nil {
versioner := storageVersionProvider.StorageVersion()
gvk, err := getStorageVersionKind(versioner, storage, a.group.Typer)
if err != nil {
return nil, err
}
apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind)
}
// Get the list of actions for the given scope.
switch {
case !namespaceScoped:
// Handle non-namespace scoped resources like nodes.
resourcePath := resource
resourceParams := params
itemPath := resourcePath + "/{name}"
nameParams := append(params, nameParam)
proxyParams := append(nameParams, pathParam)
suffix := ""
if isSubresource {
suffix = "/" + subresource
itemPath = itemPath + suffix
resourcePath = itemPath
resourceParams = nameParams
}
apiResource.Name = path
apiResource.Namespaced = false
apiResource.Kind = resourceKind
namer := handlers.ContextBasedNaming{
SelfLinker: a.group.Linker,
ClusterScoped: true,
SelfLinkPathPrefix: gpath.Join(a.prefix, resource) + "/",
SelfLinkPathSuffix: suffix,
}
// Handler for standard REST verbs (GET, PUT, POST and DELETE).
// Add actions at the resource path: /api/apiVersion/resource
actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister)
actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater)
actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList)
// Add actions at the item path: /api/apiVersion/resource/{name}
actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter)
if getSubpath {
actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter)
}
actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater)
actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher)
actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher)
actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter)
actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath)
default:
namespaceParamName := "namespaces"
// Handler for standard REST verbs (GET, PUT, POST and DELETE).
namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string")
namespacedPath := namespaceParamName + "/{namespace}/" + resource
namespaceParams := []*restful.Parameter{namespaceParam}
resourcePath := namespacedPath
resourceParams := namespaceParams
itemPath := namespacedPath + "/{name}"
nameParams := append(namespaceParams, nameParam)
proxyParams := append(nameParams, pathParam)
itemPathSuffix := ""
if isSubresource {
itemPathSuffix = "/" + subresource
itemPath = itemPath + itemPathSuffix
resourcePath = itemPath
resourceParams = nameParams
}
apiResource.Name = path
apiResource.Namespaced = true
apiResource.Kind = resourceKind
namer := handlers.ContextBasedNaming{
SelfLinker: a.group.Linker,
ClusterScoped: false,
SelfLinkPathPrefix: gpath.Join(a.prefix, namespaceParamName) + "/",
SelfLinkPathSuffix: itemPathSuffix,
}
actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister)
actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater)
actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList)
actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter)
if getSubpath {
actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter)
}
actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater)
actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher)
actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher)
actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter)
actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath)
// list or post across namespace.
// For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods.
// TODO: more strongly type whether a resource allows these actions on "all namespaces" (bulk delete)
if !isSubresource {
actions = appendIf(actions, action{"LIST", resource, params, namer, true}, isLister)
// DEPRECATED in 1.11
actions = appendIf(actions, action{"WATCHLIST", "watch/" + resource, params, namer, true}, allowWatchList)
}
}
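	// For illustration, a namespaced resource such as "pods" gets item paths like
	// "namespaces/{namespace}/pods/{name}", a cross-namespace list path "pods"
	// (plus the deprecated "watch/..." variants), and a subresource reuses the
	// parent's item path with a "/<subresource>" suffix.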
// Create Routes for the actions.
// TODO: Add status documentation using Returns()
// Errors (see api/errors/errors.go as well as go-restful router):
// http.StatusNotFound, http.StatusMethodNotAllowed,
// http.StatusUnsupportedMediaType, http.StatusNotAcceptable,
// http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden,
// http.StatusRequestTimeout, http.StatusConflict, http.StatusPreconditionFailed,
// http.StatusUnprocessableEntity, http.StatusInternalServerError,
// http.StatusServiceUnavailable
// and api error codes
// Note that if we specify a versioned Status object here, we may need to
// create one for the tests, also
// Success:
// http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent
//
// test/integration/auth_test.go is currently the most comprehensive status code test
for _, s := range a.group.Serializer.SupportedMediaTypes() {
if len(s.MediaTypeSubType) == 0 || len(s.MediaTypeType) == 0 {
return nil, fmt.Errorf("all serializers in the group Serializer must have MediaTypeType and MediaTypeSubType set: %s", s.MediaType)
}
}
mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer)
allMediaTypes := append(mediaTypes, streamMediaTypes...)
ws.Produces(allMediaTypes...)
kubeVerbs := map[string]struct{}{}
reqScope := handlers.RequestScope{
Serializer: a.group.Serializer,
ParameterCodec: a.group.ParameterCodec,
Creater: a.group.Creater,
Convertor: a.group.Convertor,
Defaulter: a.group.Defaulter,
Typer: a.group.Typer,
UnsafeConvertor: a.group.UnsafeConvertor,
Authorizer: a.group.Authorizer,
EquivalentResourceMapper: a.group.EquivalentResourceRegistry,
// TODO: Check for the interface on storage
TableConvertor: tableProvider,
// TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this.
Resource: a.group.GroupVersion.WithResource(resource),
Subresource: subresource,
Kind: fqKindToRegister,
HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal},
MetaGroupVersion: metav1.SchemeGroupVersion,
MaxRequestBodyBytes: a.group.MaxRequestBodyBytes,
}
if a.group.MetaGroupVersion != nil {
reqScope.MetaGroupVersion = *a.group.MetaGroupVersion
}
if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
fm, err := fieldmanager.NewFieldManager(
a.group.OpenAPIModels,
a.group.UnsafeConvertor,
a.group.Defaulter,
fqKindToRegister.GroupVersion(),
reqScope.HubGroupVersion,
)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
}
reqScope.FieldManager = fm
}
for _, action := range actions {
producedObject := storageMeta.ProducesObject(action.Verb)
if producedObject == nil {
producedObject = defaultVersionedObject
}
reqScope.Namer = action.Namer
requestScope := "cluster"
var namespaced string
var operationSuffix string
if apiResource.Namespaced {
requestScope = "namespace"
namespaced = "Namespaced"
}
if strings.HasSuffix(action.Path, "/{path:*}") {
requestScope = "resource"
operationSuffix = operationSuffix + "WithPath"
}
if action.AllNamespaces {
requestScope = "cluster"
operationSuffix = operationSuffix + "ForAllNamespaces"
namespaced = ""
}
if kubeVerb, found := toDiscoveryKubeVerb[action.Verb]; found {
if len(kubeVerb) != 0 {
kubeVerbs[kubeVerb] = struct{}{}
}
} else {
return nil, fmt.Errorf("unknown action verb for discovery: %s", action.Verb)
}
routes := []*restful.RouteBuilder{}
// If there is a subresource, kind should be the parent's kind.
if isSubresource {
parentStorage, ok := a.group.Storage[resource]
if !ok {
return nil, fmt.Errorf("missing parent storage: %q", resource)
}
fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer)
if err != nil {
return nil, err
}
kind = fqParentKind.Kind
}
verbOverrider, needOverride := storage.(StorageMetricsOverride)
switch action.Verb {
case "GET": // Get a resource.
var handler restful.RouteFunction
if isGetterWithOptions {
handler = restfulGetResourceWithOptions(getterWithOptions, reqScope, isSubresource)
} else {
handler = restfulGetResource(getter, exporter, reqScope)
}
if needOverride {
// need change the reported verb
handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
} else {
handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
}
if a.enableAPIResponseCompression {
handler = genericfilters.RestfulWithCompression(handler)
}
doc := "read the specified " + kind
if isSubresource {
doc = "read " + subresource + " of the specified " + kind
}
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
Writes(producedObject)
if isGetterWithOptions {
if err := AddObjectParams(ws, route, versionedGetOptions); err != nil {
return nil, err
}
}
if isExporter {
if err := AddObjectParams(ws, route, versionedExportOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
case "LIST": // List all resources of a kind.
doc := "list objects of kind " + kind
if isSubresource {
doc = "list " + subresource + " of objects of kind " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout))
if a.enableAPIResponseCompression {
handler = genericfilters.RestfulWithCompression(handler)
}
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...).
Returns(http.StatusOK, "OK", versionedList).
Writes(versionedList)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
switch {
case isLister && isWatcher:
doc := "list or watch objects of kind " + kind
if isSubresource {
doc = "list or watch " + subresource + " of objects of kind " + kind
}
route.Doc(doc)
case isWatcher:
doc := "watch objects of kind " + kind
if isSubresource {
doc = "watch " + subresource + "of objects of kind " + kind
}
route.Doc(doc)
}
addParams(route, action.Params)
routes = append(routes, route)
case "PUT": // Update a resource.
doc := "replace the specified " + kind
if isSubresource {
doc = "replace " + subresource + " of the specified " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulUpdateResource(updater, reqScope, admit))
route := ws.PUT(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
// TODO: in some cases, the API may return a v1.Status instead of the versioned object
// but currently go-restful can't handle multiple different objects being returned.
Returns(http.StatusCreated, "Created", producedObject).
Reads(defaultVersionedObject).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "PATCH": // Partially update a resource
doc := "partially update the specified " + kind
if isSubresource {
doc = "partially update " + subresource + " of the specified " + kind
}
supportedTypes := []string{
string(types.JSONPatchType),
string(types.MergePatchType),
string(types.StrategicMergePatchType),
}
if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
supportedTypes = append(supportedTypes, string(types.ApplyPatchType))
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulPatchResource(patcher, reqScope, admit, supportedTypes))
route := ws.PATCH(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Consumes(supportedTypes...).
Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
Reads(metav1.Patch{}).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "POST": // Create a resource.
var handler restful.RouteFunction
if isNamedCreater {
handler = restfulCreateNamedResource(namedCreater, reqScope, admit)
} else {
handler = restfulCreateResource(creater, reqScope, admit)
}
handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler)
article := GetArticleForNoun(kind, " ")
doc := "create" + article + kind
if isSubresource {
doc = "create " + subresource + " of" + article + kind
}
route := ws.POST(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Returns(http.StatusOK, "OK", producedObject).
// TODO: in some cases, the API may return a v1.Status instead of the versioned object
// but currently go-restful can't handle multiple different objects being returned.
Returns(http.StatusCreated, "Created", producedObject).
Returns(http.StatusAccepted, "Accepted", producedObject).
Reads(defaultVersionedObject).
Writes(producedObject)
if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "DELETE": // Delete a resource.
article := GetArticleForNoun(kind, " ")
doc := "delete" + article + kind
if isSubresource {
doc = "delete " + subresource + " of" + article + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit))
route := ws.DELETE(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Writes(versionedStatus).
Returns(http.StatusOK, "OK", versionedStatus).
Returns(http.StatusAccepted, "Accepted", versionedStatus)
if isGracefulDeleter {
route.Reads(versionedDeleterObject)
route.ParameterNamed("body").Required(false)
if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
case "DELETECOLLECTION":
doc := "delete collection of " + kind
if isSubresource {
doc = "delete collection of " + subresource + " of a " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit))
route := ws.DELETE(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...).
Writes(versionedStatus).
Returns(http.StatusOK, "OK", versionedStatus)
if isCollectionDeleter {
route.Reads(versionedDeleterObject)
route.ParameterNamed("body").Required(false)
if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil {
return nil, err
}
}
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
// deprecated in 1.11
case "WATCH": // Watch a resource.
doc := "watch changes to an object of kind " + kind
if isSubresource {
doc = "watch changes to " + subresource + " of an object of kind " + kind
}
doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter."
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout))
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix).
Produces(allMediaTypes...).
Returns(http.StatusOK, "OK", versionedWatchEvent).
Writes(versionedWatchEvent)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
// deprecated in 1.11
case "WATCHLIST": // Watch all resources of a kind.
doc := "watch individual changes to a list of " + kind
if isSubresource {
doc = "watch individual changes to a list of " + subresource + " of " + kind
}
doc += ". deprecated: use the 'watch' parameter with a list operation instead."
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout))
route := ws.GET(action.Path).To(handler).
Doc(doc).
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")).
Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix).
Produces(allMediaTypes...).
Returns(http.StatusOK, "OK", versionedWatchEvent).
Writes(versionedWatchEvent)
if err := AddObjectParams(ws, route, versionedListOptions); err != nil {
return nil, err
}
addParams(route, action.Params)
routes = append(routes, route)
case "CONNECT":
for _, method := range connecter.ConnectMethods() {
connectProducedObject := storageMeta.ProducesObject(method)
if connectProducedObject == nil {
connectProducedObject = "string"
}
doc := "connect " + method + " requests to " + kind
if isSubresource {
doc = "connect " + method + " requests to " + subresource + " of " + kind
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulConnectResource(connecter, reqScope, admit, path, isSubresource))
route := ws.Method(method).Path(action.Path).
To(handler).
Doc(doc).
Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix).
Produces("*/*").
Consumes("*/*").
Writes(connectProducedObject)
if versionedConnectOptions != nil {
if err := AddObjectParams(ws, route, versionedConnectOptions); err != nil {
return nil, err
}
}
addParams(route, action.Params)
routes = append(routes, route)
// transform ConnectMethods to kube verbs
if kubeVerb, found := toDiscoveryKubeVerb[method]; found {
if len(kubeVerb) != 0 {
kubeVerbs[kubeVerb] = struct{}{}
}
}
}
default:
return nil, fmt.Errorf("unrecognized action verb: %s", action.Verb)
}
for _, route := range routes {
route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{
Group: reqScope.Kind.Group,
Version: reqScope.Kind.Version,
Kind: reqScope.Kind.Kind,
})
route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb))
ws.Route(route)
}
// Note: update GetAuthorizerAttributes() when adding a custom handler.
}
apiResource.Verbs = make([]string, 0, len(kubeVerbs))
for kubeVerb := range kubeVerbs {
apiResource.Verbs = append(apiResource.Verbs, kubeVerb)
}
sort.Strings(apiResource.Verbs)
if shortNamesProvider, ok := storage.(rest.ShortNamesProvider); ok {
apiResource.ShortNames = shortNamesProvider.ShortNames()
}
if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok {
apiResource.Categories = categoriesProvider.Categories()
}
if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok {
gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion)
apiResource.Group = gvk.Group
apiResource.Version = gvk.Version
apiResource.Kind = gvk.Kind
}
// Record the existence of the GVR and the corresponding GVK
a.group.EquivalentResourceRegistry.RegisterKindFor(reqScope.Resource, reqScope.Subresource, fqKindToRegister)
return &apiResource, nil
}
// indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer
func indirectArbitraryPointer(ptrToObject interface{}) interface{} {
return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface()
}
func appendIf(actions []action, a action, shouldAppend bool) []action {
if shouldAppend {
actions = append(actions, a)
}
return actions
}
func addParams(route *restful.RouteBuilder, params []*restful.Parameter) {
for _, param := range params {
route.Param(param)
}
}
// AddObjectParams converts a runtime.Object into a set of go-restful Param() definitions on the route.
// The object must be a pointer to a struct; only fields at the top level of the struct that are not
// themselves interfaces or structs are used; only fields with a json tag that is non-empty (the standard
// Go JSON behavior for omitting a field) become query parameters. The name of the query parameter is
// the JSON field name. If the object implements the documentable interface, the SwaggerDoc description
// for the field is used on the query parameter. In essence, it converts a standard JSON top level object
// into a query param schema.
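// Illustrative sketch (hypothetical options type, not part of this package): given
//
// type exampleOptions struct {
// Watch bool `json:"watch"`
// TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"`
// internal string // no json tag, so it is skipped
// }
//
// AddObjectParams would register the query parameters "watch" (boolean) and
// "timeoutSeconds" (integer) on the route; struct- and interface-valued fields are
// skipped, and descriptions are looked up via SwaggerDoc() when the object
// implements documentable.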
func AddObjectParams(ws *restful.WebService, route *restful.RouteBuilder, obj interface{}) error {
sv, err := conversion.EnforcePtr(obj)
if err != nil {
return err
}
st := sv.Type()
switch st.Kind() {
case reflect.Struct:
for i := 0; i < st.NumField(); i++ {
name := st.Field(i).Name
sf, ok := st.FieldByName(name)
if !ok {
continue
}
switch sf.Type.Kind() {
case reflect.Interface, reflect.Struct:
case reflect.Ptr:
// TODO: This is a hack to let metav1.Time through. This needs to be fixed in a more generic way eventually. bug #36191
if (sf.Type.Elem().Kind() == reflect.Interface || sf.Type.Elem().Kind() == reflect.Struct) && strings.TrimPrefix(sf.Type.String(), "*") != "metav1.Time" {
continue
}
fallthrough
default:
jsonTag := sf.Tag.Get("json")
if len(jsonTag) == 0 {
continue
}
jsonName := strings.SplitN(jsonTag, ",", 2)[0]
if len(jsonName) == 0 {
continue
}
var desc string
if docable, ok := obj.(documentable); ok {
desc = docable.SwaggerDoc()[jsonName]
}
route.Param(ws.QueryParameter(jsonName, desc).DataType(typeToJSON(sf.Type.String())))
}
}
}
return nil
}
// TODO: this is incomplete, expand as needed.
// Convert the name of a golang type to the name of a JSON type
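// For example, "*int32" maps to "integer", "metav1.Time" maps to "string", and
// unrecognized type names are returned unchanged.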
func typeToJSON(typeName string) string {
switch typeName {
case "bool", "*bool":
return "boolean"
case "uint8", "*uint8", "int", "*int", "int32", "*int32", "int64", "*int64", "uint32", "*uint32", "uint64", "*uint64":
return "integer"
case "float64", "*float64", "float32", "*float32":
return "number"
case "metav1.Time", "*metav1.Time":
return "string"
case "byte", "*byte":
return "string"
case "v1.DeletionPropagation", "*v1.DeletionPropagation":
return "string"
// TODO: Fix these when go-restful supports a way to specify an array query param:
// https://github.com/emicklei/go-restful/issues/225
case "[]string", "[]*string":
return "string"
case "[]int32", "[]*int32":
return "integer"
default:
return typeName
}
}
// defaultStorageMetadata provides default answers to rest.StorageMetadata.
type defaultStorageMetadata struct{}
// defaultStorageMetadata implements rest.StorageMetadata
var _ rest.StorageMetadata = defaultStorageMetadata{}
func (defaultStorageMetadata) ProducesMIMETypes(verb string) []string {
return nil
}
func (defaultStorageMetadata) ProducesObject(verb string) interface{} {
return nil
}
// splitSubresource checks if the given storage path is the path of a subresource and returns
// the resource and subresource components.
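// For example, "pods/status" yields ("pods", "status") and "pods" yields ("pods", "").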
func splitSubresource(path string) (string, string, error) {
var resource, subresource string
switch parts := strings.Split(path, "/"); len(parts) {
case 2:
resource, subresource = parts[0], parts[1]
case 1:
resource = parts[0]
default:
// TODO: support deeper paths
return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)")
}
return resource, subresource, nil
}
// GetArticleForNoun returns the article needed for the given noun.
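// For example, GetArticleForNoun("Pod", " ") returns " a " and
// GetArticleForNoun("Ingress", " ") returns " an ", while a plural such as
// "endpoints" yields only the padding.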
func GetArticleForNoun(noun string, padding string) string {
if noun[len(noun)-2:] != "ss" && noun[len(noun)-1:] == "s" {
// Plurals don't have an article.
// Don't catch words like class
return fmt.Sprintf("%v", padding)
}
article := "a"
if isVowel(rune(noun[0])) {
article = "an"
}
return fmt.Sprintf("%s%s%s", padding, article, padding)
}
// isVowel returns true if the rune is a vowel (case insensitive).
func isVowel(c rune) bool {
vowels := []rune{'a', 'e', 'i', 'o', 'u'}
for _, value := range vowels {
if value == unicode.ToLower(c) {
return true
}
}
return false
}
func restfulListResource(r rest.Lister, rw rest.Watcher, scope handlers.RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.ListResource(r, rw, &scope, forceWatch, minRequestTimeout)(res.ResponseWriter, req.Request)
}
}
func restfulCreateNamedResource(r rest.NamedCreater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.CreateNamedResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulCreateResource(r rest.Creater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.CreateResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulDeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.DeleteResource(r, allowsOptions, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulDeleteCollection(r rest.CollectionDeleter, checkBody bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.DeleteCollection(r, checkBody, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.UpdateResource(r, &scope, admit)(res.ResponseWriter, req.Request)
}
}
func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, supportedTypes []string) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.PatchResource(r, &scope, admit, supportedTypes)(res.ResponseWriter, req.Request)
}
}
func restfulGetResource(r rest.Getter, e rest.Exporter, scope handlers.RequestScope) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.GetResource(r, e, &scope)(res.ResponseWriter, req.Request)
}
}
func restfulGetResourceWithOptions(r rest.GetterWithOptions, scope handlers.RequestScope, isSubresource bool) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.GetResourceWithOptions(r, &scope, isSubresource)(res.ResponseWriter, req.Request)
}
}
func restfulConnectResource(connecter rest.Connecter, scope handlers.RequestScope, admit admission.Interface, restPath string, isSubresource bool) restful.RouteFunction {
return func(req *restful.Request, res *restful.Response) {
handlers.ConnectResource(connecter, &scope, admit, restPath, isSubresource)(res.ResponseWriter, req.Request)
}
}
| NickrenREN/kubernetes | staging/src/k8s.io/apiserver/pkg/endpoints/installer.go | GO | apache-2.0 | 44,987 |
# Obione tularensis (Coville) Ulbr. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Chenopodiaceae/Obione/Obione tularensis/README.md | Markdown | apache-2.0 | 183 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=UTF-8" />
<meta name="generator" content="AsciiDoc 8.6.8" />
<title>git-filter-branch(1)</title>
<style type="text/css">
/* Shared CSS for AsciiDoc xhtml11 and html5 backends */
/* Default font. */
body {
font-family: Georgia,serif;
}
/* Title font. */
h1, h2, h3, h4, h5, h6,
div.title, caption.title,
thead, p.table.header,
#toctitle,
#author, #revnumber, #revdate, #revremark,
#footer {
font-family: Arial,Helvetica,sans-serif;
}
body {
margin: 1em 5% 1em 5%;
}
a {
color: blue;
text-decoration: underline;
}
a:visited {
color: fuchsia;
}
em {
font-style: italic;
color: navy;
}
strong {
font-weight: bold;
color: #083194;
}
h1, h2, h3, h4, h5, h6 {
color: #527bbd;
margin-top: 1.2em;
margin-bottom: 0.5em;
line-height: 1.3;
}
h1, h2, h3 {
border-bottom: 2px solid silver;
}
h2 {
padding-top: 0.5em;
}
h3 {
float: left;
}
h3 + * {
clear: left;
}
h5 {
font-size: 1.0em;
}
div.sectionbody {
margin-left: 0;
}
hr {
border: 1px solid silver;
}
p {
margin-top: 0.5em;
margin-bottom: 0.5em;
}
ul, ol, li > p {
margin-top: 0;
}
ul > li { color: #aaa; }
ul > li > * { color: black; }
.monospaced, code, pre {
font-family: "Courier New", Courier, monospace;
font-size: inherit;
color: navy;
padding: 0;
margin: 0;
}
#author {
color: #527bbd;
font-weight: bold;
font-size: 1.1em;
}
#email {
}
#revnumber, #revdate, #revremark {
}
#footer {
font-size: small;
border-top: 2px solid silver;
padding-top: 0.5em;
margin-top: 4.0em;
}
#footer-text {
float: left;
padding-bottom: 0.5em;
}
#footer-badges {
float: right;
padding-bottom: 0.5em;
}
#preamble {
margin-top: 1.5em;
margin-bottom: 1.5em;
}
div.imageblock, div.exampleblock, div.verseblock,
div.quoteblock, div.literalblock, div.listingblock, div.sidebarblock,
div.admonitionblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
div.admonitionblock {
margin-top: 2.0em;
margin-bottom: 2.0em;
margin-right: 10%;
color: #606060;
}
div.content { /* Block element content. */
padding: 0;
}
/* Block element titles. */
div.title, caption.title {
color: #527bbd;
font-weight: bold;
text-align: left;
margin-top: 1.0em;
margin-bottom: 0.5em;
}
div.title + * {
margin-top: 0;
}
td div.title:first-child {
margin-top: 0.0em;
}
div.content div.title:first-child {
margin-top: 0.0em;
}
div.content + div.title {
margin-top: 0.0em;
}
div.sidebarblock > div.content {
background: #ffffee;
border: 1px solid #dddddd;
border-left: 4px solid #f0f0f0;
padding: 0.5em;
}
div.listingblock > div.content {
border: 1px solid #dddddd;
border-left: 5px solid #f0f0f0;
background: #f8f8f8;
padding: 0.5em;
}
div.quoteblock, div.verseblock {
padding-left: 1.0em;
margin-left: 1.0em;
margin-right: 10%;
border-left: 5px solid #f0f0f0;
color: #888;
}
div.quoteblock > div.attribution {
padding-top: 0.5em;
text-align: right;
}
div.verseblock > pre.content {
font-family: inherit;
font-size: inherit;
}
div.verseblock > div.attribution {
padding-top: 0.75em;
text-align: left;
}
/* DEPRECATED: Pre version 8.2.7 verse style literal block. */
div.verseblock + div.attribution {
text-align: left;
}
div.admonitionblock .icon {
vertical-align: top;
font-size: 1.1em;
font-weight: bold;
text-decoration: underline;
color: #527bbd;
padding-right: 0.5em;
}
div.admonitionblock td.content {
padding-left: 0.5em;
border-left: 3px solid #dddddd;
}
div.exampleblock > div.content {
border-left: 3px solid #dddddd;
padding-left: 0.5em;
}
div.imageblock div.content { padding-left: 0; }
span.image img { border-style: none; }
a.image:visited { color: white; }
dl {
margin-top: 0.8em;
margin-bottom: 0.8em;
}
dt {
margin-top: 0.5em;
margin-bottom: 0;
font-style: normal;
color: navy;
}
dd > *:first-child {
margin-top: 0.1em;
}
ul, ol {
list-style-position: outside;
}
ol.arabic {
list-style-type: decimal;
}
ol.loweralpha {
list-style-type: lower-alpha;
}
ol.upperalpha {
list-style-type: upper-alpha;
}
ol.lowerroman {
list-style-type: lower-roman;
}
ol.upperroman {
list-style-type: upper-roman;
}
div.compact ul, div.compact ol,
div.compact p, div.compact p,
div.compact div, div.compact div {
margin-top: 0.1em;
margin-bottom: 0.1em;
}
tfoot {
font-weight: bold;
}
td > div.verse {
white-space: pre;
}
div.hdlist {
margin-top: 0.8em;
margin-bottom: 0.8em;
}
div.hdlist tr {
padding-bottom: 15px;
}
dt.hdlist1.strong, td.hdlist1.strong {
font-weight: bold;
}
td.hdlist1 {
vertical-align: top;
font-style: normal;
padding-right: 0.8em;
color: navy;
}
td.hdlist2 {
vertical-align: top;
}
div.hdlist.compact tr {
margin: 0;
padding-bottom: 0;
}
.comment {
background: yellow;
}
.footnote, .footnoteref {
font-size: 0.8em;
}
span.footnote, span.footnoteref {
vertical-align: super;
}
#footnotes {
margin: 20px 0 20px 0;
padding: 7px 0 0 0;
}
#footnotes div.footnote {
margin: 0 0 5px 0;
}
#footnotes hr {
border: none;
border-top: 1px solid silver;
height: 1px;
text-align: left;
margin-left: 0;
width: 20%;
min-width: 100px;
}
div.colist td {
padding-right: 0.5em;
padding-bottom: 0.3em;
vertical-align: top;
}
div.colist td img {
margin-top: 0.3em;
}
@media print {
#footer-badges { display: none; }
}
#toc {
margin-bottom: 2.5em;
}
#toctitle {
color: #527bbd;
font-size: 1.1em;
font-weight: bold;
margin-top: 1.0em;
margin-bottom: 0.1em;
}
div.toclevel0, div.toclevel1, div.toclevel2, div.toclevel3, div.toclevel4 {
margin-top: 0;
margin-bottom: 0;
}
div.toclevel2 {
margin-left: 2em;
font-size: 0.9em;
}
div.toclevel3 {
margin-left: 4em;
font-size: 0.9em;
}
div.toclevel4 {
margin-left: 6em;
font-size: 0.9em;
}
span.aqua { color: aqua; }
span.black { color: black; }
span.blue { color: blue; }
span.fuchsia { color: fuchsia; }
span.gray { color: gray; }
span.green { color: green; }
span.lime { color: lime; }
span.maroon { color: maroon; }
span.navy { color: navy; }
span.olive { color: olive; }
span.purple { color: purple; }
span.red { color: red; }
span.silver { color: silver; }
span.teal { color: teal; }
span.white { color: white; }
span.yellow { color: yellow; }
span.aqua-background { background: aqua; }
span.black-background { background: black; }
span.blue-background { background: blue; }
span.fuchsia-background { background: fuchsia; }
span.gray-background { background: gray; }
span.green-background { background: green; }
span.lime-background { background: lime; }
span.maroon-background { background: maroon; }
span.navy-background { background: navy; }
span.olive-background { background: olive; }
span.purple-background { background: purple; }
span.red-background { background: red; }
span.silver-background { background: silver; }
span.teal-background { background: teal; }
span.white-background { background: white; }
span.yellow-background { background: yellow; }
span.big { font-size: 2em; }
span.small { font-size: 0.6em; }
span.underline { text-decoration: underline; }
span.overline { text-decoration: overline; }
span.line-through { text-decoration: line-through; }
div.unbreakable { page-break-inside: avoid; }
/*
* xhtml11 specific
*
* */
div.tableblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
div.tableblock > table {
border: 3px solid #527bbd;
}
thead, p.table.header {
font-weight: bold;
color: #527bbd;
}
p.table {
margin-top: 0;
}
/* Because the table frame attribute is overridden by CSS in most browsers. */
div.tableblock > table[frame="void"] {
border-style: none;
}
div.tableblock > table[frame="hsides"] {
border-left-style: none;
border-right-style: none;
}
div.tableblock > table[frame="vsides"] {
border-top-style: none;
border-bottom-style: none;
}
/*
* html5 specific
*
* */
table.tableblock {
margin-top: 1.0em;
margin-bottom: 1.5em;
}
thead, p.tableblock.header {
font-weight: bold;
color: #527bbd;
}
p.tableblock {
margin-top: 0;
}
table.tableblock {
border-width: 3px;
border-spacing: 0px;
border-style: solid;
border-color: #527bbd;
border-collapse: collapse;
}
th.tableblock, td.tableblock {
border-width: 1px;
padding: 4px;
border-style: solid;
border-color: #527bbd;
}
table.tableblock.frame-topbot {
border-left-style: hidden;
border-right-style: hidden;
}
table.tableblock.frame-sides {
border-top-style: hidden;
border-bottom-style: hidden;
}
table.tableblock.frame-none {
border-style: hidden;
}
th.tableblock.halign-left, td.tableblock.halign-left {
text-align: left;
}
th.tableblock.halign-center, td.tableblock.halign-center {
text-align: center;
}
th.tableblock.halign-right, td.tableblock.halign-right {
text-align: right;
}
th.tableblock.valign-top, td.tableblock.valign-top {
vertical-align: top;
}
th.tableblock.valign-middle, td.tableblock.valign-middle {
vertical-align: middle;
}
th.tableblock.valign-bottom, td.tableblock.valign-bottom {
vertical-align: bottom;
}
/*
* manpage specific
*
* */
body.manpage h1 {
padding-top: 0.5em;
padding-bottom: 0.5em;
border-top: 2px solid silver;
border-bottom: 2px solid silver;
}
body.manpage h2 {
border-style: none;
}
body.manpage div.sectionbody {
margin-left: 3em;
}
@media print {
body.manpage div#toc { display: none; }
}
</style>
<script type="text/javascript">
/*<+'])');
// Function that scans the DOM tree for header elements (the DOM2
// nodeIterator API would be a better technique but not supported by all
// browsers).
var iterate = function (el) {
for (var i = el.firstChild; i != null; i = i.nextSibling) {
if (i.nodeType == 1 /* Node.ELEMENT_NODE */) {
var mo = re.exec(i.tagName);
if (mo && (i.getAttribute("class") || i.getAttribute("className")) != "float") {
result[result.length] = new TocEntry(i, getText(i), mo[1]-1);
}
iterate(i);
}
}
}
iterate(el);
return result;
}
var toc = document.getElementById("toc");
if (!toc) {
return;
}
// Delete existing TOC entries in case we're reloading the TOC.
var tocEntriesToRemove = [];
var i;
for (i = 0; i < toc.childNodes.length; i++) {
var entry = toc.childNodes[i];
if (entry.nodeName.toLowerCase() == 'div'
&& entry.getAttribute("class")
&& entry.getAttribute("class").match(/^toclevel/))
tocEntriesToRemove.push(entry);
}
for (i = 0; i < tocEntriesToRemove.length; i++) {
toc.removeChild(tocEntriesToRemove[i]);
}
// Rebuild TOC entries.
var entries = tocEntries(document.getElementById("content"), toclevels);
for (var i = 0; i < entries.length; ++i) {
var entry = entries[i];
if (entry.element.id == "")
entry.element.id = "_toc_" + i;
var a = document.createElement("a");
a.href = "#" + entry.element.id;
a.appendChild(document.createTextNode(entry.text));
var div = document.createElement("div");
div.appendChild(a);
div.className = "toclevel" + entry.toclevel;
toc.appendChild(div);
}
if (entries.length == 0)
toc.parentNode.removeChild(toc);
},
/////////////////////////////////////////////////////////////////////
// Footnotes generator
/////////////////////////////////////////////////////////////////////
/* Based on footnote generation code from:
* http://www.brandspankingnew.net/archive/2005/07/format_footnote.html
*/
footnotes: function () {
// Delete existing footnote entries in case we're reloading the footnotes.
var i;
var noteholder = document.getElementById("footnotes");
if (!noteholder) {
return;
}
var entriesToRemove = [];
for (i = 0; i < noteholder.childNodes.length; i++) {
var entry = noteholder.childNodes[i];
if (entry.nodeName.toLowerCase() == 'div' && entry.getAttribute("class") == "footnote")
entriesToRemove.push(entry);
}
for (i = 0; i < entriesToRemove.length; i++) {
noteholder.removeChild(entriesToRemove[i]);
}
// Rebuild footnote entries.
var cont = document.getElementById("content");
var spans = cont.getElementsByTagName("span");
var refs = {};
var n = 0;
for (i=0; i<spans.length; i++) {
if (spans[i].className == "footnote") {
n++;
var note = spans[i].getAttribute("data-note");
if (!note) {
// Use [\s\S] in place of . so multi-line matches work.
// Because JavaScript has no s (dotall) regex flag.
note = spans[i].innerHTML.match(/\s*\[([\s\S]*)]\s*/)[1];
spans[i].innerHTML =
"[<a id='_footnoteref_" + n + "' href='#_footnote_" + n +
"' title='View footnote' class='footnote'>" + n + "</a>]";
spans[i].setAttribute("data-note", note);
}
noteholder.innerHTML +=
"<div class='footnote' id='_footnote_" + n + "'>" +
"<a href='#_footnoteref_" + n + "' title='Return to text'>" +
n + "</a>. " + note + "</div>";
var id =spans[i].getAttribute("id");
if (id != null) refs["#"+id] = n;
}
}
if (n == 0)
noteholder.parentNode.removeChild(noteholder);
else {
// Process footnoterefs.
for (i=0; i<spans.length; i++) {
if (spans[i].className == "footnoteref") {
var href = spans[i].getElementsByTagName("a")[0].getAttribute("href");
href = href.match(/#.*/)[0]; // Because IE returns the full URL.
n = refs[href];
spans[i].innerHTML =
"[<a href='#_footnote_" + n +
"' title='View footnote' class='footnote'>" + n + "</a>]";
}
}
}
},
install: function(toclevels) {
var timerId;
function reinstall() {
asciidoc.footnotes();
if (toclevels) {
asciidoc.toc(toclevels);
}
}
function reinstallAndRemoveTimer() {
clearInterval(timerId);
reinstall();
}
timerId = setInterval(reinstall, 500);
if (document.addEventListener)
document.addEventListener("DOMContentLoaded", reinstallAndRemoveTimer, false);
else
window.onload = reinstallAndRemoveTimer;
}
}
asciidoc.install();
/*]]>*/
</script>
</head>
<body class="manpage">
<div id="header">
<h1>
git-filter-branch(1) Manual Page
</h1>
<h2>NAME</h2>
<div class="sectionbody">
<p>git-filter-branch -
Rewrite branches
</p>
</div>
</div>
<div id="content">
<div class="sect1">
<h2 id="_synopsis">SYNOPSIS</h2>
<div class="sectionbody">
<div class="verseblock">
<pre class="content"><em>git filter-branch</em> [--env-filter <command>] [--tree-filter <command>]
[--index-filter <command>] [--parent-filter <command>]
[--msg-filter <command>] [--commit-filter <command>]
[--tag-name-filter <command>] [--subdirectory-filter <directory>]
[--prune-empty]
[--original <namespace>] [-d <directory>] [-f | --force]
[--] [<rev-list options>…]</pre>
<div class="attribution">
</div></div>
</div>
</div>
<div class="sect1">
<h2 id="_description">DESCRIPTION</h2>
<div class="sectionbody">
<div class="paragraph"><p>Lets you rewrite git revision history by rewriting the branches mentioned
in the <rev-list options>, applying custom filters on each revision.
Those filters can modify each tree (e.g. removing a file or running
a perl rewrite on all files) or information about each commit.
Otherwise, all information (including original commit times or merge
information) will be preserved.</p></div>
<div class="paragraph"><p>The command will only rewrite the <em>positive</em> refs mentioned in the
command line (e.g. if you pass <em>a..b</em>, only <em>b</em> will be rewritten).
If you specify no filters, the commits will be recommitted without any
changes, which would normally have no effect. Nevertheless, this may be
useful in the future for compensating for some git bugs or such,
therefore such a usage is permitted.</p></div>
<div class="paragraph"><p><strong>NOTE</strong>: This command honors <code>.git/info/grafts</code> file and refs in
the <code>refs/replace/</code> namespace.
If you have any grafts or replacement refs defined, running this command
will make them permanent.</p></div>
<div class="paragraph"><p><strong>WARNING</strong>! The rewritten history will have different object names for all
the objects and will not converge with the original branch. You will not
be able to easily push and distribute the rewritten branch on top of the
original branch. Please do not use this command if you do not know the
full implications, and avoid using it anyway, if a simple single commit
would suffice to fix your problem. (See the "RECOVERING FROM UPSTREAM
REBASE" section in <a href="git-rebase.html">git-rebase(1)</a> for further information about
rewriting published history.)</p></div>
<div class="paragraph"><p>Always verify that the rewritten version is correct: The original refs,
if different from the rewritten ones, will be stored in the namespace
<em>refs/original/</em>.</p></div>
<div class="paragraph"><p>Note that since this operation is very I/O expensive, it might
be a good idea to redirect the temporary directory off-disk with the
<em>-d</em> option, e.g. on tmpfs. Reportedly the speedup is very noticeable.</p></div>
<div class="sect2">
<h3 id="_filters">Filters</h3>
<div class="paragraph"><p>The filters are applied in the order as listed below. The <command>
argument is always evaluated in the shell context using the <em>eval</em> command
(with the notable exception of the commit filter, for technical reasons).
Prior to that, the $GIT_COMMIT environment variable will be set to contain
the id of the commit being rewritten. Also, GIT_AUTHOR_NAME,
GIT_AUTHOR_EMAIL, GIT_AUTHOR_DATE, GIT_COMMITTER_NAME, GIT_COMMITTER_EMAIL,
and GIT_COMMITTER_DATE are set according to the current commit. The values
of these variables after the filters have run, are used for the new commit.
If any evaluation of <command> returns a non-zero exit status, the whole
operation will be aborted.</p></div>
<div class="paragraph"><p>A <em>map</em> function is available that takes an "original sha1 id" argument
and outputs a "rewritten sha1 id" if the commit has been already
rewritten, and "original sha1 id" otherwise; the <em>map</em> function can
return several ids on separate lines if your commit filter emitted
multiple commits.</p></div>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_options">OPTIONS</h2>
<div class="sectionbody">
<div class="dlist"><dl>
<dt class="hdlist1">
--env-filter <command>
</dt>
<dd>
<p>
This filter may be used if you only need to modify the environment
in which the commit will be performed. Specifically, you might
want to rewrite the author/committer name/email/time environment
variables (see <a href="git-commit-tree.html">git-commit-tree(1)</a> for details). Do not forget
to re-export the variables.
</p>
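<div class="paragraph"><p>For example, a minimal sketch (the name and e-mail values are placeholders
to substitute with your own) that rewrites the committer identity of every commit on the
current branch:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --env-filter '
export GIT_COMMITTER_NAME="Jane Doe"
export GIT_COMMITTER_EMAIL="jane@example.com"
' HEAD</code></pre>
</div></div>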
</dd>
<dt class="hdlist1">
--tree-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the tree and its contents.
The argument is evaluated in shell with the working
directory set to the root of the checked out tree. The new tree
is then used as-is (new files are auto-added, disappeared files
are auto-removed - neither .gitignore files nor any other ignore
rules <strong>HAVE ANY EFFECT</strong>!).
</p>
</dd>
<dt class="hdlist1">
--index-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the index. It is similar to the
tree filter but does not check out the tree, which makes it much
faster. Frequently used with <code>git rm --cached
--ignore-unmatch ...</code>, see EXAMPLES below. For hairy
cases, see <a href="git-update-index.html">git-update-index(1)</a>.
</p>
</dd>
<dt class="hdlist1">
--parent-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the commit’s parent list.
It will receive the parent string on stdin and shall output
the new parent string on stdout. The parent string is in
the format described in <a href="git-commit-tree.html">git-commit-tree(1)</a>: empty for
the initial commit, "-p parent" for a normal commit and
"-p parent1 -p parent2 -p parent3 …" for a merge commit.
</p>
</dd>
<dt class="hdlist1">
--msg-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting the commit messages.
The argument is evaluated in the shell with the original
commit message on standard input; its standard output is
used as the new commit message.
</p>
</dd>
<dt class="hdlist1">
--commit-filter <command>
</dt>
<dd>
<p>
This is the filter for performing the commit.
If this filter is specified, it will be called instead of the
<em>git commit-tree</em> command, with arguments of the form
"<TREE_ID> [(-p <PARENT_COMMIT_ID>)…]" and the log message on
stdin. The commit id is expected on stdout.
</p>
<div class="paragraph"><p>As a special extension, the commit filter may emit multiple
commit ids; in that case, the rewritten children of the original commit will
have all of them as parents.</p></div>
<div class="paragraph"><p>You can use the <em>map</em> convenience function in this filter, and other
convenience functions, too. For example, calling <em>skip_commit "$@"</em>
will leave out the current commit (but not its changes! If you want
that, use <em>git rebase</em> instead).</p></div>
<div class="paragraph"><p>You can also use the <code>git_commit_non_empty_tree "$@"</code> instead of
<code>git commit-tree "$@"</code> if you don’t wish to keep commits with a single parent
and that makes no change to the tree.</p></div>
</dd>
<dt class="hdlist1">
--tag-name-filter <command>
</dt>
<dd>
<p>
This is the filter for rewriting tag names. When passed,
it will be called for every tag ref that points to a rewritten
object (or to a tag object which points to a rewritten object).
The original tag name is passed via standard input, and the new
tag name is expected on standard output.
</p>
<div class="paragraph"><p>The original tags are not deleted, but can be overwritten;
use "--tag-name-filter cat" to simply update the tags. In this
case, be very careful and make sure you have the old tags
backed up in case the conversion has run afoul.</p></div>
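<div class="paragraph"><p>For example, one possible invocation (reusing the index filter shown in the
EXAMPLES section below) that rewrites all refs and keeps the tag names pointing at the
rewritten commits:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --index-filter 'git rm --cached --ignore-unmatch filename' \
--tag-name-filter cat -- --all</code></pre>
</div></div>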
<div class="paragraph"><p>Nearly proper rewriting of tag objects is supported. If the tag has
a message attached, a new tag object will be created with the same message,
author, and timestamp. If the tag has a signature attached, the
signature will be stripped. It is by definition impossible to preserve
signatures. The reason this is "nearly" proper, is because ideally if
the tag did not change (points to the same object, has the same name, etc.)
it should retain any signature. That is not the case, signatures will always
be removed, buyer beware. There is also no support for changing the
author or timestamp (or the tag message for that matter). Tags which point
to other tags will be rewritten to point to the underlying commit.</p></div>
</dd>
<dt class="hdlist1">
--subdirectory-filter <directory>
</dt>
<dd>
<p>
Only look at the history which touches the given subdirectory.
The result will contain that directory (and only that) as its
project root. Implies <a href="#Remap_to_ancestor">[Remap_to_ancestor]</a>.
</p>
</dd>
<dt class="hdlist1">
--prune-empty
</dt>
<dd>
<p>
Some filters will generate empty commits that leave the tree
untouched. This switch allows git-filter-branch to ignore such
commits. Note that this switch only applies to commits that have one
and only one parent; it will therefore keep merge points. Also, this
option is not compatible with the use of <em>--commit-filter</em>, though you
can use the function <em>git_commit_non_empty_tree "$@"</em> instead
of the <code>git commit-tree "$@"</code> idiom in your commit filter to achieve
the same effect.
</p>
</dd>
<dt class="hdlist1">
--original <namespace>
</dt>
<dd>
<p>
Use this option to set the namespace where the original commits
will be stored. The default value is <em>refs/original</em>.
</p>
</dd>
<dt class="hdlist1">
-d <directory>
</dt>
<dd>
<p>
Use this option to set the path to the temporary directory used for
rewriting. When applying a tree filter, the command needs to
temporarily check out the tree to some directory, which may consume
considerable space in case of large projects. By default it
does this in the <em>.git-rewrite/</em> directory but you can override
that choice by this parameter.
</p>
</dd>
<dt class="hdlist1">
-f
</dt>
<dt class="hdlist1">
--force
</dt>
<dd>
<p>
<em>git filter-branch</em> refuses to start with an existing temporary
directory or when there are already refs starting with
<em>refs/original/</em>, unless forced.
</p>
</dd>
<dt class="hdlist1">
<rev-list options>…
</dt>
<dd>
<p>
Arguments for <em>git rev-list</em>. All positive refs included by
these options are rewritten. You may also specify options
such as <em>--all</em>, but you must use <em>--</em> to separate them from
the <em>git filter-branch</em> options. Implies <a href="#Remap_to_ancestor">[Remap_to_ancestor]</a>.
</p>
</dd>
</dl></div>
<div class="sect2">
<h3 id="Remap_to_ancestor">Remap to ancestor</h3>
<div class="paragraph"><p>By using <a href="rev-list.html">rev-list(1)</a> arguments, e.g., path limiters, you can limit the
set of revisions which get rewritten. However, positive refs on the command
line are distinguished: we don’t let them be excluded by such limiters. For
this purpose, they are instead rewritten to point at the nearest ancestor that
was not excluded.</p></div>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_examples">Examples</h2>
<div class="sectionbody">
<div class="paragraph"><p>Suppose you want to remove a file (containing confidential information
or copyright violation) from all commits:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --tree-filter 'rm filename' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>However, if the file is absent from the tree of some commit,
a simple <code>rm filename</code> will fail for that tree and commit.
Thus you may instead want to use <code>rm -f filename</code> as the script.</p></div>
<div class="paragraph"><p>Using <code>--index-filter</code> with <em>git rm</em> yields a significantly faster
version. Like with using <code>rm filename</code>, <code>git rm --cached filename</code>
will fail if the file is absent from the tree of a commit. If you
want to "completely forget" a file, it does not matter when it entered
history, so we also add <code>--ignore-unmatch</code>:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --index-filter 'git rm --cached --ignore-unmatch filename' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>Now, you will get the rewritten history saved in HEAD.</p></div>
<div class="paragraph"><p>To rewrite the repository to look as if <code>foodir/</code> had been its project
root, and discard all other history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --subdirectory-filter foodir -- --all</code></pre>
</div></div>
<div class="paragraph"><p>Thus you can, e.g., turn a library subdirectory into a repository of
its own. Note the <code>--</code> that separates <em>filter-branch</em> options from
revision options, and the <code>--all</code> to rewrite all branches and tags.</p></div>
<div class="paragraph"><p>To set a commit (which typically is at the tip of another
history) to be the parent of the current initial commit, in
order to paste the other history behind the current history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --parent-filter 'sed "s/^\$/-p <graft-id>/"' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>(if the parent string is empty - which happens when we are dealing with
the initial commit - add graftcommit as a parent). Note that this assumes
history with a single root (that is, no merge without common ancestors
happened). If this is not the case, use:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --parent-filter \
'test $GIT_COMMIT = <commit-id> && echo "-p <graft-id>" || cat' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>or even simpler:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>echo "$commit-id $graft-id" >> .git/info/grafts
git filter-branch $graft-id..HEAD</code></pre>
</div></div>
<div class="paragraph"><p>To remove commits authored by "Darl McBribe" from the history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --commit-filter '
if [ "$GIT_AUTHOR_NAME" = "Darl McBribe" ];
then
skip_commit "$@";
else
git commit-tree "$@";
fi' HEAD</code></pre>
</div></div>
<div class="paragraph"><p>The function <em>skip_commit</em> is defined as follows:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>skip_commit()
{
shift;
while [ -n "$1" ];
do
shift;
map "$1";
shift;
done;
}</code></pre>
</div></div>
<div class="paragraph"><p>The shift magic first throws away the tree id and then the -p
parameters. Note that this handles merges properly! In case Darl
committed a merge between P1 and P2, it will be propagated properly
and all children of the merge will become merge commits with P1,P2
as their parents instead of the merge commit.</p></div>
<div class="paragraph"><p><strong>NOTE</strong> the changes introduced by the commits, and which are not reverted
by subsequent commits, will still be in the rewritten branch. If you want
to throw out <em>changes</em> together with the commits, you should use the
interactive mode of <em>git rebase</em>.</p></div>
<div class="paragraph"><p>You can rewrite the commit log messages using <code>--msg-filter</code>. For
example, <em>git svn-id</em> strings in a repository created by <em>git svn</em> can
be removed this way:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --msg-filter '
sed -e "/^git-svn-id:/d"
'</code></pre>
</div></div>
<div class="paragraph"><p>If you need to add <em>Acked-by</em> lines to, say, the last 10 commits (none
of which is a merge), use this command:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --msg-filter '
cat &&
echo "Acked-by: Bugs Bunny <[email protected]>"
' HEAD~10..HEAD</code></pre>
</div></div>
<div class="paragraph"><p>To restrict rewriting to only part of the history, specify a revision
range in addition to the new branch name. The new branch name will
point to the top-most revision that a <em>git rev-list</em> of this range
will print.</p></div>
<div class="paragraph"><p>Consider this history:</p></div>
<div class="listingblock">
<div class="content">
<pre><code> D--E--F--G--H
/ /
A--B-----C</code></pre>
</div></div>
<div class="paragraph"><p>To rewrite only commits D,E,F,G,H, but leave A, B and C alone, use:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch ... C..H</code></pre>
</div></div>
<div class="paragraph"><p>To rewrite commits E,F,G,H, use one of these:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch ... C..H --not D
git filter-branch ... D..H --not C</code></pre>
</div></div>
<div class="paragraph"><p>To move the whole tree into a subdirectory, or remove it from there:</p></div>
<div class="listingblock">
<div class="content">
<pre><code>git filter-branch --index-filter \
'git ls-files -s | sed "s-\t\"*-&newsubdir/-" |
GIT_INDEX_FILE=$GIT_INDEX_FILE.new \
git update-index --index-info &&
mv "$GIT_INDEX_FILE.new" "$GIT_INDEX_FILE"' HEAD</code></pre>
</div></div>
</div>
</div>
<div class="sect1">
<h2 id="_checklist_for_shrinking_a_repository">Checklist for Shrinking a Repository</h2>
<div class="sectionbody">
<div class="paragraph"><p>git-filter-branch is often used to get rid of a subset of files,
usually with some combination of <code>--index-filter</code> and
<code>--subdirectory-filter</code>. People expect the resulting repository to
be smaller than the original, but you need a few more steps to
actually make it smaller, because git tries hard not to lose your
objects until you tell it to. First make sure that:</p></div>
<div class="ulist"><ul>
<li>
<p>
You really removed all variants of a filename, if a blob was moved
over its lifetime. <code>git log --name-only --follow --all -- filename</code>
can help you find renames.
</p>
</li>
<li>
<p>
You really filtered all refs: use <code>--tag-name-filter cat -- --all</code>
when calling git-filter-branch.
</p>
</li>
</ul></div>
<div class="paragraph"><p>Then there are two ways to get a smaller repository. A safer way is
to clone, that keeps your original intact.</p></div>
<div class="ulist"><ul>
<li>
<p>
Clone it with <code>git clone file:///path/to/repo</code>. The clone
will not have the removed objects. See <a href="git-clone.html">git-clone(1)</a>. (Note
that cloning with a plain path just hardlinks everything!)
</p>
</li>
</ul></div>
<div class="paragraph"><p>If you really don’t want to clone it, for whatever reasons, check the
following points instead (in this order). This is a very destructive
approach, so <strong>make a backup</strong> or go back to cloning it. You have been
warned.</p></div>
<div class="ulist"><ul>
<li>
<p>
Remove the original refs backed up by git-filter-branch: say <code>git
for-each-ref --format="%(refname)" refs/original/ | xargs -n 1 git
update-ref -d</code>.
</p>
</li>
<li>
<p>
Expire all reflogs with <code>git reflog expire --expire=now --all</code>.
</p>
</li>
<li>
<p>
Garbage collect all unreferenced objects with <code>git gc --prune=now</code>
(or if your git-gc is not new enough to support arguments to
<code>--prune</code>, use <code>git repack -ad; git prune</code> instead).
</p>
</li>
</ul></div>
</div>
</div>
<div class="sect1">
<h2 id="_git">GIT</h2>
<div class="sectionbody">
<div class="paragraph"><p>Part of the <a href="git.html">git(1)</a> suite</p></div>
</div>
</div>
</div>
<div id="footnotes"><hr /></div>
<div id="footer">
<div id="footer-text">
Last updated 2012-09-18 15:30:10 PDT
</div>
</div>
</body>
</html>
| ArcherCraftStore/ArcherVMPeridot | RailsInstaller_D/Git/doc/git/html/git-filter-branch.html | HTML | apache-2.0 | 36,402 |
// -*- coding: us-ascii-unix -*-
// Copyright 2012 Lukas Kemmer
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
#include <cassert>
#include "text/utf8.hh"
#include "text/utf8-string.hh"
namespace faint{
inline bool outside(const std::string& data, size_t pos){
return utf8::num_characters(data) <= pos;
}
utf8_string::utf8_string(size_t n, const utf8_char& ch){
for (size_t i = 0; i != n; i++){
m_data += ch.str();
}
}
utf8_string::utf8_string(const utf8_char& ch)
: utf8_string(1, ch)
{}
utf8_string::utf8_string(const char* str)
: m_data(str)
{}
utf8_string::utf8_string(const std::string& str)
: m_data(str)
{}
utf8_char utf8_string::at(size_t pos) const{
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::at invalid string position");
}
return operator[](pos);
}
utf8_char utf8_string::back() const{
assert(!m_data.empty());
return operator[](size() - 1);
}
utf8_char utf8_string::front() const{
assert(!m_data.empty());
return operator[](0);
}
size_t utf8_string::bytes() const{
return m_data.size();
}
void utf8_string::clear(){
m_data.clear();
}
utf8_string utf8_string::substr(size_t pos, size_t n) const{
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::substr invalid string position");
}
size_t startByte = utf8::char_num_to_byte_num_checked(pos, m_data);
size_t numBytes = (n == utf8_string::npos) ?
std::string::npos :
utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte;
return utf8_string(m_data.substr(startByte, numBytes));
}
const char* utf8_string::c_str() const{
return m_data.c_str();
}
const std::string& utf8_string::str() const{
return m_data;
}
size_t utf8_string::size() const{
return utf8::num_characters(m_data);
}
bool utf8_string::empty() const{
return m_data.empty();
}
utf8_string& utf8_string::erase(size_t pos, size_t n){
if (outside(m_data, pos)){
throw std::out_of_range("utf8_string::erase invalid string position");
}
size_t startByte = utf8::char_num_to_byte_num_clamped(pos, m_data);
size_t numBytes = (n == npos ? npos :
utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte);
m_data.erase(startByte, numBytes);
return *this;
}
utf8_string& utf8_string::insert(size_t pos, const utf8_string& inserted){
if (pos > utf8::num_characters(m_data)){
throw std::out_of_range("invalid insertion index");
}
m_data.insert(utf8::char_num_to_byte_num_checked(pos, m_data), inserted.str());
return *this;
}
utf8_string& utf8_string::insert(size_t pos, size_t num, const utf8_char& c){
if (pos > utf8::num_characters(m_data)){
throw std::out_of_range("invalid insertion index");
}
insert(pos, utf8_string(num, c));
return *this;
}
utf8_char utf8_string::operator[](size_t i) const{
size_t pos = utf8::char_num_to_byte_num_checked(i, m_data);
size_t numBytes = faint::utf8::prefix_num_bytes(m_data[pos]);
return utf8_char(m_data.substr(pos, numBytes));
}
size_t utf8_string::find(const utf8_char& ch, size_t start) const{
// Since the leading byte has a unique pattern, using regular
// std::string find should be OK, I think.
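// (In UTF-8, continuation bytes are always in the 0x80-0xBF range and lead bytes
// never are, so one character's byte sequence cannot begin inside another character.)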
size_t pos = m_data.find(ch.str(),
utf8::char_num_to_byte_num_checked(start, m_data));
if (pos == npos){
return pos;
}
return utf8::byte_num_to_char_num(pos, m_data);
}
size_t utf8_string::find_last_of(const utf8_string& s, size_t inPos) const{
const size_t endPos = inPos == npos ? size() : inPos;
for (size_t i = 0; i != endPos; i++){
auto pos = endPos - i - 1;
if (s.find((*this)[pos]) != utf8_string::npos){
return pos;
}
}
return utf8_string::npos;
}
size_t utf8_string::rfind(const utf8_char& ch, size_t start) const{
// Since the leading byte has a unique pattern, using regular
// std::string rfind should be OK, I think.
if (m_data.empty()){
return npos;
}
size_t startByte = (start == npos) ? m_data.size() - 1 :
utf8::char_num_to_byte_num_checked(start, m_data);
size_t pos = m_data.rfind(ch.str(), startByte);
if (pos == npos){
return pos;
}
return utf8::byte_num_to_char_num(pos, m_data);
}
utf8_string& utf8_string::operator=(const utf8_string& other){
if (&other == this){
return *this;
}
m_data = other.m_data;
return *this;
}
utf8_string& utf8_string::operator+=(const utf8_char& ch){
m_data += ch.str();
return *this;
}
utf8_string& utf8_string::operator+=(const utf8_string& str){
m_data += str.str();
return *this;
}
utf8_string operator+(const utf8_string& lhs, const utf8_char& rhs){
return utf8_string(lhs.str() + rhs.str());
}
utf8_string operator+(const utf8_string& lhs, const utf8_string& rhs){
return utf8_string(lhs.str() + rhs.str());
}
utf8_string operator+(const utf8_char& lhs, const utf8_string& rhs){
return utf8_string(lhs.str() + rhs.str());
}
const size_t utf8_string::npos(std::string::npos);
bool utf8_string::operator<(const utf8_string& s) const{
return m_data < s.m_data;
}
bool is_ascii(const utf8_string& s){
const std::string& bytes = s.str();
for (char ch : bytes){
if (utf8::prefix_num_bytes(ch) != 1){
return false;
}
}
return true;
}
std::ostream& operator<<(std::ostream& o, const utf8_string& s){
o << s.str();
return o;
}
bool operator==(const utf8_string& lhs, const utf8_string& rhs){
return lhs.str() == rhs.str();
}
bool operator!=(const utf8_string& lhs, const utf8_string& rhs){
return !(lhs == rhs);
}
utf8_string_const_iterator begin(const utf8_string& s){
return utf8_string_const_iterator(s, 0);
}
utf8_string_const_iterator end(const utf8_string& s){
return utf8_string_const_iterator(s, s.size());
}
} // namespace
| lukas-ke/faint-graphics-editor | text/utf8-string.cpp | C++ | apache-2.0 | 6,218 |
package com.asura.monitor.platform.dao;
import com.asura.framework.base.paging.PagingResult;
import com.asura.framework.base.paging.SearchMap;
import com.asura.framework.dao.mybatis.base.MybatisDaoContext;
import com.asura.framework.dao.mybatis.paginator.domain.PageBounds;
import com.asura.common.dao.BaseDao;
import com.asura.monitor.platform.entity.MonitorPlatformServerEntity;
import org.springframework.stereotype.Repository;
import javax.annotation.Resource;
/**
* <p></p>
* <p/>
* <PRE>
* <BR>
* <BR>-----------------------------------------------
* <BR>
* </PRE>
*
* @author zhaozq14
* @version 1.0
* @date 2016-11-07 11:35:05
* @since 1.0
*/
@Repository("com.asura.monitor.configure.dao.MonitorPlatformServerDao")
public class MonitorPlatformServerDao extends BaseDao<MonitorPlatformServerEntity>{
@Resource(name="monitor.MybatisDaoContext")
private MybatisDaoContext mybatisDaoContext;
    /**
     * Paged query for platform server entities.
     *
     * @param searchMap  search criteria
     * @param pageBounds paging information
     * @param sqlId      id of the MyBatis statement to execute
     * @return a page of matching entities
     */
public PagingResult<MonitorPlatformServerEntity> findAll(SearchMap searchMap, PageBounds pageBounds, String sqlId){
return mybatisDaoContext.findForPage(this.getClass().getName()+"."+sqlId,MonitorPlatformServerEntity.class,searchMap,pageBounds);
}
} | AsuraTeam/monitor | server/src/main/java/com/asura/monitor/platform/dao/MonitorPlatformServerDao.java | Java | apache-2.0 | 1,279 |
/**
* Jakarta Bean Validation TCK
*
* License: Apache License, Version 2.0
* See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
*/
package org.hibernate.beanvalidation.tck.tests.constraints.constraintdefinition;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertNoViolations;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertThat;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.violationOf;
import static org.testng.Assert.assertEquals;
import java.util.Set;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.Validator;
import jakarta.validation.constraints.Size;
import jakarta.validation.groups.Default;
import jakarta.validation.metadata.ConstraintDescriptor;
import org.hibernate.beanvalidation.tck.beanvalidation.Sections;
import org.hibernate.beanvalidation.tck.tests.AbstractTCKTest;
import org.hibernate.beanvalidation.tck.util.TestUtil;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.jboss.test.audit.annotations.SpecAssertion;
import org.jboss.test.audit.annotations.SpecVersion;
import org.testng.annotations.Test;
/**
* @author Hardy Ferentschik
* @author Guillaume Smet
*/
@SpecVersion(spec = "beanvalidation", version = "3.0.0")
public class ConstraintDefinitionsTest extends AbstractTCKTest {
@Deployment
public static WebArchive createTestArchive() {
return webArchiveBuilder()
.withTestClassPackage( ConstraintDefinitionsTest.class )
.build();
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
public void testConstraintWithCustomAttributes() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "lastName" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the lastName property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
AlwaysValid.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Person>> constraintViolations = validator.validate( new Person( "John", "Doe" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( AlwaysValid.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "b")
public void testRepeatableConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Movie.class )
.getConstraintsForProperty( "title" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the title property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
Size.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Movie>> constraintViolations = validator.validate( new Movie( "Title" ) );
assertNoViolations( constraintViolations );
constraintViolations = validator.validate( new Movie( "A" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
constraintViolations = validator.validate( new Movie( "A movie title far too long that does not respect the constraint" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES_GROUPS, id = "d")
public void testDefaultGroupAssumedWhenNoGroupsSpecified() {
Validator validator = TestUtil.getValidatorUnderTest();
ConstraintDescriptor<?> descriptor = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "firstName" )
.getConstraintDescriptors()
.iterator()
.next();
Set<Class<?>> groups = descriptor.getGroups();
assertEquals( groups.size(), 1, "The group set should only contain one entry." );
assertEquals( groups.iterator().next(), Default.class, "The Default group should be returned." );
}
}
| beanvalidation/beanvalidation-tck | tests/src/main/java/org/hibernate/beanvalidation/tck/tests/constraints/constraintdefinition/ConstraintDefinitionsTest.java | Java | apache-2.0 | 4,672 |
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.map.impl;
import com.hazelcast.config.MaxSizeConfig;
import com.hazelcast.core.IFunction;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.SerializableByConvention;
import com.hazelcast.spi.partition.IPartitionService;
import com.hazelcast.util.CollectionUtil;
import com.hazelcast.util.UnmodifiableIterator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import static com.hazelcast.config.MaxSizeConfig.MaxSizePolicy.PER_NODE;
import static com.hazelcast.util.MapUtil.createHashMap;
import static com.hazelcast.util.Preconditions.checkNotNull;
public final class MapKeyLoaderUtil {
private MapKeyLoaderUtil() {
}
/**
* Returns the role for the map key loader based on the passed parameters.
* The partition owner of the map name partition is the sender.
* The first replica of the map name partition is the sender backup.
* Other partition owners are receivers and other partition replicas do
* not have a role.
*
* @param isPartitionOwner if this is the partition owner
* @param isMapNamePartition if this is the partition containing the map name
* @param isMapNamePartitionFirstReplica if this is the first replica for the partition
* containing the map name
* @return the map key loader role
*/
static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition,
boolean isMapNamePartitionFirstReplica) {
if (isMapNamePartition) {
if (isPartitionOwner) {
// map-name partition owner is the SENDER
return MapKeyLoader.Role.SENDER;
} else {
if (isMapNamePartitionFirstReplica) {
// first replica of the map-name partition is the SENDER_BACKUP
return MapKeyLoader.Role.SENDER_BACKUP;
} else {
// other replicas of the map-name partition do not have a role
return MapKeyLoader.Role.NONE;
}
}
} else {
// ordinary partition owners are RECEIVERs, otherwise no role
return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE;
}
}
/**
* Transforms an iterator of entries to an iterator of entry batches
* where each batch is represented as a map from entry key to
* list of entry values.
* The maximum size of the entry value list in any batch is
* determined by the {@code maxBatch} parameter. Only one
* entry value list may have the {@code maxBatch} size, other
* lists will be smaller.
*
* @param entries the entries to be batched
* @param maxBatch the maximum size of an entry group in a single batch
* @return an iterator with entry batches
*/
static Iterator<Map<Integer, List<Data>>> toBatches(final Iterator<Entry<Integer, Data>> entries,
final int maxBatch) {
return new UnmodifiableIterator<Map<Integer, List<Data>>>() {
@Override
public boolean hasNext() {
return entries.hasNext();
}
@Override
public Map<Integer, List<Data>> next() {
if (!entries.hasNext()) {
throw new NoSuchElementException();
}
return nextBatch(entries, maxBatch);
}
};
}
/**
     * Groups entries by the entry key. Entries are consumed until one
     * group reaches {@code maxBatch} entries or until the
     * {@code entries} have been exhausted.
*
* @param entries the entries to be grouped by key
* @param maxBatch the maximum size of a group
* @return the grouped entries by entry key
*/
private static Map<Integer, List<Data>> nextBatch(Iterator<Entry<Integer, Data>> entries, int maxBatch) {
Map<Integer, List<Data>> batch = createHashMap(maxBatch);
while (entries.hasNext()) {
Entry<Integer, Data> e = entries.next();
List<Data> partitionKeys = CollectionUtil.addToValueList(batch, e.getKey(), e.getValue());
if (partitionKeys.size() >= maxBatch) {
break;
}
}
return batch;
}
/**
* Returns the configured maximum entry count per node if the max
* size policy is {@link MaxSizeConfig.MaxSizePolicy#PER_NODE}
* and is not the default, otherwise returns {@code -1}.
*
* @param maxSizeConfig the max size configuration
* @return the max size per node or {@code -1} if not configured or is the default
* @see MaxSizeConfig#getMaxSizePolicy()
* @see MaxSizeConfig#getSize()
*/
public static int getMaxSizePerNode(MaxSizeConfig maxSizeConfig) {
// max size or -1 if policy is different or not set
double maxSizePerNode = maxSizeConfig.getMaxSizePolicy() == PER_NODE ? maxSizeConfig.getSize() : -1D;
if (maxSizePerNode == MaxSizeConfig.DEFAULT_MAX_SIZE) {
// unlimited
return -1;
}
return (int) maxSizePerNode;
}
/**
* Returns a {@link IFunction} that transforms a {@link Data}
* parameter to an map entry where the key is the partition ID
* and the value is the provided parameter.
*
* @param partitionService the partition service
*/
static IFunction<Data, Entry<Integer, Data>> toPartition(final IPartitionService partitionService) {
return new DataToEntry(partitionService);
}
@SerializableByConvention
private static class DataToEntry implements IFunction<Data, Entry<Integer, Data>> {
private final IPartitionService partitionService;
public DataToEntry(IPartitionService partitionService) {
this.partitionService = partitionService;
}
@Override
public Entry<Integer, Data> apply(Data input) {
// Null-pointer here, in case of null key loaded by MapLoader
checkNotNull(input, "Key loaded by a MapLoader cannot be null.");
Integer partition = partitionService.getPartitionId(input);
return new MapEntrySimple<Integer, Data>(partition, input);
}
}
}
| dbrimley/hazelcast | hazelcast/src/main/java/com/hazelcast/map/impl/MapKeyLoaderUtil.java | Java | apache-2.0 | 7,145 |
<div class="btn-group mw-version-selector">
<button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
<span class="descriptor">Version </span>
<span class="descriptor-sm">V. </span>
{{currentVersionModel.attributes[versionNumberKey]}}
<span ng-if="currentVersionModel.attributes.published" mw-icon="rln-icon published"></span>
</button>
<ul class="version-dropdown dropdown-menu pull-right" style="min-width:100%" role="menu">
<li ng-repeat="version in versionCollection.models" ng-class="{active:(version.attributes.uuid === currentVersionModel.attributes.uuid)}">
<a ng-href="{{getUrl(version.attributes.uuid)}}">
{{version.attributes[versionNumberKey]}}
<span ng-if="version.attributes.published" mw-icon="rln-icon published"></span>
</a>
</li>
</ul>
</div> | mwaylabs/uikit | src-relution/templates/mwComponentsBb/mwVersionSelector.html | HTML | apache-2.0 | 848 |
## Slurm :o:
| | |
| -------- | --------------------------- |
| title | Slurm |
| status | 10 |
| section | Cluster Resource Management |
| keywords | Cluster Resource Management |
The Simple Linux Utility for Resource Management (SLURM) workload
manager is an open-source, scalable cluster resource management tool
used for job scheduling on small to large multi-core Linux clusters.
SLURM has three key functions. First, it allocates resources to users
for a period of time with exclusive and/or non-exclusive access.
Second, it enables users to start, execute, and monitor jobs on the
resources allocated to them. Finally, it arbitrates contention for
resources by maintaining a queue of pending work
[@www-slurmSchedmdSite]. The SLURM architecture has the following
components: a centralized manager that monitors resources and work
(optionally backed by a standby manager), a daemon on each compute
node that provides fault-tolerant communication, an optional daemon
for clusters with multiple managers, and tools to initiate,
terminate, and report on jobs, including a graphical view of the
cluster's network topology. It also provides around twenty additional
plugins for functionality such as accounting, advance reservations,
gang scheduling, backfill scheduling, and multifactor job
prioritization. Though originally developed for Linux, SLURM also
provides full support on platforms such as AIX, FreeBSD, NetBSD, and
Solaris [@www-slurmPlatformsSite] [@www-slurm].
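
To make the scheduling workflow concrete, the sketch below shows a
minimal SLURM batch script and the commands used to manage it; the
partition name, program path, and resource values are illustrative
placeholders rather than recommendations for any particular cluster.

```bash
#!/bin/bash
#SBATCH --job-name=demo        # name shown in the queue
#SBATCH --ntasks=4             # number of tasks (processes)
#SBATCH --cpus-per-task=2      # cores allocated to each task
#SBATCH --time=00:10:00        # wall-clock limit (hh:mm:ss)
#SBATCH --partition=compute    # placeholder partition name

# srun launches the tasks on the resources SLURM allocated
srun ./my_program
```

The script is submitted with `sbatch job.sh`, queued and running jobs
can be inspected with `squeue -u $USER`, and a job is removed from the
queue with `scancel <jobid>`.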
| cloudmesh/book | cloud-technologies/chapters/tech/15-12-Slurm.md | Markdown | apache-2.0 | 1,574 |
<?php
defined('ABSPATH') or die('No script kiddies please!');
/**
 * Render numbered pagination links for archive pages.
 *
 * Outputs a Bootstrap-style pagination list with previous/next links,
 * the current page, up to two pages on either side, and first/last
 * page links with ellipses where needed.
 *
* @return void
*/
function Yonk_numeric_posts_nav() {
if(is_singular())
return;
global $wp_query;
/** Stop execution if there's only 1 page */
if($wp_query->max_num_pages <= 1)
return;
$paged = get_query_var('paged') ? absint(get_query_var('paged')) : 1;
$max = intval($wp_query->max_num_pages);
/** Add current page to the array */
if ($paged >= 1)
$links[] = $paged;
/** Add the pages around the current page to the array */
if ($paged >= 3) {
$links[] = $paged - 1;
$links[] = $paged - 2;
}
if (($paged + 2) <= $max) {
$links[] = $paged + 2;
$links[] = $paged + 1;
}
echo '<div class="navigation"><ul class="pagination">' . "\n";
/** Previous Post Link */
if (get_previous_posts_link())
printf( '<li>%s</li>' . "\n", get_previous_posts_link() );
/** Link to first page, plus ellipses if necessary */
if (!in_array(1, $links )) {
$class = 1 == $paged ? ' class="active"' : '';
        printf('<li%s><a href="%s" aria-label="Previous"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link(1)), '1');
if (!in_array(2, $links))
echo '<li>…</li>';
}
/** Link to current page, plus 2 pages in either direction if necessary */
sort($links);
foreach ((array)$links as $link) {
$class = $paged == $link ? ' class="active"' : '';
printf('<li%s><a href="%s">%s</a></li>' . "\n", $class, esc_url(get_pagenum_link($link)), $link);
}
/** Link to last page, plus ellipses if necessary */
if (!in_array($max, $links)) {
if (!in_array($max - 1, $links))
echo '<li>…</li>' . "\n";
$class = $paged == $max ? ' class="active"' : '';
printf('<li%s><a href="%s" aria-label="Next"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link($max)), $max);
}
/** Next Post Link */
if (get_next_posts_link())
printf('<li>%s</li>' . "\n", get_next_posts_link());
echo '</ul></div>' . "\n";
} | Patreo/yonk | wp-content/themes/YonkTheme/yonk-core/plugins/pagenavi.php | PHP | apache-2.0 | 2,213 |
package org.targettest.org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.targettest.org.apache.lucene.document.Document;
import org.targettest.org.apache.lucene.document.FieldSelector;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions;
import org.targettest.org.apache.lucene.search.DefaultSimilarity;
import org.targettest.org.apache.lucene.search.FieldCache;
/** An IndexReader which reads multiple indexes, appending
* their content. */
public class MultiReader extends IndexReader implements Cloneable {
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
   * <p>Note that all subreaders are closed if this MultiReader is closed.</p>
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
}
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
}
private void initialize(IndexReader[] subReaders, boolean closeSubReaders) {
this.subReaders = subReaders.clone();
starts = new int[subReaders.length + 1]; // build starts array
decrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
starts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc(); // compute maxDocs
if (!closeSubReaders) {
subReaders[i].incRef();
decrefOnClose[i] = true;
} else {
decrefOnClose[i] = false;
}
if (subReaders[i].hasDeletions())
hasDeletions = true;
}
starts[subReaders.length] = maxDoc;
}
/**
* Tries to reopen the subreaders.
* <br>
* If one or more subreaders could be re-opened (i. e. subReader.reopen()
* returned a new instance != subReader), then a new MultiReader instance
* is returned, otherwise this instance is returned.
* <p>
* A re-opened instance might share one or more subreaders with the old
* instance. Index modification operations result in undefined behavior
* when performed before the old instance is closed.
* (see {@link IndexReader#reopen()}).
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
@Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
/**
* Clones the subreaders.
* (see {@link IndexReader#clone()}).
* <br>
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*/
@Override
public synchronized Object clone() {
try {
return doReopen(true);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
* If clone is true then we clone each of the subreaders
* @param doClone
* @return New IndexReader, or same one (this) if
* reopen/clone is not necessary
* @throws CorruptIndexException
* @throws IOException
*/
protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
ensureOpen();
boolean reopened = false;
IndexReader[] newSubReaders = new IndexReader[subReaders.length];
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++) {
if (doClone)
newSubReaders[i] = (IndexReader) subReaders[i].clone();
else
newSubReaders[i] = subReaders[i].reopen();
// if at least one of the subreaders was updated we remember that
// and return a new MultiReader
if (newSubReaders[i] != subReaders[i]) {
reopened = true;
}
}
success = true;
} finally {
if (!success && reopened) {
for (int i = 0; i < newSubReaders.length; i++) {
if (newSubReaders[i] != subReaders[i]) {
try {
newSubReaders[i].close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (reopened) {
boolean[] newDecrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
if (newSubReaders[i] == subReaders[i]) {
newSubReaders[i].incRef();
newDecrefOnClose[i] = true;
}
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
return mr;
} else {
return this;
}
}
@Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
@Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
@Override
public boolean isOptimized() {
return false;
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
// numDocs... but that's harmless
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
@Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
subReaders[i].norms(field, bytes, starts[i]);
normsCache.put(field, bytes); // update cache
return bytes;
}
@Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
} else if (bytes != null) { // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
} else {
for (int i = 0; i < subReaders.length; i++) { // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
}
}
}
@Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
@Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit(commitUserData);
}
@Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < subReaders.length; i++) {
if (decrefOnClose[i]) {
subReaders[i].decRef();
} else {
subReaders[i].close();
}
}
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("MultiReader does not support this method.");
}
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
}
| chrishumphreys/provocateur | provocateur-thirdparty/src/main/java/org/targettest/org/apache/lucene/index/MultiReader.java | Java | apache-2.0 | 14,073 |
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.kafka;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.KafkaTopicList;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityTopicOperatorSpec;
import io.strimzi.api.kafka.model.EntityUserOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaClusterSpec;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.SystemProperty;
import io.strimzi.api.kafka.model.SystemPropertyBuilder;
import io.strimzi.api.kafka.model.ZookeeperClusterSpec;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.Constants;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
import io.strimzi.systemtest.annotations.OpenShiftOnly;
import io.strimzi.systemtest.annotations.ParallelNamespaceTest;
import io.strimzi.systemtest.cli.KafkaCmdClient;
import io.strimzi.systemtest.kafkaclients.internalClients.InternalKafkaClient;
import io.strimzi.systemtest.resources.ResourceOperation;
import io.strimzi.systemtest.resources.crd.KafkaResource;
import io.strimzi.systemtest.resources.crd.KafkaTopicResource;
import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
import io.strimzi.systemtest.templates.crd.KafkaUserTemplates;
import io.strimzi.systemtest.utils.StUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.ConfigMapUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.StatefulSetUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils;
import io.strimzi.test.TestUtils;
import io.strimzi.test.executor.ExecResult;
import io.strimzi.test.timemeasuring.Operation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.hamcrest.CoreMatchers;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import static io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName;
import static io.strimzi.api.kafka.model.KafkaResources.zookeeperStatefulSetName;
import static io.strimzi.systemtest.Constants.CRUISE_CONTROL;
import static io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED;
import static io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED;
import static io.strimzi.systemtest.Constants.REGRESSION;
import static io.strimzi.systemtest.Constants.STATEFUL_SET;
import static io.strimzi.systemtest.utils.StUtils.configMap2Properties;
import static io.strimzi.systemtest.utils.StUtils.stringToProperties;
import static io.strimzi.test.TestUtils.fromYamlString;
import static io.strimzi.test.TestUtils.map;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyOrNullString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
@Tag(REGRESSION)
@SuppressWarnings("checkstyle:ClassFanOutComplexity")
class KafkaST extends AbstractST {
private static final Logger LOGGER = LogManager.getLogger(KafkaST.class);
private static final String TEMPLATE_PATH = TestUtils.USER_PATH + "/../packaging/examples/templates/cluster-operator";
public static final String NAMESPACE = "kafka-cluster-test";
private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster";
@ParallelNamespaceTest
@OpenShiftOnly
void testDeployKafkaClusterViaTemplate(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
cluster.createCustomResources(extensionContext, TEMPLATE_PATH);
String templateName = "strimzi-ephemeral";
cmdKubeClient(namespaceName).createResourceAndApply(templateName, map("CLUSTER_NAME", OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME), 1);
//Testing docker images
testDockerImagesForKafkaCluster(OPENSHIFT_CLUSTER_NAME, NAMESPACE, namespaceName, 3, 3, false);
//Testing labels
verifyLabelsForKafkaCluster(NAMESPACE, namespaceName, OPENSHIFT_CLUSTER_NAME, templateName);
LOGGER.info("Deleting Kafka cluster {} after test", OPENSHIFT_CLUSTER_NAME);
cmdKubeClient(namespaceName).deleteByName("Kafka", OPENSHIFT_CLUSTER_NAME);
//Wait for kafka deletion
cmdKubeClient(namespaceName).waitForResourceDeletion(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME));
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME));
}
@ParallelNamespaceTest
void testEODeletion(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Get pod name to check termination process
Pod pod = kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findAny()
.orElseThrow();
assertThat("Entity operator pod does not exist", pod, notNullValue());
LOGGER.info("Setting entity operator to null");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().setEntityOperator(null), namespaceName);
// Wait when EO(UO + TO) will be removed
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
PodUtils.deletePodWithWait(namespaceName, pod.getMetadata().getName());
LOGGER.info("Entity operator was deleted");
}
@ParallelNamespaceTest
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
envVarGeneral.put("TEST_ENV_1", "test.env.one");
envVarGeneral.put("TEST_ENV_2", "test.env.two");
LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
envVarUpdated.put("TEST_ENV_3", "test.env.three");
// Kafka Broker config
Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "1");
kafkaConfig.put("transaction.state.log.replication.factor", "1");
kafkaConfig.put("default.replication.factor", "1");
Map<String, Object> updatedKafkaConfig = new HashMap<>();
updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
updatedKafkaConfig.put("default.replication.factor", "2");
// Zookeeper Config
Map<String, Object> zookeeperConfig = new HashMap<>();
zookeeperConfig.put("tickTime", "2000");
zookeeperConfig.put("initLimit", "5");
zookeeperConfig.put("syncLimit", "2");
zookeeperConfig.put("autopurge.purgeInterval", "1");
Map<String, Object> updatedZookeeperConfig = new HashMap<>();
updatedZookeeperConfig.put("tickTime", "2500");
updatedZookeeperConfig.put("initLimit", "3");
updatedZookeeperConfig.put("syncLimit", "5");
final int initialDelaySeconds = 30;
final int timeoutSeconds = 10;
final int updatedInitialDelaySeconds = 31;
final int updatedTimeoutSeconds = 11;
final int periodSeconds = 10;
final int successThreshold = 1;
final int failureThreshold = 3;
final int updatedPeriodSeconds = 5;
final int updatedFailureThreshold = 1;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2)
.editSpec()
.editKafka()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.withConfig(kafkaConfig)
.withNewTemplate()
.withNewKafkaContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withReplicas(2)
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endLivenessProbe()
.withConfig(zookeeperConfig)
.withNewTemplate()
.withNewZookeeperContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTopicOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endUserOperatorContainer()
.withNewTlsSidecarContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTlsSidecarContainer()
.endTemplate()
.editUserOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endUserOperator()
.editTopicOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTopicOperator()
.withNewTlsSidecar()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTlsSidecar()
.endEntityOperator()
.endSpec()
.build());
final Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
final Map<String, String> zkSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName));
final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
LOGGER.info("Verify values before update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral);
String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=1"));
String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral);
LOGGER.info("Checking configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral);
LOGGER.info("Updating configuration of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka();
kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.setConfig(updatedKafkaConfig);
kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper();
zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.setConfig(updatedZookeeperConfig);
zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
// Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds
EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator();
entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName), 2, zkSnapshot);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 2, kafkaSnapshot);
DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
LOGGER.info("Verify values after update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated);
kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=2"));
kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated);
LOGGER.info("Getting entity operator to check configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
}
@ParallelNamespaceTest
void testJvmAndResources(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug")
.withValue("verbose").build());
Map<String, String> jvmOptionsXX = new HashMap<>();
jvmOptionsXX.put("UseG1GC", "true");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
.editSpec()
.editKafka()
.withResources(new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1.5Gi"))
.addToLimits("cpu", new Quantity("1"))
.addToRequests("memory", new Quantity("1Gi"))
.addToRequests("cpu", new Quantity("50m"))
.build())
.withNewJvmOptions()
.withXmx("1g")
.withXms("512m")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endKafka()
.editZookeeper()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1G"))
.addToLimits("cpu", new Quantity("0.5"))
.addToRequests("memory", new Quantity("0.5G"))
.addToRequests("cpu", new Quantity("25m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endZookeeper()
.withNewEntityOperator()
.withNewTopicOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1024Mi"))
.addToLimits("cpu", new Quantity("500m"))
.addToRequests("memory", new Quantity("384Mi"))
.addToRequests("cpu", new Quantity("0.025"))
.build())
.withNewJvmOptions()
.withXmx("2G")
.withXms("1024M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endTopicOperator()
.withNewUserOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("512M"))
.addToLimits("cpu", new Quantity("300m"))
.addToRequests("memory", new Quantity("256M"))
.addToRequests("cpu", new Quantity("30m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
// Make snapshots of the Kafka cluster to make sure that there is no rolling update after CO reconciliation
final String zkStsName = KafkaResources.zookeeperStatefulSetName(clusterName);
final String kafkaStsName = kafkaStatefulSetName(clusterName);
final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName);
final Map<String, String> zkPods = StatefulSetUtils.ssSnapshot(namespaceName, zkStsName);
final Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStsName);
final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName);
assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"1536Mi", "1", "1Gi", "50m");
assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"-Xmx1g", "-Xms512m", "-XX:+UseG1GC");
assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"1G", "500m", "500M", "25m");
assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"-Xmx1G", "-Xms512M", "-XX:+UseG1GC");
Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName)
.stream().filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findFirst();
assertThat("EO pod does not exist", pod.isPresent(), is(true));
assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"1Gi", "500m", "384Mi", "25m");
assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"512M", "300m", "256M", "30m");
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"-Xmx2G", "-Xms1024M", null);
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"-Xmx1G", "-Xms512M", null);
String eoPod = eoPods.keySet().toArray()[0].toString();
kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> {
if (!container.getName().equals("tls-sidecar")) {
LOGGER.info("Check if -D java options are present in {}", container.getName());
String javaSystemProp = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findFirst().orElseThrow().getValue();
String javaOpts = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_OPTS")).findFirst().orElseThrow().getValue();
assertThat(javaSystemProp, is("-Djavax.net.debug=verbose"));
if (container.getName().equals("topic-operator")) {
assertThat(javaOpts, is("-Xms1024M -Xmx2G"));
}
if (container.getName().equals("user-operator")) {
assertThat(javaOpts, is("-Xms512M -Xmx1G"));
}
}
});
LOGGER.info("Checking no rolling update for Kafka cluster");
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, zkStsName, zkPods);
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, kafkaStsName, kafkaPods);
DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods);
}
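// Verifies bidirectional synchronization by the Topic Operator: topics created, updated and deleted either as
// KafkaTopic resources or via the pod CLI are reflected on the other side.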
@ParallelNamespaceTest
void testForTopicOperator(ExtensionContext extensionContext) throws InterruptedException {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final String cliTopicName = "topic-from-cli";
//Creating topics for testing
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
KafkaTopicUtils.waitForKafkaTopicReady(namespaceName, topicName);
assertThat(KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicName).get().getMetadata().getName(), is(topicName));
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItem(topicName));
KafkaCmdClient.createTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName, 1, 1);
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItems(topicName, cliTopicName));
assertThat(cmdKubeClient(namespaceName).list(KafkaTopic.RESOURCE_KIND), hasItems(cliTopicName, topicName));
//Updating first topic using pod CLI
KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespaceName, clusterName, 0, topicName, 2);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, topicName),
hasItems("PartitionCount:2"));
KafkaTopic testTopic = fromYamlString(cmdKubeClient().get(KafkaTopic.RESOURCE_KIND, topicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
//Updating second topic via KafkaTopic update
KafkaTopicResource.replaceTopicResourceInSpecificNamespace(cliTopicName, topic -> topic.getSpec().setPartitions(2), namespaceName);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName),
hasItems("PartitionCount:2"));
testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, cliTopicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
//Deleting the CLI-created topic by deleting its KafkaTopic resource
cmdKubeClient(namespaceName).deleteByName(KafkaTopic.RESOURCE_KIND, cliTopicName);
//Deleting another topic using pod CLI
KafkaCmdClient.deleteTopicUsingPodCli(namespaceName, clusterName, 0, topicName);
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);
//Checking all topics were deleted
Thread.sleep(Constants.TIMEOUT_TEARDOWN);
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems(topicName)));
assertThat(topics, not(hasItems(cliTopicName)));
}
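// Removes the Topic Operator from the Entity Operator spec, checks that the EO pod is recreated without the
// topic-operator container, then adds it back and checks that the container is present again.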
@ParallelNamespaceTest
void testRemoveTopicOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(null), namespaceName);
//Waiting until the EO pod is recreated without the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that TO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()), namespaceName);
//Waiting until the EO pod is recreated with the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that TO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
}
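// Removes the User Operator from the Entity Operator spec, checks that the EO pod is recreated without the
// user-operator container, then adds it back and checks that the container is present again.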
@ParallelNamespaceTest
void testRemoveUserOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(null), namespaceName);
//Waiting until the EO pod is recreated without the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that UO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()), namespaceName);
//Waiting until the EO pod is recreated with the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that UO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
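// Removes both the Topic Operator and the User Operator, which scales the Entity Operator deployment down to zero
// pods, then re-adds them and verifies the EO deployment comes back with the expected containers.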
@ParallelNamespaceTest
void testRemoveUserAndTopicOperatorsFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// TODO issue #4152 - temporarily disabled for Namespace RBAC scoped
assumeFalse(Environment.isNamespaceRbacScope());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(null);
k.getSpec().getEntityOperator().setUserOperator(null);
}, namespaceName);
PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, eoDeploymentName, 0);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec());
k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec());
}, namespaceName);
DeploymentUtils.waitForDeploymentReady(namespaceName, eoDeploymentName);
//Checking that EO was created
kubeClient().listPodsByPrefixInName(namespaceName, eoDeploymentName).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
@ParallelNamespaceTest
void testEntityOperatorWithoutTopicOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewUserOperator()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that TO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewTopicOperator()
.endTopicOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that UO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserAndTopicOperators(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO and TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that EO was not deployed
assertThat("EO should not be deployed", kubeClient().listPodsByPrefixInName(KafkaResources.entityOperatorDeploymentName(clusterName)).size(), is(0));
}
@ParallelNamespaceTest
void testTopicWithoutLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// Negative scenario: create a topic without any labels and make sure that the TO doesn't handle it
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Creating topic without any label
resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, "topic-without-labels", 1, 1, 1)
.editMetadata()
.withLabels(null)
.endMetadata()
.build());
// Checking that resource was created
assertThat(cmdKubeClient(namespaceName).list("kafkatopic"), hasItems("topic-without-labels"));
// Checking that the TO didn't handle the new topic, so it is not present in the Kafka cluster
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), not(hasItems("topic-without-labels")));
// Checking TO logs
String tOPodName = cmdKubeClient(namespaceName).listResourcesByLabel("pod", Labels.STRIMZI_NAME_LABEL + "=" + clusterName + "-entity-operator").get(0);
String tOlogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, tOPodName, "topic-operator");
assertThat(tOlogs, not(containsString("Created topic 'topic-without-labels'")));
//Deleting topic
cmdKubeClient(namespaceName).deleteByName("kafkatopic", "topic-without-labels");
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, "topic-without-labels");
//Checking all topics were deleted
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems("topic-without-labels")));
}
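// The three testKafkaJBODDeleteClaims* tests below share the same flow and differ only in the deleteClaim flags of
// the two JBOD volumes: PVC names and labels are verified and, after the cluster is deleted, PVC deletion is
// verified against the JBOD storage definition.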
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
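// Deploys Kafka with two JBOD volumes of different sizes, checks that the created PVCs request the expected storage
// sizes and that messages can still be produced and consumed.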
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testPersistentStorageSize(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String[] diskSizes = {"70Gi", "20Gi"};
final int kafkaRepl = 2;
final int diskCount = 2;
JbodStorage jbodStorage = new JbodStorageBuilder()
.withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build()
).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaRepl)
.editSpec()
.editKafka()
.withStorage(jbodStorage)
.endKafka()
.editZookeeper()
.withReplicas(1)
.endZookeeper()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
List<PersistentVolumeClaim> volumes = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount);
String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
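// Adding a load-balancer listener to an existing cluster rolls the Kafka pods and regenerates the per-broker
// certificates, so the broker secret data must differ before and after the change.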
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Creating kafka without external listener");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());
final String brokerSecret = clusterName + "-kafka-brokers";
Secret secretsWithoutExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Editing kafka with external listener");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
List<GenericKafkaListener> lst = asList(
new GenericKafkaListenerBuilder()
.withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.withPort(9092)
.withType(KafkaListenerType.INTERNAL)
.withTls(false)
.build(),
new GenericKafkaListenerBuilder()
.withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
.withPort(9094)
.withType(KafkaListenerType.LOADBALANCER)
.withTls(true)
.withNewConfiguration()
.withFinalizers(LB_FINALIZERS)
.endConfiguration()
.build()
);
kafka.getSpec().getKafka().setListeners(lst);
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName)));
Secret secretsWithExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Checking secrets");
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).forEach(kafkaPod -> {
String kafkaPodName = kafkaPod.getMetadata().getName();
assertThat(secretsWithExt.getData().get(kafkaPodName + ".crt"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".crt"))));
assertThat(secretsWithExt.getData().get(kafkaPodName + ".key"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".key"))));
});
}
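// Changes, and later removes, custom labels on the Kafka CR and verifies they are propagated to (and removed from)
// the Kafka Services, ConfigMaps, StatefulSet and pods while messages can still be produced and consumed.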
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
Map<String, String> labels = new HashMap<>();
final String[] labelKeys = {"label-name-1", "label-name-2", ""};
final String[] labelValues = {"name-of-the-label-1", "name-of-the-label-2", ""};
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editMetadata()
.withLabels(labels)
.endMetadata()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Getting labels from stateful set resource");
StatefulSet statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
LOGGER.info("Verifying default labels in the Kafka CR");
assertThat("Label exists in stateful set with concrete value",
labelValues[0].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[0])));
assertThat("Label exists in stateful set with concrete value",
labelValues[1].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[1])));
labelValues[0] = "new-name-of-the-label-1";
labelValues[1] = "new-name-of-the-label-2";
labelKeys[2] = "label-name-3";
labelValues[2] = "name-of-the-label-3";
LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}",
"name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]);
LOGGER.info("Edit kafka labels in Kafka CR");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]);
resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]);
resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]);
}, namespaceName);
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
labels.put(labelKeys[2], labelValues[2]);
LOGGER.info("Waiting for kafka service labels changed {}", labels);
ServiceUtils.waitForServiceLabelsChange(namespaceName, KafkaResources.brokersServiceName(clusterName), labels);
LOGGER.info("Verifying kafka labels via services");
Service service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyPresentLabels(labels, service);
LOGGER.info("Waiting for kafka config map labels changed {}", labels);
ConfigMapUtils.waitForConfigMapLabelsChange(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labels);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMap configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyPresentLabels(labels, configMap);
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Verifying kafka labels via stateful set");
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
verifyPresentLabels(labels, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
LOGGER.info("Verifying via kafka pods");
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0])));
assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1])));
assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2])));
LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]),
labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2]));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().remove(labelKeys[0]);
resource.getMetadata().getLabels().remove(labelKeys[1]);
resource.getMetadata().getLabels().remove(labelKeys[2]);
}, namespaceName);
labels.remove(labelKeys[0]);
labels.remove(labelKeys[1]);
labels.remove(labelKeys[2]);
LOGGER.info("Waiting for kafka service labels deletion {}", labels.toString());
ServiceUtils.waitForServiceLabelsDeletion(namespaceName, KafkaResources.brokersServiceName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying kafka labels via services");
service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyNullLabels(labelKeys, service);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMapUtils.waitForConfigMapLabelsDeletion(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyNullLabels(labelKeys, configMap);
LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
String statefulSetName = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getName();
StatefulSetUtils.waitForStatefulSetLabelsDeletion(namespaceName, statefulSetName, labelKeys[0], labelKeys[1], labelKeys[2]);
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
LOGGER.info("Verifying kafka labels via stateful set");
verifyNullLabels(labelKeys, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
LOGGER.info("Waiting for kafka pod labels deletion {}", labels.toString());
PodUtils.waitUntilPodLabelsDeletion(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), labelKeys[0], labelKeys[1], labelKeys[2]);
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
LOGGER.info("Verifying via kafka pods");
verifyNullLabels(labelKeys, labels);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testAppDomainLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> labels;
LOGGER.info("---> PODS <---");
List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream()
.filter(pod -> pod.getMetadata().getName().startsWith(clusterName))
.filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS))
.collect(Collectors.toList());
for (Pod pod : pods) {
LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
verifyAppLabels(pod.getMetadata().getLabels());
}
LOGGER.info("---> STATEFUL SETS <---");
LOGGER.info("Getting labels from stateful set of kafka resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("Getting labels from stateful set of zookeeper resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("---> SERVICES <---");
List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream()
.filter(service -> service.getMetadata().getName().startsWith(clusterName))
.collect(Collectors.toList());
for (Service service : services) {
LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
verifyAppLabels(service.getMetadata().getLabels());
}
LOGGER.info("---> SECRETS <---");
List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
.filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque"))
.collect(Collectors.toList());
for (Secret secret : secrets) {
LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
}
LOGGER.info("---> CONFIG MAPS <---");
List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName);
for (ConfigMap configMap : configMaps) {
LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
}
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
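// Deploys two Kafka clusters and verifies that a KafkaUser labelled for the first cluster is reconciled only by the
// first cluster's User Operator.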
@ParallelNamespaceTest
void testUOListeningOnlyUsersInSameCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String firstClusterName = "my-cluster-1";
final String secondClusterName = "my-cluster-2";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(firstClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(firstClusterName, userName).build());
LOGGER.info("Verifying that user {} in cluster {} is created", userName, firstClusterName);
String entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(firstClusterName)).get(0);
String uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, containsString("User " + userName + " in namespace " + namespaceName + " was ADDED"));
LOGGER.info("Verifying that user {} in cluster {} is not created", userName, secondClusterName);
entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(secondClusterName)).get(0);
uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, not(containsString("User " + userName + " in namespace " + namespaceName + " was ADDED")));
LOGGER.info("Verifying that user belongs to {} cluster", firstClusterName);
String kafkaUserResource = cmdKubeClient(namespaceName).getResourceAsYaml("kafkauser", userName);
assertThat(kafkaUserResource, containsString(Labels.STRIMZI_CLUSTER_LABEL + ": " + firstClusterName));
}
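// Verifies that produced messages end up in the topic's log segment on disk and survive deletion of all Kafka pods.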
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testMessagesAreStoredInDisk(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).build());
Map<String, String> kafkaPodsSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
TestUtils.waitFor("KafkaTopic creation inside kafka pod", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
() -> cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(topicName));
String topicDirNameInPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + topicName + "/p'").out();
String commandToGetDataFromTopic =
"cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log";
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
String topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetDataFromTopic).out();
LOGGER.info("Topic {} is present in kafka broker {} with no data", topicName, KafkaResources.kafkaPodName(clusterName, 0));
assertThat("Topic contains data", topicData, emptyOrNullString());
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
List<Pod> kafkaPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
for (Pod kafkaPod : kafkaPods) {
LOGGER.info("Deleting kafka pod {}", kafkaPod.getMetadata().getName());
kubeClient(namespaceName).deletePod(namespaceName, kafkaPod);
}
LOGGER.info("Wait for kafka to rolling restart ...");
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 1, kafkaPodsSnapshot);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
}
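// Configures __consumer_offsets with 100 partitions and verifies that, after messages are produced and consumed,
// all 100 partition directories exist in the broker's data directory.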
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testConsumerOffsetFiles(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "3");
kafkaConfig.put("offsets.topic.num.partitions", "100");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.withConfig(kafkaConfig)
.endKafka()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/;" +
"ls -1 | sed -n \"s#__consumer_offsets-\\([0-9]*\\)#\\1#p\" | sort -V";
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
String result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
// TODO / FIXME
//assertThat("Folder kafka-log0 has data in files:\n" + result, result.equals(""));
LOGGER.info("Result: \n" + result);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
StringBuilder stringToMatch = new StringBuilder();
for (int i = 0; i < 100; i++) {
stringToMatch.append(i).append("\n");
}
assertThat("Folder kafka-log0 doesn't contain 100 files", result, containsString(stringToMatch.toString()));
}
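// Verifies that labels and annotations from the PersistentVolumeClaim template are applied to the Kafka and
// ZooKeeper PVCs and that updating them in the Kafka CR is reflected on the existing PVCs.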
@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String labelAnnotationKey = "testKey";
final String firstValue = "testValue";
final String changedValue = "editedTestValue";
Map<String, String> pvcLabel = new HashMap<>();
pvcLabel.put(labelAnnotationKey, firstValue);
Map<String, String> pvcAnnotation = pvcLabel;
Map<String, String> statefulSetLabels = new HashMap<>();
statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewStatefulset()
.withNewMetadata()
.withLabels(statefulSetLabels)
.endMetadata()
.endStatefulset()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withStorage(new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder()
.withDeleteClaim(false)
.withId(0)
.withSize("20Gi")
.build(),
new PersistentClaimStorageBuilder()
.withDeleteClaim(true)
.withId(1)
.withSize("10Gi")
.build())
.build())
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withNewPersistentClaimStorage()
.withDeleteClaim(false)
.withId(0)
.withSize("3Gi")
.endPersistentClaimStorage()
.endZookeeper()
.endSpec()
.build());
LOGGER.info("Check if Kubernetes labels are applied");
Map<String, String> actualStatefulSetLabels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
LOGGER.info("Kubernetes labels are correctly set and present");
List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
}
pvcLabel.put(labelAnnotationKey, changedValue);
pvcAnnotation.put(labelAnnotationKey, changedValue);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
}, namespaceName);
PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
LOGGER.info(pvcs.toString());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
}
}
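// Misconfiguration test: replication-factor related options higher than 'spec.kafka.replicas' must be reported in
// the Kafka status condition message.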
@ParallelNamespaceTest
void testKafkaOffsetsReplicationFactorHigherThanReplicas(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.addToConfig("offsets.topic.replication.factor", 4)
.addToConfig("transaction.state.log.min.isr", 4)
.addToConfig("transaction.state.log.replication.factor", 4)
.endKafka()
.endSpec().build());
KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, namespaceName,
"Kafka configuration option .* should be set to " + 3 + " or less because 'spec.kafka.replicas' is " + 3);
}
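// Deploys Kafka, ZooKeeper, Entity Operator, Kafka Exporter and Cruise Control containers with a read-only root
// filesystem and verifies the cluster becomes ready and can produce and consume messages.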
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@Tag(CRUISE_CONTROL)
void testReadOnlyRootFileSystem(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewKafkaContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewZookeeperContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewTopicOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endUserOperatorContainer()
.endTemplate()
.endEntityOperator()
.editOrNewKafkaExporter()
.withNewTemplate()
.withNewContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endContainer()
.endTemplate()
.endKafkaExporter()
.editOrNewCruiseControl()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewCruiseControlContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endCruiseControlContainer()
.endTemplate()
.endCruiseControl()
.endSpec()
.build());
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
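// Asserts that every expected configuration entry is present both in the <cluster>-kafka-config ConfigMap and in
// /tmp/strimzi.properties inside each Kafka pod.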
protected void checkKafkaConfiguration(String namespaceName, String podNamePrefix, Map<String, Object> config, String clusterName) {
LOGGER.info("Checking kafka configuration");
List<Pod> pods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, podNamePrefix);
Properties properties = configMap2Properties(kubeClient(namespaceName).getConfigMap(namespaceName, clusterName + "-kafka-config"));
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(properties.keySet().contains(key), is(true));
assertThat(properties.getProperty(key), is(val));
}
for (Pod pod: pods) {
ExecResult result = cmdKubeClient(namespaceName).execInPod(pod.getMetadata().getName(), "/bin/bash", "-c", "cat /tmp/strimzi.properties");
Properties execProperties = stringToProperties(result.out());
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(execProperties.keySet().contains(key), is(true));
assertThat(execProperties.getProperty(key), is(val));
}
}
}
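    /**
     * Asserts that every Kafka PVC requests the expected storage size. The volumes list is
     * expected in replica-major order (diskCount claims per replica), and each claim is checked
     * against the diskSizes entry of its replica.
     */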
void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
int k = 0;
for (int i = 0; i < kafkaRepl; i++) {
for (int j = 0; j < diskCount; j++) {
LOGGER.info("Checking volume {} and size of storage {}", volumes.get(k).getMetadata().getName(),
volumes.get(k).getSpec().getResources().getRequests().get("storage"));
assertThat(volumes.get(k).getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizes[i])));
k++;
}
}
}
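    /**
     * Verifies the PVCs backing a JBOD Kafka cluster: every claim carries the expected Strimzi
     * cluster/kind/name labels and requested size, claim names follow the
     * data-{diskId}-{clusterName}-kafka-{podIndex} pattern, and each Kafka pod mounts the
     * matching volumes and claims.
     */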
void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) {
ArrayList<String> pvcs = new ArrayList<>();
kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
.filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka"))
.forEach(volume -> {
String volumeName = volume.getMetadata().getName();
pvcs.add(volumeName);
LOGGER.info("Checking labels for volume:" + volumeName);
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka")));
assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi")));
});
LOGGER.info("Checking PVC names included in JBOD array");
for (int i = 0; i < kafkaReplicas; i++) {
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
LOGGER.info("Checking PVC on Kafka pods");
for (int i = 0; i < kafkaReplicas; i++) {
ArrayList<String> dataSourcesOnPod = new ArrayList<>();
ArrayList<String> pvcsOnPod = new ArrayList<>();
LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
dataSourcesOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getName());
pvcsOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getPersistentVolumeClaim().getClaimName());
}
LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(dataSourcesOnPod.contains("data-" + j), is(true));
assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
}
void verifyPresentLabels(Map<String, String> labels, HasMetadata resources) {
for (Map.Entry<String, String> label : labels.entrySet()) {
assertThat("Label exists with concrete value in HasMetadata(Services, CM, STS) resources",
label.getValue().equals(resources.getMetadata().getLabels().get(label.getKey())));
}
}
void verifyNullLabels(String[] labelKeys, Map<String, String> labels) {
for (String labelKey : labelKeys) {
assertThat(labels.get(labelKey), nullValue());
}
}
void verifyNullLabels(String[] labelKeys, HasMetadata resources) {
for (String labelKey : labelKeys) {
assertThat(resources.getMetadata().getLabels().get(labelKey), nullValue());
}
}
void verifyAppLabels(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
assertThat("Label " + Labels.STRIMZI_NAME_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_NAME_LABEL));
}
void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
}
@BeforeAll
void setup(ExtensionContext extensionContext) {
install = new SetupClusterOperator.SetupClusterOperatorBuilder()
.withExtensionContext(extensionContext)
.withNamespace(NAMESPACE)
.withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
.createInstallation()
.runInstallation();
}
protected void afterEachMayOverride(ExtensionContext extensionContext) throws Exception {
resourceManager.deleteResources(extensionContext);
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
if (cluster.getListOfDeployedResources().contains(TEMPLATE_PATH)) {
cluster.deleteCustomResources(extensionContext, TEMPLATE_PATH);
}
if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(OPENSHIFT_CLUSTER_NAME).get() != null) {
cmdKubeClient(namespaceName).deleteByName(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
}
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
kubeClient(namespaceName).getClient().customResources(CustomResourceDefinitionContext.fromCrd(Crds.kafkaTopic()), KafkaTopic.class, KafkaTopicList.class).inNamespace(namespaceName).delete();
kubeClient(namespaceName).getClient().persistentVolumeClaims().inNamespace(namespaceName).delete();
}
}
| scholzj/barnabas | systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java | Java | apache-2.0 | 107,527 |
# Tanarius kingii (Hook.f.) Kuntze SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Malpighiales/Euphorbiaceae/Macaranga/Macaranga kingii/ Syn. Tanarius kingii/README.md | Markdown | apache-2.0 | 189 |
import os,json
from cgi import escape
def unescape(s):
    s = s.replace("&lt;", "<")
    s = s.replace("&gt;", ">")
    # this has to be last:
    s = s.replace("&amp;", "&")
    return s
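# FilesystemMixin provides WebSocket RPC handlers for browsing and editing the
# local filesystem. Handlers that produce output push a JSON message of the
# form {"method": ..., "result": [...]} back over the attached socket (_.ws);
# the mutating handlers (mkdir, rmdir, touch, unlink, put) return nothing.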
class FilesystemMixin:
def h_fs_get(_,path,eltName=''):
from stat import S_ISDIR
data = (escape(open(path).read())
if not S_ISDIR(os.stat(path).st_mode)
else [(p,S_ISDIR(os.stat(path+'/'+p).st_mode))
for p in os.listdir(path)])
_.ws.send(json.dumps({"method":"fs_get","result":[path,data,eltName]}))
pass
def h_fs_put(_,path,data):
f=open(path,'w')
for x in data: f.write(unescape(x))
f.close()
pass
def h_fs_system(_,path,eltName='',cwd=None):
import subprocess as sp
import shlex
data=sp.Popen(shlex.split(path),cwd=cwd,stdout=sp.PIPE, stderr=sp.PIPE).communicate()
_.ws.send(json.dumps({"method":"fs_system","result":[path,data,eltName]}));
pass
def h_fs_mkdir (_,path): os.mkdir(path)
def h_fs_rmdir (_,path): os.rmdir(path)
def h_fs_touch (_,path): open(path,'w').close()
def h_fs_unlink(_,path): os.unlink(path)
pass
class FsApp(FilesystemMixin):
def __init__(_,ws):_.ws=ws
| val314159/framist | fssvr/fs.py | Python | apache-2.0 | 1,267 |
# Tephrosia retamoides var. genuina R.Vig. VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Fabales/Fabaceae/Tephrosia/Tephrosia retamoides/ Syn. Tephrosia retamoides genuina/README.md | Markdown | apache-2.0 | 197 |
# Marasmius microhaedinus Singer SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
Sydowia 18(1-6): 260, 338 (1965)
#### Original name
Marasmius microhaedinus Singer
### Remarks
null | mdoering/backbone | life/Fungi/Basidiomycota/Agaricomycetes/Agaricales/Marasmiaceae/Marasmius/Marasmius microhaedinus/README.md | Markdown | apache-2.0 | 217 |
# Bryum lamprostegum C. Müller, 1853 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Bryophyta/Bryopsida/Bryales/Bryaceae/Bryum/Bryum lamprostegum/README.md | Markdown | apache-2.0 | 193 |
# Malus domestica var. asiatica (Nakai) Ponomar. VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Rosales/Rosaceae/Malus/Malus asiatica/ Syn. Malus domestica asiatica/README.md | Markdown | apache-2.0 | 203 |
# Zanthoxylum ochroxylum DC. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Sapindales/Rutaceae/Zanthoxylum/Zanthoxylum ochroxylum/README.md | Markdown | apache-2.0 | 176 |
# Ponerorchis hemipilioides (Finet) Soó SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Asparagales/Orchidaceae/Amitostigma/Amitostigma hemipilioides/ Syn. Ponerorchis hemipilioides/README.md | Markdown | apache-2.0 | 195 |
# Navicula directa var. remota (Grunow) Cleve VARIETY
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Bacillariophyta/Bacillariophyceae/Naviculales/Naviculaceae/Navicula/Navicula directa/Navicula directa remota/README.md | Markdown | apache-2.0 | 201 |
# Sisymbrium nudum (Bél. ex Boiss.) Boiss. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Brassicales/Brassicaceae/Draba/Draba nuda/ Syn. Sisymbrium nudum/README.md | Markdown | apache-2.0 | 198 |
# Anthemis cossyrensis Guss. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Asterales/Asteraceae/Anthemis/Anthemis secundiramea/ Syn. Anthemis cossyrensis/README.md | Markdown | apache-2.0 | 183 |
# Lepidodendron crenatum SPECIES
#### Status
ACCEPTED
#### According to
Interim Register of Marine and Nonmarine Genera
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Pteridophyta/Lepidodendraceae/Lepidodendron/Lepidodendron crenatum/README.md | Markdown | apache-2.0 | 188 |
# Pleroma erigeron Spruce ex Triana SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Myrtales/Melastomataceae/Pleroma/Pleroma erigeron/README.md | Markdown | apache-2.0 | 183 |
# Ruagea insignis (C.DC.) T.D.Penn. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
Cabralea insignis C.DC.
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Sapindales/Meliaceae/Ruagea/Ruagea insignis/README.md | Markdown | apache-2.0 | 202 |
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"database/sql"
"database/sql/driver"
"fmt"
"hash/crc32"
"strings"
"time"
pbinlog "github.com/cwen0/cdb-syncer/protocol"
"github.com/go-sql-driver/mysql"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
tddl "github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
tmysql "github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/terror"
)
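// job is one unit of replication work: a SQL statement generated from a
// binlog event, its bound arguments, a key used to dispatch the job to a
// worker, whether it may be retried, and the binlog position it came from.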
type job struct {
tp pbinlog.BinlogType
sql string
args []interface{}
key string
retry bool
pos Position
}
func newJob(tp pbinlog.BinlogType, sql string, args []interface{}, key string, retry bool, pos Position) *job {
return &job{tp: tp, sql: sql, args: args, key: key, retry: retry, pos: pos}
}
func genHashKey(key string) uint32 {
return crc32.ChecksumIEEE([]byte(key))
}
func genPKey(rows []*pbinlog.Row) string {
var values []string
for _, row := range rows {
values = append(values, row.GetColumnValue())
}
return strings.Join(values, ",")
}
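// genInsertSQL turns an insert event into a REPLACE INTO statement. It
// returns the SQL text, a dispatch key built from the table name and
// primary-key values, and the bound argument values.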
func genInsertSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "replace into " + binlog.GetDbName() + "." + binlog.GetTableName() + "("
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + ","
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + ") values ("
	for range rows {
sql += "?,"
}
sql = sql[0:len(sql)-1] + ")"
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genUpdateSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "update " + binlog.GetDbName() + "." + binlog.GetTableName() + " set "
rows := binlog.GetRows()
for _, row := range rows {
sql += row.GetColumnName() + "=?,"
values = append(values, row.GetColumnValue())
}
sql = sql[0:len(sql)-1] + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDeleteSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) {
var sql string
var values []string
sql += "delete from " + binlog.GetDbName() + "." + binlog.GetTableName() + " where 1=1 "
for _, row := range binlog.GetPrimaryKey() {
sql += " and " + row.GetColumnName() + " = ? "
values = append(values, row.GetColumnValue())
}
return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil
}
func genDdlSQL(binlog *pbinlog.Binlog) ([]string, string, []interface{}, error) {
var sqls []string
empty := make([]interface{}, 0)
rows := binlog.GetRows()
for _, row := range rows {
tmpSqls, ok, err := resolveDDLSQL(row.GetSql())
if err != nil {
			return sqls, "", empty, errors.Errorf("parse ddl sql: %v failed: %v", row.GetSql(), err)
}
if !ok {
continue
}
for _, sql := range tmpSqls {
//var sql string
//if binlog.GetDbName() != "" {
//sql += "use " + binlog.GetDbName() + ";"
//}
//sql += s + ";"
sqls = append(sqls, sql)
}
}
return sqls, "", empty, nil
}
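// ignoreDDLError reports whether a DDL replay error is harmless, e.g. the
// database, table, column or index already exists or is already gone.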
func ignoreDDLError(err error) bool {
mysqlErr, ok := errors.Cause(err).(*mysql.MySQLError)
if !ok {
return false
}
errCode := terror.ErrCode(mysqlErr.Number)
switch errCode {
case infoschema.ErrDatabaseExists.Code(), infoschema.ErrDatabaseNotExists.Code(), infoschema.ErrDatabaseDropExists.Code(),
infoschema.ErrTableExists.Code(), infoschema.ErrTableNotExists.Code(), infoschema.ErrTableDropExists.Code(),
infoschema.ErrColumnExists.Code(), infoschema.ErrColumnNotExists.Code(),
infoschema.ErrIndexExists.Code(), tddl.ErrCantDropFieldOrKey.Code():
return true
default:
return false
}
}
func isRetryableError(err error) bool {
if err == driver.ErrBadConn {
return true
}
var e error
for {
e = errors.Cause(err)
if err == e {
break
}
err = e
}
mysqlErr, ok := err.(*mysql.MySQLError)
if ok {
if mysqlErr.Number == tmysql.ErrUnknown {
return true
}
return false
}
return true
}
func querySQL(db *sql.DB, query string) (*sql.Rows, error) {
var (
err error
rows *sql.Rows
)
for i := 0; i < maxRetryCount; i++ {
if i > 0 {
log.Warnf("query sql retry %d - %s", i, query)
time.Sleep(retryTimeout)
}
log.Debugf("[query][sql]%s", query)
rows, err = db.Query(query)
if err != nil {
if !isRetryableError(err) {
return rows, errors.Trace(err)
}
log.Warnf("[query][sql]%s[error]%v", query, err)
continue
}
return rows, nil
}
if err != nil {
log.Errorf("query sql[%s] failed %v", query, errors.ErrorStack(err))
return nil, errors.Trace(err)
}
return nil, errors.Errorf("query sql[%s] failed", query)
}
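// executeSQL applies the statements in a single transaction, retrying up to
// maxRetryCount times (when retry is true) on errors considered retryable.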
func executeSQL(db *sql.DB, sqls []string, args [][]interface{}, retry bool) error {
if len(sqls) == 0 {
return nil
}
var (
err error
txn *sql.Tx
)
retryCount := 1
if retry {
retryCount = maxRetryCount
}
LOOP:
for i := 0; i < retryCount; i++ {
if i > 0 {
log.Warnf("exec sql retry %d - %v - %v", i, sqls, args)
time.Sleep(retryTimeout)
}
txn, err = db.Begin()
if err != nil {
log.Errorf("exec sqls[%v] begin failed %v", sqls, errors.ErrorStack(err))
continue
}
for i := range sqls {
log.Debugf("[exec][sql]%s[args]%v", sqls[i], args[i])
_, err = txn.Exec(sqls[i], args[i]...)
if err != nil {
if !isRetryableError(err) {
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
break LOOP
}
log.Warnf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], err)
rerr := txn.Rollback()
if rerr != nil {
log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr)
}
continue LOOP
}
}
err = txn.Commit()
if err != nil {
log.Errorf("exec sqls[%v] commit failed %v", sqls, errors.ErrorStack(err))
continue
}
return nil
}
if err != nil {
log.Errorf("exec sqls[%v] failed %v", sqls, errors.ErrorStack(err))
return errors.Trace(err)
}
return errors.Errorf("exec sqls[%v] failed", sqls)
}
func createDB(cfg DBConfig) (*sql.DB, error) {
dbDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8&interpolateParams=true", cfg.User, cfg.Password, cfg.Host, cfg.Port)
db, err := sql.Open("mysql", dbDSN)
if err != nil {
return nil, errors.Trace(err)
}
return db, nil
}
func closeDB(db *sql.DB) error {
if db == nil {
return nil
}
return errors.Trace(db.Close())
}
func createDBs(cfg DBConfig, count int) ([]*sql.DB, error) {
dbs := make([]*sql.DB, 0, count)
for i := 0; i < count; i++ {
db, err := createDB(cfg)
if err != nil {
return nil, errors.Trace(err)
}
dbs = append(dbs, db)
}
return dbs, nil
}
func closeDBs(dbs ...*sql.DB) {
for _, db := range dbs {
err := closeDB(db)
if err != nil {
log.Errorf("close db failed - %v", err)
}
}
}
func parserDDLTableName(sql string) (TableName, error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
return TableName{}, errors.Trace(err)
}
var res TableName
switch v := stmt.(type) {
case *ast.CreateDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.DropDatabaseStmt:
res = genTableName(v.Name, "")
case *ast.CreateIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.CreateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropIndexStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.TruncateTableStmt:
res = genTableName(v.Table.Schema.L, v.Table.Name.L)
case *ast.DropTableStmt:
if len(v.Tables) != 1 {
			return res, errors.Errorf("failed to resolve DDL sql")
}
res = genTableName(v.Tables[0].Schema.L, v.Tables[0].Name.L)
default:
		return res, errors.Errorf("unknown DDL type")
}
return res, nil
}
func genTableName(schema string, table string) TableName {
return TableName{Schema: schema, Name: table}
}
// resolveDDLSQL resolves a DDL statement into individual per-table statements
// example: drop table test.a,test2.b -> drop table test.a; drop table test2.b;
func resolveDDLSQL(sql string) (sqls []string, ok bool, err error) {
stmt, err := parser.New().ParseOneStmt(sql, "", "")
if err != nil {
log.Errorf("Parser SQL error: %s", sql)
return nil, false, errors.Trace(err)
}
_, isDDL := stmt.(ast.DDLNode)
if !isDDL {
sqls = append(sqls, sql)
return
}
switch v := stmt.(type) {
case *ast.DropTableStmt:
var ex string
if v.IfExists {
ex = "if exists"
}
for _, t := range v.Tables {
var db string
if t.Schema.O != "" {
db = fmt.Sprintf("`%s`.", t.Schema.O)
}
s := fmt.Sprintf("drop table %s %s`%s`", ex, db, t.Name.O)
sqls = append(sqls, s)
}
default:
sqls = append(sqls, sql)
}
return sqls, true, nil
}
| cwen0/cdb-syncer | db.go | GO | apache-2.0 | 9,435 |
package com.basicalgorithms.coding_games;
import java.util.HashSet;
import java.util.Objects;
import java.util.Scanner;
import java.util.Set;
/**
* Original question: https://www.codingame.com/multiplayer/bot-programming/coders-strike-back
*/
public class CodersStrikeBack {
static double longestDist = Integer.MIN_VALUE;
static Point initialPoint = null;
static boolean hasFinishedOneLap;
static Point from = null;
static Point lastCheckpoint = null;
static final Set<Point> visitedCheckPoints = new HashSet<>();
static boolean hasBoosted = false;
public static void main(String args[]) {
Scanner in = new Scanner(System.in);
// game loop
while (true) {
int x = in.nextInt();
int y = in.nextInt();
int nextCheckpointX = in.nextInt(); // x position of the next check point
int nextCheckpointY = in.nextInt(); // y position of the next check point
int nextCheckpointDist = in.nextInt(); // distance to the next checkpoint
int nextCheckpointAngle = in.nextInt(); // angle between your pod orientation and the direction of the next checkpoint
int opponentX = in.nextInt();
int opponentY = in.nextInt();
// Write an action using System.out.println()
// To debug: System.err.println("Debug messages...");
// You have to output the target position
// followed by the power (0 <= thrust <= 100)
// i.e.: "x y thrust"
final Point nextCheckpoint = new Point(nextCheckpointX, nextCheckpointY);
final Point currentPosition = new Point(x, y);
final Point enemyPosition = new Point(opponentX, opponentY);
if (visitedCheckPoints.size() > 1 && enemyInRange(currentPosition, enemyPosition)) {
ramEnemyShip(currentPosition, enemyPosition);
} else {
cruise(currentPosition, nextCheckpoint, nextCheckpointAngle);
}
if (!nextCheckpoint.equals(lastCheckpoint)) {
from = lastCheckpoint;
}
lastCheckpoint = nextCheckpoint;
}
}
private static void ramEnemyShip(final Point currentPosition, final Point enemyPosition) {
sailToDestination((enemyPosition.x), enemyPosition.y, "100");
}
private static boolean enemyInRange(final Point currentPosition, final Point enemyPosition) {
return getDistant(currentPosition, enemyPosition) <= 1000;
}
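    /**
     * Default navigation: thrust at full power while roughly facing the next checkpoint,
     * ease off when close to it, and spend the single BOOST on the longest leg between
     * checkpoints once the first lap has been completed.
     */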
private static void cruise(
final Point currentPosition,
final Point nextCheckpoint,
final int nextCheckpointAngle) {
if (initialPoint == null) {
initialPoint = currentPosition;
}
int thrust = isWithinAngle(nextCheckpointAngle) ? 100 : 0;
String power = String.valueOf(thrust);
visitedCheckPoints.add(nextCheckpoint);
System.err.println(
"Checkpoint added:" + " nextCheckpointX=" + nextCheckpoint.x + ", nextCheckpointY=" + nextCheckpoint.y);
for (final Point visitedCheckPoint : visitedCheckPoints) {
System.err.println("Visited checkpoint: (" + visitedCheckPoint.x + ", " + visitedCheckPoint.y + ")");
}
if (shouldSlowDown(currentPosition, nextCheckpoint)) {
power = String.valueOf(35);
}
if (hasFinishedOneLap(nextCheckpoint) &&
isLongestDistant(from, nextCheckpoint) &&
isWithinSharpAngle(nextCheckpointAngle) &&
!hasBoosted) {
power = "BOOST";
hasBoosted = true;
System.err.println("Boosted!!!");
}
sailToDestination(nextCheckpoint.x, nextCheckpoint.y, power);
}
private static boolean shouldSlowDown(
final Point currentPosition,
final Point nextCheckpoint) {
return getDistant(currentPosition, nextCheckpoint) < 1000;
}
private static void sailToDestination(final int nextCheckpointX, final int nextCheckpointY, final String power) {
System.out.println(nextCheckpointX + " " + nextCheckpointY + " " + power);
System.err.println("Thrust:" + power);
}
private static boolean isWithinAngle(final int nextCheckpointAngle) {
return -90 < nextCheckpointAngle && nextCheckpointAngle < 90;
}
private static boolean isWithinSharpAngle(final int nextCheckpointAngle) {
return -15 < nextCheckpointAngle && nextCheckpointAngle < 15;
}
private static boolean hasFinishedOneLap(final Point point) {
if (hasFinishedOneLap) {
return true;
}
if (initialPoint == null) { return false; }
hasFinishedOneLap = getDistant(initialPoint, point) <= 600;
return hasFinishedOneLap;
}
private static boolean isLongestDistant(final Point from, final Point endPoint) {
if (from == null) {
return false;
}
System.err.println("Start Point: (" + from.x + ", " + from.y + "); End Point: ("
+ endPoint.x + ", " + endPoint.y + ") ");
double dist = getDistant(from, endPoint);
System.err.println("dist=" + dist + ", longestDist=" + longestDist);
if (dist >= longestDist) {
longestDist = dist;
return true;
}
return false;
}
private static double getDistant(final Point from, final Point endPoint) {
return Math.sqrt(Math.pow(from.x - endPoint.x, 2) + Math.pow(from.y - endPoint.y, 2));
}
private static class Point {
final int x;
final int y;
private Point(final int t1, final int t2) {
this.x = t1;
this.y = t2;
}
@Override
public boolean equals(final Object o) {
if (this == o) { return true; }
if (!(o instanceof Point)) { return false; }
final Point point = (Point) o;
return x == point.x &&
y == point.y;
}
@Override
public int hashCode() {
return Objects.hash(x, y);
}
}
}
| Ericliu001/basic-algorithms | src/test/java/com/basicalgorithms/coding_games/CodersStrikeBack.java | Java | apache-2.0 | 6,186 |
'use strict';
var path = require('path');
var util = require('util');
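// Illustrative Gruntfile wiring (target name and paths are examples only, not
// part of this plugin):
//
//   grunt.initConfig({
//     vjslanguages: {
//       defaults: {
//         files: [{ src: ['lang/*.json'], dest: 'dist/lang' }]
//       }
//     }
//   });
//   grunt.loadNpmTasks('grunt-videojs-languages');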
module.exports = function(grunt) {
grunt.registerMultiTask('vjslanguages', 'A Grunt plugin for compiling VideoJS language assets.', function() {
var createLanguageFile = function(languageName, languageData, jsFilePath) {
var jsTemplate = 'videojs.addLanguage("' + languageName + '",' + JSON.stringify(languageData,null,' ') + ');';
grunt.file.write(jsFilePath, jsTemplate);
grunt.log.writeln('- [' + languageName +'] Language Built. File "' + jsFilePath + '" created.');
};
this.files.forEach(function(f) {
var languageName, languageData, jsFilePath;
// Multiple Files Case
if(util.isArray(f.src)){
for(var i =0; i < f.src.length; i++) {
languageName = path.basename(f.src[i], '.json');
languageData = grunt.file.readJSON(f.src[i]);
jsFilePath = path.join(f.dest, languageName + '.js');
createLanguageFile(languageName, languageData, jsFilePath);
}
}
// Singular File Case
else {
languageName = path.basename(f.src, '.json');
languageData = grunt.file.readJSON(f.src);
jsFilePath = path.join(f.dest, languageName + '.js');
createLanguageFile(languageName, languageData, jsFilePath);
}
});
});
}; | videojs/grunt-videojs-languages | tasks/videojs_languages.js | JavaScript | apache-2.0 | 1,338 |
//
// WJAuthorView.h
//  糗百框架 (QiuBai framework)
//
// Created by 孙文君 on 15/6/30.
//  Copyright (c) 2015 sunwenjun. All rights reserved.
//
#import <UIKit/UIKit.h>
@class WJFrameAuthor,WJAuthor;
@interface WJAuthorView : UIView
//@property(nonatomic,strong)WJAuthor *author;
@property(nonatomic,strong)WJFrameAuthor *authorFrame;
@end
| shuizhuqing/WJQiuBai | 糗百框架(API数据)/糗百框架/WJAuthorView.h | C | apache-2.0 | 337 |
//
// AddHomeViewController.h
// AirTouch
//
// Created by kenny on 15/8/12.
//  Copyright (c) 2015 Honeywell. All rights reserved.
//
#import "BaseViewController.h"
#import "IContainerViewControllerDelegate.h"
@interface AddHomeViewController : BaseViewController
@property (nonatomic, weak) id<IContainerViewControllerDelegate> delegate;
@end
| CanIFuckYou/AirTouch | AirTouch/Framework/App/Controller/HomeManagementVC/AddHomeVC/AddHomeViewController.h | C | apache-2.0 | 355 |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Filter
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: BaseName.php 8064 2008-02-16 10:58:39Z thomas $
*/
/**
* @see Zend_Filter_Interface
*/
require_once 'Zend/Filter/Interface.php';
/**
* @category Zend
* @package Zend_Filter
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Filter_BaseName implements Zend_Filter_Interface
{
/**
* Defined by Zend_Filter_Interface
*
* Returns basename($value)
*
* @param string $value
* @return string
*/
public function filter($value)
{
return basename((string) $value);
}
}
| ankuradhey/dealtrip | library/Zend/Filter/BaseName.php | PHP | apache-2.0 | 1,408 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegexp(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Underlying exception:\n No such file or directory',
catalog.add, 'example')
@patch('prestoadmin.catalog.get')
@patch('prestoadmin.catalog.files.exists')
@patch('prestoadmin.catalog.ensure_directory_exists')
@patch('prestoadmin.catalog.os.path.exists')
def test_gather_connectors(self, path_exists, ensure_dir_exists,
files_exists, get_mock):
fabric.api.env.host = 'any_host'
path_exists.return_value = False
files_exists.return_value = True
catalog.gather_catalogs('local_config_dir')
get_mock.assert_called_once_with(
constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True)
# if remote catalog dir does not exist
get_mock.reset_mock()
files_exists.return_value = False
results = catalog.gather_catalogs('local_config_dir')
self.assertEqual([], results)
self.assertFalse(get_mock.called)
| prestodb/presto-admin | tests/unit/test_catalog.py | Python | apache-2.0 | 9,157 |
$(document).ready(function(){
$("#inc_tab #tb1").removeClass();
$("#inc_tab #tb4").addClass("active");
$("#user_name").blur(function(){
var user_name = $.trim($(this).val());
$(this).val(user_name);
if (user_name.length==0){
$(this).parent().find("#user_name_null_warn").show();
$(this).parent().find("#user_name_exist_warn").hide();
return;
}
$(this).parent().find("#user_name_null_warn").hide();
var user_id = $(this).parent().find("#user_id").val();
var obj = $(this).parent().find("#user_name_exist_warn");
$.post(app.global.variable.base_path +"user/name/verify", {user_id:user_id, user_name:user_name}, function(data) {
if(data.toString().length > 0){
obj.show();
}else{
obj.hide();
}
})
})
$('#user_save_cancel').click(function(){
window.location.href=app.global.variable.base_path +'user/list';
})
selectRoleChange();
})
function selectRoleChange(){
var obj = $("#select_role_id");
var role_id_obj = obj.parent().find("#role_id");
$("#role_authority_"+role_id_obj.val()).hide();
$("#role_authority_"+obj.val()).show();
role_id_obj.val(obj.val());
}
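// Form-level validation before saving: any visible .functionWarn message or an
// empty user name blocks the save.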
function user_sava_check(){
var obj = $("#user_editor_form");
var valid = true;
obj.find(".functionWarn").each(function(){
if($(this).is(":visible")){
valid = false;
}
})
    // user name
var user_name = obj.find("#user_name").val();
if(isSpace(user_name)){
obj.find("#user_name_null_warn").show();
valid = false;
}else{
obj.find("#user_name_null_warn").hide();
}
return valid;
}
| wxiwei/manage | src/main/webapp/WEB-INF/js/user/userEditor.js | JavaScript | apache-2.0 | 1,536 |
# Entyloma ficariae A.A. Fisch. Waldh., 1877 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Bull. Soc. nat. Moscou, Biol. 52: 309 (1877)
#### Original name
Entyloma ficariae A.A. Fisch. Waldh., 1877
### Remarks
null | mdoering/backbone | life/Fungi/Basidiomycota/Exobasidiomycetes/Entylomatales/Entylomataceae/Entyloma/Entyloma ficariae/README.md | Markdown | apache-2.0 | 278 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.client;
import com.google.inject.AbstractModule;
import com.google.inject.Inject;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* @author Simon Thoresen Hult
*/
public class ClientDriverTestCase {
@Test
public void requireThatApplicationInstanceInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(new MyApplication(module));
assertEquals(5, module.state);
}
@Test
public void requireThatApplicationClassInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(MyApplication.class, module);
assertEquals(5, module.state);
}
private static class MyApplication implements ClientApplication {
final MyModule module;
@Inject
MyApplication(MyModule module) {
this.module = module;
module.state = 1;
}
@Override
public void start() {
if (++module.state != 2) {
throw new IllegalStateException();
}
}
@Override
public void run() {
if (++module.state != 3) {
throw new IllegalStateException();
}
}
@Override
public void stop() {
if (++module.state != 4) {
throw new IllegalStateException();
}
}
@Override
public void destroy() {
if (++module.state != 5) {
throw new IllegalStateException();
}
}
}
private static class MyModule extends AbstractModule {
int state = 0;
@Override
protected void configure() {
bind(MyModule.class).toInstance(this);
}
}
}
| vespa-engine/vespa | jdisc_core/src/test/java/com/yahoo/jdisc/client/ClientDriverTestCase.java | Java | apache-2.0 | 1,946 |
Public Class mysqlSettings
Private Sub btnSalvar_Click(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles btnSalvar.Click
        'Store the values in their respective global variables
_varglobal.ip = txtIp.Text
_varglobal.pass = txtPass.Text
_varglobal.user = txtUser.Text
        'Close the form
Me.Close()
End Sub
Private Sub mysqlSettings_Load(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles MyBase.Load
        'Store the connection parameters in their respective variables
_varglobal.ip = txtIp.Text
_varglobal.pass = txtPass.Text
_varglobal.user = txtUser.Text
End Sub
End Class | cecortes/Rekor | Vb/rekorRfidWebCam/rekorRfidWebCam/mysqlSettings.vb | Visual Basic | apache-2.0 | 714 |
package sample.multiversion;
public interface Core {
String getVersion();
String getDependencyVersion();
}
| omacarena/only-short-poc | java.multiversion/v1/src/main/sample/multiversion/Core.java | Java | apache-2.0 | 117 |
package org.example;
import org.camunda.bpm.spring.boot.starter.annotation.EnableProcessApplication;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
@EnableProcessApplication("dynamic-tenant-designation")
public class CamundaApplication {
public static void main(String... args) {
SpringApplication.run(CamundaApplication.class, args);
}
}
| camunda/camunda-consulting | snippets/dynamic-tenant-designation/src/main/java/org/example/CamundaApplication.java | Java | apache-2.0 | 445 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<helpdesk.org>.
=cut
use warnings;
use strict;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::Sequence qw(reverse_comp expand);
use Getopt::Long;
use Fcntl qw( LOCK_SH LOCK_EX );
use Progress;
# A hard-coded hash containing the subroutines to call for each check
my %ALLELE_PREDICATE = (
4 => \&novariation_alleles,
13 => \&illegal_character_alleles,
14 => \&ambiguous_alleles
);
my %SUBSNP_PREDICATE = (
);
my %VARIATION_ALLELE_PREDICATE = (
11 => \&mismatched_allele_string,
12 => \&multiple_alleles
);
my %VARIATION_FEATURE_PREDICATE = (
1 => \&multiple_mappings,
2 => \&reference_mismatch,
3 => \&multiple_alleles,
5 => \&no_mapping,
13 => \&illegal_character_alleles,
14 => \&ambiguous_alleles,
15 => \&inconsistent_coords
);
# Accepted alleles
my @ACCEPTED_ALLELE = (
'HGMD_MUTATION'
);
my %AMBIG_REGEXP_HASH = (
'M' => '[AC]',
'R' => '[AG]',
'W' => '[AT]',
'S' => '[CG]',
'Y' => '[CT]',
'K' => '[GT]',
'V' => '[ACG]',
'H' => '[ACT]',
'D' => '[AGT]',
'B' => '[CGT]',
'X' => '[ACGT]',
'N' => '[ACGT]'
);
# Get a string containing the possible ambiguity nucleotides
my $AMBIGUITIES = join("",keys(%AMBIG_REGEXP_HASH));
# Add the code for uracil in case some allele should have that
%AMBIG_REGEXP_HASH = (%AMBIG_REGEXP_HASH,('U' => 'T'));
# The maximum number of mappings before the variation is flagged
my $MAX_MAP_WEIGHT = 3;
# The maximum number of different alleles a variation is permitted to have
my $MAX_ALLELES = 3;
# The option definitions
my @defs = (
'registry_file=s',
'qc=s@',
'output_dir=s',
'variation_id_range=s',
'task_management_file=s',
'task_id=i',
'species=s',
'group=s',
'scope=s',
'parallelize=i',
'source_id=i@',
'help!'
);
# Parse the command line and store the results in the options hash
my %options;
GetOptions(\%options,@defs);
# Check that we got a registry configuration file
die ("You need to provide a registry configuration file") unless (defined($options{'registry_file'}));
# Check that a species was specified
die ("You need to provide a species") unless (defined($options{'species'}));
# If no output dir was specified, use the current working one
my $outdir = $options{'output_dir'};
$outdir ||= "";
# Append a slash if we have a directory
if (length($outdir)) {
$outdir .= "/";
}
# Load the registry and get a DBAdaptor to the variation database we're processing (or the group specified on the command line)
my $registry = 'Bio::EnsEMBL::Registry';
$registry->load_all($options{'registry_file'});
my $species = $options{'species'};
my $group = $options{'group'};
$group ||= 'variation';
my $dba = $registry->get_DBAdaptor($species,$group) or die ("Could not get a DBAdaptor for $species - $group");
# If the option to parallelize was specified, we will chunk the task into the desired sizes and create the corresponding task management file
if ($options{'parallelize'}) {
# Check that a desired task_management_file was specified
die ("You must specify a file where the task parameters will be written") unless (defined($options{'task_management_file'}));
my $chunksize = $options{'parallelize'};
# Get the min and max variation_ids and simply assume that the data is evenly distributed on average w.r.t. variation_id
my $stmt = qq{
SELECT
MIN(variation_id),
MAX(variation_id)
FROM
variation
};
my ($min_id,$max_id) = @{$dba->dbc->db_handle->selectall_arrayref($stmt)->[0]};
# Divide the id range into chunks and write to management file
open (TASK,">",$options{'task_management_file'}) or die ("Could not open " . $options{'task_management_file'} . " for writing");
my $offset = $min_id;
my $task_id = 0;
while ($offset <= $max_id) {
$task_id++;
print TASK join("\t",($task_id,$offset,($offset+$chunksize-1))) . "\n";
$offset += $chunksize;
}
close(TASK);
print STDOUT "The task has been divided into chunks of $chunksize. The parameters have been written to " . $options{'task_management_file'} . ". You should submit this as a job array over the indexes 1-$task_id\n";
exit(0);
}
# We will probably need a core dbadaptor as well so create one
my $dba_core = $registry->get_DBAdaptor($species,'core') or warn ("Could not get a DBAdaptor for $species - core");
# Get the range of variations we should work on. This can either be specified by:
# 1. A variation_id range specified on the command line
# 2. Provided in a task management file specified on the command line. This overrides a specified range.
# If this is the case then a job index corresponding to a row in the task management file must be specified.
# This can either be done on the command line or through the LSB_JOBINDEX environment variable (which gets set by LSF in a jobarray submission).
# The latter overrides the former.
# 3. None of the above, in which case all variations will be processed
my ($lower_id,$upper_id);
if (defined($options{'task_management_file'})) {
my $job_index = $ENV{'LSB_JOBINDEX'};
$job_index ||= $options{'task_id'};
# Check that we have a job index
die ("A task management file was specified but not a task index, can not proceed") unless (defined($job_index));
# Get the variation_id range for this job index
open(TASK,"<",$options{'task_management_file'}) or die ("Could not open task management file " . $options{'task_management_file'} . " for parsing");
while (<TASK>) {
chomp;
my @arr = split(/\s+/,$_);
($lower_id,$upper_id) = ($arr[1],$arr[2]) if ($arr[0] == $job_index);
}
close(TASK);
# Check that we could find the range
die ("Could not find the corresponding variation_id range for task index $job_index") unless (defined($lower_id) && defined($upper_id));
# Print the job assignment to STDERR
print STDERR "Job $job_index works on range $lower_id - $upper_id ";
}
# Else, we check for a comma-separated range
elsif (defined($options{'variation_id_range'})) {
($lower_id,$upper_id) = split(",",$options{'variation_id_range'});
}
my $failed_variation_file = $outdir . "failed_variation.txt";
my $failed_allele_file = $outdir . "failed_allele.txt";
my $loadfile = {
'variation' => $failed_variation_file,
'allele' => $failed_allele_file
};
### Now, get the data from the database
# Get the haplotype seq region ids
our $HAPLOTYPE_IDS = get_haplotype_seq_region_ids($dba_core);
# Get the failed description ids
my %failed_description = %{get_failed_description($dba,$options{'qc'})};
my @failed_description_ids = keys(%failed_description);
# A hash to hold the variation_ids and the tests that it failed
my %failed_variation;
# A hash to hold the allele_ids and the tests that it failed
my %failed_allele;
# Check if we should do the checking for variations
my $scope = lc($options{'scope'});
$scope ||= 'variation';
if ($scope eq 'variation') {
    # Loop over the variation features and flag them as appropriate
    # If a variation_id range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"v");
# If a source_id condition was specified, append this to the condition
$condition .= " AND " . get_source_condition($options{'source_id'},"v");
my $stmt = qq{
SELECT
v.variation_id,
v.name,
vf.variation_feature_id,
vf.seq_region_id,
vf.seq_region_start,
vf.seq_region_end,
vf.seq_region_strand,
vf.allele_string,
ras.ref_allele,
ra.seq_region_strand,
'variation'
FROM
variation v LEFT JOIN
variation_feature vf ON (
vf.variation_id = v.variation_id
) LEFT JOIN
(
tmp_ref_allele ra JOIN
tmp_ref_allele_seq ras ON (
ras.ref_allele_seq_id = ra.ref_allele_seq_id
)
) ON (
ra.variation_feature_id = vf.variation_feature_id
)
WHERE
$condition
ORDER BY
v.variation_id;
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the query
$sth->execute();
# Loop over the variation features
my @vf_arr;
my @row = $sth->fetchrow_array();
while (@row) {
# Add the row to the array grouping the same variation_ids into an array
push(@vf_arr,[@row]);
# Get the next row
my @nextrow = $sth->fetchrow_array();
        # If we are switching variation or we have no more rows, do the checks
if (!scalar(@nextrow) || $nextrow[0] != $row[0]) {
            # Execute the predicates
if (scalar(@vf_arr)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($VARIATION_FEATURE_PREDICATE{$_}) && $VARIATION_FEATURE_PREDICATE{$_}->(\@vf_arr,$cache));
} @failed_description_ids;
$failed_variation{$row[0]} = \@failed if (scalar(@failed));
}
# Empty the variation array
splice(@vf_arr);
}
@row = @nextrow;
}
}
if ($scope eq 'allele') {
    # Loop over the variation features and flag them as appropriate
    # If a variation_id range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"a");
my $stmt = qq{
SELECT
a.allele_id,
a.subsnp_id,
a.variation_id,
vf.seq_region_id,
vf.allele_string,
vf.seq_region_end,
vf.seq_region_strand,
a.allele,
NULL,
NULL,
'allele'
FROM
allele a LEFT JOIN
variation_feature vf ON (
vf.variation_id = a.variation_id
)
WHERE
$condition
ORDER BY
a.variation_id,
a.subsnp_id;
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the query
$sth->execute();
# Loop over the joined rows. We'll send off checks both for individual alleles, subsnps and variations
my @variation;
my @subsnp;
my @allele;
my @row = $sth->fetchrow_array();
while (@row) {
# Variation array
push(@variation,[@row]);
push(@subsnp,[@row]);
push(@allele,[@row]);
# Get the next row
my @nextrow = $sth->fetchrow_array();
        # If we are switching allele or we have no more rows, do the checks for alleles
if (!scalar(@nextrow) || $nextrow[0] != $row[0]) {
            # Execute the predicates
if (scalar(@allele)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($ALLELE_PREDICATE{$_}) && $ALLELE_PREDICATE{$_}->(\@allele,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
map {$failed_allele{$_->[0]} = \@failed} @allele;
}
}
# Empty the array
splice(@allele);
}
        # If we are switching subsnp or we have no more rows, do the checks for subsnp
if (!scalar(@nextrow) || $nextrow[1] != $row[1]) {
            # Execute the predicates
if (scalar(@subsnp)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($SUBSNP_PREDICATE{$_}) && $SUBSNP_PREDICATE{$_}->(\@subsnp,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
map {$failed_allele{$_->[0]} = \@failed} @subsnp;
}
}
# Empty the array
splice(@subsnp);
}
        # If we are switching variation or we have no more rows, do the checks for variations
if (!scalar(@nextrow) || $nextrow[2] != $row[2]) {
            # Execute the predicates
if (scalar(@variation)) {
my @failed;
# Cache the results in a hash
my $cache = {};
map {
push(@failed,$_) if (exists($VARIATION_ALLELE_PREDICATE{$_}) && $VARIATION_ALLELE_PREDICATE{$_}->(\@variation,$cache));
} @failed_description_ids;
if (scalar(@failed)) {
$failed_variation{$row[2]} = \@failed;
}
}
# Empty the variation feature array
splice(@variation);
}
@row = @nextrow;
}
}
foreach my $scope (('variation','allele')) {
my %h;
if ($scope eq 'variation') {
%h = %failed_variation;
}
else {
%h = %failed_allele;
}
# Only dump to file if we have any results
next unless (scalar(keys(%h)));
# Open the loadfile (append) and get a lock on it
open(LOAD,">>",$loadfile->{$scope}) or die ("Could not open loadfile " . $loadfile->{$scope} . " for writing");
flock(LOAD,LOCK_EX);
    # Write the ids and the failed_description_id to the load file
foreach my $id (keys(%h)) {
map {print LOAD "$id\t$_\n"} @{$h{$id}};
}
close(LOAD);
}
# If we finished successfully, print that to STDERR
print STDERR " Finished ok!\n";
# Check if a variation is mapped to more than the maximum allowed number of (non-haplotype) genomic locations
sub multiple_mappings {
my $variation_features = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 1;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _multiple_mappings($variation_features,$cache);
}
return $cache->{$failed_description_id};
}
sub _multiple_mappings {
my $variation_features = shift;
my $cache = shift;
my $count = 0;
foreach my $vf (@{$variation_features}) {
next unless (defined($vf->[3]));
next if (grep {$vf->[3] == $_} @{$HAPLOTYPE_IDS});
$count++;
return 1 if ($count > $MAX_MAP_WEIGHT);
}
return 0;
}
# Check if the allele string provided by dbSNP is in agreement with the alleles of all subsnps belonging to the variation
sub mismatched_allele_string {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 11;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _mismatched_allele_string($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _mismatched_allele_string {
my $rows = shift;
my $cache = shift;
# If this variation has no mapping, it won't have any allele string associated
return 0 if (no_mapping($rows,$cache));
# Get the unique alleles from the subsnps
my %ss = map {$_->[7] => 1} @{$rows};
# Get the unique alleles from the variation feature allele string
my %vf = map {map {$_ => 1} split(/\//,$_->[4])} @{$rows};
# Check that all subsnp alleles are present in the allele_string
map {return 1 unless (exists($vf{$_}))} keys(%ss);
# Check that all allele_string alleles are present in the subsnp alleles
map {return 1 unless (exists($ss{$_}))} keys(%vf);
return 0;
}
# Check if a variation has no mappings
sub no_mapping {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 5;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _no_mapping($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _no_mapping {
my $rows = shift;
my $cache = shift;
return (defined($rows->[0][3]) ? 0 : 1);
}
# Check if the coordinates given for a variation are not compatible with its allele string
sub inconsistent_coords {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 15;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _inconsistent_coords($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _inconsistent_coords {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it shouldn't be classified as inconsistent
return 0 if (no_mapping($rows,$cache));
# If this variation contains illegal characters, there's no point in checking for inconsistent coordinates
return 0 if (illegal_character_alleles($rows,$cache));
    # The only things we accept are if the position is a deletion or if at least one of the alleles is of the same length as the position
foreach my $variation_feature (@{$rows}) {
expand(\$variation_feature->[7]);
my $ref_len = ($variation_feature->[5] - $variation_feature->[4] + 1);
        # Matching lengths, or a deletion/insertion in the allele string?
next if (grep {($_ eq '-' && $ref_len == 0) || (length($_) == $ref_len)} split(/\//,$variation_feature->[7]));
# Else, this is inconsistent coordinates
return 1;
}
return 0;
}
# Check if the allele string alleles do not agree with the reference sequence
sub reference_mismatch {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 2;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _reference_mismatch($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _reference_mismatch {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it shouldn't be classified as a mismatch
return 0 if (no_mapping($rows,$cache));
# Get the unique reference alleles
my $ref_allele = _unique_reference_allele($rows);
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
# Loop over the allele strings and match them to the reference alleles
foreach my $as (@{$allele_string}) {
expand(\$as);
map {
my $allele = $_;
return 0 if (grep {mismatch($allele,$_) == 0} @{$ref_allele});
} split(/\//,$as);
}
# Nothing matched
return 1;
}
# Check if a (possibly ambiguous) sequence mismatches another
sub mismatch {
my $allele = shift;
my $reference = shift;
# If they match
return 0 if ($allele eq $reference);
    # Return a mismatch if the allele doesn't contain ambiguity codes
return 1 unless (ambiguous(\$allele));
# Turn the sequence into regexps if necessary
ambiguity_to_regexp(\$allele);
# By now, the allele should only contain nucleotide characters and brackets.
# Do a regexp matching
return 0 if ($reference =~ m/^$allele$/);
return 1;
}
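# Illustrative sketch (assumes the ambiguity tables defined earlier map e.g. 'R' => '[AG]'):
#   mismatch('A', 'A');   # 0: identical alleles
#   mismatch('R', 'A');   # 0: 'R' expands to the regexp '[AG]', which matches 'A'
#   mismatch('R', 'C');   # 1: '[AG]' does not match 'C'
#   mismatch('T', 'A');   # 1: no ambiguity codes involved, plain mismatch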
# Check if the allele string contains too many single nucleotide alleles
sub multiple_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = ($rows->[0][10] eq 'variation' ? 3 : 12);
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _multiple_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _multiple_alleles {
my $rows = shift;
my $cache = shift;
# If this variation has no mappings, it won't have any allele strings
#return 0 if (no_mapping($rows,$cache) && $rows->[0][10] eq 'variation');
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
foreach my $a_string (@{$allele_string}) {
expand(\$a_string);
my $count = grep {$_ =~ m/^[ACGT]$/i} split(/\//,$a_string);
return 1 if ($count > $MAX_ALLELES);
}
return 0;
}
# Check if a variation's allele strings contain ambiguity codes
sub ambiguous_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 14;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _ambiguous_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _ambiguous_alleles {
my $rows = shift;
my $cache = shift;
my @alleles;
    # Check if we are dealing with a variation feature or alleles
if ($rows->[0][10] eq 'variation') {
# If this variation has no mappings, it won't have any illegal characters in the allele_string
#return 0 if (no_mapping($rows,$cache));
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
map {push(@alleles,split(/\//,$_))} @{$allele_string};
}
else {
push(@alleles,$rows->[0][7]);
}
foreach my $allele (@alleles) {
        # Expand the allele
expand(\$allele);
        # Report the allele if it contains ambiguity codes
return 1 if (ambiguous(\$allele));
}
return 0;
}
# Check if an allele contains ambiguity codes, but make sure that it doesn't contain 'illegal' characters
sub ambiguous {
my $allele_ref = shift;
return (${$allele_ref} =~ m/[$AMBIGUITIES]/i && !illegal_characters($allele_ref));
}
# Check if a variation's allele strings contain illegal characters
sub illegal_character_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 13;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _illegal_character_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _illegal_character_alleles {
my $rows = shift;
my $cache = shift;
my @alleles;
    # Check if we are dealing with a variation feature or alleles
if ($rows->[0][10] eq 'variation') {
# If this variation has no mappings, it won't have any illegal characters in the allele_string
#return 0 if (no_mapping($rows,$cache));
# Get the unique allele strings
my $allele_string = _unique_allele_string($rows);
map {push(@alleles,split(/\//,$_))} @{$allele_string};
}
else {
push(@alleles,$rows->[0][7]);
}
foreach my $allele (@alleles) {
        # Expand the allele
expand(\$allele);
        # Report the allele if it contains 'illegal' characters
return 1 if (illegal_characters(\$allele));
}
return 0;
}
# Check if any of the alleles is 'NOVARIATION'
sub novariation_alleles {
my $rows = shift;
my $cache = shift;
# If the result of this test has been cached return it
my $failed_description_id = 4;
unless (exists($cache->{$failed_description_id})) {
$cache->{$failed_description_id} = _novariation_alleles($rows,$cache);
}
return $cache->{$failed_description_id};
}
sub _novariation_alleles {
my $rows = shift;
my $cache = shift;
return 1 if (grep {novariation(\$_->[7])} @{$rows});
return 0;
}
# Keep a list of accepted alleles that won't be flagged as containing illegal characters. Check if an allele is in this list
sub accepted {
my $allele_ref = shift;
map {return 1 if ($_ eq ${$allele_ref})} @ACCEPTED_ALLELE;
return 0;
}
# Check if an allele is 'NOVARIATION'
sub novariation {
my $allele_ref = shift;
return (${$allele_ref} eq 'NOVARIATION');
}
# Check if an allele contains 'illegal' characters
sub illegal_characters {
my $allele_ref = shift;
return (${$allele_ref} =~ m/[^ACGTU\-$AMBIGUITIES]/i && !accepted($allele_ref) && !novariation($allele_ref));
}
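# Illustrative sketch (assumes $AMBIGUITIES and @ACCEPTED_ALLELE defined earlier in this script):
#   illegal_characters(\'ACGT');   # false: only plain nucleotide characters
#   illegal_characters(\'A+G');    # true: '+' falls outside the allowed character set
#   alleles listed in @ACCEPTED_ALLELE and 'NOVARIATION' are never flagged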
# Replace ambiguity codes in a sequence with a suitable regular expression
sub ambiguity_to_regexp {
my $seq_ref = shift;
${$seq_ref} =~ s/([U$AMBIGUITIES])/$AMBIG_REGEXP_HASH{$1}/ig;
}
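# Illustrative sketch (assumes %AMBIG_REGEXP_HASH, defined earlier, maps e.g. 'R' => '[AG]'):
#   my $seq = 'ARG';
#   ambiguity_to_regexp(\$seq);                 # $seq becomes something like 'A[AG]G'
#   print "match\n" if ('AAG' =~ m/^$seq$/);    # the expanded pattern now matches 'AAG'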
# Private method to get the unique allele strings from variation features
sub _unique_allele_string {
my $variation_features = shift;
# Check first if this is just a single row
return [$variation_features->[0][7]] if (scalar(@{$variation_features}) == 1);
# Get the unique allele strings
my %allele_string;
map {
$allele_string{$_->[7]}++;
} @{$variation_features};
my @unique = keys(%allele_string);
# If it is alleles rather than a variation we're looking at, create an allele string from the alleles
if ($variation_features->[0][10] eq 'allele') {
my $as = join("/",@unique);
@unique = ($as);
}
return \@unique;
}
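# Illustrative sketch (hypothetical rows; only columns 7 and 10 matter here):
#   my $vf_rows = [ [ (undef) x 7, 'A/G', undef, undef, 'variation' ],
#                   [ (undef) x 7, 'A/G', undef, undef, 'variation' ] ];
#   _unique_allele_string($vf_rows);   # -> ['A/G'] (duplicate allele strings collapsed)
#   my $al_rows = [ [ (undef) x 7, 'A', undef, undef, 'allele' ],
#                   [ (undef) x 7, 'G', undef, undef, 'allele' ] ];
#   _unique_allele_string($al_rows);   # -> ['A/G'] or ['G/A'] (unique alleles joined, hash order)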
# Private method to get the reference alleles from variation features
sub _unique_reference_allele {
my $variation_features = shift;
# Check first if this is just a single row
if (scalar(@{$variation_features}) == 1) {
# Flip the reference allele if necessary
        reverse_comp(\$variation_features->[0][8]) unless ($variation_features->[0][9] == $variation_features->[0][6]);
return [$variation_features->[0][8]];
}
# Get the unique reference alleles
my %ref_allele;
map {
# Flip the reference allele if necessary
reverse_comp(\$_->[8]) unless ($_->[9] == $_->[6]);
$ref_allele{$_->[8]}++;
} @{$variation_features};
my @unique = keys(%ref_allele);
return \@unique;
}
sub get_haplotype_seq_region_ids {
my $dba_core = shift;
    # The haplotype regions have the 'non reference' attrib. So do the LRGs, however, so filter by name to exclude those
my $stmt = qq{
SELECT
sr.seq_region_id
FROM
seq_region sr JOIN
seq_region_attrib sra ON (
sra.seq_region_id = sr.seq_region_id
) JOIN
attrib_type at ON (
at.attrib_type_id = sra.attrib_type_id
)
WHERE
sr.name NOT LIKE 'lrg%' AND
at.name LIKE 'non reference'
};
my $haplotype_ids = $dba_core->dbc->db_handle->selectcol_arrayref($stmt);
return $haplotype_ids;
}
sub get_range_condition {
my $lower_id = shift;
my $upper_id = shift;
my $alias = shift;
return " 1 " unless (defined($lower_id) && defined($upper_id));
return (defined($alias) ? " $alias\." : " ") . qq{variation_id BETWEEN $lower_id AND $upper_id };
}
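# Illustrative sketch of the conditions produced above (hypothetical ids):
#   get_range_condition();                 # -> " 1 " (no restriction)
#   get_range_condition(100, 200);         # -> " variation_id BETWEEN 100 AND 200 "
#   get_range_condition(100, 200, 'vf');   # -> " vf.variation_id BETWEEN 100 AND 200 "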
sub get_source_condition {
my $ids = shift;
my $alias = shift;
return " 1 " unless (defined($ids) && scalar(@{$ids}));
my $condition = " (" . (defined($alias) ? "$alias\." : "") . "source_id = " . join(" OR " . (defined($alias) ? "$alias\." : "") . "source_id = ",@{$ids}) . ") ";
return $condition;
}
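# Illustrative sketch (hypothetical source_ids):
#   get_source_condition([1,2], 'v');   # -> " (v.source_id = 1 OR v.source_id = 2) "
#   get_source_condition([3]);          # -> " (source_id = 3) "
#   get_source_condition([], 'v');      # -> " 1 " (no restriction)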
sub get_failed_description {
my $dba = shift;
my $ids = shift;
my $condition = " 1 ";
if (defined($ids) && scalar(@{$ids})) {
$condition = " failed_description_id IN (" . join(",",@{$ids}) . ") ";
}
my $stmt = qq{
SELECT
failed_description_id,
description
FROM
failed_description
WHERE
$condition
};
    # Get a hashref of the descriptions with the failed_description_id as key
my $description = $dba->dbc->db_handle->selectall_hashref($stmt,'failed_description_id');
return $description;
}
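# Illustrative sketch of the returned structure (ids and descriptions are hypothetical):
#   my $desc = get_failed_description($dba, [1,5]);
#   # $desc = {
#   #   1 => { failed_description_id => 1, description => 'Variation maps to more than 3 genomic locations' },
#   #   5 => { failed_description_id => 5, description => 'Variation does not map to the genome' },
#   # };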
=head
# Loop over the failed_description_ids and, for each, call the corresponding subroutine. Each check returns a hashref with arrayrefs of failed variation_ids and allele_ids, respectively, and we write these to the corresponding dump file.
foreach my $failed_description_id (keys(%{$failed_description})) {
# Print some progress information to stdout
print STDOUT Progress::location() . "\tFlagging variations/alleles for '" . $failed_description->{$failed_description_id}{'description'} . "' (failed_description_id = $failed_description_id)\n";
# Warn and skip if we don't know how to perform this check
unless (exists($PREDICATE{$failed_description_id})) {
warn ("Can not determine the corresponding subroutine to use for consistency check '" . $failed_description->{$failed_description_id}{'description'} . "' (failed_description_id = $failed_description_id). Skipping");
next;
}
# Call the checking subroutine
my $routine = $PREDICATE{$failed_description_id};
my $flagged = $routine->($dba,$lower_id,$upper_id,$dba_core);
# Loop over the flagged variations and alleles and write them to the dump files
foreach my $type (('variation','allele')) {
        # Get the ids that were returned
my $ids = $flagged->{$type} || [];
        # If no ids were flagged, skip
next unless (scalar(@{$ids}));
# Print some progress information to stdout
print STDOUT Progress::location() . "\tDumping flagged " . $type . "s to loadfile\n";
# Open the loadfile (append) and get a lock on it
open(LOAD,">>",$loadfile->{$type}) or die ("Could not open loadfile " . $loadfile->{$type} . " for writing");
flock(LOAD,LOCK_EX);
        # Write the ids and the failed_description_id to the load file
while (my $id = shift(@{$ids})) {
print LOAD join("\t",($id,$failed_description_id)) . "\n";
}
close(LOAD);
}
}
sub get_haplotype_condition {
my $dba_core = shift;
my $haplotype_seq_region_ids = get_haplotype_seq_region_ids($dba_core);
return " 1 " unless (defined($haplotype_seq_region_ids) && scalar(@{$haplotype_seq_region_ids}));
return " seq_region_id NOT IN (" . join(",",@{$haplotype_seq_region_ids}) . ") ";
}
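# Illustrative sketch (hypothetical seq_region_ids returned by get_haplotype_seq_region_ids):
#   get_haplotype_condition($dba_core);   # -> " seq_region_id NOT IN (101,102) "
#   # or " 1 " if no haplotype seq_regions were found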
# Check if a variation is mapped to more than the maximum allowed number of (non-haplotype) genomic locations
sub multiple_mappings {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
my $dba_core = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
$condition .= " AND " . get_haplotype_condition($dba_core);
my $stmt = qq{
SELECT
variation_id
FROM
variation_feature
WHERE
$condition
};
# Add the group and condition on maximum mappings
$stmt .= qq{
GROUP BY
variation_id
HAVING
COUNT(*) > $MAX_MAP_WEIGHT
};
# Execute the query and get the result
my $flagged_variation_ids = $dba->dbc->db_handle->selectcol_arrayref($stmt);
# Return a hashref with the result
return {'variation' => $flagged_variation_ids};
}
# Check whether the variation has at least one allele that matches the reference
sub reference_mismatch {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Statement to get the variation alleles
my $stmt = qq{
SELECT
allele_id,
subsnp_id,
variation_id,
allele
FROM
allele
WHERE
$condition
ORDER BY
variation_id
};
    my $sth = $dba->dbc->prepare($stmt);
    # Statement to get the reference sequence for each variation_feature
$stmt = qq{
SELECT
ras.ref_allele,
ra.seq_region_strand
FROM
variation_feature vf JOIN
tmp_ref_allele ra ON (
ra.variation_feature_id = vf.variation_feature_id
) JOIN
        tmp_ref_allele_seq ras ON (
ras.ref_allele_seq_id = ra.ref_allele_seq_id
)
WHERE
vf.variation_id = ?
};
my $seq_sth = $dba->dbc->prepare($stmt);
# Get the alleles
$sth->execute();
my ($allele_id,$subsnp_id,$variation_id,$allele,$refseq,$refstrand,$last_variation_id);
$sth->bind_columns(\$allele_id,\$subsnp_id,\$variation_id,\$allele);
$last_variation_id = -1;
while ($sth->fetch()) {
        # If we switched variation, get the possible reference sequences
        if ($variation_id != $last_variation_id) {
            $seq_sth->execute($variation_id);
            $seq_sth->bind_columns(\$refseq,\$refstrand);
            $last_variation_id = $variation_id;
        }
}
}
# Check that a variation does not have more than the maximum allowed number of single-nucleotide alleles (based on subsnps)
sub multiple_alleles {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
    # Statement to get the alleles for the variation_id range
my $stmt = qq{
SELECT
variation_id,
allele
FROM
allele
WHERE
$condition
ORDER BY
variation_id
};
my $sth = $dba->dbc->prepare($stmt);
# Execute the statement and bind the result columns
$sth->execute();
my ($variation_id,$allele,$last_variation_id,$last_flagged);
$sth->bind_columns(\$variation_id,\$allele);
my %alleles;
$last_variation_id = -1;
# An array to hold the variation_id for flagged variations
my @flagged;
    # Loop over the alleles
while ($sth->fetch()) {
        # Reset the allele hash and the flagged status if we are moving to a new variation_id
if ($variation_id != $last_variation_id) {
%alleles = ();
$last_flagged = 0;
$last_variation_id = $variation_id;
}
# Skip if we have already flagged this variation
next if ($last_flagged);
# If this is a single bp allele and it's not a deletion, add it to the hash
if (length($allele) == 1 && $allele ne '-') {
$alleles{$allele}++;
# Check the size of the hash and flag the variation if it is greater than the maximum number of allowed alleles
if (scalar(keys(%alleles)) > $MAX_ALLELES) {
push(@flagged,$variation_id);
$last_flagged = 1;
}
}
}
# Return the flagged variations
return {'variation' => \@flagged};
}
# Check that the variation has a mapping to the genome
sub no_mapping {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id,"v");
    # Statement to check for unmapped variations
my $stmt = qq{
SELECT
v.variation_id
FROM
variation v LEFT JOIN
variation_feature vf ON (
vf.variation_id = v.variation_id
)
WHERE
$condition AND
vf.variation_feature_id IS NULL
};
# Execute the query and get the result
my $flagged_variation_ids = $dba->dbc->db_handle->selectcol_arrayref($stmt);
return {'variation' => $flagged_variation_ids};
}
# Check if this is a 'NoVariation'
sub no_variation {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
    # Statement to get the alleles that are 'NoVariation'
my $stmt = qq{
SELECT
allele_id,
variation_id
FROM
allele
WHERE
$condition AND
allele LIKE 'novariation'
};
return _check_allele_variation($dba,$stmt);
}
# Check that there are no disallowed (e.g. 'N') alleles
sub disallowed_alleles {
my $dba = shift;
my $lower_id = shift;
my $upper_id = shift;
    # If a range was specified, create a condition on it
my $condition = get_range_condition($lower_id,$upper_id);
# Define a number of regexps for things that we do allow and catch the rest
my $normal_variation = '^[-ACGT]+$';
my $microsatellite = '^\\([ACGTMRWSYKVHDBXN]+\\)[0-9]+';
my $novariation = '^NOVARIATION$';
my $hgmd = '^HGMD_MUTATION$';
    # Statement to catch non-accepted alleles
my $stmt = qq{
SELECT
allele_id,
variation_id
FROM
allele
WHERE
$condition AND
allele NOT REGEXP '$normal_variation' AND
allele NOT REGEXP '$microsatellite' AND
allele NOT REGEXP '$novariation' AND
allele NOT REGEXP '$hgmd'
};
return _check_allele_variation($dba,$stmt);
}
# 'internal' function that checks alleles and whether all alleles for the corresponding variation have failed
sub _check_allele_variation {
my $dba = shift;
my $stmt = shift;
my $sth = $dba->dbc->prepare($stmt);
$sth->execute();
my ($allele_id,$variation_id);
$sth->bind_columns(\$allele_id,\$variation_id);
my %variation_ids;
my @flagged_alleles;
my @flagged_variations;
# Loop over the alleles and flag them. At the same time, count the number of alleles for each variation_id that has this allele string
while ($sth->fetch()) {
push(@flagged_alleles,$allele_id);
$variation_ids{$variation_id}++;
}
# In order to determine if the variation should be flagged as well as the allele, count the number of alleles for each variation and see if it corresponds to the number of failed alleles
$stmt = qq{
SELECT
COUNT(*)
FROM
allele
WHERE
variation_id = ?
};
$sth = $dba->dbc->prepare($stmt);
# Loop over the variaiton_ids concerned
while (my ($variation_id,$count) = each(%variation_ids)) {
$sth->execute($variation_id);
# If the count matches the number of alleles, we should flag the variation as well
if ($count == $sth->fetchrow_arrayref()->[0]) {
push(@flagged_variations,$variation_id);
}
}
# Return the flagged variations and alleles
return {'variation' => \@flagged_variations, 'allele' => \@flagged_alleles};
}
=cut
| dbolser/ensembl-variation | scripts/import/quality_check.pl | Perl | apache-2.0 | 40,355 |