code stringlengths 3–1.05M | repo_name stringlengths 4–116 | path stringlengths 3–942 | language stringclasses 30 values | license stringclasses 15 values | size int32 3–1.05M |
---|---|---|---|---|---|
#ifndef ALIMESTENDER_H
#define ALIMESTENDER_H
////////////////////////////////////////////////////////////////////////////
// Tender for Multiplicity and Event Shape group //
// Tender configuration //
// Authors: //
// Cristi Andrei <[email protected]> //
// Andrei Herghelegiu <[email protected]> //
// Madalina Tarzila <[email protected]> //
// //
////////////////////////////////////////////////////////////////////////////
#ifndef ALIANALYSISTASKSE_H
#include <AliAnalysisTaskSE.h>
#endif
class AliAnalysisFilter;
class AliPIDCombined;
//class AliESDevent;
class AliMCEvent;
class TObjArray;
class AliMESeventInfo;
class AliAnalysisUtils;
class AliPPVsMultUtils;
class AliMEStender : public AliAnalysisTaskSE
{
public:
class AliMESconfigTender : public TObject
{
public:
friend class AliMEStender;
enum EMESconfigEventCuts{
kNoEC = 0 // no event cuts
,k7TeV // vertex and trigger cuts for 7TeV
,k13TeV // vertex and trigger cuts for 13TeV
};
enum EMESconfigTrackCuts{
kNoTC = 0 // no track cuts
,kStandardITSTPCTrackCuts2010 // 2010 standard cuts
,kStandardITSTPCTrackCuts2011 // 2011 standard cuts
};
enum EMESconfigPIDpriors{
kNoPP = 0 // no priors
,kTPC // TPC priors
,kIterative // iterative priors
};
AliMESconfigTender();
void Print(Option_t *o="") const; // *MENU*
protected:
UChar_t fTrackCuts; // track cuts selector
UChar_t fEventCuts; // event cuts selector
UChar_t fPIDpriors; // PID prior selector
ClassDef(AliMESconfigTender, 1)
};
//_______________________________________________________________________________________
enum AliMEStenderSteering{
kMCdata = BIT(18) // MC presence bit
,kPP = BIT(19) // pp/pA data
,kPostProcess = BIT(20) // run post processing of QA histos
};
enum EMEStenderQA{
kConfig = 0
,kEfficiency
,kEvInfo
,kTrkInfo
,kMCevInfo
,kMCtrkInfo
,kNqa
};
AliMEStender();
AliMEStender(const char *name);
virtual ~AliMEStender();
//static Int_t MakeMultiplicityESD(AliESDEvent* const, const char *opt);
static Int_t MakeMultiplicityMC(AliMCEvent* const);
static Int_t MakeMultiplicity0408MC(AliMCEvent* const);
static Int_t MakeMultiplicityV0MMC(AliMCEvent* const);
virtual Bool_t ConfigTask(AliMESconfigTender::EMESconfigEventCuts ec,
AliMESconfigTender::EMESconfigTrackCuts tc,
AliMESconfigTender::EMESconfigPIDpriors pp);
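// Illustrative configuration call (not from the original code; the task
// pointer name is made up, the enum values come from AliMESconfigTender above):
//   task->ConfigTask(AliMEStender::AliMESconfigTender::k13TeV,
//                    AliMEStender::AliMESconfigTender::kStandardITSTPCTrackCuts2011,
//                    AliMEStender::AliMESconfigTender::kIterative);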
Bool_t HasMCdata() const { return TestBit(kMCdata);};
virtual void SetDebugLevel(Int_t level);
virtual void SetMCdata(Bool_t mc = kTRUE);
virtual void SetPriors();
virtual Bool_t PostProcess();
virtual void UserCreateOutputObjects();
virtual void UserExec(Option_t *opt);
protected:
Bool_t BuildQAHistos();
private:
AliMEStender(const AliMEStender&);
AliMEStender& operator=(const AliMEStender&);
AliMESconfigTender fConfig; // current configuration of task
AliAnalysisFilter *fTrackFilter; // working track filter
AliPIDCombined *fPIDcomb; // working PID combined service
TObjArray *fTracks; //!
AliMESeventInfo *fEvInfo; //!
TObjArray *fMCtracks; //!
AliMESeventInfo *fMCevInfo; //!
AliPPVsMultUtils *fUtils;
ClassDef(AliMEStender, 5) // Tender task for the Multi Event Shape
};
#endif
| amaringarcia/AliPhysics | PWGLF/SPECTRA/MultEvShape/AliMEStender.h | C | bsd-3-clause | 3,846 |
/*===================================================================
BlueBerry Platform
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include "berryObjectStringMap.h"
namespace berry {
ObjectStringMap::ObjectStringMap()
{
}
ObjectStringMap::ObjectStringMap(const QHash<QString, QString> &other)
: QHash<QString,QString>(other), Object()
{
}
}
| NifTK/MITK | Plugins/org.blueberry.core.runtime/src/berryObjectStringMap.cpp | C++ | bsd-3-clause | 688 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_UI_BUBBLE_BUBBLE_UTIL_H_
#define IOS_CHROME_BROWSER_UI_BUBBLE_BUBBLE_UTIL_H_
#import "ios/chrome/browser/ui/bubble/bubble_view.h"
namespace bubble_util {
// The fixed distance from the leading edge of the bubble to the anchor point if
// leading aligned, and from the trailing edge of the bubble to the anchor point
// if trailing aligned.
CGFloat BubbleAlignmentOffset();
// Calculate the coordinates of the point of the bubble's arrow based on the
// |targetFrame| of the target UI element and the bubble's |arrowDirection|. The
// returned point is in the same coordinate system as |targetFrame|.
CGPoint AnchorPoint(CGRect targetFrame, BubbleArrowDirection arrowDirection);
// Calculate the maximum size of the bubble such that it stays within its
// superview's bounding coordinate space and does not overlap the other side of
// the anchor point. |anchorPoint| is the point on the target UI element the
// bubble is anchored at in the bubble's superview's coordinate system.
// |direction| is the bubble's direction. |alignment| is the bubble's alignment.
// |boundingSize| is the size of the superview. Uses the ICU default locale of
// the device to determine whether the language is RTL.
CGSize BubbleMaxSize(CGPoint anchorPoint,
BubbleArrowDirection direction,
BubbleAlignment alignment,
CGSize boundingSize);
// Calculate the bubble's frame. |anchorPoint| is the point on the UI element
// the bubble is pointing to. |size| is the size of the bubble. |direction| is
// the direction the bubble's arrow is pointing. |alignment| is the alignment of
// the anchor (either leading, centered, or trailing). |boundingWidth| is the
// width of the bubble's superview. Uses the ICU default locale of the device to
// determine whether the language is RTL.
CGRect BubbleFrame(CGPoint anchorPoint,
CGSize size,
BubbleArrowDirection direction,
BubbleAlignment alignment,
CGFloat boundingWidth);
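// Illustrative call sequence (not part of the original header; the geometry
// values and the exact enum constant names are assumptions for this sketch):
//   CGRect target = CGRectMake(100, 50, 44, 44);
//   CGPoint anchor = bubble_util::AnchorPoint(target, BubbleArrowDirectionUp);
//   CGSize maxSize = bubble_util::BubbleMaxSize(anchor, BubbleArrowDirectionUp,
//                                               BubbleAlignmentCenter,
//                                               superviewSize);
//   CGRect frame = bubble_util::BubbleFrame(anchor, bubbleSize,
//                                           BubbleArrowDirectionUp,
//                                           BubbleAlignmentCenter,
//                                           superviewSize.width);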
} // namespace bubble_util
#endif // IOS_CHROME_BROWSER_UI_BUBBLE_BUBBLE_UTIL_H_
| scheib/chromium | ios/chrome/browser/ui/bubble/bubble_util.h | C | bsd-3-clause | 2,319 |
package dotty.tools
package dotc
package core
package tasty
import util.Positions._
import collection.mutable
import TastyBuffer.Addr
object PositionUnpickler {
type AddrToPosition = mutable.HashMap[Addr, Position]
}
/** Unpickler for tree positions */
class PositionUnpickler(reader: TastyReader) {
import PositionUnpickler._
import reader._
def unpickle(): (Position, AddrToPosition) = {
val positions = new mutable.HashMap[Addr, Position] // Dotty deviation: Can't use new AddrToPosition here. TODO: fix this!
val sourceLength = readNat()
def readDelta() = if (isAtEnd) 0 else readInt()
var curIndex: Addr = Addr(readDelta())
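// Each record packs up to three deltas; the sign of the first two read
// values encodes which components are present (see the cases below):
//   delta2 <= 0 -> start = delta1, end = -delta2, address delta read next
//   delta1 <  0 -> start = 0,      end = -delta1, address delta = delta2
//   otherwise   -> start = delta1, end = 0,       address delta = delta2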
while (!isAtEnd) {
val delta1 = readDelta()
val delta2 = readDelta()
val (startDelta, endDelta, indexDelta) =
if (delta2 <= 0) (delta1, -delta2, readDelta())
else if (delta1 < 0) (0, -delta1, delta2)
else (delta1, 0, delta2)
positions(curIndex) = Position(startDelta, endDelta, startDelta)
// make non-synthetic position; will be made synthetic by normalization.
curIndex += indexDelta
}
(Position(0, sourceLength), positions)
}
}
| folone/dotty | src/dotty/tools/dotc/core/tasty/PositionUnpickler.scala | Scala | bsd-3-clause | 1,165 |
SUBROUTINE DLAORD( JOB, N, X, INCX )
*
* -- LAPACK auxiliary routine (version 3.1) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
CHARACTER JOB
INTEGER INCX, N
* ..
* .. Array Arguments ..
DOUBLE PRECISION X( * )
* ..
*
* Purpose
* =======
*
* DLAORD sorts the elements of a vector x in increasing or decreasing
* order.
*
* Arguments
* =========
*
* JOB (input) CHARACTER
* = 'I': Sort in increasing order
* = 'D': Sort in decreasing order
*
* N (input) INTEGER
* The length of the vector X.
*
* X (input/output) DOUBLE PRECISION array, dimension
* (1+(N-1)*INCX)
* On entry, the vector of length n to be sorted.
* On exit, the vector x is sorted in the prescribed order.
*
* INCX (input) INTEGER
* The spacing between successive elements of X. INCX >= 0.
*
* =====================================================================
*
* .. Local Scalars ..
INTEGER I, INC, IX, IXNEXT
DOUBLE PRECISION TEMP
* ..
* .. External Functions ..
LOGICAL LSAME
EXTERNAL LSAME
* ..
* .. Intrinsic Functions ..
INTRINSIC ABS
* ..
* .. Executable Statements ..
*
INC = ABS( INCX )
IF( LSAME( JOB, 'I' ) ) THEN
*
* Sort in increasing order
*
DO 20 I = 2, N
IX = 1 + ( I-1 )*INC
10 CONTINUE
IF( IX.EQ.1 )
$ GO TO 20
IXNEXT = IX - INC
IF( X( IX ).GT.X( IXNEXT ) ) THEN
GO TO 20
ELSE
TEMP = X( IX )
X( IX ) = X( IXNEXT )
X( IXNEXT ) = TEMP
END IF
IX = IXNEXT
GO TO 10
20 CONTINUE
*
ELSE IF( LSAME( JOB, 'D' ) ) THEN
*
* Sort in decreasing order
*
DO 40 I = 2, N
IX = 1 + ( I-1 )*INC
30 CONTINUE
IF( IX.EQ.1 )
$ GO TO 40
IXNEXT = IX - INC
IF( X( IX ).LT.X( IXNEXT ) ) THEN
GO TO 40
ELSE
TEMP = X( IX )
X( IX ) = X( IXNEXT )
X( IXNEXT ) = TEMP
END IF
IX = IXNEXT
GO TO 30
40 CONTINUE
END IF
RETURN
*
* End of DLAORD
*
END
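*
*     Illustrative call (not part of the reference routine; the data
*     values are made up for the example):
*
*        DOUBLE PRECISION V( 4 )
*        DATA V / 3.0D0, 1.0D0, 2.0D0, 4.0D0 /
*        CALL DLAORD( 'I', 4, V, 1 )
*
*     afterwards V holds 1.0, 2.0, 3.0, 4.0 (increasing order).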
| maxhutch/magma | testing/lin/dlaord.f | FORTRAN | bsd-3-clause | 2,489 |
/**
* CartoDB orange header popup styles
*/
div.cartodb-popup.header.orange div.cartodb-popup-header {
background:url('../img/headers.png') no-repeat -756px -40px;
}
div.cartodb-popup.header.orange div.cartodb-popup-header h4 {
color:#CC2929;
}
div.cartodb-popup.header.orange div.cartodb-popup-header span.separator {
background:#CC2929;
}
div.cartodb-popup.header.orange a.cartodb-popup-close-button {
background:url('../img/headers.png') no-repeat -982px -40px;
}
div.cartodb-popup.header.orange a.cartodb-popup-close-button:hover {
background-position:-982px -66px;
}
/* NEW CartoDB 2.0 orange header popups */
div.cartodb-popup.v2.header.orange div.cartodb-popup-header {
background: none;
background: -ms-linear-gradient(top, #FF6825, #FF3333);
background: -o-linear-gradient(right, #FF6825, #FF3333);
background: -webkit-linear-gradient(top, #FF6825, #FF3333);
background: -moz-linear-gradient(right, #FF6825, #FF3333);
-ms-filter: "progid:DXImageTransform.Microsoft.Gradient(startColorStr='#FF6825',endColorStr='#FF3333',GradientType=0)";
}
div.cartodb-popup.v2.header.orange a.cartodb-popup-close-button {
background:white;
}
div.cartodb-popup.v2.header.orange a.cartodb-popup-close-button:before,
div.cartodb-popup.v2.header.orange a.cartodb-popup-close-button:after {
background:#CC2929;
}
/* Hello IE */
@media \0screen\,screen\9 {
div.cartodb-popup.v2.header.orange a.cartodb-popup-close-button {
color:#CC2929;
}
} | CartoDB/cartodb.js | themes/css/infowindow/cartodb-infowindow-header-orange.css | CSS | bsd-3-clause | 1,479 |
/*
* Copyright (c) 2006 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of KTH nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "gsskrb5_locl.h"
struct range {
size_t lower;
size_t upper;
};
struct range tests[] = {
{ 0, 1040 },
{ 2040, 2080 },
{ 4080, 5000 },
{ 8180, 8292 },
{ 9980, 10010 }
};
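/*
 * For each candidate token size in the range, ask _gssapi_wrap_size_cfx()
 * how much payload fits, then check with _gsskrb5cfx_wrap_length_cfx()
 * that wrapping that maximum payload never yields a token larger than the
 * size we started from.
 */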
static void
test_range(const struct range *r, int integ,
krb5_context context, krb5_crypto crypto)
{
krb5_error_code ret;
size_t size, rsize;
struct gsskrb5_ctx ctx;
for (size = r->lower; size < r->upper; size++) {
size_t cksumsize;
uint16_t padsize;
OM_uint32 minor;
OM_uint32 max_wrap_size;
ctx.crypto = crypto;
ret = _gssapi_wrap_size_cfx(&minor,
&ctx,
context,
integ,
0,
size,
&max_wrap_size);
if (ret)
krb5_errx(context, 1, "_gsskrb5cfx_max_wrap_length_cfx: %d", ret);
if (max_wrap_size == 0)
continue;
ret = _gsskrb5cfx_wrap_length_cfx(context,
crypto,
integ,
0,
max_wrap_size,
&rsize, &cksumsize, &padsize);
if (ret)
krb5_errx(context, 1, "_gsskrb5cfx_wrap_length_cfx: %d", ret);
if (size < rsize)
krb5_errx(context, 1,
"size (%d) < rsize (%d) for max_wrap_size %d",
(int)size, (int)rsize, (int)max_wrap_size);
}
}
static void
test_special(krb5_context context, krb5_crypto crypto,
int integ, size_t testsize)
{
krb5_error_code ret;
size_t rsize;
OM_uint32 max_wrap_size;
size_t cksumsize;
uint16_t padsize;
struct gsskrb5_ctx ctx;
OM_uint32 minor;
ctx.crypto = crypto;
ret = _gssapi_wrap_size_cfx(&minor,
&ctx,
context,
integ,
0,
testsize,
&max_wrap_size);
if (ret)
krb5_errx(context, 1, "_gsskrb5cfx_max_wrap_length_cfx: %d", ret);
if (ret)
krb5_errx(context, 1, "_gsskrb5cfx_max_wrap_length_cfx: %d", ret);
ret = _gsskrb5cfx_wrap_length_cfx(context,
crypto,
integ,
0,
max_wrap_size,
&rsize, &cksumsize, &padsize);
if (ret)
krb5_errx(context, 1, "_gsskrb5cfx_wrap_length_cfx: %d", ret);
if (testsize < rsize)
krb5_errx(context, 1,
"testsize (%d) < rsize (%d) for max_wrap_size %d",
(int)testsize, (int)rsize, (int)max_wrap_size);
}
int
main(int argc, char **argv)
{
krb5_keyblock keyblock;
krb5_error_code ret;
krb5_context context;
krb5_crypto crypto;
int i;
ret = krb5_init_context(&context);
if (ret)
errx(1, "krb5_context_init: %d", ret);
ret = krb5_generate_random_keyblock(context,
ENCTYPE_AES256_CTS_HMAC_SHA1_96,
&keyblock);
if (ret)
krb5_err(context, 1, ret, "krb5_generate_random_keyblock");
ret = krb5_crypto_init(context, &keyblock, 0, &crypto);
if (ret)
krb5_err(context, 1, ret, "krb5_crypto_init");
test_special(context, crypto, 1, 60);
test_special(context, crypto, 0, 60);
for (i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) {
test_range(&tests[i], 1, context, crypto);
test_range(&tests[i], 0, context, crypto);
}
krb5_free_keyblock_contents(context, &keyblock);
krb5_crypto_destroy(context, crypto);
krb5_free_context(context);
return 0;
}
| GaloisInc/hacrypto | src/C/FreeBSD/FreeBSD-10.0/heimdal/lib/gssapi/krb5/test_cfx.c | C | bsd-3-clause | 4,712 |
<html>
<script>
document.cookie = "SessionID=1234567890";
window.test_result = JSON.stringify(document.cookie.indexOf('SessionID'));
</script>
</html>
| scheib/chromium | headless/test/data/cookie.html | HTML | bsd-3-clause | 151 |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// test sized operator delete replacement.
// Note that sized delete operator definitions below are simply ignored
// when sized deallocation is not supported, e.g., prior to C++14.
// UNSUPPORTED: c++14, c++17, c++2a
// UNSUPPORTED: sanitizer-new-delete
#include <new>
#include <cstddef>
#include <cstdlib>
#include <cassert>
#include "test_macros.h"
int unsized_delete_called = 0;
int unsized_delete_nothrow_called = 0;
int sized_delete_called = 0;
void operator delete(void* p) TEST_NOEXCEPT
{
++unsized_delete_called;
std::free(p);
}
void operator delete(void* p, const std::nothrow_t&) TEST_NOEXCEPT
{
++unsized_delete_nothrow_called;
std::free(p);
}
void operator delete(void* p, std::size_t) TEST_NOEXCEPT
{
++sized_delete_called;
std::free(p);
}
int main(int, char**)
{
int *x = new int(42);
DoNotOptimize(x);
assert(0 == unsized_delete_called);
assert(0 == unsized_delete_nothrow_called);
assert(0 == sized_delete_called);
delete x;
DoNotOptimize(x);
assert(1 == unsized_delete_called);
assert(0 == sized_delete_called);
assert(0 == unsized_delete_nothrow_called);
return 0;
}
| endlessm/chromium-browser | third_party/llvm/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete11.pass.cpp | C++ | bsd-3-clause | 1,533 |
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_SOCKET_MOCK_CLIENT_SOCKET_POOL_MANAGER_H_
#define NET_SOCKET_MOCK_CLIENT_SOCKET_POOL_MANAGER_H_
#pragma once
#include "base/basictypes.h"
#include "net/socket/client_socket_pool_manager.h"
#include "net/socket/client_socket_pool_manager_impl.h"
namespace net {
class MockClientSocketPoolManager : public ClientSocketPoolManager {
public:
MockClientSocketPoolManager();
virtual ~MockClientSocketPoolManager();
// Sets "override" socket pools that get used instead.
void SetTransportSocketPool(TransportClientSocketPool* pool);
void SetSSLSocketPool(SSLClientSocketPool* pool);
void SetSocketPoolForSOCKSProxy(const HostPortPair& socks_proxy,
SOCKSClientSocketPool* pool);
void SetSocketPoolForHTTPProxy(const HostPortPair& http_proxy,
HttpProxyClientSocketPool* pool);
void SetSocketPoolForSSLWithProxy(const HostPortPair& proxy_server,
SSLClientSocketPool* pool);
// ClientSocketPoolManager methods:
virtual void FlushSocketPools() OVERRIDE;
virtual void CloseIdleSockets() OVERRIDE;
virtual TransportClientSocketPool* GetTransportSocketPool() OVERRIDE;
virtual SSLClientSocketPool* GetSSLSocketPool() OVERRIDE;
virtual SOCKSClientSocketPool* GetSocketPoolForSOCKSProxy(
const HostPortPair& socks_proxy) OVERRIDE;
virtual HttpProxyClientSocketPool* GetSocketPoolForHTTPProxy(
const HostPortPair& http_proxy) OVERRIDE;
virtual SSLClientSocketPool* GetSocketPoolForSSLWithProxy(
const HostPortPair& proxy_server) OVERRIDE;
virtual base::Value* SocketPoolInfoToValue() const OVERRIDE;
private:
typedef internal::OwnedPoolMap<HostPortPair, TransportClientSocketPool*>
TransportSocketPoolMap;
typedef internal::OwnedPoolMap<HostPortPair, SOCKSClientSocketPool*>
SOCKSSocketPoolMap;
typedef internal::OwnedPoolMap<HostPortPair, HttpProxyClientSocketPool*>
HTTPProxySocketPoolMap;
typedef internal::OwnedPoolMap<HostPortPair, SSLClientSocketPool*>
SSLSocketPoolMap;
scoped_ptr<TransportClientSocketPool> transport_socket_pool_;
scoped_ptr<SSLClientSocketPool> ssl_socket_pool_;
SOCKSSocketPoolMap socks_socket_pools_;
HTTPProxySocketPoolMap http_proxy_socket_pools_;
SSLSocketPoolMap ssl_socket_pools_for_proxies_;
DISALLOW_COPY_AND_ASSIGN(MockClientSocketPoolManager);
};
} // namespace net
#endif // NET_SOCKET_MOCK_CLIENT_SOCKET_POOL_MANAGER_H_
| aYukiSekiguchi/ACCESS-Chromium | net/socket/mock_client_socket_pool_manager.h | C | bsd-3-clause | 2,636 |
from wagtail.wagtailsearch.views.frontend import search | benemery/wagtail | wagtail/wagtailsearch/views/__init__.py | Python | bsd-3-clause | 55 |
/*!
* "Fork me on GitHub" CSS ribbon v0.1.1 | MIT License
* https://github.com/simonwhitaker/github-fork-ribbon-css
*/
/* Left will inherit from right (so we don't need to duplicate code) */
.github-fork-ribbon {
/* The right and left classes determine the side we attach our banner to */
position: absolute;
/* Add a bit of padding to give some substance outside the "stitching" */
padding: 2px 0;
/* Set the base colour */
background-color: #a00;
/* Set a gradient: transparent black at the top to almost-transparent black at the bottom */
background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(0, 0, 0, 0)), to(rgba(0, 0, 0, 0.15)));
background-image: -webkit-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
background-image: -moz-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
background-image: -ms-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
background-image: -o-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
/* Add a drop shadow */
-webkit-box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
-moz-box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
/* Set the font */
font: 700 13px "Helvetica Neue", Helvetica, Arial, sans-serif;
z-index: 9999;
pointer-events: auto;
}
.github-fork-ribbon a,
.github-fork-ribbon a:hover {
/* Set the text properties */
color: #fff;
text-decoration: none;
text-shadow: 0 -1px rgba(0, 0, 0, 0.5);
text-align: center;
/* Set the geometry. If you fiddle with these you'll also need
to tweak the top and right values in .github-fork-ribbon. */
width: 200px;
line-height: 20px;
/* Set the layout properties */
display: inline-block;
padding: 2px 0;
/* Add "stitching" effect */
border-width: 1px 0;
border-style: dotted;
border-color: #fff;
border-color: rgba(255, 255, 255, 0.7);
}
.github-fork-ribbon-wrapper {
width: 150px;
height: 150px;
position: absolute;
overflow: hidden;
top: 0;
z-index: 9999;
pointer-events: none;
}
.github-fork-ribbon-wrapper.fixed {
position: fixed;
}
.github-fork-ribbon-wrapper.left {
left: 0;
}
.github-fork-ribbon-wrapper.right {
right: 0;
}
.github-fork-ribbon-wrapper.left-bottom {
position: fixed;
top: inherit;
bottom: 0;
left: 0;
}
.github-fork-ribbon-wrapper.right-bottom {
position: fixed;
top: inherit;
bottom: 0;
right: 0;
}
.github-fork-ribbon-wrapper.right .github-fork-ribbon {
top: 42px;
right: -43px;
-webkit-transform: rotate(45deg);
-moz-transform: rotate(45deg);
-ms-transform: rotate(45deg);
-o-transform: rotate(45deg);
transform: rotate(45deg);
}
.github-fork-ribbon-wrapper.left .github-fork-ribbon {
top: 42px;
left: -43px;
-webkit-transform: rotate(-45deg);
-moz-transform: rotate(-45deg);
-ms-transform: rotate(-45deg);
-o-transform: rotate(-45deg);
transform: rotate(-45deg);
}
.github-fork-ribbon-wrapper.left-bottom .github-fork-ribbon {
top: 80px;
left: -43px;
-webkit-transform: rotate(45deg);
-moz-transform: rotate(45deg);
-ms-transform: rotate(45deg);
-o-transform: rotate(45deg);
transform: rotate(45deg);
}
.github-fork-ribbon-wrapper.right-bottom .github-fork-ribbon {
top: 80px;
right: -43px;
-webkit-transform: rotate(-45deg);
-moz-transform: rotate(-45deg);
-ms-transform: rotate(-45deg);
-o-transform: rotate(-45deg);
transform: rotate(-45deg);
}
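/* Illustrative markup these classes are meant to style (an assumption based
   on the class names, not part of the original stylesheet):
   <div class="github-fork-ribbon-wrapper right">
     <div class="github-fork-ribbon">
       <a href="https://github.com/user/repo">Fork me on GitHub</a>
     </div>
   </div>
*/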
| BiG-CZ/notebook_data_demo | webpage/jupyter_notebook_jekyll_theme/assets/css/gh-fork-ribbon.css | CSS | bsd-3-clause | 3,546 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc -->
<title>Predicate</title>
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Predicate";
}
}
catch(err) {
}
//-->
var methods = {"i0":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/firstinspires/ftc/robotcore/external/NonConst.html" title="annotation in org.firstinspires.ftc.robotcore.external"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/firstinspires/ftc/robotcore/external/Supplier.html" title="interface in org.firstinspires.ftc.robotcore.external"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/firstinspires/ftc/robotcore/external/Predicate.html" target="_top">Frames</a></li>
<li><a href="Predicate.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.firstinspires.ftc.robotcore.external</div>
<h2 title="Interface Predicate" class="title">Interface Predicate<T></h2>
</div>
<div class="contentContainer">
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public interface <span class="typeNameLabel">Predicate<T></span></pre>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/firstinspires/ftc/robotcore/external/Predicate.html#test-T-">test</a></span>(<a href="../../../../../org/firstinspires/ftc/robotcore/external/Predicate.html" title="type parameter in Predicate">T</a> t)</code> </td>
</tr>
</table>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="test-java.lang.Object-">
<!-- -->
</a><a name="test-T-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>test</h4>
<pre>boolean test(<a href="../../../../../org/firstinspires/ftc/robotcore/external/Predicate.html" title="type parameter in Predicate">T</a> t)</pre>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/firstinspires/ftc/robotcore/external/NonConst.html" title="annotation in org.firstinspires.ftc.robotcore.external"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/firstinspires/ftc/robotcore/external/Supplier.html" title="interface in org.firstinspires.ftc.robotcore.external"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/firstinspires/ftc/robotcore/external/Predicate.html" target="_top">Frames</a></li>
<li><a href="Predicate.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| BlueDevilsCode/5968 | ftc_app-master/doc/javadoc/org/firstinspires/ftc/robotcore/external/Predicate.html | HTML | mit | 7,781 |
package org.knowm.xchange.bitfinex.v1.dto.account;
import com.fasterxml.jackson.annotation.JsonAnyGetter;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.Generated;
@JsonInclude(JsonInclude.Include.NON_NULL)
@Generated("org.jsonschema2pojo")
@JsonPropertyOrder({
"margin_balance",
"tradable_balance",
"unrealized_pl",
"unrealized_swap",
"net_value",
"required_margin",
"leverage",
"margin_requirement",
"margin_limits",
"message"
})
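/**
 * Jackson-mapped DTO for the Bitfinex v1 margin-information response,
 * exposing margin balance, tradable balance, unrealized P/L and swap, net
 * value, required margin, leverage, margin requirement and a list of margin
 * limits (description inferred from the field names below).
 */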
public class BitfinexMarginInfosResponse {
@JsonProperty("margin_balance")
private BigDecimal marginBalance;
@JsonProperty("tradable_balance")
private BigDecimal tradableBalance;
@JsonProperty("unrealized_pl")
private BigDecimal unrealizedPl;
@JsonProperty("unrealized_swap")
private BigDecimal unrealizedSwap;
@JsonProperty("net_value")
private BigDecimal netValue;
@JsonProperty("required_margin")
private BigDecimal requiredMargin;
@JsonProperty("leverage")
private BigDecimal leverage;
@JsonProperty("margin_requirement")
private BigDecimal marginRequirement;
@JsonProperty("margin_limits")
private List<BitfinexMarginLimit> marginLimits = new ArrayList<BitfinexMarginLimit>();
@JsonProperty("message")
private String message;
@JsonIgnore private Map<String, Object> additionalProperties = new HashMap<String, Object>();
@JsonProperty("margin_balance")
public BigDecimal getMarginBalance() {
return marginBalance;
}
@JsonProperty("margin_balance")
public void setMarginBalance(BigDecimal marginBalance) {
this.marginBalance = marginBalance;
}
@JsonProperty("tradable_balance")
public BigDecimal getTradableBalance() {
return tradableBalance;
}
@JsonProperty("tradable_balance")
public void setTradableBalance(BigDecimal tradableBalance) {
this.tradableBalance = tradableBalance;
}
@JsonProperty("unrealized_pl")
public BigDecimal getUnrealizedPl() {
return unrealizedPl;
}
@JsonProperty("unrealized_pl")
public void setUnrealizedPl(BigDecimal unrealizedPl) {
this.unrealizedPl = unrealizedPl;
}
@JsonProperty("unrealized_swap")
public BigDecimal getUnrealizedSwap() {
return unrealizedSwap;
}
@JsonProperty("unrealized_swap")
public void setUnrealizedSwap(BigDecimal unrealizedSwap) {
this.unrealizedSwap = unrealizedSwap;
}
@JsonProperty("net_value")
public BigDecimal getNetValue() {
return netValue;
}
@JsonProperty("net_value")
public void setNetValue(BigDecimal netValue) {
this.netValue = netValue;
}
@JsonProperty("required_margin")
public BigDecimal getRequiredMargin() {
return requiredMargin;
}
@JsonProperty("required_margin")
public void setRequiredMargin(BigDecimal requiredMargin) {
this.requiredMargin = requiredMargin;
}
@JsonProperty("leverage")
public BigDecimal getLeverage() {
return leverage;
}
@JsonProperty("leverage")
public void setLeverage(BigDecimal leverage) {
this.leverage = leverage;
}
@JsonProperty("margin_requirement")
public BigDecimal getMarginRequirement() {
return marginRequirement;
}
@JsonProperty("margin_requirement")
public void setMarginRequirement(BigDecimal marginRequirement) {
this.marginRequirement = marginRequirement;
}
@JsonProperty("margin_limits")
public List<BitfinexMarginLimit> getMarginLimits() {
return marginLimits;
}
@JsonProperty("margin_limits")
public void setMarginLimits(List<BitfinexMarginLimit> marginLimits) {
this.marginLimits = marginLimits;
}
@JsonProperty("message")
public String getMessage() {
return message;
}
@JsonProperty("message")
public void setMessage(String message) {
this.message = message;
}
@JsonAnyGetter
public Map<String, Object> getAdditionalProperties() {
return this.additionalProperties;
}
@JsonAnySetter
public void setAdditionalProperty(String name, Object value) {
this.additionalProperties.put(name, value);
}
@Override
public String toString() {
return "BitfinexMarginInfosResponse{"
+ "marginBalance="
+ marginBalance
+ ", tradableBalance="
+ tradableBalance
+ ", unrealizedPl="
+ unrealizedPl
+ ", unrealizedSwap="
+ unrealizedSwap
+ ", netValue="
+ netValue
+ ", requiredMargin="
+ requiredMargin
+ ", leverage="
+ leverage
+ ", marginRequirement="
+ marginRequirement
+ ", marginLimits="
+ marginLimits
+ ", message='"
+ message
+ '\''
+ '}';
}
}
| stachon/XChange | xchange-bitfinex/src/main/java/org/knowm/xchange/bitfinex/v1/dto/account/BitfinexMarginInfosResponse.java | Java | mit | 4,991 |
# tests for things that are not implemented, or have non-compliant behaviour
try:
import array
import ustruct
except ImportError:
print("SKIP")
raise SystemExit
# when super can't find self
try:
exec('def f(): super()')
except SyntaxError:
print('SyntaxError')
# store to exception attribute is not allowed
try:
ValueError().x = 0
except AttributeError:
print('AttributeError')
# array deletion not implemented
try:
a = array.array('b', (1, 2, 3))
del a[1]
except TypeError:
print('TypeError')
# slice with step!=1 not implemented
try:
a = array.array('b', (1, 2, 3))
print(a[3:2:2])
except NotImplementedError:
print('NotImplementedError')
# containment, looking for integer not implemented
try:
print(1 in array.array('B', b'12'))
except NotImplementedError:
print('NotImplementedError')
# should raise type error
try:
print(set('12') >= '1')
except TypeError:
print('TypeError')
# should raise type error
try:
print(set('12') <= '123')
except TypeError:
print('TypeError')
# uPy raises TypeError, should be ValueError
try:
'%c' % b'\x01\x02'
except (TypeError, ValueError):
print('TypeError, ValueError')
# attributes/subscr not implemented
try:
print('{a[0]}'.format(a=[1, 2]))
except NotImplementedError:
print('NotImplementedError')
# str(...) with keywords not implemented
try:
str(b'abc', encoding='utf8')
except NotImplementedError:
print('NotImplementedError')
# str.rsplit(None, n) not implemented
try:
'a a a'.rsplit(None, 1)
except NotImplementedError:
print('NotImplementedError')
# str.endswith(s, start) not implemented
try:
'abc'.endswith('c', 1)
except NotImplementedError:
print('NotImplementedError')
# str subscr with step!=1 not implemented
try:
print('abc'[1:2:3])
except NotImplementedError:
print('NotImplementedError')
# bytes(...) with keywords not implemented
try:
bytes('abc', encoding='utf8')
except NotImplementedError:
print('NotImplementedError')
# bytes subscr with step!=1 not implemented
try:
b'123'[0:3:2]
except NotImplementedError:
print('NotImplementedError')
# tuple load with step!=1 not implemented
try:
()[2:3:4]
except NotImplementedError:
print('NotImplementedError')
# list store with step!=1 not implemented
try:
[][2:3:4] = []
except NotImplementedError:
print('NotImplementedError')
# list delete with step!=1 not implemented
try:
del [][2:3:4]
except NotImplementedError:
print('NotImplementedError')
# struct pack with too many args, not checked by uPy
print(ustruct.pack('bb', 1, 2, 3))
# struct pack with too few args, not checked by uPy
print(ustruct.pack('bb', 1))
# array slice assignment with unsupported RHS
try:
bytearray(4)[0:1] = [1, 2]
except NotImplementedError:
print('NotImplementedError')
# can't assign attributes to a function
def f():
pass
try:
f.x = 1
except AttributeError:
print('AttributeError')
| SHA2017-badge/micropython-esp32 | tests/misc/non_compliant.py | Python | mit | 2,980 |
#include "TestSuite.h"
#include "../Source/JSONNode.h"
void TestSuite::TestConstructors(void){
UnitTest::SetPrefix("TestCtor.cpp - Constructors");
#ifdef JSON_LIBRARY
JSONNODE * test = json_new(JSON_NULL);
assertEquals(json_type(test), JSON_NULL);
json_delete(test);
test = json_new_a(JSON_TEXT("hello"), JSON_TEXT("world"));
json_char * res = json_as_string(test);
assertCStringSame(res, JSON_TEXT("world"));
json_free(res);
res = json_name(test);
assertCStringSame(res, JSON_TEXT("hello"));
json_free(res);
assertEquals(json_type(test), JSON_STRING);
json_delete(test);
test = json_new_i(JSON_TEXT("hello"), 15);
#ifdef JSON_CASTABLE
res = json_as_string(test);
assertCStringSame(res, JSON_TEXT("15"));
json_free(res);
#endif
assertEquals_Primitive(json_as_int(test), 15);
assertEquals_Primitive(json_as_float(test), 15.0f);
res = json_name(test);
assertCStringSame(res, JSON_TEXT("hello"));
json_free(res);
assertEquals(json_type(test), JSON_NUMBER);
json_delete(test);
test = json_new_f(JSON_TEXT("hello"), 15.5f);
assertEquals_Primitive(json_as_int(test), 15);
assertEquals_Primitive(json_as_float(test), 15.5f);
#ifdef JSON_CASTABLE
res = json_as_string(test);
assertCStringSame(res, JSON_TEXT("15.5"));
json_free(res);
#endif
res = json_name(test);
assertCStringSame(res, JSON_TEXT("hello"));
json_free(res);
assertEquals(json_type(test), JSON_NUMBER);
json_delete(test);
test = json_new_b(JSON_TEXT("hello"), (int)true);
#ifdef JSON_CASTABLE
res = json_as_string(test);
assertCStringSame(res, JSON_TEXT("true"));
json_free(res);
#endif
assertEquals(json_as_bool(test), (int)true);
res = json_name(test);
assertCStringSame(res, JSON_TEXT("hello"));
json_free(res);
assertEquals(json_type(test), JSON_BOOL);
JSONNODE * cpy = json_copy(test);
assertTrue(json_equal(cpy, test));
json_delete(cpy);
json_delete(test);
#else
JSONNode test = JSONNode(JSON_NULL);
assertEquals(test.type(), JSON_NULL);
test = JSONNode(JSON_TEXT("hello"), JSON_TEXT("world"));
assertEquals(test, JSON_TEXT("world"));
assertEquals(test.as_string(), JSON_TEXT("world"));
assertEquals(test.name(), JSON_TEXT("hello"));
assertEquals(test.type(), JSON_STRING);
test = JSONNode(JSON_TEXT("hello"), 15);
assertEquals(test, 15);
#ifdef JSON_CASTABLE
assertEquals(test.as_string(), JSON_TEXT("15"));
#endif
assertEquals(test.as_int(), 15);
assertEquals(test.as_float(), 15.0f);
assertEquals(test.name(), JSON_TEXT("hello"));
assertEquals(test.type(), JSON_NUMBER);
test = JSONNode(JSON_TEXT("hello"), 15.5f);
assertEquals(test, 15.5f);
assertEquals(test.as_int(), 15);
assertEquals(test.as_float(), 15.5f);
#ifdef JSON_CASTABLE
assertEquals(test.as_string(), JSON_TEXT("15.5"));
#endif
assertEquals(test.name(), JSON_TEXT("hello"));
assertEquals(test.type(), JSON_NUMBER);
test = JSONNode(JSON_TEXT("hello"), true);
assertEquals(test, true);
#ifdef JSON_CASTABLE
assertEquals(test.as_string(), JSON_TEXT("true"));
#endif
assertEquals(test.as_bool(), true);
assertEquals(test.name(), JSON_TEXT("hello"));
assertEquals(test.type(), JSON_BOOL);
test = JSONNode(json_string(JSON_TEXT("hello")), JSON_TEXT('\0'));
assertEquals(test, 0);
#ifdef JSON_CASTABLE
assertEquals(test.as_string(), JSON_TEXT("0"));
#endif
assertEquals(test.as_int(), 0);
assertEquals(test.as_float(), 0.0f);
assertEquals(test.name(), JSON_TEXT("hello"));
assertEquals(test.type(), JSON_NUMBER);
#endif
}
| Hual/SAMPSON | src/libjson/_internal/TestSuite/TestCtors.cpp | C++ | mit | 3,725 |
/*=============================================================================
Boost.Wave: A Standard compliant C++ preprocessor library
Re2C based C++ lexer
http://www.boost.org/
Copyright (c) 2001-2012 Hartmut Kaiser. Distributed under the Boost
Software License, Version 1.0. (See accompanying file
LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(BOOST_CPP_RE2C_LEXER_HPP_B81A2629_D5B1_4944_A97D_60254182B9A8_INCLUDED)
#define BOOST_CPP_RE2C_LEXER_HPP_B81A2629_D5B1_4944_A97D_60254182B9A8_INCLUDED
#include <string>
#include <cstdio>
#include <cstdarg>
#if defined(BOOST_SPIRIT_DEBUG)
#include <iostream>
#endif // defined(BOOST_SPIRIT_DEBUG)
#include <boost/concept_check.hpp>
#include <boost/assert.hpp>
#include <boost/spirit/include/classic_core.hpp>
#include <boost/wave/wave_config.hpp>
#include <boost/wave/language_support.hpp>
#include <boost/wave/token_ids.hpp>
#include <boost/wave/util/file_position.hpp>
#include <boost/wave/cpplexer/validate_universal_char.hpp>
#include <boost/wave/cpplexer/cpplexer_exceptions.hpp>
#include <boost/wave/cpplexer/token_cache.hpp>
#include <boost/wave/cpplexer/convert_trigraphs.hpp>
#include <boost/wave/cpplexer/cpp_lex_interface.hpp>
#include <boost/wave/cpplexer/re2clex/scanner.hpp>
#include <boost/wave/cpplexer/re2clex/cpp_re.hpp>
#if BOOST_WAVE_SUPPORT_PRAGMA_ONCE != 0
#include <boost/wave/cpplexer/detect_include_guards.hpp>
#endif
#include <boost/wave/cpplexer/cpp_lex_interface_generator.hpp>
// this must occur after all of the includes and before any code appears
#ifdef BOOST_HAS_ABI_HEADERS
#include BOOST_ABI_PREFIX
#endif
///////////////////////////////////////////////////////////////////////////////
namespace boost {
namespace wave {
namespace cpplexer {
namespace re2clex {
///////////////////////////////////////////////////////////////////////////////
//
// encapsulation of the re2c based cpp lexer
//
///////////////////////////////////////////////////////////////////////////////
template <typename IteratorT,
typename PositionT = boost::wave::util::file_position_type,
typename TokenT = lex_token<PositionT> >
class lexer
{
public:
typedef TokenT token_type;
typedef typename token_type::string_type string_type;
lexer(IteratorT const &first, IteratorT const &last,
PositionT const &pos, boost::wave::language_support language_);
~lexer();
token_type& get(token_type&);
void set_position(PositionT const &pos)
{
// set position has to change the file name and line number only
filename = pos.get_file();
scanner.line = pos.get_line();
// scanner.column = scanner.curr_column = pos.get_column();
scanner.file_name = filename.c_str();
}
#if BOOST_WAVE_SUPPORT_PRAGMA_ONCE != 0
bool has_include_guards(std::string& guard_name) const
{
return guards.detected(guard_name);
}
#endif
// error reporting from the re2c generated lexer
static int report_error(Scanner<IteratorT> const* s, int code, char const *, ...);
private:
static char const *tok_names[];
Scanner<IteratorT> scanner;
string_type filename;
string_type value;
bool at_eof;
boost::wave::language_support language;
#if BOOST_WAVE_SUPPORT_PRAGMA_ONCE != 0
include_guards<token_type> guards;
#endif
#if BOOST_WAVE_SUPPORT_THREADING == 0
static token_cache<string_type> const cache;
#else
token_cache<string_type> const cache;
#endif
};
///////////////////////////////////////////////////////////////////////////////
// initialize cpp lexer
template <typename IteratorT, typename PositionT, typename TokenT>
inline
lexer<IteratorT, PositionT, TokenT>::lexer(IteratorT const &first,
IteratorT const &last, PositionT const &pos,
boost::wave::language_support language_)
: scanner(first, last),
filename(pos.get_file()), at_eof(false), language(language_)
#if BOOST_WAVE_SUPPORT_THREADING != 0
, cache()
#endif
{
using namespace std; // some systems have memset in std
scanner.line = pos.get_line();
scanner.column = scanner.curr_column = pos.get_column();
scanner.error_proc = report_error;
scanner.file_name = filename.c_str();
#if BOOST_WAVE_SUPPORT_MS_EXTENSIONS != 0
scanner.enable_ms_extensions = true;
#else
scanner.enable_ms_extensions = false;
#endif
#if BOOST_WAVE_SUPPORT_VARIADICS_PLACEMARKERS != 0
scanner.act_in_c99_mode = boost::wave::need_c99(language_);
#endif
#if BOOST_WAVE_SUPPORT_IMPORT_KEYWORD != 0
scanner.enable_import_keyword = !boost::wave::need_c99(language_);
#else
scanner.enable_import_keyword = false;
#endif
scanner.detect_pp_numbers = boost::wave::need_prefer_pp_numbers(language_);
scanner.single_line_only = boost::wave::need_single_line(language_);
#if BOOST_WAVE_SUPPORT_CPP0X != 0
scanner.act_in_cpp0x_mode = boost::wave::need_cpp0x(language_);
#else
scanner.act_in_cpp0x_mode = false;
#endif
#if BOOST_WAVE_SUPPORT_CPP2A != 0
scanner.act_in_cpp2a_mode = boost::wave::need_cpp2a(language_);
scanner.act_in_cpp0x_mode = boost::wave::need_cpp2a(language_)
|| boost::wave::need_cpp0x(language_);
#else
scanner.act_in_cpp2a_mode = false;
#endif
}
template <typename IteratorT, typename PositionT, typename TokenT>
inline
lexer<IteratorT, PositionT, TokenT>::~lexer()
{
using namespace std; // some systems have free in std
free(scanner.bot);
}
///////////////////////////////////////////////////////////////////////////////
// get the next token from the input stream
template <typename IteratorT, typename PositionT, typename TokenT>
inline TokenT&
lexer<IteratorT, PositionT, TokenT>::get(TokenT& result)
{
if (at_eof)
return result = token_type(); // return T_EOI
std::size_t actline = scanner.line;
token_id id = token_id(scan(&scanner));
switch (id) {
case T_IDENTIFIER:
// test identifier characters for validity (throws if invalid chars found)
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
if (!boost::wave::need_no_character_validation(language))
impl::validate_identifier_name(value, actline, scanner.column, filename);
break;
case T_STRINGLIT:
case T_CHARLIT:
case T_RAWSTRINGLIT:
// test literal characters for validity (throws if invalid chars found)
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
if (boost::wave::need_convert_trigraphs(language))
value = impl::convert_trigraphs(value);
if (!boost::wave::need_no_character_validation(language))
impl::validate_literal(value, actline, scanner.column, filename);
break;
case T_PP_HHEADER:
case T_PP_QHEADER:
case T_PP_INCLUDE:
// convert to the corresponding ..._next token, if appropriate
{
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
#if BOOST_WAVE_SUPPORT_INCLUDE_NEXT != 0
// Skip '#' and whitespace and see whether we find an 'include_next' here.
typename string_type::size_type start = value.find("include");
if (value.compare(start, 12, "include_next", 12) == 0)
id = token_id(id | AltTokenType);
#endif
break;
}
case T_LONGINTLIT: // supported in C++11, C99 and long_long mode
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
if (!boost::wave::need_long_long(language)) {
// syntax error: not allowed in C++ mode
BOOST_WAVE_LEXER_THROW(lexing_exception, invalid_long_long_literal,
value.c_str(), actline, scanner.column, filename.c_str());
}
break;
case T_OCTALINT:
case T_DECIMALINT:
case T_HEXAINT:
case T_INTLIT:
case T_FLOATLIT:
case T_FIXEDPOINTLIT:
case T_CCOMMENT:
case T_CPPCOMMENT:
case T_SPACE:
case T_SPACE2:
case T_ANY:
case T_PP_NUMBER:
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
break;
case T_EOF:
// T_EOF is returned as a valid token, the next call will return T_EOI,
// i.e. the actual end of input
at_eof = true;
value.clear();
break;
case T_OR_TRIGRAPH:
case T_XOR_TRIGRAPH:
case T_LEFTBRACE_TRIGRAPH:
case T_RIGHTBRACE_TRIGRAPH:
case T_LEFTBRACKET_TRIGRAPH:
case T_RIGHTBRACKET_TRIGRAPH:
case T_COMPL_TRIGRAPH:
case T_POUND_TRIGRAPH:
if (boost::wave::need_convert_trigraphs(language)) {
value = cache.get_token_value(BASEID_FROM_TOKEN(id));
}
else {
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
}
break;
case T_ANY_TRIGRAPH:
if (boost::wave::need_convert_trigraphs(language)) {
value = impl::convert_trigraph(
string_type((char const *)scanner.tok));
}
else {
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
}
break;
default:
if (CATEGORY_FROM_TOKEN(id) != EXTCATEGORY_FROM_TOKEN(id) ||
IS_CATEGORY(id, UnknownTokenType))
{
value = string_type((char const *)scanner.tok,
scanner.cur-scanner.tok);
}
else {
value = cache.get_token_value(id);
}
break;
}
// std::cerr << boost::wave::get_token_name(id) << ": " << value << std::endl;
// the re2c lexer reports the new line number for newline tokens
result = token_type(id, value, PositionT(filename, actline, scanner.column));
#if BOOST_WAVE_SUPPORT_PRAGMA_ONCE != 0
return guards.detect_guard(result);
#else
return result;
#endif
}
template <typename IteratorT, typename PositionT, typename TokenT>
inline int
lexer<IteratorT, PositionT, TokenT>::report_error(Scanner<IteratorT> const *s, int errcode,
char const *msg, ...)
{
BOOST_ASSERT(0 != s);
BOOST_ASSERT(0 != msg);
using namespace std; // some system have vsprintf in namespace std
char buffer[200]; // should be large enough
va_list params;
va_start(params, msg);
vsprintf(buffer, msg, params);
va_end(params);
BOOST_WAVE_LEXER_THROW_VAR(lexing_exception, errcode, buffer, s->line,
s->column, s->file_name);
// BOOST_UNREACHABLE_RETURN(0);
return 0;
}
///////////////////////////////////////////////////////////////////////////////
//
// lex_functor
//
///////////////////////////////////////////////////////////////////////////////
template <typename IteratorT,
typename PositionT = boost::wave::util::file_position_type,
typename TokenT = typename lexer<IteratorT, PositionT>::token_type>
class lex_functor
: public lex_input_interface_generator<TokenT>
{
public:
typedef TokenT token_type;
lex_functor(IteratorT const &first, IteratorT const &last,
PositionT const &pos, boost::wave::language_support language)
: re2c_lexer(first, last, pos, language)
{}
virtual ~lex_functor() {}
// get the next token from the input stream
token_type& get(token_type& result) BOOST_OVERRIDE { return re2c_lexer.get(result); }
void set_position(PositionT const &pos) BOOST_OVERRIDE { re2c_lexer.set_position(pos); }
#if BOOST_WAVE_SUPPORT_PRAGMA_ONCE != 0
bool has_include_guards(std::string& guard_name) const BOOST_OVERRIDE
{ return re2c_lexer.has_include_guards(guard_name); }
#endif
private:
lexer<IteratorT, PositionT, TokenT> re2c_lexer;
};
#if BOOST_WAVE_SUPPORT_THREADING == 0
///////////////////////////////////////////////////////////////////////////////
template <typename IteratorT, typename PositionT, typename TokenT>
token_cache<typename lexer<IteratorT, PositionT, TokenT>::string_type> const
lexer<IteratorT, PositionT, TokenT>::cache =
token_cache<typename lexer<IteratorT, PositionT, TokenT>::string_type>();
#endif
} // namespace re2clex
///////////////////////////////////////////////////////////////////////////////
//
// The new_lexer_gen<>::new_lexer function (declared in cpp_lex_interface.hpp)
// should be defined inline, if the lex_functor shouldn't be instantiated
// separately from the lex_iterator.
//
// Separate (explicit) instantiation helps to reduce compilation time.
//
///////////////////////////////////////////////////////////////////////////////
#if BOOST_WAVE_SEPARATE_LEXER_INSTANTIATION != 0
#define BOOST_WAVE_RE2C_NEW_LEXER_INLINE
#else
#define BOOST_WAVE_RE2C_NEW_LEXER_INLINE inline
#endif
///////////////////////////////////////////////////////////////////////////////
//
// The 'new_lexer' function allows the opaque generation of a new lexer object.
// It is coupled to the iterator type to allow to decouple the lexer/iterator
// configurations at compile time.
//
// This function is declared inside the cpp_lex_token.hpp file, which is
// referenced by the source file calling the lexer and the source file, which
// instantiates the lex_functor. But it is defined here, so it will be
// instantiated only while compiling the source file, which instantiates the
// lex_functor. While the cpp_re2c_token.hpp file may be included everywhere,
// this file (cpp_re2c_lexer.hpp) should be included only once. This allows
// to decouple the lexer interface from the lexer implementation and reduces
// compilation time.
//
///////////////////////////////////////////////////////////////////////////////
template <typename IteratorT, typename PositionT, typename TokenT>
BOOST_WAVE_RE2C_NEW_LEXER_INLINE
lex_input_interface<TokenT> *
new_lexer_gen<IteratorT, PositionT, TokenT>::new_lexer(IteratorT const &first,
IteratorT const &last, PositionT const &pos,
boost::wave::language_support language)
{
using re2clex::lex_functor;
return new lex_functor<IteratorT, PositionT, TokenT>(first, last, pos, language);
}
#undef BOOST_WAVE_RE2C_NEW_LEXER_INLINE
///////////////////////////////////////////////////////////////////////////////
} // namespace cpplexer
} // namespace wave
} // namespace boost
// the suffix header occurs after all of the code
#ifdef BOOST_HAS_ABI_HEADERS
#include BOOST_ABI_SUFFIX
#endif
#endif // !defined(BOOST_CPP_RE2C_LEXER_HPP_B81A2629_D5B1_4944_A97D_60254182B9A8_INCLUDED)
| kumakoko/KumaGL | third_lib/boost/1.75.0/boost/wave/cpplexer/re2clex/cpp_re2c_lexer.hpp | C++ | mit | 14,567 |
// Copyright (c) 2006, 2007 Julio M. Merino Vidal
// Copyright (c) 2008 Ilya Sokolov, Boris Schaeling
// Copyright (c) 2009 Boris Schaeling
// Copyright (c) 2010 Felipe Tanus, Boris Schaeling
// Copyright (c) 2011, 2012 Jeff Flinn, Boris Schaeling
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_TEST_MAIN
#define BOOST_TEST_IGNORE_SIGCHLD
#include <boost/test/included/unit_test.hpp>
#include <boost/process.hpp>
#include <boost/process/posix.hpp>
#include <system_error>
#include <string>
#include <sys/wait.h>
#include <errno.h>
namespace bp = boost::process;
#if defined(BOOST_POSIX_HAS_VFORK)
BOOST_AUTO_TEST_CASE(bind_fd, *boost::unit_test::timeout(2))
{
using boost::unit_test::framework::master_test_suite;
bp::pipe p;
std::error_code ec;
bp::child c(
master_test_suite().argv[1],
"test", "--posix-echo-one", "3", "hello",
bp::posix::fd.bind(3, p.native_sink()),
bp::posix::use_vfork,
ec
);
BOOST_CHECK(!ec);
bp::ipstream is(std::move(p));
std::string s;
is >> s;
BOOST_CHECK_EQUAL(s, "hello");
}
BOOST_AUTO_TEST_CASE(execve_set_on_error, *boost::unit_test::timeout(2))
{
std::error_code ec;
bp::spawn(
"doesnt-exist",
bp::posix::use_vfork,
ec
);
BOOST_CHECK(ec);
BOOST_CHECK_EQUAL(ec.value(), ENOENT);
}
BOOST_AUTO_TEST_CASE(execve_throw_on_error, *boost::unit_test::timeout(2))
{
try
{
bp::spawn("doesnt-exist", bp::posix::use_vfork);
BOOST_CHECK(false);
}
catch (std::system_error &e)
{
BOOST_CHECK(e.code());
BOOST_CHECK_EQUAL(e.code().value(), ENOENT);
}
}
#else
BOOST_AUTO_TEST_CASE(dummy) {}
#endif
| davehorton/drachtio-server | deps/boost_1_77_0/libs/process/test/vfork.cpp | C++ | mit | 1,832 |
# -*- coding: utf-8 -*- #
# frozen_string_literal: true
module Rouge
module Lexers
class CMake < RegexLexer
title 'CMake'
desc 'The cross-platform, open-source build system'
tag 'cmake'
filenames 'CMakeLists.txt', '*.cmake'
mimetypes 'text/x-cmake'
SPACE = '[ \t]'
BRACKET_OPEN = '\[=*\['
STATES_MAP = {
:root => Text,
:bracket_string => Str::Double,
:quoted_argument => Str::Double,
:bracket_comment => Comment::Multiline,
:variable_reference => Name::Variable,
}
BUILTIN_COMMANDS = Set.new %w[
add_compile_options
add_custom_command
add_custom_target
add_definitions
add_dependencies
add_executable
add_library
add_subdirectory
add_test
aux_source_directory
break
build_command
build_name
cmake_host_system_information
cmake_minimum_required
cmake_policy
configure_file
create_test_sourcelist
define_property
else
elseif
enable_language
enable_testing
endforeach
endfunction
endif
endmacro
endwhile
exec_program
execute_process
export
export_library_dependencies
file
find_file
find_library
find_package
find_path
find_program
fltk_wrap_ui
foreach
function
get_cmake_property
get_directory_property
get_filename_component
get_property
get_source_file_property
get_target_property
get_test_property
if
include
include_directories
include_external_msproject
include_regular_expression
install
install_files
install_programs
install_targets
link_directories
link_libraries
list
load_cache
load_command
macro
make_directory
mark_as_advanced
math
message
option
output_required_files
project
qt_wrap_cpp
qt_wrap_ui
remove
remove_definitions
return
separate_arguments
set
set_directory_properties
set_property
set_source_files_properties
set_target_properties
set_tests_properties
site_name
source_group
string
subdir_depends
subdirs
target_compile_definitions
target_compile_options
target_include_directories
target_link_libraries
try_compile
try_run
unset
use_mangled_mesa
utility_source
variable_requires
variable_watch
while
write_file
]
state :default do
rule %r/\r\n?|\n/ do
token STATES_MAP[state.name.to_sym]
end
rule %r/./ do
token STATES_MAP[state.name.to_sym]
end
end
state :variable_interpolation do
rule %r/\$\{/ do
token Str::Interpol
push :variable_reference
end
end
state :bracket_close do
rule %r/\]=*\]/ do |m|
token STATES_MAP[state.name.to_sym]
goto :root if m[0].length == @bracket_len
end
end
state :root do
mixin :variable_interpolation
rule %r/#{SPACE}/, Text
rule %r/[()]/, Punctuation
rule %r/##{BRACKET_OPEN}/ do |m|
token Comment::Multiline
          @bracket_len = m[0].length - 1 # exclude the leading '#'
goto :bracket_comment
end
rule %r/#{BRACKET_OPEN}/ do |m|
token Str::Double
@bracket_len = m[0].length
goto :bracket_string
end
rule %r/\\"/, Text
rule %r/"/, Str::Double, :quoted_argument
rule %r/([A-Za-z_][A-Za-z0-9_]*)(#{SPACE}*)(\()/ do |m|
groups BUILTIN_COMMANDS.include?(m[1]) ? Name::Builtin : Name::Function, Text, Punctuation
end
rule %r/#.*/, Comment::Single
mixin :default
end
state :bracket_string do
mixin :bracket_close
mixin :variable_interpolation
mixin :default
end
state :bracket_comment do
mixin :bracket_close
mixin :default
end
state :variable_reference do
mixin :variable_interpolation
rule %r/}/, Str::Interpol, :pop!
mixin :default
end
state :quoted_argument do
mixin :variable_interpolation
rule %r/"/, Str::Double, :root
rule %r/\\[()#" \\$@^trn;]/, Str::Escape
mixin :default
end
end
end
end
| Jean-njoroge/jean-njoroge.github.io | vendor/bundle/ruby/2.7.0/gems/rouge-3.19.0/lib/rouge/lexers/cmake.rb | Ruby | mit | 4,761 |
function TreeView($dom, store, adapter) {
this.$view = $dom.find('.octotree_treeview')
this.store = store
this.adapter = adapter
this.$view
.find('.octotree_view_body')
.on('click.jstree', '.jstree-open>a', function() {
$.jstree.reference(this).close_node(this)
})
.on('click.jstree', '.jstree-closed>a', function() {
$.jstree.reference(this).open_node(this)
})
.on('click', function(event) {
var $target = $(event.target)
// handle icon click, fix #122
if ($target.is('i.jstree-icon')) $target = $target.parent()
if (!$target.is('a.jstree-anchor')) return
var href = $target.attr('href')
, $icon = $target.children().length
? $target.children(':first')
: $target.siblings(':first') // handles child links in submodule
// refocus after complete so that keyboard navigation works, fix #158
$(document).one('pjax:success', function () {
$.jstree.reference(this).get_container().focus()
}.bind(this))
if ($icon.hasClass('commit')) adapter.selectSubmodule(href)
else if ($icon.hasClass('blob')) adapter.selectPath(href, store.get(STORE.TABSIZE))
})
.jstree({
core : { multiple: false, themes : { responsive : false } },
plugins : ['wholerow']
})
}
TreeView.prototype.showHeader = function(repo) {
var adapter = this.adapter
this.$view.find('.octotree_view_header')
.html(
'<div class="octotree_header_repo">' +
'<a href="/' + repo.username + '">' + repo.username +'</a>' +
' / ' +
'<a data-pjax href="/' + repo.username + '/' + repo.reponame + '">' + repo.reponame +'</a>' +
'</div>' +
'<div class="octotree_header_branch">' +
repo.branch +
'</div>'
)
.on('click', 'a[data-pjax]', function(event) {
event.preventDefault()
      adapter.selectPath($(this).attr('href') /* a.href always returns an absolute URL; we don't want that */)
})
}
TreeView.prototype.show = function(repo, treeData) {
var self = this
, treeContainer = self.$view.find('.octotree_view_body')
, tree = treeContainer.jstree(true)
, collapseTree = self.store.get(STORE.COLLAPSE)
treeData = sort(treeData)
if (collapseTree) treeData = collapse(treeData)
tree.settings.core.data = treeData
treeContainer.one('refresh.jstree', function() {
self.syncSelection()
$(self).trigger(EVENT.VIEW_READY)
})
tree.refresh(true)
function sort(folder) {
folder.sort(function(a, b) {
if (a.type === b.type) return a.text === b.text ? 0 : a.text < b.text ? -1 : 1
return a.type === 'blob' ? 1 : -1
})
folder.forEach(function(item) {
if (item.type === 'tree') sort(item.children)
})
return folder
}
function collapse(folder) {
return folder.map(function(item) {
if (item.type === 'tree') {
item.children = collapse(item.children)
if (item.children.length === 1 && item.children[0].type === 'tree') {
var onlyChild = item.children[0]
onlyChild.text = item.text + '/' + onlyChild.text
return onlyChild
}
}
return item
})
}
}
TreeView.prototype.syncSelection = function() {
var tree = this.$view.find('.octotree_view_body').jstree(true)
, path = location.pathname
if (!tree) return
tree.deselect_all()
// e.g. converts /buunguyen/octotree/type/branch/path to path
var match = path.match(/(?:[^\/]+\/){4}(.*)/)
, nodeId
if (match) {
nodeId = PREFIX + decodeURIComponent(match[1])
tree.select_node(nodeId)
tree.open_node(nodeId)
}
}
| likaiwalkman/octotree | src/view.tree.js | JavaScript | mit | 3,634 |
function dec(target, name, descriptor) {
expect(target).toBeTruthy();
expect(typeof name).toBe("string");
expect(typeof descriptor).toBe("object");
target.decoratedProps = (target.decoratedProps || []).concat([name]);
let value = descriptor.value;
Object.assign(descriptor, {
enumerable: name.indexOf("enum") !== -1,
configurable: name.indexOf("conf") !== -1,
writable: name.indexOf("write") !== -1,
value: function(...args) {
return "__" + value.apply(this, args) + "__";
},
});
}
class Example {
@dec
static enumconfwrite(){
return 1;
}
@dec
static enumconf(){
return 2;
}
@dec
static enumwrite(){
return 3;
}
@dec
static enum(){
return 4;
}
@dec
static confwrite(){
return 5;
}
@dec
static conf(){
return 6;
}
@dec
static write(){
return 7;
}
@dec
static _(){
return 8;
}
}
expect(Example).toHaveProperty("decoratedProps");
expect(Example.decoratedProps).toEqual([
"enumconfwrite",
"enumconf",
"enumwrite",
"enum",
"confwrite",
"conf",
"write",
"_",
]);
const descs = Object.getOwnPropertyDescriptors(Example);
expect(descs.enumconfwrite.enumerable).toBeTruthy();
expect(descs.enumconfwrite.writable).toBeTruthy();
expect(descs.enumconfwrite.configurable).toBeTruthy();
expect(Example.enumconfwrite()).toBe("__1__");
expect(descs.enumconf.enumerable).toBeTruthy();
expect(descs.enumconf.writable).toBe(false);
expect(descs.enumconf.configurable).toBeTruthy();
expect(Example.enumconf()).toBe("__2__");
expect(descs.enumwrite.enumerable).toBeTruthy();
expect(descs.enumwrite.writable).toBeTruthy();
expect(descs.enumwrite.configurable).toBe(false);
expect(Example.enumwrite()).toBe("__3__");
expect(descs.enum.enumerable).toBeTruthy();
expect(descs.enum.writable).toBe(false);
expect(descs.enum.configurable).toBe(false);
expect(Example.enum()).toBe("__4__");
expect(descs.confwrite.enumerable).toBe(false);
expect(descs.confwrite.writable).toBeTruthy();
expect(descs.confwrite.configurable).toBeTruthy();
expect(Example.confwrite()).toBe("__5__");
expect(descs.conf.enumerable).toBe(false);
expect(descs.conf.writable).toBe(false);
expect(descs.conf.configurable).toBeTruthy();
expect(Example.conf()).toBe("__6__");
expect(descs.write.enumerable).toBe(false);
expect(descs.write.writable).toBeTruthy();
expect(descs.write.configurable).toBe(false);
expect(Example.write()).toBe("__7__");
expect(descs._.enumerable).toBe(false);
expect(descs._.writable).toBe(false);
expect(descs._.configurable).toBe(false);
expect(Example._()).toBe("__8__");
| Skillupco/babel | packages/babel-plugin-proposal-decorators/test/fixtures/legacy-class-static-methods/mutate-descriptor/exec.js | JavaScript | mit | 2,602 |
package org.knowm.xchange.ripple.service.params;
import org.knowm.xchange.service.trade.params.TradeHistoryParams;
/**
 * Keeps track of and restricts the number of notification and order detail queries that are made
 * for a trade history. Because there is no single simple API call that returns an account's trade
 * history, the number of API queries can spiral out of control. This interface helps prevent that.
*/
public interface RippleTradeHistoryCount extends TradeHistoryParams {
int DEFAULT_API_CALL_COUNT = 100;
int DEFAULT_TRADE_COUNT_LIMIT = 10;
void resetApiCallCount();
void incrementApiCallCount();
int getApiCallCount();
int getApiCallCountLimit();
void resetTradeCount();
void incrementTradeCount();
int getTradeCount();
int getTradeCountLimit();
}
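// Illustrative sketch only (not part of the upstream interface): a trade history
// implementation would typically consult these counters before issuing another
// request. "params" below is an assumed object implementing this interface.
//
//   params.resetApiCallCount();
//   params.resetTradeCount();
//   while (params.getApiCallCount() < params.getApiCallCountLimit()
//       && params.getTradeCount() < params.getTradeCountLimit()) {
//     // ...fetch the next page of notifications, then:
//     params.incrementApiCallCount();
//     // ...and for every trade found in that page:
//     params.incrementTradeCount();
//   }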
| douggie/XChange | xchange-ripple/src/main/java/org/knowm/xchange/ripple/service/params/RippleTradeHistoryCount.java | Java | mit | 799 |
/* $OpenLDAP: pkg/ldap/tests/progs/slapd-tester.c,v 1.15.2.5 2003/03/03 17:10:12 kurt Exp $ */
/*
* Copyright 1998-2003 The OpenLDAP Foundation, All Rights Reserved.
* COPYING RESTRICTIONS APPLY, see COPYRIGHT file
*/
#include "portable.h"
#include <stdio.h>
#include <ac/stdlib.h>
#include <ac/ctype.h>
#include <ac/dirent.h>
#include <ac/param.h>
#include <ac/socket.h>
#include <ac/string.h>
#include <ac/unistd.h>
#include <ac/wait.h>
#include "ldap_defaults.h"
#define SEARCHCMD "slapd-search"
#define READCMD "slapd-read"
#define ADDCMD "slapd-addel"
#define MODRDNCMD "slapd-modrdn"
#define MAXARGS 100
#define MAXREQS 20
#define LOOPS "100"
#define TSEARCHFILE "do_search.0"
#define TREADFILE "do_read.0"
#define TADDFILE "do_add."
#define TMODRDNFILE "do_modrdn.0"
static char *get_file_name( char *dirname, char *filename );
static int get_search_filters( char *filename, char *filters[] );
static int get_read_entries( char *filename, char *entries[] );
static void fork_child( char *prog, char **args );
static void wait4kids( int nkidval );
static int maxkids = 20;
static int nkids;
#ifdef HAVE_WINSOCK
static HANDLE *children;
static char argbuf[BUFSIZ];
#define ArgDup(x) strdup(strcat(strcat(strcpy(argbuf,"\""),x),"\""))
#else
#define ArgDup(x) strdup(x)
#endif
static void
usage( char *name )
{
fprintf( stderr, "usage: %s [-h <host>] -p <port> -D <manager> -w <passwd> -d <datadir> -b <baseDN> [-j <maxchild>] [-l <loops>] -P <progdir>\n", name );
exit( EXIT_FAILURE );
}
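/*
 * Example invocation (illustrative only; host, suffix and paths are
 * assumptions, not taken from this source):
 *
 *	slapd-tester -h localhost -p 9011 -D "cn=Manager,dc=example,dc=com" \
 *		-w secret -b "dc=example,dc=com" -d ./testdata -P ./progs -j 5 -l 100
 */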
int
main( int argc, char **argv )
{
int i, j;
char *uri = NULL;
char *host = "localhost";
char *port = NULL;
char *manager = NULL;
char *passwd = NULL;
char *dirname = NULL;
char *sbase = NULL;
char *progdir = NULL;
char *loops = LOOPS;
DIR *datadir;
struct dirent *file;
char *sfile = NULL;
char *sreqs[MAXREQS];
int snum = 0;
char *rfile = NULL;
char *rreqs[MAXREQS];
int rnum = 0;
char *afiles[MAXREQS];
int anum = 0;
char *mfile = NULL;
char *mreqs[MAXREQS];
int mnum = 0;
char *sargs[MAXARGS];
int sanum;
char scmd[MAXPATHLEN];
char *rargs[MAXARGS];
int ranum;
char rcmd[MAXPATHLEN];
char *aargs[MAXARGS];
int aanum;
char acmd[MAXPATHLEN];
char *margs[MAXARGS];
int manum;
char mcmd[MAXPATHLEN];
while ( (i = getopt( argc, argv, "H:h:p:D:w:b:d:j:l:P:" )) != EOF ) {
switch( i ) {
case 'H': /* slapd uri */
uri = strdup( optarg );
break;
case 'h': /* slapd host */
host = strdup( optarg );
break;
case 'p': /* the servers port number */
port = strdup( optarg );
break;
case 'D': /* slapd manager */
manager = ArgDup( optarg );
break;
case 'w': /* the managers passwd */
passwd = ArgDup( optarg );
break;
case 'b': /* the base DN */
sbase = ArgDup( optarg );
break;
case 'd': /* data directory */
dirname = strdup( optarg );
break;
case 'P': /* prog directory */
progdir = strdup( optarg );
break;
case 'j': /* the number of parallel clients */
maxkids = atoi( optarg );
break;
case 'l': /* the number of loops per client */
loops = strdup( optarg );
break;
default:
usage( argv[0] );
break;
}
}
if (( dirname == NULL ) || ( sbase == NULL ) || ( port == NULL && uri == NULL ) ||
( manager == NULL ) || ( passwd == NULL ) || ( progdir == NULL ))
usage( argv[0] );
#ifdef HAVE_WINSOCK
children = malloc( maxkids * sizeof(HANDLE) );
#endif
/* get the file list */
if ( ( datadir = opendir( dirname )) == NULL ) {
fprintf( stderr, "%s: couldn't open data directory \"%s\".\n",
argv[0], dirname );
exit( EXIT_FAILURE );
}
/* look for search, read, modrdn, and add/delete files */
for ( file = readdir( datadir ); file; file = readdir( datadir )) {
if ( !strcasecmp( file->d_name, TSEARCHFILE )) {
sfile = get_file_name( dirname, file->d_name );
continue;
} else if ( !strcasecmp( file->d_name, TREADFILE )) {
rfile = get_file_name( dirname, file->d_name );
continue;
} else if ( !strcasecmp( file->d_name, TMODRDNFILE )) {
mfile = get_file_name( dirname, file->d_name );
continue;
} else if ( !strncasecmp( file->d_name, TADDFILE, strlen( TADDFILE ))
&& ( anum < MAXREQS )) {
afiles[anum++] = get_file_name( dirname, file->d_name );
continue;
}
}
closedir( datadir );
/* look for search requests */
if ( sfile ) {
snum = get_search_filters( sfile, sreqs );
}
/* look for read requests */
if ( rfile ) {
rnum = get_read_entries( rfile, rreqs );
}
/* look for modrdn requests */
if ( mfile ) {
mnum = get_read_entries( mfile, mreqs );
}
/*
* generate the search clients
*/
sanum = 0;
snprintf( scmd, sizeof scmd, "%s" LDAP_DIRSEP SEARCHCMD,
progdir );
sargs[sanum++] = scmd;
if ( uri ) {
sargs[sanum++] = "-H";
sargs[sanum++] = uri;
} else {
sargs[sanum++] = "-h";
sargs[sanum++] = host;
sargs[sanum++] = "-p";
sargs[sanum++] = port;
}
sargs[sanum++] = "-b";
sargs[sanum++] = sbase;
sargs[sanum++] = "-l";
sargs[sanum++] = loops;
sargs[sanum++] = "-f";
sargs[sanum++] = NULL; /* will hold the search request */
sargs[sanum++] = NULL;
/*
* generate the read clients
*/
ranum = 0;
snprintf( rcmd, sizeof rcmd, "%s" LDAP_DIRSEP READCMD,
progdir );
rargs[ranum++] = rcmd;
if ( uri ) {
rargs[ranum++] = "-H";
rargs[ranum++] = uri;
} else {
rargs[ranum++] = "-h";
rargs[ranum++] = host;
rargs[ranum++] = "-p";
rargs[ranum++] = port;
}
rargs[ranum++] = "-l";
rargs[ranum++] = loops;
rargs[ranum++] = "-e";
rargs[ranum++] = NULL; /* will hold the read entry */
rargs[ranum++] = NULL;
/*
* generate the modrdn clients
*/
manum = 0;
snprintf( mcmd, sizeof mcmd, "%s" LDAP_DIRSEP MODRDNCMD,
progdir );
margs[manum++] = mcmd;
if ( uri ) {
margs[manum++] = "-H";
margs[manum++] = uri;
} else {
margs[manum++] = "-h";
margs[manum++] = host;
margs[manum++] = "-p";
margs[manum++] = port;
}
margs[manum++] = "-D";
margs[manum++] = manager;
margs[manum++] = "-w";
margs[manum++] = passwd;
margs[manum++] = "-l";
margs[manum++] = loops;
margs[manum++] = "-e";
margs[manum++] = NULL; /* will hold the modrdn entry */
margs[manum++] = NULL;
/*
* generate the add/delete clients
*/
aanum = 0;
snprintf( acmd, sizeof acmd, "%s" LDAP_DIRSEP ADDCMD,
progdir );
aargs[aanum++] = acmd;
if ( uri ) {
aargs[aanum++] = "-H";
aargs[aanum++] = uri;
} else {
aargs[aanum++] = "-h";
aargs[aanum++] = host;
aargs[aanum++] = "-p";
aargs[aanum++] = port;
}
aargs[aanum++] = "-D";
aargs[aanum++] = manager;
aargs[aanum++] = "-w";
aargs[aanum++] = passwd;
aargs[aanum++] = "-l";
aargs[aanum++] = loops;
aargs[aanum++] = "-f";
aargs[aanum++] = NULL; /* will hold the add data file */
aargs[aanum++] = NULL;
for ( j = 0; j < MAXREQS; j++ ) {
if ( j < snum ) {
sargs[sanum - 2] = sreqs[j];
fork_child( scmd, sargs );
}
if ( j < rnum ) {
rargs[ranum - 2] = rreqs[j];
fork_child( rcmd, rargs );
}
if ( j < mnum ) {
margs[manum - 2] = mreqs[j];
fork_child( mcmd, margs );
}
if ( j < anum ) {
aargs[aanum - 2] = afiles[j];
fork_child( acmd, aargs );
}
}
wait4kids( -1 );
exit( EXIT_SUCCESS );
}
static char *
get_file_name( char *dirname, char *filename )
{
char buf[MAXPATHLEN];
snprintf( buf, sizeof buf, "%s" LDAP_DIRSEP "%s",
dirname, filename );
return( strdup( buf ));
}
static int
get_search_filters( char *filename, char *filters[] )
{
FILE *fp;
int filter = 0;
if ( (fp = fopen( filename, "r" )) != NULL ) {
char line[BUFSIZ];
while (( filter < MAXREQS ) && ( fgets( line, BUFSIZ, fp ))) {
char *nl;
if (( nl = strchr( line, '\r' )) || ( nl = strchr( line, '\n' )))
*nl = '\0';
filters[filter++] = ArgDup( line );
}
fclose( fp );
}
return( filter );
}
static int
get_read_entries( char *filename, char *entries[] )
{
FILE *fp;
int entry = 0;
if ( (fp = fopen( filename, "r" )) != NULL ) {
char line[BUFSIZ];
while (( entry < MAXREQS ) && ( fgets( line, BUFSIZ, fp ))) {
char *nl;
if (( nl = strchr( line, '\r' )) || ( nl = strchr( line, '\n' )))
*nl = '\0';
entries[entry++] = ArgDup( line );
}
fclose( fp );
}
return( entry );
}
#ifndef HAVE_WINSOCK
static void
fork_child( char *prog, char **args )
{
pid_t pid;
wait4kids( maxkids );
switch ( pid = fork() ) {
case 0: /* child */
#ifdef HAVE_EBCDIC
/* The __LIBASCII execvp only handles ASCII "prog",
* we still need to translate the arg vec ourselves.
*/
{ char *arg2[MAXREQS];
int i;
for (i=0; args[i]; i++) {
arg2[i] = ArgDup(args[i]);
__atoe(arg2[i]);
}
arg2[i] = NULL;
args = arg2; }
#endif
execvp( prog, args );
fprintf( stderr, "%s: ", prog );
perror( "execv" );
exit( EXIT_FAILURE );
break;
case -1: /* trouble */
fprintf( stderr, "Could not fork to run %s\n", prog );
perror( "fork" );
break;
default: /* parent */
nkids++;
break;
}
}
static void
wait4kids( int nkidval )
{
int status;
while ( nkids >= nkidval ) {
wait( &status );
if ( WIFSTOPPED(status) ) {
fprintf( stderr,
"stopping: child stopped with signal %d\n",
(int) WSTOPSIG(status) );
} else if ( WIFSIGNALED(status) ) {
fprintf( stderr,
"stopping: child terminated with signal %d%s\n",
(int) WTERMSIG(status),
#ifdef WCOREDUMP
WCOREDUMP(status) ? ", core dumped" : ""
#else
""
#endif
);
exit( WEXITSTATUS(status) );
} else if ( WEXITSTATUS(status) != 0 ) {
fprintf( stderr,
"stopping: child exited with status %d\n",
(int) WEXITSTATUS(status) );
exit( WEXITSTATUS(status) );
} else {
nkids--;
}
}
}
#else
static void
wait4kids( int nkidval )
{
int rc, i;
while ( nkids >= nkidval ) {
rc = WaitForMultipleObjects( nkids, children, FALSE, INFINITE );
for ( i=rc - WAIT_OBJECT_0; i<nkids-1; i++)
children[i] = children[i+1];
nkids--;
}
}
static void
fork_child( char *prog, char **args )
{
int rc;
wait4kids( maxkids );
rc = _spawnvp( _P_NOWAIT, prog, args );
if ( rc == -1 ) {
fprintf( stderr, "%s: ", prog );
perror("spawnvp");
} else {
children[nkids++] = (HANDLE)rc;
}
}
#endif
| impedimentToProgress/UCI-BlueChip | snapgear_linux/lib/libldap/tests/progs/slapd-tester.c | C | mit | 10,316 |
// Copyright 2008 Google Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: [email protected] (Vlad Losev)
// This sample shows how to test common properties of multiple
// implementations of an interface (aka interface tests) using
// value-parameterized tests. Each test in the test case has
// a parameter that is an interface pointer to an implementation
// tested.
// The interface and its implementations are in this header.
#include "prime_tables.h"
#include "gtest/gtest.h"
namespace {
#if GTEST_HAS_PARAM_TEST
using ::testing::TestWithParam;
using ::testing::Values;
// As a general rule, to prevent a test from affecting the tests that come
// after it, you should create and destroy the tested objects for each test
// instead of reusing them. In this sample we will define a simple factory
// function for PrimeTable objects. We will instantiate objects in the test's
// SetUp() method and delete them in the TearDown() method.
typedef PrimeTable* CreatePrimeTableFunc();
PrimeTable* CreateOnTheFlyPrimeTable() {
return new OnTheFlyPrimeTable();
}
template <size_t max_precalculated>
PrimeTable* CreatePreCalculatedPrimeTable() {
return new PreCalculatedPrimeTable(max_precalculated);
}
// Inside the test body, fixture constructor, SetUp(), and TearDown() you
// can refer to the test parameter by GetParam(). In this case, the test
// parameter is a factory function which we call in the fixture's SetUp() to
// create and store an instance of PrimeTable.
class PrimeTableTestSmpl7 : public TestWithParam<CreatePrimeTableFunc*> {
public:
virtual ~PrimeTableTestSmpl7() { delete table_; }
virtual void SetUp() { table_ = (*GetParam())(); }
virtual void TearDown() {
delete table_;
table_ = NULL;
}
protected:
PrimeTable* table_;
};
TEST_P(PrimeTableTestSmpl7, ReturnsFalseForNonPrimes) {
EXPECT_FALSE(table_->IsPrime(-5));
EXPECT_FALSE(table_->IsPrime(0));
EXPECT_FALSE(table_->IsPrime(1));
EXPECT_FALSE(table_->IsPrime(4));
EXPECT_FALSE(table_->IsPrime(6));
EXPECT_FALSE(table_->IsPrime(100));
}
TEST_P(PrimeTableTestSmpl7, ReturnsTrueForPrimes) {
EXPECT_TRUE(table_->IsPrime(2));
EXPECT_TRUE(table_->IsPrime(3));
EXPECT_TRUE(table_->IsPrime(5));
EXPECT_TRUE(table_->IsPrime(7));
EXPECT_TRUE(table_->IsPrime(11));
EXPECT_TRUE(table_->IsPrime(131));
}
TEST_P(PrimeTableTestSmpl7, CanGetNextPrime) {
EXPECT_EQ(2, table_->GetNextPrime(0));
EXPECT_EQ(3, table_->GetNextPrime(2));
EXPECT_EQ(5, table_->GetNextPrime(3));
EXPECT_EQ(7, table_->GetNextPrime(5));
EXPECT_EQ(11, table_->GetNextPrime(7));
EXPECT_EQ(131, table_->GetNextPrime(128));
}
// In order to run value-parameterized tests, you need to instantiate them,
// or bind them to a list of values which will be used as test parameters.
// You can instantiate them in a different translation module, or even
// instantiate them several times.
//
// Here, we instantiate our tests with a list of two PrimeTable object
// factory functions:
INSTANTIATE_TEST_CASE_P(OnTheFlyAndPreCalculated, PrimeTableTestSmpl7,
Values(&CreateOnTheFlyPrimeTable,
&CreatePreCalculatedPrimeTable<1000>));
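// For illustration only (not part of the original sample): the same test case
// could be instantiated again -- even from another translation unit -- with a
// different parameter list, for example a smaller pre-calculated table. The
// instantiation name below is an assumption.
//
//   INSTANTIATE_TEST_CASE_P(SmallPreCalculated, PrimeTableTestSmpl7,
//                           Values(&CreatePreCalculatedPrimeTable<10>));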
#else
// Google Test may not support value-parameterized tests with some
// compilers. If we use conditional compilation to compile out all
// code referring to the gtest_main library, MSVC linker will not link
// that library at all and consequently complain about missing entry
// point defined in that library (fatal error LNK1561: entry point
// must be defined). This dummy test keeps gtest_main linked in.
TEST(DummyTest, ValueParameterizedTestsAreNotSupportedOnThisPlatform) {}
#endif // GTEST_HAS_PARAM_TEST
} // namespace
| MasonLeeBack/PolyEngine | thirdparty/vulkan/shaderc/third_party/googletest/googletest/samples/sample7_unittest.cc | C++ | mit | 5,198 |
<?php
use Mockery as m;
use Illuminate\Database\Schema\Blueprint;
class DatabasePostgresSchemaGrammarTest extends PHPUnit_Framework_TestCase {
public function tearDown()
{
m::close();
}
public function testBasicCreateTable()
{
$blueprint = new Blueprint('users');
$blueprint->create();
$blueprint->increments('id');
$blueprint->string('email');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('create table "users" ("id" serial primary key not null, "email" varchar(255) not null)', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->increments('id');
$blueprint->string('email');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "id" serial primary key not null, add column "email" varchar(255) not null', $statements[0]);
}
public function testDropTable()
{
$blueprint = new Blueprint('users');
$blueprint->drop();
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('drop table "users"', $statements[0]);
}
public function testDropTableIfExists()
{
$blueprint = new Blueprint('users');
$blueprint->dropIfExists();
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('drop table if exists "users"', $statements[0]);
}
public function testDropColumn()
{
$blueprint = new Blueprint('users');
$blueprint->dropColumn('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop column "foo"', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->dropColumn(array('foo', 'bar'));
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop column "foo", drop column "bar"', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->dropColumn('foo', 'bar');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop column "foo", drop column "bar"', $statements[0]);
}
public function testDropPrimary()
{
$blueprint = new Blueprint('users');
$blueprint->dropPrimary();
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop constraint users_pkey', $statements[0]);
}
public function testDropUnique()
{
$blueprint = new Blueprint('users');
$blueprint->dropUnique('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop constraint foo', $statements[0]);
}
public function testDropIndex()
{
$blueprint = new Blueprint('users');
$blueprint->dropIndex('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('drop index foo', $statements[0]);
}
public function testDropForeign()
{
$blueprint = new Blueprint('users');
$blueprint->dropForeign('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop constraint foo', $statements[0]);
}
public function testDropTimestamps()
{
$blueprint = new Blueprint('users');
$blueprint->dropTimestamps();
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" drop column "created_at", drop column "updated_at"', $statements[0]);
}
public function testRenameTable()
{
$blueprint = new Blueprint('users');
$blueprint->rename('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" rename to "foo"', $statements[0]);
}
public function testAddingPrimaryKey()
{
$blueprint = new Blueprint('users');
$blueprint->primary('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add primary key ("foo")', $statements[0]);
}
public function testAddingUniqueKey()
{
$blueprint = new Blueprint('users');
$blueprint->unique('foo', 'bar');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add constraint bar unique ("foo")', $statements[0]);
}
public function testAddingIndex()
{
$blueprint = new Blueprint('users');
$blueprint->index(array('foo', 'bar'), 'baz');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('create index baz on "users" ("foo", "bar")', $statements[0]);
}
public function testAddingIncrementingID()
{
$blueprint = new Blueprint('users');
$blueprint->increments('id');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "id" serial primary key not null', $statements[0]);
}
public function testAddingBigIncrementingID()
{
$blueprint = new Blueprint('users');
$blueprint->bigIncrements('id');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "id" bigserial primary key not null', $statements[0]);
}
public function testAddingString()
{
$blueprint = new Blueprint('users');
$blueprint->string('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" varchar(255) not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->string('foo', 100);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" varchar(100) not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->string('foo', 100)->nullable()->default('bar');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" varchar(100) null default \'bar\'', $statements[0]);
}
public function testAddingText()
{
$blueprint = new Blueprint('users');
$blueprint->text('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" text not null', $statements[0]);
}
public function testAddingBigInteger()
{
$blueprint = new Blueprint('users');
$blueprint->bigInteger('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" bigint not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->bigInteger('foo', true);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" bigserial primary key not null', $statements[0]);
}
public function testAddingInteger()
{
$blueprint = new Blueprint('users');
$blueprint->integer('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" integer not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->integer('foo', true);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" serial primary key not null', $statements[0]);
}
public function testAddingMediumInteger()
{
$blueprint = new Blueprint('users');
$blueprint->mediumInteger('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" integer not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->mediumInteger('foo', true);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" serial primary key not null', $statements[0]);
}
public function testAddingTinyInteger()
{
$blueprint = new Blueprint('users');
$blueprint->tinyInteger('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" smallint not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->tinyInteger('foo', true);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" smallserial primary key not null', $statements[0]);
}
public function testAddingSmallInteger()
{
$blueprint = new Blueprint('users');
$blueprint->smallInteger('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" smallint not null', $statements[0]);
$blueprint = new Blueprint('users');
$blueprint->smallInteger('foo', true);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" smallserial primary key not null', $statements[0]);
}
public function testAddingFloat()
{
$blueprint = new Blueprint('users');
$blueprint->float('foo', 5, 2);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" double precision not null', $statements[0]);
}
public function testAddingDouble()
{
$blueprint = new Blueprint('users');
$blueprint->double('foo', 15, 8);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" double precision not null', $statements[0]);
}
public function testAddingDecimal()
{
$blueprint = new Blueprint('users');
$blueprint->decimal('foo', 5, 2);
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" decimal(5, 2) not null', $statements[0]);
}
public function testAddingBoolean()
{
$blueprint = new Blueprint('users');
$blueprint->boolean('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" boolean not null', $statements[0]);
}
public function testAddingEnum()
{
$blueprint = new Blueprint('users');
$blueprint->enum('foo', array('bar', 'baz'));
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" varchar(255) check ("foo" in (\'bar\', \'baz\')) not null', $statements[0]);
}
public function testAddingDate()
{
$blueprint = new Blueprint('users');
$blueprint->date('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" date not null', $statements[0]);
}
public function testAddingJson()
{
$blueprint = new Blueprint('users');
$blueprint->json('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" json not null', $statements[0]);
}
public function testAddingJsonb()
{
$blueprint = new Blueprint('users');
$blueprint->jsonb('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" jsonb not null', $statements[0]);
}
public function testAddingDateTime()
{
$blueprint = new Blueprint('users');
$blueprint->dateTime('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" timestamp(0) without time zone not null', $statements[0]);
}
public function testAddingDateTimeTz()
{
$blueprint = new Blueprint('users');
$blueprint->dateTimeTz('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" timestamp(0) with time zone not null', $statements[0]);
}
public function testAddingTime()
{
$blueprint = new Blueprint('users');
$blueprint->time('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" time(0) without time zone not null', $statements[0]);
}
public function testAddingTimeTz()
{
$blueprint = new Blueprint('users');
$blueprint->timeTz('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" time(0) with time zone not null', $statements[0]);
}
public function testAddingTimeStamp()
{
$blueprint = new Blueprint('users');
$blueprint->timestamp('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" timestamp(0) without time zone not null', $statements[0]);
}
public function testAddingTimeStampTz()
{
$blueprint = new Blueprint('users');
$blueprint->timestampTz('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" timestamp(0) with time zone not null', $statements[0]);
}
public function testAddingTimeStamps()
{
$blueprint = new Blueprint('users');
$blueprint->timestamps();
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "created_at" timestamp(0) without time zone not null, add column "updated_at" timestamp(0) without time zone not null', $statements[0]);
}
public function testAddingBinary()
{
$blueprint = new Blueprint('users');
$blueprint->binary('foo');
$statements = $blueprint->toSql($this->getConnection(), $this->getGrammar());
$this->assertEquals(1, count($statements));
$this->assertEquals('alter table "users" add column "foo" bytea not null', $statements[0]);
}
protected function getConnection()
{
return m::mock('Illuminate\Database\Connection');
}
public function getGrammar()
{
return new Illuminate\Database\Schema\Grammars\PostgresGrammar;
}
}
| phanan/framework | tests/Database/DatabasePostgresSchemaGrammarTest.php | PHP | mit | 16,727 |
/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */
/*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************/
#include "sfhdr.h"
/* Poll a set of streams to see if any is available for I/O.
** Ready streams are moved to front of array but retain the
** same relative order.
**
** Written by Kiem-Phong Vo.
*/
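/* Illustrative sketch only (not part of the original source): a typical
** caller passes an array of streams and then works on the ready prefix
** that sfpoll() moves to the front. The stream variables below are
** assumptions made for the example, and handle_ready_stream() is a
** hypothetical application callback.
**
**	Sfio_t* streams[2] = { f_in, f_out };
**	int ready = sfpoll(streams, 2, 1000);
**	for (int i = 0; i < ready; ++i)
**		handle_ready_stream(streams[i]);
*/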
#if __STD_C
int sfpoll(Sfio_t ** fa, reg int n, int tm)
#else
int sfpoll(fa, n, tm)
Sfio_t **fa; /* array of streams to poll */
reg int n; /* number of streams in array */
int tm; /* the amount of time in ms to wait for selecting */
#endif
{
reg int r, c, m;
reg Sfio_t *f;
reg Sfdisc_t *d;
reg int *status, *check;
if (n <= 0 || !fa)
return -1;
if (!(status = (int *) malloc(2 * n * sizeof(int))))
return -1;
else
check = status + n;
/* this loop partitions the streams into 3 sets: Check, Ready, Notready */
retry:for (r = c = 0; r < n; ++r) {
f = fa[r];
/* this loop pops a stream stack as necessary */
for (;;) { /* check accessibility */
m = f->mode & SF_RDWR;
if ((int) f->mode != m && _sfmode(f, m, 0) < 0)
goto do_never;
/* clearly ready */
if (f->next < f->endb)
goto do_ready;
/* has discipline, ask its opinion */
for (d = f->disc; d; d = d->disc)
if (d->exceptf)
break;
if (d) {
if ((m = (*d->exceptf) (f, SF_DPOLL, &tm, d)) < 0)
goto do_never;
else if (m > 0)
goto do_ready;
/*else check file descriptor */
}
/* unseekable stream, must check for blockability */
if (f->extent < 0)
goto do_check;
/* string/regular streams with no possibility of blocking */
if (!f->push)
goto do_ready;
/* stacked regular file stream with I/O possibility */
if (!(f->flags & SF_STRING) &&
((f->mode & SF_WRITE) || f->here < f->extent))
goto do_ready;
/* at an apparent eof, pop stack if ok, then recheck */
SETLOCAL(f);
switch (_sfexcept(f, f->mode & SF_RDWR, 0, f->disc)) {
case SF_EDONE:
if (f->flags & SF_STRING)
goto do_never;
else
goto do_ready;
case SF_EDISC:
if (f->flags & SF_STRING)
goto do_ready;
case SF_ESTACK:
case SF_ECONT:
continue;
}
}
do_check: /* local function to set a stream for further checking */
{
status[r] = 0;
check[c] = r;
c += 1;
continue;
}
do_ready: /* local function to set the ready streams */
{
status[r] = 1;
continue;
}
do_never: /* local function to set the not-ready streams */
{
status[r] = -1;
continue;
}
}
#if _lib_poll
if (c > 0) {
struct pollfd *fds;
/* construct the poll array */
if (!(fds = (struct pollfd *) malloc(c * sizeof(struct pollfd))))
return -1;
for (r = 0; r < c; r++) {
fds[r].fd = fa[check[r]]->file;
fds[r].events =
(fa[check[r]]->mode & SF_READ) ? POLLIN : POLLOUT;
fds[r].revents = 0;
}
for (;;) { /* this loop takes care of interrupts */
if ((r = SFPOLL(fds, c, tm)) == 0)
break;
else if (r < 0) {
if (errno == EINTR || errno == EAGAIN) {
errno = 0;
continue;
} else
break;
}
for (r = 0; r < c; ++r) {
f = fa[check[r]];
if (((f->mode & SF_READ) && (fds[r].revents & POLLIN)) ||
((f->mode & SF_WRITE) && (fds[r].revents & POLLOUT)))
status[check[r]] = 1;
}
break;
}
free((Void_t *) fds);
}
#endif /*_lib_poll*/
#if _lib_select
if (c > 0) {
fd_set rd, wr;
struct timeval tmb, *tmp;
FD_ZERO(&rd);
FD_ZERO(&wr);
m = 0;
for (r = 0; r < c; ++r) {
f = fa[check[r]];
if (f->file > m)
m = f->file;
if (f->mode & SF_READ)
FD_SET(f->file, &rd);
else
FD_SET(f->file, &wr);
}
if (tm < 0)
tmp = NIL(struct timeval *);
else {
tmp = &tmb;
tmb.tv_sec = tm / SECOND;
tmb.tv_usec = (tm % SECOND) * SECOND;
}
for (;;) {
if ((r = select(m + 1, &rd, &wr, NIL(fd_set *), tmp)) == 0)
break;
else if (r < 0) {
if (errno == EINTR)
continue;
else
break;
}
for (r = 0; r < c; ++r) {
f = fa[check[r]];
if (((f->mode & SF_READ) && FD_ISSET(f->file, &rd)) ||
((f->mode & SF_WRITE) && FD_ISSET(f->file, &wr)))
status[check[r]] = 1;
}
break;
}
}
#endif /*_lib_select*/
/* call exception functions */
for (c = 0; c < n; ++c) {
if (status[c] <= 0)
continue;
if ((d = fa[c]->disc) && d->exceptf) {
if ((r = (*d->exceptf) (fa[c], SF_READY, (Void_t *) 0, d)) < 0)
goto done;
else if (r > 0)
goto retry;
}
}
/* move ready streams to the front */
for (r = c = 0; c < n; ++c) {
if (status[c] > 0) {
if (c > r) {
f = fa[r];
fa[r] = fa[c];
fa[c] = f;
}
r += 1;
}
}
done:
free((Void_t *) status);
return r;
}
| JasonGross/graphviz-packaging | reference-graphviz-2.39.20141222.0545/lib/sfio/sfpoll.c | C | mit | 5,236 |
/// <reference path="fromnow.d.ts" />
import fromnow = require( 'fromnow' );
function dateOnly() {
fromnow( '2015-12-31' );
}
function maxChunks() {
fromnow( '2015-12-31', {
maxChunks: 12
});
}
function useAgo() {
fromnow( '2015-12-31', {
useAgo: true
});
}
function useAnd() {
fromnow( '2015-12-31', {
useAnd: true
});
} | Dashlane/DefinitelyTyped | fromnow/fromnow-tests.ts | TypeScript | mit | 378 |
class Role < ActiveRecord::Base
has_many :role_memberships, :as => :roleable, :dependent => :destroy
has_many :roles, :through => :role_memberships, :source => :role
has_many :roleables, :class_name => "RoleMembership", :foreign_key => "role_id", :dependent => :destroy
has_many :subroles, :through => :roleables, :source => :roleable, :source_type => 'Role'
has_many :users, :through => :roleables, :source => :roleable, :source_type => 'User'
validates :name, :presence => true, :uniqueness => true
acts_as_permissible
end | tshifflett/balder | app/models/role.rb | Ruby | mit | 547 |
<?php
/**
* @file
* Contains \Drupal\views\Plugin\views\style\StylePluginBase.
*/
namespace Drupal\views\Plugin\views\style;
use Drupal\Component\Utility\Html;
use Drupal\Core\Form\FormStateInterface;
use Drupal\views\Plugin\views\PluginBase;
use Drupal\views\Plugin\views\display\DisplayPluginBase;
use Drupal\views\Plugin\views\wizard\WizardInterface;
use Drupal\views\ViewExecutable;
/**
* @defgroup views_style_plugins Views style plugins
* @{
* Plugins that control how the collection of results is rendered in a view.
*
* Style plugins control how a view is displayed. For the most part, they are
* object wrappers around theme templates. Examples of styles include HTML
* lists, tables, full or teaser content views, etc.
*
* Many (but not all) style plugins have an optional row plugin, which
* displays a single record. Not all style plugins use row plugins, so it is
* up to the style plugin to set this up and call the row plugin. See the
* @link views_row_plugins Views row plugins topic @endlink for more
* information.
*
* Style plugins extend \Drupal\views\Plugin\views\style\StylePluginBase. They
* must be annotated with \Drupal\views\Annotation\ViewsStyle
* annotation, and they must be in namespace directory Plugin\views\style.
*
* @ingroup views_plugins
* @see plugin_api
*/
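// Illustrative sketch only (not part of this file): a minimal style plugin
// would typically live at my_module/src/Plugin/views/style/MyStyle.php, carry
// a @ViewsStyle annotation in its class docblock (id, title, help, theme,
// display_types), and extend this base class. All names below are assumptions
// made for the example.
//
//   namespace Drupal\my_module\Plugin\views\style;
//
//   use Drupal\views\Plugin\views\style\StylePluginBase;
//
//   class MyStyle extends StylePluginBase {
//     protected $usesRowPlugin = TRUE;
//     protected $usesRowClass = TRUE;
//   }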
/**
* Base class for views style plugins.
*/
abstract class StylePluginBase extends PluginBase {
/**
* Overrides Drupal\views\Plugin\Plugin::$usesOptions.
*/
protected $usesOptions = TRUE;
/**
* Store all available tokens row rows.
*/
protected $rowTokens = array();
/**
   * Does the style plugin allow the use of a row plugin.
*
* @var bool
*/
protected $usesRowPlugin = FALSE;
/**
   * Does the style plugin support custom CSS classes for the rows.
*
* @var bool
*/
protected $usesRowClass = FALSE;
/**
* Does the style plugin support grouping of rows.
*
* @var bool
*/
protected $usesGrouping = TRUE;
/**
   * Does the style plugin itself support adding fields to its output.
   *
   * This option only makes sense for style plugins without row plugins, such
   * as the table style.
*
* @var bool
*/
protected $usesFields = FALSE;
/**
* Stores the rendered field values, keyed by the row index and field name.
*
* @see \Drupal\views\Plugin\views\style\StylePluginBase::renderFields()
* @see \Drupal\views\Plugin\views\style\StylePluginBase::getField()
*
* @var array|null
*/
protected $rendered_fields;
/**
* The theme function used to render the grouping set.
*
* Plugins may override this attribute if they wish to use some other theme
* function to render the grouping set.
*
* @var string
*
* @see StylePluginBase::renderGroupingSets()
*/
protected $groupingTheme = 'views_view_grouping';
/**
* Should field labels be enabled by default.
*
* @var bool
*/
protected $defaultFieldLabels = FALSE;
/**
* Overrides \Drupal\views\Plugin\views\PluginBase::init().
*
   * The style options might come in externally, as the style can be sourced
   * from at least two locations. If they are not included, look on the display.
*/
public function init(ViewExecutable $view, DisplayPluginBase $display, array &$options = NULL) {
parent::init($view, $display, $options);
if ($this->usesRowPlugin() && $display->getOption('row')) {
$this->view->rowPlugin = $display->getPlugin('row');
}
$this->options += array(
'grouping' => array(),
);
}
public function destroy() {
parent::destroy();
if (isset($this->view->rowPlugin)) {
$this->view->rowPlugin->destroy();
}
}
/**
* Returns the usesRowPlugin property.
*
* @return bool
*/
function usesRowPlugin() {
return $this->usesRowPlugin;
}
/**
* Returns the usesRowClass property.
*
* @return bool
*/
function usesRowClass() {
return $this->usesRowClass;
}
/**
* Returns the usesGrouping property.
*
* @return bool
*/
function usesGrouping() {
return $this->usesGrouping;
}
/**
* Return TRUE if this style also uses fields.
*
* @return bool
*/
function usesFields() {
// If we use a row plugin, ask the row plugin. Chances are, we don't
// care, it does.
$row_uses_fields = FALSE;
if ($this->usesRowPlugin() && ($row_plugin = $this->displayHandler->getPlugin('row'))) {
$row_uses_fields = $row_plugin->usesFields();
}
// Otherwise, check the definition or the option.
return $row_uses_fields || $this->usesFields || !empty($this->options['uses_fields']);
}
/**
* Return TRUE if this style uses tokens.
*
* Used to ensure we don't fetch tokens when not needed for performance.
*/
public function usesTokens() {
if ($this->usesRowClass()) {
$class = $this->options['row_class'];
if (strpos($class, '[') !== FALSE || strpos($class, '!') !== FALSE || strpos($class, '%') !== FALSE) {
return TRUE;
}
}
}
/**
* Return TRUE if this style enables field labels by default.
*
* @return bool
*/
public function defaultFieldLabels() {
return $this->defaultFieldLabels;
}
/**
* Return the token replaced row class for the specified row.
*/
public function getRowClass($row_index) {
if ($this->usesRowClass()) {
$class = $this->options['row_class'];
if ($this->usesFields() && $this->view->field) {
$class = strip_tags($this->tokenizeValue($class, $row_index));
}
$classes = explode(' ', $class);
foreach ($classes as &$class) {
$class = Html::cleanCssIdentifier($class);
}
return implode(' ', $classes);
}
}
/**
* Take a value and apply token replacement logic to it.
*/
public function tokenizeValue($value, $row_index) {
if (strpos($value, '[') !== FALSE || strpos($value, '!') !== FALSE || strpos($value, '%') !== FALSE) {
// Row tokens might be empty, for example for node row style.
$tokens = isset($this->rowTokens[$row_index]) ? $this->rowTokens[$row_index] : array();
if (!empty($this->view->build_info['substitutions'])) {
$tokens += $this->view->build_info['substitutions'];
}
if ($tokens) {
$value = strtr($value, $tokens);
}
}
return $value;
}
/**
   * Should the output of the style plugin be rendered even if it's an empty view.
*/
public function evenEmpty() {
return !empty($this->definition['even empty']);
}
protected function defineOptions() {
$options = parent::defineOptions();
$options['grouping'] = array('default' => array());
if ($this->usesRowClass()) {
$options['row_class'] = array('default' => '');
$options['default_row_class'] = array('default' => TRUE);
}
$options['uses_fields'] = array('default' => FALSE);
return $options;
}
public function buildOptionsForm(&$form, FormStateInterface $form_state) {
parent::buildOptionsForm($form, $form_state);
// Only fields-based views can handle grouping. Style plugins can also exclude
// themselves from being groupable by setting their "usesGrouping" property
// to FALSE.
// @TODO: Document "usesGrouping" in docs.php when docs.php is written.
if ($this->usesFields() && $this->usesGrouping()) {
$options = array('' => $this->t('- None -'));
$field_labels = $this->displayHandler->getFieldLabels(TRUE);
$options += $field_labels;
// If there are no fields, we can't group on them.
if (count($options) > 1) {
// This is for backward compatibility, when there was just a single
// select form.
if (is_string($this->options['grouping'])) {
$grouping = $this->options['grouping'];
$this->options['grouping'] = array();
$this->options['grouping'][0]['field'] = $grouping;
}
if (isset($this->options['group_rendered']) && is_string($this->options['group_rendered'])) {
$this->options['grouping'][0]['rendered'] = $this->options['group_rendered'];
unset($this->options['group_rendered']);
}
$c = count($this->options['grouping']);
// Add a form for every grouping, plus one.
for ($i = 0; $i <= $c; $i++) {
$grouping = !empty($this->options['grouping'][$i]) ? $this->options['grouping'][$i] : array();
$grouping += array('field' => '', 'rendered' => TRUE, 'rendered_strip' => FALSE);
$form['grouping'][$i]['field'] = array(
'#type' => 'select',
'#title' => $this->t('Grouping field Nr.@number', array('@number' => $i + 1)),
'#options' => $options,
'#default_value' => $grouping['field'],
'#description' => $this->t('You may optionally specify a field by which to group the records. Leave blank to not group.'),
);
$form['grouping'][$i]['rendered'] = array(
'#type' => 'checkbox',
'#title' => $this->t('Use rendered output to group rows'),
'#default_value' => $grouping['rendered'],
'#description' => $this->t('If enabled the rendered output of the grouping field is used to group the rows.'),
'#states' => array(
'invisible' => array(
':input[name="style_options[grouping][' . $i . '][field]"]' => array('value' => ''),
),
),
);
$form['grouping'][$i]['rendered_strip'] = array(
'#type' => 'checkbox',
'#title' => $this->t('Remove tags from rendered output'),
'#default_value' => $grouping['rendered_strip'],
'#states' => array(
'invisible' => array(
':input[name="style_options[grouping][' . $i . '][field]"]' => array('value' => ''),
),
),
);
}
}
}
if ($this->usesRowClass()) {
$form['row_class'] = array(
'#title' => $this->t('Row class'),
'#description' => $this->t('The class to provide on each row.'),
'#type' => 'textfield',
'#default_value' => $this->options['row_class'],
);
if ($this->usesFields()) {
        $form['row_class']['#description'] .= ' ' . $this->t('You may use field tokens, as per the "Replacement patterns" used in "Rewrite the output of this field", for all fields.');
}
$form['default_row_class'] = array(
'#title' => $this->t('Add views row classes'),
'#description' => $this->t('Add the default row classes like views-row-1 to the output. You can use this to quickly reduce the amount of markup the view provides by default, at the cost of making it more difficult to apply CSS.'),
'#type' => 'checkbox',
'#default_value' => $this->options['default_row_class'],
);
}
if (!$this->usesFields() || !empty($this->options['uses_fields'])) {
$form['uses_fields'] = array(
'#type' => 'checkbox',
'#title' => $this->t('Force using fields'),
'#description' => $this->t('If neither the row nor the style plugin supports fields, this field allows to enable them, so you can for example use groupby.'),
'#default_value' => $this->options['uses_fields'],
);
}
}
public function validateOptionsForm(&$form, FormStateInterface $form_state) {
// Don't run validation on style plugins without the grouping setting.
if ($form_state->hasValue(array('style_options', 'grouping'))) {
// Don't save grouping if no field is specified.
$groupings = $form_state->getValue(array('style_options', 'grouping'));
foreach ($groupings as $index => $grouping) {
if (empty($grouping['field'])) {
$form_state->unsetValue(array('style_options', 'grouping', $index));
}
}
}
}
/**
* Provide a form in the views wizard if this style is selected.
*
* @param array $form
* An associative array containing the structure of the form.
* @param \Drupal\Core\Form\FormStateInterface $form_state
* The current state of the form.
* @param string $type
* The display type, either block or page.
*/
public function wizardForm(&$form, FormStateInterface $form_state, $type) {
}
/**
* Alter the options of a display before they are added to the view.
*
* @param array $form
* An associative array containing the structure of the form.
* @param \Drupal\Core\Form\FormStateInterface $form_state
* The current state of the form.
* @param \Drupal\views\Plugin\views\wizard\WizardInterface $wizard
* The current used wizard.
* @param array $display_options
* The options which will be used on the view. The style plugin should
* alter this to its own needs.
* @param string $display_type
* The display type, either block or page.
*/
public function wizardSubmit(&$form, FormStateInterface $form_state, WizardInterface $wizard, &$display_options, $display_type) {
}
/**
* Called by the view builder to see if this style handler wants to
   * interfere with the sorts. If so, it should build them itself; if it returns
   * any non-TRUE value, normal sorting will NOT be added to the query.
*/
public function buildSort() { return TRUE; }
/**
* Called by the view builder to let the style build a second set of
* sorts that will come after any other sorts in the view.
*/
public function buildSortPost() { }
/**
* Allow the style to do stuff before each row is rendered.
*
* @param $result
* The full array of results from the query.
*/
public function preRender($result) {
if (!empty($this->view->rowPlugin)) {
$this->view->rowPlugin->preRender($result);
}
}
/**
* Renders a group of rows of the grouped view.
*
* @param array $rows
* The result rows rendered in this group.
*
* @return array
* The render array containing the single group theme output.
*/
protected function renderRowGroup(array $rows = array()) {
return array(
'#theme' => $this->themeFunctions(),
'#view' => $this->view,
'#rows' => $rows,
);
}
/**
* Render the display in this style.
*/
public function render() {
if ($this->usesRowPlugin() && empty($this->view->rowPlugin)) {
debug('Drupal\views\Plugin\views\style\StylePluginBase: Missing row plugin');
return;
}
// Group the rows according to the grouping instructions, if specified.
$sets = $this->renderGrouping(
$this->view->result,
$this->options['grouping'],
TRUE
);
return $this->renderGroupingSets($sets);
}
/**
* Render the grouping sets.
*
   * Plugins may override this method if they wish to handle grouping some
   * other way.
*
* @param $sets
* Array containing the grouping sets to render.
* @param $level
* Integer indicating the hierarchical level of the grouping.
*
* @return string
* Rendered output of given grouping sets.
*/
public function renderGroupingSets($sets, $level = 0) {
$output = array();
$theme_functions = $this->view->buildThemeFunctions($this->groupingTheme);
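    // Illustrative note (not from the original source): with no grouping fields
    // configured, renderGrouping() below produces a single flat set of the form
    //   array('' => array('group' => '', 'rows' => $view->result)),
    // so this loop runs once and renders every result row as one record set.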
foreach ($sets as $set) {
$row = reset($set['rows']);
// Render as a grouping set.
if (is_array($row) && isset($row['group'])) {
$output[] = array(
'#theme' => $theme_functions,
'#view' => $this->view,
'#grouping' => $this->options['grouping'][$level],
'#grouping_level' => $level,
'#rows' => $set['rows'],
'#title' => $set['group'],
);
}
// Render as a record set.
else {
if ($this->usesRowPlugin()) {
foreach ($set['rows'] as $index => $row) {
$this->view->row_index = $index;
$set['rows'][$index] = $this->view->rowPlugin->render($row);
}
}
$single_output = $this->renderRowGroup($set['rows']);
$single_output['#grouping_level'] = $level;
$single_output['#title'] = $set['group'];
$output[] = $single_output;
}
}
unset($this->view->row_index);
return $output;
}
/**
* Group records as needed for rendering.
*
* @param $records
* An array of records from the view to group.
* @param $groupings
* An array of grouping instructions on which fields to group. If empty, the
* result set will be given a single group with an empty string as a label.
* @param $group_rendered
   * Boolean indicating whether to use the rendered or the raw field value for
* grouping. If set to NULL the return is structured as before
* Views 7.x-3.0-rc2. After Views 7.x-3.0 this boolean is only used if
* $groupings is an old-style string or if the rendered option is missing
* for a grouping instruction.
* @return
* The grouped record set.
* A nested set structure is generated if multiple grouping fields are used.
*
* @code
* array(
* 'grouping_field_1:grouping_1' => array(
* 'group' => 'grouping_field_1:content_1',
* 'rows' => array(
* 'grouping_field_2:grouping_a' => array(
* 'group' => 'grouping_field_2:content_a',
* 'rows' => array(
* $row_index_1 => $row_1,
* $row_index_2 => $row_2,
* // ...
* )
* ),
* ),
* ),
* 'grouping_field_1:grouping_2' => array(
* // ...
* ),
* )
* @endcode
*/
public function renderGrouping($records, $groupings = array(), $group_rendered = NULL) {
// This is for backward compatibility, when $groupings was a string
// containing the ID of a single field.
if (is_string($groupings)) {
$rendered = $group_rendered === NULL ? TRUE : $group_rendered;
$groupings = array(array('field' => $groupings, 'rendered' => $rendered));
}
// Make sure fields are rendered
$this->renderFields($this->view->result);
$sets = array();
if ($groupings) {
foreach ($records as $index => $row) {
// Iterate through configured grouping fields to determine the
// hierarchically positioned set where the current row belongs to.
// While iterating, parent groups, that do not exist yet, are added.
$set = &$sets;
foreach ($groupings as $info) {
$field = $info['field'];
$rendered = isset($info['rendered']) ? $info['rendered'] : $group_rendered;
$rendered_strip = isset($info['rendered_strip']) ? $info['rendered_strip'] : FALSE;
$grouping = '';
$group_content = '';
// Group on the rendered version of the field, not the raw. That way,
// we can control any special formatting of the grouping field through
// the admin or theme layer or anywhere else we'd like.
if (isset($this->view->field[$field])) {
$group_content = $this->getField($index, $field);
if ($this->view->field[$field]->options['label']) {
$group_content = $this->view->field[$field]->options['label'] . ': ' . $group_content;
}
if ($rendered) {
$grouping = $group_content;
if ($rendered_strip) {
$group_content = $grouping = strip_tags(htmlspecialchars_decode($group_content));
}
}
else {
$grouping = $this->getFieldValue($index, $field);
// Not all field handlers return a scalar value,
// e.g. views_handler_field_field.
if (!is_scalar($grouping)) {
$grouping = hash('sha256', serialize($grouping));
}
}
}
// Create the group if it does not exist yet.
if (empty($set[$grouping])) {
$set[$grouping]['group'] = $group_content;
$set[$grouping]['rows'] = array();
}
// Move the set reference into the row set of the group we just determined.
$set = &$set[$grouping]['rows'];
}
// Add the row to the hierarchically positioned row set we just determined.
$set[$index] = $row;
}
}
else {
// Create a single group with an empty grouping field.
$sets[''] = array(
'group' => '',
'rows' => $records,
);
}
    // If this parameter isn't explicitly set, modify the output to be fully
    // backward compatible with code before Views 7.x-3.0-rc2.
// @TODO Remove this as soon as possible e.g. October 2020
if ($group_rendered === NULL) {
$old_style_sets = array();
foreach ($sets as $group) {
$old_style_sets[$group['group']] = $group['rows'];
}
$sets = $old_style_sets;
}
return $sets;
}
/**
   * Renders all of the fields for a given style and stores them on the object.
*
* @param array $result
* The result array from $view->result
*/
protected function renderFields(array $result) {
if (!$this->usesFields()) {
return;
}
if (!isset($this->rendered_fields)) {
$this->rendered_fields = array();
$this->view->row_index = 0;
$keys = array_keys($this->view->field);
// If all fields have a field::access FALSE there might be no fields, so
// there is no reason to execute this code.
if (!empty($keys)) {
foreach ($result as $count => $row) {
$this->view->row_index = $count;
foreach ($keys as $id) {
$this->rendered_fields[$count][$id] = $this->view->field[$id]->theme($row);
}
$this->rowTokens[$count] = $this->view->field[$id]->getRenderTokens(array());
}
}
unset($this->view->row_index);
}
}
/**
* Gets a rendered field.
*
* @param int $index
* The index count of the row.
* @param string $field
* The ID of the field.
*
* @return string|null
* The output of the field, or NULL if it was empty.
*/
public function getField($index, $field) {
if (!isset($this->rendered_fields)) {
$this->renderFields($this->view->result);
}
if (isset($this->rendered_fields[$index][$field])) {
return $this->rendered_fields[$index][$field];
}
}
/**
* Get the raw field value.
*
* @param $index
* The index count of the row.
* @param $field
* The id of the field.
*/
protected function getFieldValue($index, $field) {
$this->view->row_index = $index;
$value = $this->view->field[$field]->getValue($this->view->result[$index]);
unset($this->view->row_index);
return $value;
}
public function validate() {
$errors = parent::validate();
if ($this->usesRowPlugin()) {
$plugin = $this->displayHandler->getPlugin('row');
if (empty($plugin)) {
$errors[] = $this->t('Style @style requires a row style but the row plugin is invalid.', array('@style' => $this->definition['title']));
}
else {
$result = $plugin->validate();
if (!empty($result) && is_array($result)) {
$errors = array_merge($errors, $result);
}
}
}
return $errors;
}
public function query() {
parent::query();
if (isset($this->view->rowPlugin)) {
$this->view->rowPlugin->query();
}
}
}
/**
* @}
*/
| ital-lion/Drupal4Lions | core/modules/views/src/Plugin/views/style/StylePluginBase.php | PHP | mit | 23,498 |
package com.pluralsight.springbatch.patientbatchloader.config;
import javax.sql.DataSource;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.liquibase.LiquibaseProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.jmx.export.MBeanExporter;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.JpaVendorAdapter;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import liquibase.integration.spring.SpringLiquibase;
import java.util.Properties;
/**
 * Database configuration for the Spring Batch application. For the purposes of
 * this course, I'm simply leveraging an H2 database; however, it's recommended
 * that you use a real production database server for all non-development
 * implementations. Includes support for JPA auditing.
*
*/
@Configuration
@EnableJpaRepositories(
value = "com.pluralsight.springbatch.patientbatchloader",
entityManagerFactoryRef = "batchEntityManagerFactory")
@EnableTransactionManagement
public class DatabaseConfiguration {
private final Logger log = LoggerFactory.getLogger(DatabaseConfiguration.class);
private final Environment env;
public DatabaseConfiguration(Environment env) {
this.env = env;
}
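    /*
     * Illustrative only (not part of the original source): the property names
     * read by the beans below map onto application.properties entries along
     * these lines; the concrete values are assumptions for a local H2 setup.
     *
     *   spring.datasource.url=jdbc:h2:mem:patientbatchloader;DB_CLOSE_DELAY=-1
     *   spring.datasource.username=sa
     *   spring.datasource.password=
     *   spring.datasource.min-idle=2
     *   spring.datasource.max-active=100
     *   spring.jpa.hibernate.ddl-auto=none
     */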
@Bean(name = "batchDataSource")
public DataSource batchDataSource() {
HikariConfig config = new HikariConfig();
config.setJdbcUrl(env.getRequiredProperty("spring.datasource.url"));
config.setUsername(env.getProperty("spring.datasource.username"));
config.setPassword(env.getProperty("spring.datasource.password"));
config.setMinimumIdle(env.getProperty("spring.datasource.min-idle",
Integer.class, 2));
config.setMaximumPoolSize(env.getProperty("spring.datasource.max-active",
Integer.class, 100));
config.setTransactionIsolation("TRANSACTION_READ_COMMITTED");
config.setRegisterMbeans(true);
return new HikariDataSource(config);
}
@Bean(name = "batchJpaVendorAdapter")
public JpaVendorAdapter batchJpaVendorAdapter() {
return new HibernateJpaVendorAdapter();
}
@Bean(name = "batchEntityManagerFactory")
public LocalContainerEntityManagerFactoryBean batchEntityManagerFactory() {
LocalContainerEntityManagerFactoryBean emfBean =
new LocalContainerEntityManagerFactoryBean();
emfBean.setDataSource(batchDataSource());
emfBean.setPackagesToScan("com.pluralsight.springbatch.patientbatchloader");
emfBean.setBeanName("batchEntityManagerFactory");
emfBean.setJpaVendorAdapter(batchJpaVendorAdapter());
Properties jpaProps = new Properties();
jpaProps.put("hibernate.physical_naming_strategy",
env.getProperty("spring.jpa.hibernate.naming.physical-strategy"));
jpaProps.put("hibernate.hbm2ddl.auto", env.getProperty(
"spring.jpa.hibernate.ddl-auto", "none"));
jpaProps.put("hibernate.jdbc.fetch_size", env.getProperty(
"spring.jpa.properties.hibernate.jdbc.fetch_size",
"200"));
Integer batchSize = env.getProperty(
"spring.jpa.properties.hibernate.jdbc.batch_size",
Integer.class, 100);
if (batchSize > 0) {
jpaProps.put("hibernate.jdbc.batch_size", batchSize);
jpaProps.put("hibernate.order_inserts", "true");
jpaProps.put("hibernate.order_updates", "true");
}
jpaProps.put("hibernate.show_sql", env.getProperty(
"spring.jpa.properties.hibernate.show_sql", "false"));
jpaProps.put("hibernate.format_sql",env.getProperty(
"spring.jpa.properties.hibernate.format_sql", "false"));
emfBean.setJpaProperties(jpaProps);
return emfBean;
}
@Bean(name = "batchTransactionManager")
public PlatformTransactionManager transactionManager() {
return new JpaTransactionManager(batchEntityManagerFactory().getObject());
}
@Bean
public MBeanExporter exporter() {
final MBeanExporter exporter = new MBeanExporter();
exporter.setExcludedBeans("batchDataSource");
return exporter;
}
@Bean
public SpringLiquibase liquibase(LiquibaseProperties liquibaseProperties) {
SpringLiquibase liquibase = new SpringLiquibase();
liquibase.setDataSource(batchDataSource());
liquibase.setChangeLog("classpath:config/liquibase/master.xml");
liquibase.setContexts(liquibaseProperties.getContexts());
liquibase.setDefaultSchema(liquibaseProperties.getDefaultSchema());
liquibase.setDropFirst(liquibaseProperties.isDropFirst());
if (env.acceptsProfiles(Constants.SPRING_PROFILE_NO_LIQUIBASE)) {
liquibase.setShouldRun(false);
} else {
liquibase.setShouldRun(liquibaseProperties.isEnabled());
log.debug("Configuring Liquibase");
}
return liquibase;
}
}
| kidchenko/playground | getting-started-spring-batch/patient-batch-loader-start/src/main/java/com/pluralsight/springbatch/patientbatchloader/config/DatabaseConfiguration.java | Java | mit | 5,420 |
<?php
/*
* This file is part of the Sylius package.
*
* (c) Paweł Jędrzejewski
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sylius\Bundle\InstallerBundle\Command;
use Symfony\Bundle\FrameworkBundle\Command\ContainerAwareCommand;
use Symfony\Component\Console\Helper\ProgressHelper;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Validator\ConstraintViolationList;
abstract class AbstractInstallCommand extends ContainerAwareCommand
{
const APP_CACHE = 'app/cache/';
const WEB_ASSETS_DIRECTORY = 'web/assets/';
const WEB_BUNDLES_DIRECTORY = 'web/bundles/';
const WEB_MEDIA_DIRECTORY = 'web/media/';
const WEB_MEDIA_IMAGE_DIRECTORY = 'web/media/image/';
/**
* @var CommandExecutor
*/
protected $commandExecutor;
/**
* {@inheritdoc}
*/
protected function initialize(InputInterface $input, OutputInterface $output)
{
$application = $this->getApplication();
$application->setCatchExceptions(false);
$this->commandExecutor = new CommandExecutor(
$input,
$output,
$application
);
}
/**
     * @param string $id
*
* @return object
*/
protected function get($id)
{
return $this->getContainer()->get($id);
}
/**
* @return string
*/
protected function getEnvironment()
{
return $this->get('kernel')->getEnvironment();
}
/**
* @return boolean
*/
protected function isDebug()
{
return $this->get('kernel')->isDebug();
}
/**
* @param array $headers
* @param array $rows
* @param OutputInterface $output
*/
protected function renderTable(array $headers, array $rows, OutputInterface $output)
{
$table = $this->getHelper('table');
$table
->setHeaders($headers)
->setRows($rows)
->render($output);
}
/**
* @param OutputInterface $output
* @param int $length
*
* @return ProgressHelper
*/
protected function createProgressBar(OutputInterface $output, $length = 10)
{
$progress = $this->getHelper('progress');
$progress->setBarCharacter('<info>|</info>');
$progress->setEmptyBarCharacter(' ');
$progress->setProgressCharacter('|');
$progress->start($output, $length);
return $progress;
}
/**
* @param array $commands
* @param InputInterface $input
* @param OutputInterface $output
* @param boolean $displayProgress
*/
protected function runCommands(array $commands, InputInterface $input, OutputInterface $output, $displayProgress = true)
{
if ($displayProgress) {
$progress = $this->createProgressBar($output, count($commands));
}
foreach ($commands as $key => $value) {
if (is_string($key)) {
$command = $key;
$parameters = $value;
} else {
$command = $value;
$parameters = array();
}
$this->commandExecutor->runCommand($command, $parameters);
// PDO does not always close the connection after Doctrine commands.
// See https://github.com/symfony/symfony/issues/11750.
$this->get('doctrine')->getManager()->getConnection()->close();
if ($displayProgress) {
$progress->advance();
}
}
if ($displayProgress) {
$progress->finish();
}
}
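    /*
     * Illustrative example (not from the original source) of how a concrete
     * installer command might call runCommands(); the command names and
     * parameters below are assumptions, not part of this class:
     *
     *   $this->runCommands(array(
     *       'doctrine:schema:update' => array('--force' => true),
     *       'assets:install',
     *       'cache:clear',
     *   ), $input, $output);
     */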
/**
* @param OutputInterface $output
* @param string $question
* @param array $constraints
*
* @return mixed
*/
protected function askHidden(OutputInterface $output, $question, array $constraints = array())
{
return $this->proceedAskRequest($output, $question, $constraints, null, true);
}
/**
* @param OutputInterface $output
* @param string $question
* @param array $constraints
* @param mixed $default
*
* @return mixed
*/
protected function ask(OutputInterface $output, $question, array $constraints = array(), $default = null)
{
return $this->proceedAskRequest($output, $question, $constraints, $default);
}
/**
* @param mixed $value
* @param array $constraints
*
* @return boolean
*/
protected function validate($value, array $constraints = array())
{
return $this->get('validator')->validateValue($value, $constraints);
}
/**
* @param OutputInterface $output
* @param ConstraintViolationList $errors
*/
protected function writeErrors(OutputInterface $output, ConstraintViolationList $errors)
{
foreach ($errors as $error) {
$output->writeln(sprintf('<error>%s</error>', $error->getMessage()));
}
}
/**
* @param OutputInterface $output
* @param string $question
* @param array $constraints
* @param string $default
* @param boolean $hidden
*
* @return mixed
*/
private function proceedAskRequest(OutputInterface $output, $question, array $constraints = array(), $default = null, $hidden = false)
{
do {
$value = $this->getAnswerFromDialog($output, $question, $default, $hidden);
// do not validate value if no constraints were given
if (empty($constraints)) {
return $value;
}
$valid = 0 === count($errors = $this->validate($value, $constraints));
if (!$valid) {
foreach ($errors as $error) {
$output->writeln(sprintf('<error>%s</error>', $error->getMessage()));
}
}
} while (!$valid);
return $value;
}
/**
* @param OutputInterface $output
* @param string $question
* @param string|null $default
* @param boolean $hidden
*
* @return string
*/
    private function getAnswerFromDialog(OutputInterface $output, $question, $default = null, $hidden = false)
{
$dialog = $this->getHelperSet()->get('dialog');
if (!$hidden) {
return $dialog->ask($output, sprintf('<question>%s</question> ', $question), $default);
}
return $dialog->askHiddenResponse($output, sprintf('<question>%s</question> ', $question));
}
/**
* @param string $directory
* @param OutputInterface $output
*/
protected function ensureDirectoryExistsAndIsWritable($directory, OutputInterface $output)
{
$checker = $this->get('sylius.installer.checker.command_directory');
$checker->setCommandName($this->getName());
$checker->ensureDirectoryExists($directory, $output);
$checker->ensureDirectoryIsWritable($directory, $output);
}
}
| Lakion/Sylius | src/Sylius/Bundle/InstallerBundle/Command/AbstractInstallCommand.php | PHP | mit | 7,127 |
#!/usr/bin/env python3
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2010, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""Shared settings for the regression suite (bold builder and
test scripts rely on this)
"""
import os
# -------------------------------------------------------------------------------
# Files to ignore (with reason)
#
# pond.0.ply - loads with 40k identical triangles, causing postprocessing
# to have quadratic runtime.
# -------------------------------------------------------------------------------
files_to_ignore = ["pond.0.ply"]
# -------------------------------------------------------------------------------
# List of file extensions to be excluded from the regression suite
# File extensions are case insensitive
# -------------------------------------------------------------------------------
exclude_extensions = [
".assbin", ".assxml", ".txt", ".md",
".jpeg", ".jpg", ".png", ".gif", ".tga", ".bmp",
".skeleton", ".skeleton.xml", ".license", ".mtl", ".material", ".pk3"
]
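# Illustrative helper (not part of the original settings module) showing how a
# test driver such as gen_db.py or run.py might combine the two filters above;
# the function name is an assumption, not an existing API.
def _is_test_candidate(filename):
    """Return True if `filename` should be offered to the regression suite."""
    name = os.path.basename(filename).lower()
    if name in files_to_ignore:
        return False
    return not any(name.endswith(ext) for ext in exclude_extensions)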
# -------------------------------------------------------------------------------
# Post processing configurations to be included in the test. The
# strings are parameters for assimp_cmd, see assimp_cmd's doxydoc
# for more details.
# The defaults are (validate-data-structure is always enabled, for
# self-explanatory reasons :-):
#
# '-cfull'    : apply all post processing except 'og' and 'ptv' (optimize-scenegraph)
# '-og -om'   : run optimize-scenegraph in combination with optimize-meshes.
# '-vds -jiv' : join-identical-vertices alone. This is a hotspot where
#               floating-point inaccuracies can cause severe damage.
# '-ptv'      : transform all meshes to world-space
# As you can see, not all possible combinations of pp steps are covered -
# but at least each step is executed at least once on each model.
# -------------------------------------------------------------------------------
pp_configs_to_test = [
"-cfull",
"-og -om -vds",
"-vds -jiv",
"-ptv -gsn -cts -db",
# this is especially important: if no failures are present with this
# preset, the regression is most likely caused by the post
# processing pipeline.
""
]
# -------------------------------------------------------------------------------
# Name of the regression database file to be used
# gen_db.py writes to this directory, run.py checks against this directory.
# If a zip file with the same name exists, its contents are favoured over a
# normal directory, so in order to test against unzipped files the ZIP needs
# to be deleted.
# -------------------------------------------------------------------------------
database_name = "db"
# -------------------------------------------------------------------------------
# List of directories to be processed. Paths are processed recursively.
# -------------------------------------------------------------------------------
model_directories = [
os.path.join("..","models"),
os.path.join("..","models-nonbsd")
]
# -------------------------------------------------------------------------------
# Remove the original database files after the ZIP has been built?
# -------------------------------------------------------------------------------
remove_old = True
# -------------------------------------------------------------------------------
# Bytes to skip at the beginning of a dump. This skips the file header, which
# is currently the same 500 bytes header for both assbin, assxml and minidumps.
# -------------------------------------------------------------------------------
dump_header_skip = 500
# -------------------------------------------------------------------------------
# Directory to write all results and logs to. The dumps pertaining to failed
# tests are written to a subfolder of this directory ('tmp').
# -------------------------------------------------------------------------------
results = os.path.join("..","results")
# Create results directory if it does not exist
if not os.path.exists(results):
os.makedirs(results)
# vim: ai ts=4 sts=4 et sw=4
| MadManRises/Madgine | shared/assimp/test/regression/settings.py | Python | mit | 5,929 |
define(['./loop1'], function (loop1) {
return loop1;
}); | vtex/curl | test/support/loop/loop-start.js | JavaScript | mit | 57 |
/*!
* AngularJS Material Design
* https://github.com/angular/material
* @license MIT
* v1.1.5
*/
goog.provide('ngmaterial.components.gridList');
goog.require('ngmaterial.core');
/**
* @ngdoc module
* @name material.components.gridList
*/
GridListController['$inject'] = ["$mdUtil"];
GridLayoutFactory['$inject'] = ["$mdUtil"];
GridListDirective['$inject'] = ["$interpolate", "$mdConstant", "$mdGridLayout", "$mdMedia"];
GridTileDirective['$inject'] = ["$mdMedia"];
angular.module('material.components.gridList', ['material.core'])
.directive('mdGridList', GridListDirective)
.directive('mdGridTile', GridTileDirective)
.directive('mdGridTileFooter', GridTileCaptionDirective)
.directive('mdGridTileHeader', GridTileCaptionDirective)
.factory('$mdGridLayout', GridLayoutFactory);
/**
* @ngdoc directive
* @name mdGridList
* @module material.components.gridList
* @restrict E
* @description
* Grid lists are an alternative to standard list views. Grid lists are distinct
* from grids used for layouts and other visual presentations.
*
 * A grid list is best suited to presenting a homogeneous data type, typically
* images, and is optimized for visual comprehension and differentiating between
* like data types.
*
* A grid list is a continuous element consisting of tessellated, regular
* subdivisions called cells that contain tiles (`md-grid-tile`).
*
* <img src="//material-design.storage.googleapis.com/publish/v_2/material_ext_publish/0Bx4BSt6jniD7OVlEaXZ5YmU1Xzg/components_grids_usage2.png"
* style="width: 300px; height: auto; margin-right: 16px;" alt="Concept of grid explained visually">
* <img src="//material-design.storage.googleapis.com/publish/v_2/material_ext_publish/0Bx4BSt6jniD7VGhsOE5idWlJWXM/components_grids_usage3.png"
* style="width: 300px; height: auto;" alt="Grid concepts legend">
*
* Cells are arrayed vertically and horizontally within the grid.
*
* Tiles hold content and can span one or more cells vertically or horizontally.
*
* ### Responsive Attributes
*
* The `md-grid-list` directive supports "responsive" attributes, which allow
* different `md-cols`, `md-gutter` and `md-row-height` values depending on the
* currently matching media query.
*
* In order to set a responsive attribute, first define the fallback value with
* the standard attribute name, then add additional attributes with the
* following convention: `{base-attribute-name}-{media-query-name}="{value}"`
* (ie. `md-cols-lg="8"`)
*
* @param {number} md-cols Number of columns in the grid.
* @param {string} md-row-height One of
* <ul>
* <li>CSS length - Fixed height rows (eg. `8px` or `1rem`)</li>
* <li>`{width}:{height}` - Ratio of width to height (eg.
* `md-row-height="16:9"`)</li>
* <li>`"fit"` - Height will be determined by subdividing the available
* height by the number of rows</li>
* </ul>
* @param {string=} md-gutter The amount of space between tiles in CSS units
* (default 1px)
* @param {expression=} md-on-layout Expression to evaluate after layout. Event
* object is available as `$event`, and contains performance information.
*
* @usage
* Basic:
* <hljs lang="html">
* <md-grid-list md-cols="5" md-gutter="1em" md-row-height="4:3">
* <md-grid-tile></md-grid-tile>
* </md-grid-list>
* </hljs>
*
* Fixed-height rows:
* <hljs lang="html">
* <md-grid-list md-cols="4" md-row-height="200px" ...>
* <md-grid-tile></md-grid-tile>
* </md-grid-list>
* </hljs>
*
* Fit rows:
* <hljs lang="html">
* <md-grid-list md-cols="4" md-row-height="fit" style="height: 400px;" ...>
* <md-grid-tile></md-grid-tile>
* </md-grid-list>
* </hljs>
*
* Using responsive attributes:
* <hljs lang="html">
* <md-grid-list
* md-cols-sm="2"
* md-cols-md="4"
* md-cols-lg="8"
* md-cols-gt-lg="12"
* ...>
* <md-grid-tile></md-grid-tile>
* </md-grid-list>
* </hljs>
*/
function GridListDirective($interpolate, $mdConstant, $mdGridLayout, $mdMedia) {
return {
restrict: 'E',
controller: GridListController,
scope: {
mdOnLayout: '&'
},
link: postLink
};
function postLink(scope, element, attrs, ctrl) {
element.addClass('_md'); // private md component indicator for styling
// Apply semantics
element.attr('role', 'list');
// Provide the controller with a way to trigger layouts.
ctrl.layoutDelegate = layoutDelegate;
var invalidateLayout = angular.bind(ctrl, ctrl.invalidateLayout),
unwatchAttrs = watchMedia();
scope.$on('$destroy', unwatchMedia);
/**
* Watches for changes in media, invalidating layout as necessary.
*/
function watchMedia() {
for (var mediaName in $mdConstant.MEDIA) {
$mdMedia(mediaName); // initialize
$mdMedia.getQuery($mdConstant.MEDIA[mediaName])
.addListener(invalidateLayout);
}
return $mdMedia.watchResponsiveAttributes(
['md-cols', 'md-row-height', 'md-gutter'], attrs, layoutIfMediaMatch);
}
function unwatchMedia() {
ctrl.layoutDelegate = angular.noop;
unwatchAttrs();
for (var mediaName in $mdConstant.MEDIA) {
$mdMedia.getQuery($mdConstant.MEDIA[mediaName])
.removeListener(invalidateLayout);
}
}
/**
* Performs grid layout if the provided mediaName matches the currently
* active media type.
*/
function layoutIfMediaMatch(mediaName) {
if (mediaName == null) {
// TODO(shyndman): It would be nice to only layout if we have
// instances of attributes using this media type
ctrl.invalidateLayout();
} else if ($mdMedia(mediaName)) {
ctrl.invalidateLayout();
}
}
var lastLayoutProps;
/**
* Invokes the layout engine, and uses its results to lay out our
* tile elements.
*
* @param {boolean} tilesInvalidated Whether tiles have been
* added/removed/moved since the last layout. This is to avoid situations
* where tiles are replaced with properties identical to their removed
* counterparts.
*/
function layoutDelegate(tilesInvalidated) {
var tiles = getTileElements();
var props = {
tileSpans: getTileSpans(tiles),
colCount: getColumnCount(),
rowMode: getRowMode(),
rowHeight: getRowHeight(),
gutter: getGutter()
};
if (!tilesInvalidated && angular.equals(props, lastLayoutProps)) {
return;
}
var performance =
$mdGridLayout(props.colCount, props.tileSpans, tiles)
.map(function(tilePositions, rowCount) {
return {
grid: {
element: element,
style: getGridStyle(props.colCount, rowCount,
props.gutter, props.rowMode, props.rowHeight)
},
tiles: tilePositions.map(function(ps, i) {
return {
element: angular.element(tiles[i]),
style: getTileStyle(ps.position, ps.spans,
props.colCount, rowCount,
props.gutter, props.rowMode, props.rowHeight)
}
})
}
})
.reflow()
.performance();
// Report layout
scope.mdOnLayout({
$event: {
performance: performance
}
});
lastLayoutProps = props;
}
// Use $interpolate to do some simple string interpolation as a convenience.
var startSymbol = $interpolate.startSymbol();
var endSymbol = $interpolate.endSymbol();
// Returns an expression wrapped in the interpolator's start and end symbols.
function expr(exprStr) {
return startSymbol + exprStr + endSymbol;
}
// The amount of space a single 1x1 tile would take up (either width or height), used as
// a basis for other calculations. This consists of taking the base size percent (as would be
// if evenly dividing the size between cells), and then subtracting the size of one gutter.
    // However, since there are no gutters on the edges, each tile only uses a fraction
// (gutterShare = numGutters / numCells) of the gutter size. (Imagine having one gutter per
// tile, and then breaking up the extra gutter on the edge evenly among the cells).
var UNIT = $interpolate(expr('share') + '% - (' + expr('gutter') + ' * ' + expr('gutterShare') + ')');
// The horizontal or vertical position of a tile, e.g., the 'top' or 'left' property value.
// The position comes the size of a 1x1 tile plus gutter for each previous tile in the
// row/column (offset).
var POSITION = $interpolate('calc((' + expr('unit') + ' + ' + expr('gutter') + ') * ' + expr('offset') + ')');
// The actual size of a tile, e.g., width or height, taking rowSpan or colSpan into account.
// This is computed by multiplying the base unit by the rowSpan/colSpan, and then adding back
// in the space that the gutter would normally have used (which was already accounted for in
// the base unit calculation).
var DIMENSION = $interpolate('calc((' + expr('unit') + ') * ' + expr('span') + ' + (' + expr('span') + ' - 1) * ' + expr('gutter') + ')');
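    // Worked example (illustrative, not from the original source): for a
    // 4-column grid with an '8px' gutter, a 1x1 tile in the third column
    // (offset 2) yields hShare = 25 and hGutterShare = 0.75, so:
    //   UNIT      -> "25% - (8px * 0.75)"
    //   POSITION  -> "calc((25% - (8px * 0.75) + 8px) * 2)"            (left)
    //   DIMENSION -> "calc((25% - (8px * 0.75)) * 1 + (1 - 1) * 8px)"  (width)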
/**
* Gets the styles applied to a tile element described by the given parameters.
* @param {{row: number, col: number}} position The row and column indices of the tile.
* @param {{row: number, col: number}} spans The rowSpan and colSpan of the tile.
* @param {number} colCount The number of columns.
* @param {number} rowCount The number of rows.
* @param {string} gutter The amount of space between tiles. This will be something like
* '5px' or '2em'.
* @param {string} rowMode The row height mode. Can be one of:
* 'fixed': all rows have a fixed size, given by rowHeight,
* 'ratio': row height defined as a ratio to width, or
   *     'fit': fit to the grid-list element height, dividing evenly among rows.
* @param {string|number} rowHeight The height of a row. This is only used for 'fixed' mode and
* for 'ratio' mode. For 'ratio' mode, this is the *ratio* of width-to-height (e.g., 0.75).
* @returns {Object} Map of CSS properties to be applied to the style element. Will define
* values for top, left, width, height, marginTop, and paddingTop.
*/
function getTileStyle(position, spans, colCount, rowCount, gutter, rowMode, rowHeight) {
// TODO(shyndman): There are style caching opportunities here.
// Percent of the available horizontal space that one column takes up.
var hShare = (1 / colCount) * 100;
// Fraction of the gutter size that each column takes up.
var hGutterShare = (colCount - 1) / colCount;
// Base horizontal size of a column.
var hUnit = UNIT({share: hShare, gutterShare: hGutterShare, gutter: gutter});
      // The width and horizontal position of each tile are always calculated the same way, but the
      // height and vertical position depend on the rowMode.
var ltr = document.dir != 'rtl' && document.body.dir != 'rtl';
var style = ltr ? {
left: POSITION({ unit: hUnit, offset: position.col, gutter: gutter }),
width: DIMENSION({ unit: hUnit, span: spans.col, gutter: gutter }),
// resets
paddingTop: '',
marginTop: '',
top: '',
height: ''
} : {
right: POSITION({ unit: hUnit, offset: position.col, gutter: gutter }),
width: DIMENSION({ unit: hUnit, span: spans.col, gutter: gutter }),
// resets
paddingTop: '',
marginTop: '',
top: '',
height: ''
};
switch (rowMode) {
case 'fixed':
// In fixed mode, simply use the given rowHeight.
style.top = POSITION({ unit: rowHeight, offset: position.row, gutter: gutter });
style.height = DIMENSION({ unit: rowHeight, span: spans.row, gutter: gutter });
break;
case 'ratio':
// Percent of the available vertical space that one row takes up. Here, rowHeight holds
// the ratio value. For example, if the width:height ratio is 4:3, rowHeight = 1.333.
var vShare = hShare / rowHeight;
          // Base vertical size of a row.
var vUnit = UNIT({ share: vShare, gutterShare: hGutterShare, gutter: gutter });
          // paddingTop and marginTop are used to maintain the given aspect ratio, as
// a percentage-based value for these properties is applied to the *width* of the
// containing block. See http://www.w3.org/TR/CSS2/box.html#margin-properties
style.paddingTop = DIMENSION({ unit: vUnit, span: spans.row, gutter: gutter});
style.marginTop = POSITION({ unit: vUnit, offset: position.row, gutter: gutter });
break;
case 'fit':
// Fraction of the gutter size that each column takes up.
var vGutterShare = (rowCount - 1) / rowCount;
// Percent of the available vertical space that one row takes up.
var vShare = (1 / rowCount) * 100;
// Base vertical size of a row.
var vUnit = UNIT({share: vShare, gutterShare: vGutterShare, gutter: gutter});
style.top = POSITION({unit: vUnit, offset: position.row, gutter: gutter});
style.height = DIMENSION({unit: vUnit, span: spans.row, gutter: gutter});
break;
}
return style;
}
function getGridStyle(colCount, rowCount, gutter, rowMode, rowHeight) {
var style = {};
switch(rowMode) {
case 'fixed':
style.height = DIMENSION({ unit: rowHeight, span: rowCount, gutter: gutter });
style.paddingBottom = '';
break;
case 'ratio':
// rowHeight is width / height
var hGutterShare = colCount === 1 ? 0 : (colCount - 1) / colCount,
hShare = (1 / colCount) * 100,
vShare = hShare * (1 / rowHeight),
vUnit = UNIT({ share: vShare, gutterShare: hGutterShare, gutter: gutter });
style.height = '';
style.paddingBottom = DIMENSION({ unit: vUnit, span: rowCount, gutter: gutter});
break;
case 'fit':
// noop, as the height is user set
break;
}
return style;
}
function getTileElements() {
return [].filter.call(element.children(), function(ele) {
return ele.tagName == 'MD-GRID-TILE' && !ele.$$mdDestroyed;
});
}
/**
* Gets an array of objects containing the rowspan and colspan for each tile.
* @returns {Array<{row: number, col: number}>}
*/
function getTileSpans(tileElements) {
return [].map.call(tileElements, function(ele) {
var ctrl = angular.element(ele).controller('mdGridTile');
return {
row: parseInt(
$mdMedia.getResponsiveAttribute(ctrl.$attrs, 'md-rowspan'), 10) || 1,
col: parseInt(
$mdMedia.getResponsiveAttribute(ctrl.$attrs, 'md-colspan'), 10) || 1
};
});
}
function getColumnCount() {
var colCount = parseInt($mdMedia.getResponsiveAttribute(attrs, 'md-cols'), 10);
if (isNaN(colCount)) {
throw 'md-grid-list: md-cols attribute was not found, or contained a non-numeric value';
}
return colCount;
}
function getGutter() {
return applyDefaultUnit($mdMedia.getResponsiveAttribute(attrs, 'md-gutter') || 1);
}
function getRowHeight() {
var rowHeight = $mdMedia.getResponsiveAttribute(attrs, 'md-row-height');
if (!rowHeight) {
throw 'md-grid-list: md-row-height attribute was not found';
}
switch (getRowMode()) {
case 'fixed':
return applyDefaultUnit(rowHeight);
case 'ratio':
var whRatio = rowHeight.split(':');
return parseFloat(whRatio[0]) / parseFloat(whRatio[1]);
case 'fit':
return 0; // N/A
}
}
function getRowMode() {
var rowHeight = $mdMedia.getResponsiveAttribute(attrs, 'md-row-height');
if (!rowHeight) {
throw 'md-grid-list: md-row-height attribute was not found';
}
if (rowHeight == 'fit') {
return 'fit';
} else if (rowHeight.indexOf(':') !== -1) {
return 'ratio';
} else {
return 'fixed';
}
}
function applyDefaultUnit(val) {
return /\D$/.test(val) ? val : val + 'px';
}
}
}
/* ngInject */
function GridListController($mdUtil) {
this.layoutInvalidated = false;
this.tilesInvalidated = false;
this.$timeout_ = $mdUtil.nextTick;
this.layoutDelegate = angular.noop;
}
GridListController.prototype = {
invalidateTiles: function() {
this.tilesInvalidated = true;
this.invalidateLayout();
},
invalidateLayout: function() {
if (this.layoutInvalidated) {
return;
}
this.layoutInvalidated = true;
this.$timeout_(angular.bind(this, this.layout));
},
layout: function() {
try {
this.layoutDelegate(this.tilesInvalidated);
} finally {
this.layoutInvalidated = false;
this.tilesInvalidated = false;
}
}
};
/* ngInject */
function GridLayoutFactory($mdUtil) {
var defaultAnimator = GridTileAnimator;
/**
* Set the reflow animator callback
*/
GridLayout.animateWith = function(customAnimator) {
defaultAnimator = !angular.isFunction(customAnimator) ? GridTileAnimator : customAnimator;
};
return GridLayout;
/**
* Publish layout function
*/
function GridLayout(colCount, tileSpans) {
var self, layoutInfo, gridStyles, layoutTime, mapTime, reflowTime;
layoutTime = $mdUtil.time(function() {
layoutInfo = calculateGridFor(colCount, tileSpans);
});
return self = {
/**
* An array of objects describing each tile's position in the grid.
*/
layoutInfo: function() {
return layoutInfo;
},
/**
* Maps grid positioning to an element and a set of styles using the
* provided updateFn.
*/
map: function(updateFn) {
mapTime = $mdUtil.time(function() {
var info = self.layoutInfo();
gridStyles = updateFn(info.positioning, info.rowCount);
});
return self;
},
/**
* Default animator simply sets the element.css( <styles> ). An alternate
* animator can be provided as an argument. The function has the following
* signature:
*
* function({grid: {element: JQLite, style: Object}, tiles: Array<{element: JQLite, style: Object}>)
*/
reflow: function(animatorFn) {
reflowTime = $mdUtil.time(function() {
var animator = animatorFn || defaultAnimator;
animator(gridStyles.grid, gridStyles.tiles);
});
return self;
},
/**
* Timing for the most recent layout run.
*/
performance: function() {
return {
tileCount: tileSpans.length,
layoutTime: layoutTime,
mapTime: mapTime,
reflowTime: reflowTime,
totalTime: layoutTime + mapTime + reflowTime
};
}
};
}
/**
   * The default grid list animator simply sets the CSS for each element;
   * NOTE: any transition effects must be manually set in the CSS.
* e.g.
*
* md-grid-tile {
* transition: all 700ms ease-out 50ms;
* }
*
*/
function GridTileAnimator(grid, tiles) {
grid.element.css(grid.style);
tiles.forEach(function(t) {
t.element.css(t.style);
    });
}
/**
* Calculates the positions of tiles.
*
* The algorithm works as follows:
* An Array<Number> with length colCount (spaceTracker) keeps track of
   * available tiling positions, where elements of value 0 represent an
   * empty position. Space for a tile is reserved by finding a sequence of
   * 0s at least as long as the tile's colspan. When such a space has been
* found, the occupied tile positions are incremented by the tile's
* rowspan value, as these positions have become unavailable for that
* many rows.
*
* If the end of a row has been reached without finding space for the
* tile, spaceTracker's elements are each decremented by 1 to a minimum
* of 0. Rows are searched in this fashion until space is found.
*/
function calculateGridFor(colCount, tileSpans) {
var curCol = 0,
curRow = 0,
spaceTracker = newSpaceTracker();
return {
positioning: tileSpans.map(function(spans, i) {
return {
spans: spans,
position: reserveSpace(spans, i)
};
}),
rowCount: curRow + Math.max.apply(Math, spaceTracker)
};
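    // Illustrative trace (not from the original source): with colCount = 3 and
    // tileSpans = [{col: 2, row: 1}, {col: 2, row: 2}, {col: 1, row: 1}],
    // reserveSpace() places the tiles at (row 0, col 0), (row 1, col 0) and
    // (row 1, col 2): the 2x2 tile does not fit beside the first one, so
    // nextRow() ages the tracker and the search resumes on the next row.
    // The final rowCount is curRow + max(spaceTracker) = 1 + 2 = 3.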
function reserveSpace(spans, i) {
if (spans.col > colCount) {
throw 'md-grid-list: Tile at position ' + i + ' has a colspan ' +
'(' + spans.col + ') that exceeds the column count ' +
'(' + colCount + ')';
}
var start = 0,
end = 0;
// TODO(shyndman): This loop isn't strictly necessary if you can
// determine the minimum number of rows before a space opens up. To do
// this, recognize that you've iterated across an entire row looking for
// space, and if so fast-forward by the minimum rowSpan count. Repeat
// until the required space opens up.
while (end - start < spans.col) {
if (curCol >= colCount) {
nextRow();
continue;
}
start = spaceTracker.indexOf(0, curCol);
if (start === -1 || (end = findEnd(start + 1)) === -1) {
start = end = 0;
nextRow();
continue;
}
curCol = end + 1;
}
adjustRow(start, spans.col, spans.row);
curCol = start + spans.col;
return {
col: start,
row: curRow
};
}
function nextRow() {
curCol = 0;
curRow++;
adjustRow(0, colCount, -1); // Decrement row spans by one
}
function adjustRow(from, cols, by) {
for (var i = from; i < from + cols; i++) {
spaceTracker[i] = Math.max(spaceTracker[i] + by, 0);
}
}
function findEnd(start) {
var i;
for (i = start; i < spaceTracker.length; i++) {
if (spaceTracker[i] !== 0) {
return i;
}
}
if (i === spaceTracker.length) {
return i;
}
}
function newSpaceTracker() {
var tracker = [];
for (var i = 0; i < colCount; i++) {
tracker.push(0);
}
return tracker;
}
}
}
/**
* @ngdoc directive
* @name mdGridTile
* @module material.components.gridList
* @restrict E
* @description
* Tiles contain the content of an `md-grid-list`. They span one or more grid
* cells vertically or horizontally, and use `md-grid-tile-{footer,header}` to
* display secondary content.
*
* ### Responsive Attributes
*
* The `md-grid-tile` directive supports "responsive" attributes, which allow
* different `md-rowspan` and `md-colspan` values depending on the currently
* matching media query.
*
* In order to set a responsive attribute, first define the fallback value with
* the standard attribute name, then add additional attributes with the
* following convention: `{base-attribute-name}-{media-query-name}="{value}"`
* (ie. `md-colspan-sm="4"`)
*
* @param {number=} md-colspan The number of columns to span (default 1). Cannot
* exceed the number of columns in the grid. Supports interpolation.
* @param {number=} md-rowspan The number of rows to span (default 1). Supports
* interpolation.
*
* @usage
* With header:
* <hljs lang="html">
* <md-grid-tile>
* <md-grid-tile-header>
* <h3>This is a header</h3>
* </md-grid-tile-header>
* </md-grid-tile>
* </hljs>
*
* With footer:
* <hljs lang="html">
* <md-grid-tile>
* <md-grid-tile-footer>
* <h3>This is a footer</h3>
* </md-grid-tile-footer>
* </md-grid-tile>
* </hljs>
*
* Spanning multiple rows/columns:
* <hljs lang="html">
* <md-grid-tile md-colspan="2" md-rowspan="3">
* </md-grid-tile>
* </hljs>
*
* Responsive attributes:
* <hljs lang="html">
* <md-grid-tile md-colspan="1" md-colspan-sm="3" md-colspan-md="5">
* </md-grid-tile>
* </hljs>
*/
function GridTileDirective($mdMedia) {
return {
restrict: 'E',
require: '^mdGridList',
template: '<figure ng-transclude></figure>',
transclude: true,
scope: {},
// Simple controller that exposes attributes to the grid directive
controller: ["$attrs", function($attrs) {
this.$attrs = $attrs;
}],
link: postLink
};
function postLink(scope, element, attrs, gridCtrl) {
// Apply semantics
element.attr('role', 'listitem');
// If our colspan or rowspan changes, trigger a layout
var unwatchAttrs = $mdMedia.watchResponsiveAttributes(['md-colspan', 'md-rowspan'],
attrs, angular.bind(gridCtrl, gridCtrl.invalidateLayout));
// Tile registration/deregistration
gridCtrl.invalidateTiles();
scope.$on('$destroy', function() {
// Mark the tile as destroyed so it is no longer considered in layout,
// even if the DOM element sticks around (like during a leave animation)
element[0].$$mdDestroyed = true;
unwatchAttrs();
gridCtrl.invalidateLayout();
});
if (angular.isDefined(scope.$parent.$index)) {
scope.$watch(function() { return scope.$parent.$index; },
function indexChanged(newIdx, oldIdx) {
if (newIdx === oldIdx) {
return;
}
gridCtrl.invalidateTiles();
});
}
}
}
function GridTileCaptionDirective() {
return {
template: '<figcaption ng-transclude></figcaption>',
transclude: true
};
}
ngmaterial.components.gridList = angular.module("material.components.gridList"); | romulus3799/pyramid | public/libs/angular-material/modules/closure/gridList/gridList.js | JavaScript | mit | 26,091 |
// gc_dynamic_list_test.cc -- Check that --gc-sections honors --dynamic-list.
// Copyright (C) 2014-2016 Free Software Foundation, Inc.
// Written by Cary Coutant <[email protected]>.
// This file is part of gold.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
// MA 02110-1301, USA.
// The goal of this program is to verify that the symbol "keep" is not
// garbage-collected when it is named in a --dynamic-list script.
extern void keep(void);
void
keep(void)
{}
int
main(void)
{ return 0; }
| swigger/gdb-ios | gold/testsuite/gc_dynamic_list_test.c | C | gpl-2.0 | 1,150 |
/* -*- c-basic-offset: 2 -*-
*
* GStreamer
* Copyright (C) 1999-2001 Erik Walthinsen <[email protected]>
* 2006 Dreamlab Technologies Ltd. <[email protected]>
* 2007-2009 Sebastian Dröge <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*
* this windowed sinc filter is taken from the freely downloadable DSP book,
* "The Scientist and Engineer's Guide to Digital Signal Processing",
* chapter 16
* available at http://www.dspguide.com/
*
* For the window functions see
* http://en.wikipedia.org/wiki/Window_function
*/
/**
* SECTION:element-audiowsincband
*
* Attenuates all frequencies outside (bandpass) or inside (bandreject) of a frequency
 * band. The length parameter controls the rolloff; the window parameter
 * controls rolloff and stopband attenuation. The Hamming window provides a faster rolloff but a bit
 * worse stopband attenuation; it is the other way around for the Blackman window.
*
* This element has the advantage over the Chebyshev bandpass and bandreject filter that it has
* a much better rolloff when using a larger kernel size and almost linear phase. The only
* disadvantage is the much slower execution time with larger kernels.
*
* <refsect2>
* <title>Example launch line</title>
* |[
 * gst-launch audiotestsrc freq=1500 ! audioconvert ! audiowsincband mode=band-pass lower-frequency=3000 upper-frequency=10000 length=501 window=blackman ! audioconvert ! alsasink
* gst-launch filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiowsincband mode=band-reject lower-frequency=59 upper-frequency=61 length=10001 window=hamming ! audioconvert ! alsasink
* gst-launch audiotestsrc wave=white-noise ! audioconvert ! audiowsincband mode=band-pass lower-frequency=1000 upper-frequency=2000 length=31 ! audioconvert ! alsasink
* ]|
* </refsect2>
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <math.h>
#include <gst/gst.h>
#include <gst/audio/gstaudiofilter.h>
#include <gst/controller/gstcontroller.h>
#include "audiowsincband.h"
#define GST_CAT_DEFAULT gst_gst_audio_wsincband_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
enum
{
PROP_0,
PROP_LENGTH,
PROP_LOWER_FREQUENCY,
PROP_UPPER_FREQUENCY,
PROP_MODE,
PROP_WINDOW
};
enum
{
MODE_BAND_PASS = 0,
MODE_BAND_REJECT
};
#define GST_TYPE_AUDIO_WSINC_BAND_MODE (gst_gst_audio_wsincband_mode_get_type ())
static GType
gst_gst_audio_wsincband_mode_get_type (void)
{
static GType gtype = 0;
if (gtype == 0) {
static const GEnumValue values[] = {
{MODE_BAND_PASS, "Band pass (default)",
"band-pass"},
{MODE_BAND_REJECT, "Band reject",
"band-reject"},
{0, NULL, NULL}
};
gtype = g_enum_register_static ("GstAudioWSincBandMode", values);
}
return gtype;
}
enum
{
WINDOW_HAMMING = 0,
WINDOW_BLACKMAN,
WINDOW_GAUSSIAN,
WINDOW_COSINE,
WINDOW_HANN
};
#define GST_TYPE_AUDIO_WSINC_BAND_WINDOW (gst_gst_audio_wsincband_window_get_type ())
static GType
gst_gst_audio_wsincband_window_get_type (void)
{
static GType gtype = 0;
if (gtype == 0) {
static const GEnumValue values[] = {
{WINDOW_HAMMING, "Hamming window (default)",
"hamming"},
{WINDOW_BLACKMAN, "Blackman window",
"blackman"},
{WINDOW_GAUSSIAN, "Gaussian window",
"gaussian"},
{WINDOW_COSINE, "Cosine window",
"cosine"},
{WINDOW_HANN, "Hann window",
"hann"},
{0, NULL, NULL}
};
gtype = g_enum_register_static ("GstAudioWSincBandWindow", values);
}
return gtype;
}
#define DEBUG_INIT(bla) \
GST_DEBUG_CATEGORY_INIT (gst_gst_audio_wsincband_debug, "audiowsincband", 0, \
"Band-pass and Band-reject Windowed sinc filter plugin");
GST_BOILERPLATE_FULL (GstAudioWSincBand, gst_audio_wsincband, GstAudioFilter,
GST_TYPE_AUDIO_FX_BASE_FIR_FILTER, DEBUG_INIT);
static void gst_audio_wsincband_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_audio_wsincband_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static void gst_audio_wsincband_finalize (GObject * object);
static gboolean gst_audio_wsincband_setup (GstAudioFilter * base,
GstRingBufferSpec * format);
#define POW2(x) (x)*(x)
/* Element class */
static void
gst_audio_wsincband_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
gst_element_class_set_details_simple (element_class,
"Band pass & band reject filter", "Filter/Effect/Audio",
"Band pass and band reject windowed sinc filter",
"Thomas Vander Stichele <thomas at apestaart dot org>, "
"Steven W. Smith, "
"Dreamlab Technologies Ltd. <[email protected]>, "
"Sebastian Dröge <[email protected]>");
}
static void
gst_audio_wsincband_class_init (GstAudioWSincBandClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
GstAudioFilterClass *filter_class = (GstAudioFilterClass *) klass;
gobject_class->set_property = gst_audio_wsincband_set_property;
gobject_class->get_property = gst_audio_wsincband_get_property;
gobject_class->finalize = gst_audio_wsincband_finalize;
/* FIXME: Don't use the complete possible range but restrict the upper boundary
* so automatically generated UIs can use a slider */
g_object_class_install_property (gobject_class, PROP_LOWER_FREQUENCY,
g_param_spec_float ("lower-frequency", "Lower Frequency",
"Cut-off lower frequency (Hz)", 0.0, 100000.0, 0,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_UPPER_FREQUENCY,
g_param_spec_float ("upper-frequency", "Upper Frequency",
"Cut-off upper frequency (Hz)", 0.0, 100000.0, 0,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_LENGTH,
g_param_spec_int ("length", "Length",
"Filter kernel length, will be rounded to the next odd number", 3,
256000, 101,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_MODE,
g_param_spec_enum ("mode", "Mode",
"Band pass or band reject mode", GST_TYPE_AUDIO_WSINC_BAND_MODE,
MODE_BAND_PASS,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_WINDOW,
g_param_spec_enum ("window", "Window",
"Window function to use", GST_TYPE_AUDIO_WSINC_BAND_WINDOW,
WINDOW_HAMMING,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
filter_class->setup = GST_DEBUG_FUNCPTR (gst_audio_wsincband_setup);
}
static void
gst_audio_wsincband_init (GstAudioWSincBand * self,
GstAudioWSincBandClass * g_class)
{
self->kernel_length = 101;
self->lower_frequency = 0.0;
self->upper_frequency = 0.0;
self->mode = MODE_BAND_PASS;
self->window = WINDOW_HAMMING;
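  /* guards filter kernel rebuilds triggered by property changes */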
self->lock = g_mutex_new ();
}
static void
gst_audio_wsincband_build_kernel (GstAudioWSincBand * self)
{
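  /*
   * The kernel is built from a windowed-sinc low-pass at the lower cut-off
   * plus a spectrally inverted low-pass (i.e. a high-pass) at the upper
   * cut-off, which yields a band-reject filter; a final spectral inversion
   * turns it into a band-pass filter when that mode is selected.
   */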
gint i = 0;
gdouble sum = 0.0;
gint len = 0;
gdouble *kernel_lp, *kernel_hp;
gdouble w;
gdouble *kernel;
len = self->kernel_length;
if (GST_AUDIO_FILTER (self)->format.rate == 0) {
GST_DEBUG ("rate not set yet");
return;
}
if (GST_AUDIO_FILTER (self)->format.channels == 0) {
GST_DEBUG ("channels not set yet");
return;
}
/* Clamp frequencies */
self->lower_frequency =
CLAMP (self->lower_frequency, 0.0,
GST_AUDIO_FILTER (self)->format.rate / 2);
self->upper_frequency =
CLAMP (self->upper_frequency, 0.0,
GST_AUDIO_FILTER (self)->format.rate / 2);
if (self->lower_frequency > self->upper_frequency) {
    gfloat tmp = self->lower_frequency;
self->lower_frequency = self->upper_frequency;
self->upper_frequency = tmp;
}
GST_DEBUG ("gst_audio_wsincband: initializing filter kernel of length %d "
"with lower frequency %.2lf Hz "
", upper frequency %.2lf Hz for mode %s",
len, self->lower_frequency, self->upper_frequency,
(self->mode == MODE_BAND_PASS) ? "band-pass" : "band-reject");
/* fill the lp kernel */
w = 2 * G_PI * (self->lower_frequency / GST_AUDIO_FILTER (self)->format.rate);
kernel_lp = g_new (gdouble, len);
for (i = 0; i < len; ++i) {
if (i == (len - 1) / 2.0)
kernel_lp[i] = w;
else
kernel_lp[i] = sin (w * (i - (len - 1) / 2.0)) / (i - (len - 1) / 2.0);
/* windowing */
switch (self->window) {
case WINDOW_HAMMING:
kernel_lp[i] *= (0.54 - 0.46 * cos (2 * G_PI * i / (len - 1)));
break;
case WINDOW_BLACKMAN:
kernel_lp[i] *= (0.42 - 0.5 * cos (2 * G_PI * i / (len - 1)) +
0.08 * cos (4 * G_PI * i / (len - 1)));
break;
case WINDOW_GAUSSIAN:
kernel_lp[i] *= exp (-0.5 * POW2 (3.0 / len * (2 * i - (len - 1))));
break;
case WINDOW_COSINE:
kernel_lp[i] *= cos (G_PI * i / (len - 1) - G_PI / 2);
break;
case WINDOW_HANN:
kernel_lp[i] *= 0.5 * (1 - cos (2 * G_PI * i / (len - 1)));
break;
}
}
/* normalize for unity gain at DC */
sum = 0.0;
for (i = 0; i < len; ++i)
sum += kernel_lp[i];
for (i = 0; i < len; ++i)
kernel_lp[i] /= sum;
/* fill the hp kernel */
w = 2 * G_PI * (self->upper_frequency / GST_AUDIO_FILTER (self)->format.rate);
kernel_hp = g_new (gdouble, len);
for (i = 0; i < len; ++i) {
if (i == (len - 1) / 2.0)
kernel_hp[i] = w;
else
kernel_hp[i] = sin (w * (i - (len - 1) / 2.0)) / (i - (len - 1) / 2.0);
/* Windowing */
switch (self->window) {
case WINDOW_HAMMING:
kernel_hp[i] *= (0.54 - 0.46 * cos (2 * G_PI * i / (len - 1)));
break;
case WINDOW_BLACKMAN:
kernel_hp[i] *= (0.42 - 0.5 * cos (2 * G_PI * i / (len - 1)) +
0.08 * cos (4 * G_PI * i / (len - 1)));
break;
case WINDOW_GAUSSIAN:
kernel_hp[i] *= exp (-0.5 * POW2 (3.0 / len * (2 * i - (len - 1))));
break;
case WINDOW_COSINE:
kernel_hp[i] *= cos (G_PI * i / (len - 1) - G_PI / 2);
break;
case WINDOW_HANN:
kernel_hp[i] *= 0.5 * (1 - cos (2 * G_PI * i / (len - 1)));
break;
}
}
/* normalize for unity gain at DC */
sum = 0.0;
for (i = 0; i < len; ++i)
sum += kernel_hp[i];
for (i = 0; i < len; ++i)
kernel_hp[i] /= sum;
/* do spectral inversion to go from lowpass to highpass */
for (i = 0; i < len; ++i)
kernel_hp[i] = -kernel_hp[i];
if (len % 2 == 1) {
kernel_hp[(len - 1) / 2] += 1.0;
} else {
kernel_hp[len / 2 - 1] += 0.5;
kernel_hp[len / 2] += 0.5;
}
/* combine the two kernels */
kernel = g_new (gdouble, len);
for (i = 0; i < len; ++i)
kernel[i] = kernel_lp[i] + kernel_hp[i];
/* free the helper kernels */
g_free (kernel_lp);
g_free (kernel_hp);
/* do spectral inversion to go from bandreject to bandpass
* if specified */
if (self->mode == MODE_BAND_PASS) {
for (i = 0; i < len; ++i)
kernel[i] = -kernel[i];
kernel[len / 2] += 1;
}
gst_audio_fx_base_fir_filter_set_kernel (GST_AUDIO_FX_BASE_FIR_FILTER (self),
kernel, self->kernel_length, (len - 1) / 2);
}
/* GstAudioFilter vmethod implementations */
/* get notified of caps and plug in the correct process function */
static gboolean
gst_audio_wsincband_setup (GstAudioFilter * base, GstRingBufferSpec * format)
{
GstAudioWSincBand *self = GST_AUDIO_WSINC_BAND (base);
gst_audio_wsincband_build_kernel (self);
return GST_AUDIO_FILTER_CLASS (parent_class)->setup (base, format);
}
static void
gst_audio_wsincband_finalize (GObject * object)
{
GstAudioWSincBand *self = GST_AUDIO_WSINC_BAND (object);
g_mutex_free (self->lock);
self->lock = NULL;
G_OBJECT_CLASS (parent_class)->finalize (object);
}
static void
gst_audio_wsincband_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstAudioWSincBand *self = GST_AUDIO_WSINC_BAND (object);
g_return_if_fail (GST_IS_AUDIO_WSINC_BAND (self));
switch (prop_id) {
case PROP_LENGTH:{
gint val;
g_mutex_lock (self->lock);
val = g_value_get_int (value);
if (val % 2 == 0)
val++;
if (val != self->kernel_length) {
gst_audio_fx_base_fir_filter_push_residue (GST_AUDIO_FX_BASE_FIR_FILTER
(self));
self->kernel_length = val;
gst_audio_wsincband_build_kernel (self);
}
g_mutex_unlock (self->lock);
break;
}
case PROP_LOWER_FREQUENCY:
g_mutex_lock (self->lock);
self->lower_frequency = g_value_get_float (value);
gst_audio_wsincband_build_kernel (self);
g_mutex_unlock (self->lock);
break;
case PROP_UPPER_FREQUENCY:
g_mutex_lock (self->lock);
self->upper_frequency = g_value_get_float (value);
gst_audio_wsincband_build_kernel (self);
g_mutex_unlock (self->lock);
break;
case PROP_MODE:
g_mutex_lock (self->lock);
self->mode = g_value_get_enum (value);
gst_audio_wsincband_build_kernel (self);
g_mutex_unlock (self->lock);
break;
case PROP_WINDOW:
g_mutex_lock (self->lock);
self->window = g_value_get_enum (value);
gst_audio_wsincband_build_kernel (self);
g_mutex_unlock (self->lock);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audio_wsincband_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstAudioWSincBand *self = GST_AUDIO_WSINC_BAND (object);
switch (prop_id) {
case PROP_LENGTH:
g_value_set_int (value, self->kernel_length);
break;
case PROP_LOWER_FREQUENCY:
g_value_set_float (value, self->lower_frequency);
break;
case PROP_UPPER_FREQUENCY:
g_value_set_float (value, self->upper_frequency);
break;
case PROP_MODE:
g_value_set_enum (value, self->mode);
break;
case PROP_WINDOW:
g_value_set_enum (value, self->window);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
| loveyoupeng/rt | modules/media/src/main/native/gstreamer/gstreamer-lite/gst-plugins-good/gst/audiofx/audiowsincband.c | C | gpl-2.0 | 15,199 |
/*
Copyright_License {
XCSoar Glide Computer - http://www.xcsoar.org/
Copyright (C) 2000-2015 The XCSoar Project
A detailed list of copyright holders can be found in the file "AUTHORS".
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
}
*/
#ifndef XCSOAR_CU_RENDERER_HPP
#define XCSOAR_CU_RENDERER_HPP
#include <tchar.h>
struct PixelRect;
class Canvas;
struct ChartLook;
class CuSonde;
void
RenderTemperatureChart(Canvas &canvas, const PixelRect rc,
const ChartLook &chart_look,
const CuSonde &cu_sonde);
void
TemperatureChartCaption(TCHAR *buffer, const CuSonde &cu_sonde);
#endif
| ppara/XCSoar | src/Renderer/CuRenderer.hpp | C++ | gpl-2.0 | 1,293 |
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp) do { } while (0)
# define XB_CLEAR_OWNER(bp) do { } while (0)
# define XB_GET_OWNER(bp) do { } while (0)
#endif
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
#define xb_to_km(flags) \
(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
#define xfs_buf_allocate(flags) \
kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
kmem_zone_free(xfs_buf_zone, (bp));
static inline int
xfs_buf_is_vmapped(
struct xfs_buf *bp)
{
/*
* Return true if the buffer is vmapped.
*
* The XBF_MAPPED flag is set if the buffer should be mapped, but the
* code is clever enough to know it doesn't have to map a single page,
* so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
*/
return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}
static inline int
xfs_buf_vmap_len(
struct xfs_buf *bp)
{
return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}
/*
* Page Region interfaces.
*
* For pages in filesystems where the blocksize is smaller than the
* pagesize, we use the page->private field (long) to hold a bitmap
* of uptodate regions within the page.
*
* Each such region is "bytes per page / bits per long" bytes long.
*
* NBPPR == number-of-bytes-per-page-region
* BTOPR == bytes-to-page-region (rounded up)
* BTOPRT == bytes-to-page-region-truncated (rounded down)
*/
#if (BITS_PER_LONG == 32)
#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
STATIC unsigned long
page_region_mask(
size_t offset,
size_t length)
{
unsigned long mask;
int first, final;
first = BTOPR(offset);
final = BTOPRT(offset + length - 1);
first = min(first, final);
mask = ~0UL;
mask <<= BITS_PER_LONG - (final - first);
mask >>= BITS_PER_LONG - (final);
ASSERT(offset + length <= PAGE_CACHE_SIZE);
ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
return mask;
}
STATIC void
set_page_region(
struct page *page,
size_t offset,
size_t length)
{
set_page_private(page,
page_private(page) | page_region_mask(offset, length));
if (page_private(page) == ~0UL)
SetPageUptodate(page);
}
STATIC int
test_page_region(
struct page *page,
size_t offset,
size_t length)
{
unsigned long mask = page_region_mask(offset, length);
return (mask && (page_private(page) & mask) == mask);
}
/*
* xfs_buf_lru_add - add a buffer to the LRU.
*
* The LRU takes a new reference to the buffer so that it will only be freed
* once the shrinker takes the buffer off the LRU.
*/
STATIC void
xfs_buf_lru_add(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
spin_lock(&btp->bt_lru_lock);
if (list_empty(&bp->b_lru)) {
atomic_inc(&bp->b_hold);
list_add_tail(&bp->b_lru, &btp->bt_lru);
btp->bt_lru_nr++;
}
spin_unlock(&btp->bt_lru_lock);
}
/*
* xfs_buf_lru_del - remove a buffer from the LRU
*
* The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
* bt_lru_lock.
*/
STATIC void
xfs_buf_lru_del(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
if (list_empty(&bp->b_lru))
return;
spin_lock(&btp->bt_lru_lock);
if (!list_empty(&bp->b_lru)) {
list_del_init(&bp->b_lru);
btp->bt_lru_nr--;
}
spin_unlock(&btp->bt_lru_lock);
}
/*
* When we mark a buffer stale, we remove the buffer from the LRU and clear the
* b_lru_ref count so that the buffer is freed immediately when the buffer
* reference count falls to zero. If the buffer is already on the LRU, we need
* to remove the reference that LRU holds on the buffer.
*
* This prevents build-up of stale buffers on the LRU.
*/
void
xfs_buf_stale(
struct xfs_buf *bp)
{
bp->b_flags |= XBF_STALE;
atomic_set(&(bp)->b_lru_ref, 0);
if (!list_empty(&bp->b_lru)) {
struct xfs_buftarg *btp = bp->b_target;
spin_lock(&btp->bt_lru_lock);
if (!list_empty(&bp->b_lru)) {
list_del_init(&bp->b_lru);
btp->bt_lru_nr--;
atomic_dec(&bp->b_hold);
}
spin_unlock(&btp->bt_lru_lock);
}
ASSERT(atomic_read(&bp->b_hold) >= 1);
}
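/*
 * Initialise a freshly allocated buffer for the given target and disk range.
 * No pages are attached here; the lookup/allocation paths take care of that.
 */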
STATIC void
_xfs_buf_initialize(
xfs_buf_t *bp,
xfs_buftarg_t *target,
xfs_off_t range_base,
size_t range_length,
xfs_buf_flags_t flags)
{
/*
* We don't want certain flags to appear in b_flags.
*/
flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
memset(bp, 0, sizeof(xfs_buf_t));
atomic_set(&bp->b_hold, 1);
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
RB_CLEAR_NODE(&bp->b_rbnode);
sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
bp->b_file_offset = range_base;
/*
* Set buffer_length and count_desired to the same value initially.
* I/O routines should use count_desired, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
bp->b_buffer_length = bp->b_count_desired = range_length;
bp->b_flags = flags;
bp->b_bn = XFS_BUF_DADDR_NULL;
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
XFS_STATS_INC(xb_create);
trace_xfs_buf_init(bp, _RET_IP_);
}
/*
* Allocate a page array capable of holding a specified number
* of pages, and point the page buf at it.
*/
STATIC int
_xfs_buf_get_pages(
xfs_buf_t *bp,
int page_count,
xfs_buf_flags_t flags)
{
/* Make sure that we have a page list */
if (bp->b_pages == NULL) {
bp->b_offset = xfs_buf_poff(bp->b_file_offset);
bp->b_page_count = page_count;
if (page_count <= XB_PAGES) {
bp->b_pages = bp->b_page_array;
} else {
bp->b_pages = kmem_alloc(sizeof(struct page *) *
page_count, xb_to_km(flags));
if (bp->b_pages == NULL)
return -ENOMEM;
}
memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
}
return 0;
}
/*
* Frees b_pages if it was allocated.
*/
STATIC void
_xfs_buf_free_pages(
xfs_buf_t *bp)
{
if (bp->b_pages != bp->b_page_array) {
kmem_free(bp->b_pages);
bp->b_pages = NULL;
}
}
/*
* Releases the specified buffer.
*
* The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
* hashed and refcounted buffers
*/
void
xfs_buf_free(
xfs_buf_t *bp)
{
trace_xfs_buf_free(bp, _RET_IP_);
ASSERT(list_empty(&bp->b_lru));
if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
uint i;
if (xfs_buf_is_vmapped(bp))
vm_unmap_ram(bp->b_addr - bp->b_offset,
bp->b_page_count);
for (i = 0; i < bp->b_page_count; i++) {
struct page *page = bp->b_pages[i];
if (bp->b_flags & _XBF_PAGE_CACHE)
ASSERT(!PagePrivate(page));
page_cache_release(page);
}
}
_xfs_buf_free_pages(bp);
xfs_buf_deallocate(bp);
}
/*
 * Finds all pages for the buffer in question and builds its page list.
*/
STATIC int
_xfs_buf_lookup_pages(
xfs_buf_t *bp,
uint flags)
{
struct address_space *mapping = bp->b_target->bt_mapping;
size_t blocksize = bp->b_target->bt_bsize;
size_t size = bp->b_count_desired;
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
pgoff_t first;
xfs_off_t end;
int error;
end = bp->b_file_offset + bp->b_buffer_length;
page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
bp->b_flags |= _XBF_PAGE_CACHE;
offset = bp->b_offset;
first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
for (i = 0; i < bp->b_page_count; i++) {
struct page *page;
uint retries = 0;
retry:
page = find_or_create_page(mapping, first + i, gfp_mask);
if (unlikely(page == NULL)) {
if (flags & XBF_READ_AHEAD) {
bp->b_page_count = i;
for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->b_pages[i]);
return -ENOMEM;
}
/*
* This could deadlock.
*
* But until all the XFS lowlevel code is revamped to
* handle buffer allocation failures we can't do much.
*/
if (!(++retries % 100))
printk(KERN_ERR
"XFS: possible memory allocation "
"deadlock in %s (mode:0x%x)\n",
__func__, gfp_mask);
XFS_STATS_INC(xb_page_retries);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
XFS_STATS_INC(xb_page_found);
nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
size -= nbytes;
ASSERT(!PagePrivate(page));
if (!PageUptodate(page)) {
page_count--;
if (blocksize >= PAGE_CACHE_SIZE) {
if (flags & XBF_READ)
bp->b_flags |= _XBF_PAGE_LOCKED;
} else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes))
page_count++;
}
}
bp->b_pages[i] = page;
offset = 0;
}
if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->b_pages[i]);
}
if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE;
return error;
}
/*
 * Map buffer into kernel address-space if necessary.
*/
STATIC int
_xfs_buf_map_pages(
xfs_buf_t *bp,
uint flags)
{
/* A single page buffer is always mappable */
if (bp->b_page_count == 1) {
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->b_flags |= XBF_MAPPED;
} else if (flags & XBF_MAPPED) {
bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-1, PAGE_KERNEL);
if (unlikely(bp->b_addr == NULL))
return -ENOMEM;
bp->b_addr += bp->b_offset;
bp->b_flags |= XBF_MAPPED;
}
return 0;
}
/*
* Finding and Reading Buffers
*/
/*
 * Looks up, and creates if absent, a lockable buffer for
* a given range of an inode. The buffer is returned
* locked. If other overlapping buffers exist, they are
* released before the new buffer is created and locked,
* which may imply that this call will block until those buffers
* are unlocked. No I/O is implied by this call.
*/
xfs_buf_t *
_xfs_buf_find(
xfs_buftarg_t *btp, /* block device target */
xfs_off_t ioff, /* starting offset of range */
size_t isize, /* length of range */
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
xfs_off_t range_base;
size_t range_length;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
range_base = (ioff << BBSHIFT);
range_length = (isize << BBSHIFT);
/* Check for IOs smaller than the sector size / not sector aligned */
ASSERT(!(range_length < (1 << btp->bt_sshift)));
ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
/* get tree root */
pag = xfs_perag_get(btp->bt_mount,
xfs_daddr_to_agno(btp->bt_mount, ioff));
/* walk tree */
spin_lock(&pag->pag_buf_lock);
rbp = &pag->pag_buf_tree.rb_node;
parent = NULL;
bp = NULL;
while (*rbp) {
parent = *rbp;
bp = rb_entry(parent, struct xfs_buf, b_rbnode);
if (range_base < bp->b_file_offset)
rbp = &(*rbp)->rb_left;
else if (range_base > bp->b_file_offset)
rbp = &(*rbp)->rb_right;
else {
/*
* found a block offset match. If the range doesn't
* match, the only way this is allowed is if the buffer
* in the cache is stale and the transaction that made
* it stale has not yet committed. i.e. we are
* reallocating a busy extent. Skip this buffer and
* continue searching to the right for an exact match.
*/
if (bp->b_buffer_length != range_length) {
ASSERT(bp->b_flags & XBF_STALE);
rbp = &(*rbp)->rb_right;
continue;
}
atomic_inc(&bp->b_hold);
goto found;
}
}
/* No match found */
if (new_bp) {
_xfs_buf_initialize(new_bp, btp, range_base,
range_length, flags);
rb_link_node(&new_bp->b_rbnode, parent, rbp);
rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
/* the buffer keeps the perag reference until it is freed */
new_bp->b_pag = pag;
spin_unlock(&pag->pag_buf_lock);
} else {
XFS_STATS_INC(xb_miss_locked);
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
}
return new_bp;
found:
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
if (xfs_buf_cond_lock(bp)) {
/* failed, so wait for the lock if requested. */
if (!(flags & XBF_TRYLOCK)) {
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
} else {
xfs_buf_rele(bp);
XFS_STATS_INC(xb_busy_locked);
return NULL;
}
}
if (bp->b_flags & XBF_STALE) {
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
bp->b_flags &= XBF_MAPPED;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
XFS_STATS_INC(xb_get_locked);
return bp;
}
/*
* Assembles a buffer covering the specified range.
* Storage in memory for all portions of the buffer will be allocated,
* although backing storage may not be.
*/
xfs_buf_t *
xfs_buf_get(
xfs_buftarg_t *target,/* target for buffer */
xfs_off_t ioff, /* starting offset of range */
size_t isize, /* length of range */
xfs_buf_flags_t flags)
{
xfs_buf_t *bp, *new_bp;
int error = 0, i;
new_bp = xfs_buf_allocate(flags);
if (unlikely(!new_bp))
return NULL;
bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
if (bp == new_bp) {
error = _xfs_buf_lookup_pages(bp, flags);
if (error)
goto no_buffer;
} else {
xfs_buf_deallocate(new_bp);
if (unlikely(bp == NULL))
return NULL;
}
for (i = 0; i < bp->b_page_count; i++)
mark_page_accessed(bp->b_pages[i]);
if (!(bp->b_flags & XBF_MAPPED)) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
printk(KERN_WARNING "%s: failed to map pages\n",
__func__);
goto no_buffer;
}
}
XFS_STATS_INC(xb_get);
/*
* Always fill in the block number now, the mapped cases can do
* their own overlay of this later.
*/
bp->b_bn = ioff;
bp->b_count_desired = bp->b_buffer_length;
trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
no_buffer:
if (flags & (XBF_LOCK | XBF_TRYLOCK))
xfs_buf_unlock(bp);
xfs_buf_rele(bp);
return NULL;
}
STATIC int
_xfs_buf_read(
xfs_buf_t *bp,
xfs_buf_flags_t flags)
{
int status;
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
status = xfs_buf_iorequest(bp);
if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
return status;
return xfs_buf_iowait(bp);
}
xfs_buf_t *
xfs_buf_read(
xfs_buftarg_t *target,
xfs_off_t ioff,
size_t isize,
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
flags |= XBF_READ;
bp = xfs_buf_get(target, ioff, isize, flags);
if (bp) {
trace_xfs_buf_read(bp, flags, _RET_IP_);
if (!XFS_BUF_ISDONE(bp)) {
XFS_STATS_INC(xb_get_read);
_xfs_buf_read(bp, flags);
} else if (flags & XBF_ASYNC) {
/*
* Read ahead call which is already satisfied,
* drop the buffer
*/
goto no_buffer;
} else {
/* We do not want read in the flags */
bp->b_flags &= ~XBF_READ;
}
}
return bp;
no_buffer:
if (flags & (XBF_LOCK | XBF_TRYLOCK))
xfs_buf_unlock(bp);
xfs_buf_rele(bp);
return NULL;
}
/*
* If we are not low on memory then do the readahead in a deadlock
* safe manner.
*/
void
xfs_buf_readahead(
xfs_buftarg_t *target,
xfs_off_t ioff,
size_t isize)
{
struct backing_dev_info *bdi;
bdi = target->bt_mapping->backing_dev_info;
if (bdi_read_congested(bdi))
return;
xfs_buf_read(target, ioff, isize,
XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}
/*
* Read an uncached buffer from disk. Allocates and returns a locked
* buffer containing the disk contents or nothing.
*/
struct xfs_buf *
xfs_buf_read_uncached(
struct xfs_mount *mp,
struct xfs_buftarg *target,
xfs_daddr_t daddr,
size_t length,
int flags)
{
xfs_buf_t *bp;
int error;
bp = xfs_buf_get_uncached(target, length, flags);
if (!bp)
return NULL;
/* set up the buffer for a read IO */
xfs_buf_lock(bp);
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
XFS_BUF_BUSY(bp);
xfsbdstrat(mp, bp);
error = xfs_buf_iowait(bp);
if (error || bp->b_error) {
xfs_buf_relse(bp);
return NULL;
}
return bp;
}
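/*
 * Allocate an empty buffer of the given length with no backing pages or
 * disk address; callers typically attach memory afterwards, e.g. via
 * xfs_buf_associate_memory().
 */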
xfs_buf_t *
xfs_buf_get_empty(
size_t len,
xfs_buftarg_t *target)
{
xfs_buf_t *bp;
bp = xfs_buf_allocate(0);
if (bp)
_xfs_buf_initialize(bp, target, 0, len, 0);
return bp;
}
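/*
 * Translate a kernel virtual address into its backing page, handling both
 * directly mapped and vmalloc'd addresses.
 */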
static inline struct page *
mem_to_page(
void *addr)
{
if ((!is_vmalloc_addr(addr))) {
return virt_to_page(addr);
} else {
return vmalloc_to_page(addr);
}
}
int
xfs_buf_associate_memory(
xfs_buf_t *bp,
void *mem,
size_t len)
{
int rval;
int i = 0;
unsigned long pageaddr;
unsigned long offset;
size_t buflen;
int page_count;
pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
offset = (unsigned long)mem - pageaddr;
buflen = PAGE_CACHE_ALIGN(len + offset);
page_count = buflen >> PAGE_CACHE_SHIFT;
/* Free any previous set of page pointers */
if (bp->b_pages)
_xfs_buf_free_pages(bp);
bp->b_pages = NULL;
bp->b_addr = mem;
rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
if (rval)
return rval;
bp->b_offset = offset;
for (i = 0; i < bp->b_page_count; i++) {
bp->b_pages[i] = mem_to_page((void *)pageaddr);
pageaddr += PAGE_CACHE_SIZE;
}
bp->b_count_desired = len;
bp->b_buffer_length = buflen;
bp->b_flags |= XBF_MAPPED;
bp->b_flags &= ~_XBF_PAGE_LOCKED;
return 0;
}
xfs_buf_t *
xfs_buf_get_uncached(
struct xfs_buftarg *target,
size_t len,
int flags)
{
unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
int error, i;
xfs_buf_t *bp;
bp = xfs_buf_allocate(0);
if (unlikely(bp == NULL))
goto fail;
_xfs_buf_initialize(bp, target, 0, len, 0);
error = _xfs_buf_get_pages(bp, page_count, 0);
if (error)
goto fail_free_buf;
for (i = 0; i < page_count; i++) {
bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
if (!bp->b_pages[i])
goto fail_free_mem;
}
bp->b_flags |= _XBF_PAGES;
error = _xfs_buf_map_pages(bp, XBF_MAPPED);
if (unlikely(error)) {
printk(KERN_WARNING "%s: failed to map pages\n",
__func__);
goto fail_free_mem;
}
xfs_buf_unlock(bp);
trace_xfs_buf_get_uncached(bp, _RET_IP_);
return bp;
fail_free_mem:
while (--i >= 0)
__free_page(bp->b_pages[i]);
_xfs_buf_free_pages(bp);
fail_free_buf:
xfs_buf_deallocate(bp);
fail:
return NULL;
}
/*
* Increment reference count on buffer, to hold the buffer concurrently
* with another thread which may release (free) the buffer asynchronously.
* Must hold the buffer already to call this function.
*/
void
xfs_buf_hold(
xfs_buf_t *bp)
{
trace_xfs_buf_hold(bp, _RET_IP_);
atomic_inc(&bp->b_hold);
}
/*
* Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
*/
void
xfs_buf_rele(
xfs_buf_t *bp)
{
struct xfs_perag *pag = bp->b_pag;
trace_xfs_buf_rele(bp, _RET_IP_);
if (!pag) {
ASSERT(list_empty(&bp->b_lru));
ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
if (atomic_dec_and_test(&bp->b_hold))
xfs_buf_free(bp);
return;
}
ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
ASSERT(atomic_read(&bp->b_hold) > 0);
if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
if (!(bp->b_flags & XBF_STALE) &&
atomic_read(&bp->b_lru_ref)) {
xfs_buf_lru_add(bp);
spin_unlock(&pag->pag_buf_lock);
} else {
xfs_buf_lru_del(bp);
ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
xfs_buf_free(bp);
}
}
}
/*
* Mutual exclusion on buffers. Locking model:
*
* Buffers associated with inodes for which buffer locking
* is not enabled are not protected by semaphores, and are
* assumed to be exclusively owned by the caller. There is a
* spinlock in the buffer, used by the caller when concurrent
* access is possible.
*/
/*
* Locks a buffer object, if it is not already locked. Note that this in
* no way locks the underlying pages, so it is only useful for
* synchronizing concurrent use of buffer objects, not for synchronizing
* independent access to the underlying pages.
*
* If we come across a stale, pinned, locked buffer, we know that we are
* being asked to lock a buffer that has been reallocated. Because it is
* pinned, we know that the log has not been pushed to disk and hence it
* will still be locked. Rather than continuing to have trylock attempts
* fail until someone else pushes the log, push it ourselves before
* returning. This means that the xfsaild will not get stuck trying
* to push on stale inode buffers.
*/
int
xfs_buf_cond_lock(
xfs_buf_t *bp)
{
int locked;
locked = down_trylock(&bp->b_sema) == 0;
if (locked)
XB_SET_OWNER(bp);
else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
trace_xfs_buf_cond_lock(bp, _RET_IP_);
return locked ? 0 : -EBUSY;
}
int
xfs_buf_lock_value(
xfs_buf_t *bp)
{
return bp->b_sema.count;
}
/*
* Locks a buffer object.
* Note that this in no way locks the underlying pages, so it is only
* useful for synchronizing concurrent use of buffer objects, not for
* synchronizing independent access to the underlying pages.
*
* If we come across a stale, pinned, locked buffer, we know that we
* are being asked to lock a buffer that has been reallocated. Because
* it is pinned, we know that the log has not been pushed to disk and
* hence it will still be locked. Rather than sleeping until someone
* else pushes the log, push it ourselves before trying to get the lock.
*/
void
xfs_buf_lock(
xfs_buf_t *bp)
{
trace_xfs_buf_lock(bp, _RET_IP_);
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
down(&bp->b_sema);
XB_SET_OWNER(bp);
trace_xfs_buf_lock_done(bp, _RET_IP_);
}
/*
* Releases the lock on the buffer object.
* If the buffer is marked delwri but is not queued, do so before we
* unlock the buffer as we need to set flags correctly. We also need to
* take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
*/
void
xfs_buf_unlock(
xfs_buf_t *bp)
{
if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
atomic_inc(&bp->b_hold);
bp->b_flags |= XBF_ASYNC;
xfs_buf_delwri_queue(bp, 0);
}
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
trace_xfs_buf_unlock(bp, _RET_IP_);
}
STATIC void
xfs_buf_wait_unpin(
xfs_buf_t *bp)
{
DECLARE_WAITQUEUE (wait, current);
if (atomic_read(&bp->b_pin_count) == 0)
return;
add_wait_queue(&bp->b_waiters, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&bp->b_pin_count) == 0)
break;
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
schedule();
}
remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
}
/*
* Buffer Utility Routines
*/
STATIC void
xfs_buf_iodone_work(
struct work_struct *work)
{
xfs_buf_t *bp =
container_of(work, xfs_buf_t, b_iodone_work);
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
else if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
}
void
xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
trace_xfs_buf_iodone(bp, _RET_IP_);
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
if (bp->b_error == 0)
bp->b_flags |= XBF_DONE;
if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
complete(&bp->b_iowait);
}
}
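/*
 * Record an I/O error against the buffer; the error is stored as a
 * positive errno value in b_error.
 */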
void
xfs_buf_ioerror(
xfs_buf_t *bp,
int error)
{
ASSERT(error >= 0 && error <= 0xffff);
bp->b_error = (unsigned short)error;
trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}
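/*
 * Issue a synchronous write of the buffer and wait for completion,
 * shutting the filesystem down if the write fails.
 */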
int
xfs_bwrite(
struct xfs_mount *mp,
struct xfs_buf *bp)
{
int error;
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
xfs_buf_delwri_dequeue(bp);
xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
if (error)
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
xfs_buf_relse(bp);
return error;
}
void
xfs_bdwrite(
void *mp,
struct xfs_buf *bp)
{
trace_xfs_buf_bdwrite(bp, _RET_IP_);
bp->b_flags &= ~XBF_READ;
bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
xfs_buf_delwri_queue(bp, 1);
}
/*
* Called when we want to stop a buffer from getting written or read.
* We attach the EIO error, muck with its flags, and call xfs_buf_ioend
* so that the proper iodone callbacks get called.
*/
STATIC int
xfs_bioerror(
xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif
/*
* No need to wait until the buffer is unpinned, we aren't flushing it.
*/
XFS_BUF_ERROR(bp, EIO);
/*
* We're calling xfs_buf_ioend, so delete XBF_DONE flag.
*/
XFS_BUF_UNREAD(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
xfs_buf_ioend(bp, 0);
return EIO;
}
/*
* Same as xfs_bioerror, except that we are releasing the buffer
* here ourselves, and avoiding the xfs_buf_ioend call.
* This is meant for userdata errors; metadata bufs come with
* iodone functions attached, so that we can track down errors.
*/
STATIC int
xfs_bioerror_relse(
struct xfs_buf *bp)
{
int64_t fl = XFS_BUF_BFLAGS(bp);
/*
* No need to wait until the buffer is unpinned.
* We aren't flushing it.
*
* chunkhold expects B_DONE to be set, whether
* we actually finish the I/O or not. We don't want to
* change that interface.
*/
XFS_BUF_UNREAD(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_CLR_IODONE_FUNC(bp);
if (!(fl & XBF_ASYNC)) {
/*
* Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
* There's no reason to mark error for
* ASYNC buffers.
*/
XFS_BUF_ERROR(bp, EIO);
XFS_BUF_FINISH_IOWAIT(bp);
} else {
xfs_buf_relse(bp);
}
return EIO;
}
/*
* All xfs metadata buffers except log state machine buffers
* get this attached as their b_bdstrat callback function.
* This is so that we can catch a buffer
* after prematurely unpinning it to forcibly shutdown the filesystem.
*/
int
xfs_bdstrat_cb(
struct xfs_buf *bp)
{
if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
trace_xfs_bdstrat_shut(bp, _RET_IP_);
/*
* Metadata write that didn't get logged but
* written delayed anyway. These aren't associated
* with a transaction, and can be ignored.
*/
if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
return xfs_bioerror_relse(bp);
else
return xfs_bioerror(bp);
}
xfs_buf_iorequest(bp);
return 0;
}
/*
* Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
* path; one of the exceptions is the superblock.
*/
void
xfsbdstrat(
struct xfs_mount *mp,
struct xfs_buf *bp)
{
if (XFS_FORCED_SHUTDOWN(mp)) {
trace_xfs_bdstrat_shut(bp, _RET_IP_);
xfs_bioerror_relse(bp);
return;
}
xfs_buf_iorequest(bp);
}
STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
bp->b_flags &= ~_XBF_PAGE_LOCKED;
xfs_buf_ioend(bp, schedule);
}
}
STATIC void
xfs_buf_bio_end_io(
struct bio *bio,
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
xfs_buf_ioerror(bp, -error);
if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
do {
struct page *page = bvec->bv_page;
ASSERT(!PagePrivate(page));
if (unlikely(bp->b_error)) {
if (bp->b_flags & XBF_READ)
ClearPageUptodate(page);
} else if (blocksize >= PAGE_CACHE_SIZE) {
SetPageUptodate(page);
} else if (!PagePrivate(page) &&
(bp->b_flags & _XBF_PAGE_CACHE)) {
set_page_region(page, bvec->bv_offset, bvec->bv_len);
}
if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags);
if (bp->b_flags & _XBF_PAGE_LOCKED)
unlock_page(page);
} while (bvec >= bio->bi_io_vec);
_xfs_buf_ioend(bp, 1);
bio_put(bio);
}
STATIC void
_xfs_buf_ioapply(
xfs_buf_t *bp)
{
int rw, map_i, total_nr_pages, nr_pages;
struct bio *bio;
int offset = bp->b_offset;
int size = bp->b_count_desired;
sector_t sector = bp->b_bn;
unsigned int blocksize = bp->b_target->bt_bsize;
total_nr_pages = bp->b_page_count;
map_i = 0;
if (bp->b_flags & XBF_ORDERED) {
ASSERT(!(bp->b_flags & XBF_READ));
rw = WRITE_FLUSH_FUA;
} else if (bp->b_flags & XBF_LOG_BUFFER) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
} else if (bp->b_flags & _XBF_RUN_QUEUES) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
} else {
rw = (bp->b_flags & XBF_WRITE) ? WRITE :
(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
}
	/* Special code path for reading in a sub page size buffer --
* we populate up the whole page, and hence the other metadata
* in the same page. This optimization is only valid when the
* filesystem block size is not smaller than the page size.
*/
if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
(XBF_READ|_XBF_PAGE_LOCKED)) &&
(blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector - (offset >> BBSHIFT);
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
size = 0;
atomic_inc(&bp->b_io_remaining);
goto submit_io;
}
next_chunk:
atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages;
bio = bio_alloc(GFP_NOIO, nr_pages);
bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
for (; size && nr_pages; nr_pages--, map_i++) {
int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
if (nbytes > size)
nbytes = size;
rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
if (rbytes < nbytes)
break;
offset = 0;
sector += nbytes >> BBSHIFT;
size -= nbytes;
total_nr_pages--;
}
submit_io:
if (likely(bio->bi_size)) {
if (xfs_buf_is_vmapped(bp)) {
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
}
submit_bio(rw, bio);
if (size)
goto next_chunk;
} else {
/*
* if we get here, no pages were added to the bio. However,
* we can't just error out here - if the pages are locked then
* we have to unlock them otherwise we can hang on a later
* access to the page.
*/
xfs_buf_ioerror(bp, EIO);
if (bp->b_flags & _XBF_PAGE_LOCKED) {
int i;
for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->b_pages[i]);
}
bio_put(bio);
}
}
int
xfs_buf_iorequest(
xfs_buf_t *bp)
{
trace_xfs_buf_iorequest(bp, _RET_IP_);
if (bp->b_flags & XBF_DELWRI) {
xfs_buf_delwri_queue(bp, 1);
return 0;
}
if (bp->b_flags & XBF_WRITE) {
xfs_buf_wait_unpin(bp);
}
xfs_buf_hold(bp);
/* Set the count to 1 initially, this will stop an I/O
* completion callout which happens before we have started
* all the I/O from calling xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
_xfs_buf_ioapply(bp);
_xfs_buf_ioend(bp, 0);
xfs_buf_rele(bp);
return 0;
}
/*
* Waits for I/O to complete on the buffer supplied.
* It returns immediately if no I/O is pending.
* It returns the I/O error code, if any, or 0 if there was no error.
*/
int
xfs_buf_iowait(
xfs_buf_t *bp)
{
trace_xfs_buf_iowait(bp, _RET_IP_);
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
return bp->b_error;
}
xfs_caddr_t
xfs_buf_offset(
xfs_buf_t *bp,
size_t offset)
{
struct page *page;
if (bp->b_flags & XBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
offset += bp->b_offset;
page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}
/*
* Move data into or out of a buffer.
*/
void
xfs_buf_iomove(
xfs_buf_t *bp, /* buffer to process */
size_t boff, /* starting buffer offset */
size_t bsize, /* length to copy */
void *data, /* data address */
xfs_buf_rw_t mode) /* read/write/zero flag */
{
size_t bend, cpoff, csize;
struct page *page;
bend = boff + bsize;
while (boff < bend) {
page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
cpoff = xfs_buf_poff(boff + bp->b_offset);
csize = min_t(size_t,
PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
switch (mode) {
case XBRW_ZERO:
memset(page_address(page) + cpoff, 0, csize);
break;
case XBRW_READ:
memcpy(data, page_address(page) + cpoff, csize);
break;
case XBRW_WRITE:
memcpy(page_address(page) + cpoff, data, csize);
}
boff += csize;
data += csize;
}
}
/*
* Handling of buffer targets (buftargs).
*/
/*
* Wait for any bufs with callbacks that have been submitted but have not yet
* returned. These buffers will have an elevated hold count, so wait on those
* while freeing all the buffers only held by the LRU.
*/
void
xfs_wait_buftarg(
struct xfs_buftarg *btp)
{
struct xfs_buf *bp;
restart:
spin_lock(&btp->bt_lru_lock);
while (!list_empty(&btp->bt_lru)) {
bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
if (atomic_read(&bp->b_hold) > 1) {
spin_unlock(&btp->bt_lru_lock);
delay(100);
goto restart;
}
/*
		 * clear the LRU reference count so the buffer doesn't get
* ignored in xfs_buf_rele().
*/
atomic_set(&bp->b_lru_ref, 0);
spin_unlock(&btp->bt_lru_lock);
xfs_buf_rele(bp);
spin_lock(&btp->bt_lru_lock);
}
spin_unlock(&btp->bt_lru_lock);
}
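/*
 * Shrinker callback: age buffers on the LRU by decrementing b_lru_ref and
 * dispose of those whose count has already reached zero.
 */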
int
xfs_buftarg_shrink(
struct shrinker *shrink,
struct shrink_control *sc)
{
struct xfs_buftarg *btp = container_of(shrink,
struct xfs_buftarg, bt_shrinker);
struct xfs_buf *bp;
int nr_to_scan = sc->nr_to_scan;
LIST_HEAD(dispose);
if (!nr_to_scan)
return btp->bt_lru_nr;
spin_lock(&btp->bt_lru_lock);
while (!list_empty(&btp->bt_lru)) {
if (nr_to_scan-- <= 0)
break;
bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
/*
* Decrement the b_lru_ref count unless the value is already
* zero. If the value is already zero, we need to reclaim the
* buffer, otherwise it gets another trip through the LRU.
*/
if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
list_move_tail(&bp->b_lru, &btp->bt_lru);
continue;
}
/*
* remove the buffer from the LRU now to avoid needing another
* lock round trip inside xfs_buf_rele().
*/
list_move(&bp->b_lru, &dispose);
btp->bt_lru_nr--;
}
spin_unlock(&btp->bt_lru_lock);
while (!list_empty(&dispose)) {
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
xfs_buf_rele(bp);
}
return btp->bt_lru_nr;
}
void
xfs_free_buftarg(
struct xfs_mount *mp,
struct xfs_buftarg *btp)
{
unregister_shrinker(&btp->bt_shrinker);
xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
iput(btp->bt_mapping->host);
kthread_stop(btp->bt_task);
kmem_free(btp);
}
STATIC int
xfs_setsize_buftarg_flags(
xfs_buftarg_t *btp,
unsigned int blocksize,
unsigned int sectorsize,
int verbose)
{
btp->bt_bsize = blocksize;
btp->bt_sshift = ffs(sectorsize) - 1;
btp->bt_smask = sectorsize - 1;
if (set_blocksize(btp->bt_bdev, sectorsize)) {
printk(KERN_WARNING
"XFS: Cannot set_blocksize to %u on device %s\n",
sectorsize, XFS_BUFTARG_NAME(btp));
return EINVAL;
}
if (verbose &&
(PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
printk(KERN_WARNING
"XFS: %u byte sectors in use on device %s. "
"This is suboptimal; %u or greater is ideal.\n",
sectorsize, XFS_BUFTARG_NAME(btp),
(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
}
return 0;
}
/*
* When allocating the initial buffer target we have not yet
* read in the superblock, so don't know what sized sectors
 * are being used at this early stage. Play safe.
*/
STATIC int
xfs_setsize_buftarg_early(
xfs_buftarg_t *btp,
struct block_device *bdev)
{
return xfs_setsize_buftarg_flags(btp,
PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
}
int
xfs_setsize_buftarg(
xfs_buftarg_t *btp,
unsigned int blocksize,
unsigned int sectorsize)
{
return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
STATIC int
xfs_mapping_buftarg(
xfs_buftarg_t *btp,
struct block_device *bdev)
{
struct backing_dev_info *bdi;
struct inode *inode;
struct address_space *mapping;
static const struct address_space_operations mapping_aops = {
.sync_page = block_sync_page,
.migratepage = fail_migrate_page,
};
inode = new_inode(bdev->bd_inode->i_sb);
if (!inode) {
printk(KERN_WARNING
"XFS: Cannot allocate mapping inode for device %s\n",
XFS_BUFTARG_NAME(btp));
return ENOMEM;
}
inode->i_ino = get_next_ino();
inode->i_mode = S_IFBLK;
inode->i_bdev = bdev;
inode->i_rdev = bdev->bd_dev;
bdi = blk_get_backing_dev_info(bdev);
if (!bdi)
bdi = &default_backing_dev_info;
mapping = &inode->i_data;
mapping->a_ops = &mapping_aops;
mapping->backing_dev_info = bdi;
mapping_set_gfp_mask(mapping, GFP_NOFS);
btp->bt_mapping = mapping;
return 0;
}
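/*
 * Initialise the per-target delayed write queue and start its xfsbufd
 * flushing thread.
 */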
STATIC int
xfs_alloc_delwrite_queue(
xfs_buftarg_t *btp,
const char *fsname)
{
INIT_LIST_HEAD(&btp->bt_delwrite_queue);
spin_lock_init(&btp->bt_delwrite_lock);
btp->bt_flags = 0;
btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
if (IS_ERR(btp->bt_task))
return PTR_ERR(btp->bt_task);
return 0;
}
xfs_buftarg_t *
xfs_alloc_buftarg(
struct xfs_mount *mp,
struct block_device *bdev,
int external,
const char *fsname)
{
xfs_buftarg_t *btp;
btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
INIT_LIST_HEAD(&btp->bt_lru);
spin_lock_init(&btp->bt_lru_lock);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
if (xfs_mapping_buftarg(btp, bdev))
goto error;
if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
btp->bt_shrinker.shrink = xfs_buftarg_shrink;
btp->bt_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&btp->bt_shrinker);
return btp;
error:
kmem_free(btp);
return NULL;
}
/*
* Delayed write buffer handling
*/
STATIC void
xfs_buf_delwri_queue(
xfs_buf_t *bp,
int unlock)
{
struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
trace_xfs_buf_delwri_queue(bp, _RET_IP_);
ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
spin_lock(dwlk);
/* If already in the queue, dequeue and place at tail */
if (!list_empty(&bp->b_list)) {
ASSERT(bp->b_flags & _XBF_DELWRI_Q);
if (unlock)
atomic_dec(&bp->b_hold);
list_del(&bp->b_list);
}
if (list_empty(dwq)) {
/* start xfsbufd as it is about to have something to do */
wake_up_process(bp->b_target->bt_task);
}
bp->b_flags |= _XBF_DELWRI_Q;
list_add_tail(&bp->b_list, dwq);
bp->b_queuetime = jiffies;
spin_unlock(dwlk);
if (unlock)
xfs_buf_unlock(bp);
}
void
xfs_buf_delwri_dequeue(
xfs_buf_t *bp)
{
spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
int dequeued = 0;
spin_lock(dwlk);
if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
ASSERT(bp->b_flags & _XBF_DELWRI_Q);
list_del_init(&bp->b_list);
dequeued = 1;
}
bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
spin_unlock(dwlk);
if (dequeued)
xfs_buf_rele(bp);
trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}
/*
* If a delwri buffer needs to be pushed before it has aged out, then promote
* it to the head of the delwri queue so that it will be flushed on the next
* xfsbufd run. We do this by resetting the queuetime of the buffer to be older
* than the age currently needed to flush the buffer. Hence the next time the
* xfsbufd sees it is guaranteed to be considered old enough to flush.
*/
void
xfs_buf_delwri_promote(
struct xfs_buf *bp)
{
struct xfs_buftarg *btp = bp->b_target;
long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
ASSERT(bp->b_flags & XBF_DELWRI);
ASSERT(bp->b_flags & _XBF_DELWRI_Q);
/*
* Check the buffer age before locking the delayed write queue as we
* don't need to promote buffers that are already past the flush age.
*/
if (bp->b_queuetime < jiffies - age)
return;
bp->b_queuetime = jiffies - age;
spin_lock(&btp->bt_delwrite_lock);
list_move(&bp->b_list, &btp->bt_delwrite_queue);
spin_unlock(&btp->bt_delwrite_lock);
}
STATIC void
xfs_buf_runall_queues(
struct workqueue_struct *queue)
{
flush_workqueue(queue);
}
/*
* Move as many buffers as specified to the supplied list
 * indicating if we skipped any buffers to prevent deadlocks.
*/
STATIC int
xfs_buf_delwri_split(
xfs_buftarg_t *target,
struct list_head *list,
unsigned long age)
{
xfs_buf_t *bp, *n;
struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock;
int skipped = 0;
int force;
force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
INIT_LIST_HEAD(list);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_flags & XBF_DELWRI);
if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
break;
}
bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
_XBF_RUN_QUEUES);
bp->b_flags |= XBF_WRITE;
list_move_tail(&bp->b_list, list);
trace_xfs_buf_delwri_split(bp, _RET_IP_);
} else
skipped++;
}
spin_unlock(dwlk);
return skipped;
}
/*
* Compare function is more complex than it needs to be because
* the return value is only 32 bits and we are doing comparisons
* on 64 bit values
*/
static int
xfs_buf_cmp(
void *priv,
struct list_head *a,
struct list_head *b)
{
struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
xfs_daddr_t diff;
diff = ap->b_bn - bp->b_bn;
if (diff < 0)
return -1;
if (diff > 0)
return 1;
return 0;
}
void
xfs_buf_delwri_sort(
xfs_buftarg_t *target,
struct list_head *list)
{
list_sort(NULL, list, xfs_buf_cmp);
}
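/*
 * Per-target flush daemon: periodically pushes out delayed write buffers
 * that have aged sufficiently, sleeping while there is nothing to do.
 */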
STATIC int
xfsbufd(
void *data)
{
xfs_buftarg_t *target = (xfs_buftarg_t *)data;
current->flags |= PF_MEMALLOC;
set_freezable();
do {
long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
int count = 0;
struct list_head tmp;
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
refrigerator();
} else {
clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
}
/* sleep for a long time if there is nothing to do. */
if (list_empty(&target->bt_delwrite_queue))
tout = MAX_SCHEDULE_TIMEOUT;
schedule_timeout_interruptible(tout);
xfs_buf_delwri_split(target, &tmp, age);
list_sort(NULL, &tmp, xfs_buf_cmp);
while (!list_empty(&tmp)) {
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_bdstrat_cb(bp);
count++;
}
if (count)
blk_run_address_space(target->bt_mapping);
} while (!kthread_should_stop());
return 0;
}
/*
* Go through all incore buffers, and release buffers if they belong to
* the given device. This is used in filesystem error handling to
* preserve the consistency of its metadata.
*/
int
xfs_flush_buftarg(
xfs_buftarg_t *target,
int wait)
{
xfs_buf_t *bp;
int pincount = 0;
LIST_HEAD(tmp_list);
LIST_HEAD(wait_list);
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);
set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
/*
* Dropped the delayed write list lock, now walk the temporary list.
* All I/O is issued async and then if we need to wait for completion
* we do that after issuing all the IO.
*/
list_sort(NULL, &tmp_list, xfs_buf_cmp);
while (!list_empty(&tmp_list)) {
bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
list_del_init(&bp->b_list);
if (wait) {
bp->b_flags &= ~XBF_ASYNC;
list_add(&bp->b_list, &wait_list);
}
xfs_bdstrat_cb(bp);
}
if (wait) {
/* Expedite and wait for IO to complete. */
blk_run_address_space(target->bt_mapping);
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_buf_iowait(bp);
xfs_buf_relse(bp);
}
}
return pincount;
}
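/*
 * Set up the buffer cache slab zone and the workqueues used for I/O
 * completion processing.
 */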
int __init
xfs_buf_init(void)
{
xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
KM_ZONE_HWALIGN, NULL);
if (!xfs_buf_zone)
goto out;
xfslogd_workqueue = alloc_workqueue("xfslogd",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
if (!xfslogd_workqueue)
goto out_free_buf_zone;
xfsdatad_workqueue = create_workqueue("xfsdatad");
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
xfsconvertd_workqueue = create_workqueue("xfsconvertd");
if (!xfsconvertd_workqueue)
goto out_destroy_xfsdatad_workqueue;
return 0;
out_destroy_xfsdatad_workqueue:
destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
kmem_zone_destroy(xfs_buf_zone);
out:
return -ENOMEM;
}
void
xfs_buf_terminate(void)
{
destroy_workqueue(xfsconvertd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
}
#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
return &xfs_buftarg_list;
}
#endif
| moonlightly/kernel_htc_qsd8k | fs/xfs/linux-2.6/xfs_buf.c | C | gpl-2.0 | 48,381 |
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
*
*
* This is script engine factory for dummyNashorn engine.
*/
package jdk.dummyNashorn.api.scripting;
import javax.script.*;
import java.util.*;
public class DummyNashornJSEngineFactory implements ScriptEngineFactory {
public String getEngineName() {
return "dummyNashorn";
}
public String getEngineVersion() {
return "-1.0";
}
public List<String> getExtensions() {
return extensions;
}
public String getLanguageName() {
return "dummyNashorn";
}
public String getLanguageVersion() {
return "-1.0";
}
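    // Builds a descriptive pseudo call-syntax string from the method, receiver and arguments.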
public String getMethodCallSyntax(String obj, String m, String... args) {
StringBuffer buf = new StringBuffer();
buf.append("call " + m + " ");
buf.append(" on " + obj + " with ");
for (int i = 0; i < args.length; i++) {
buf.append(args[i] + ", ");
}
buf.append(";");
return buf.toString();
}
public List<String> getMimeTypes() {
return mimeTypes;
}
public List<String> getNames() {
return names;
}
public String getOutputStatement(String str) {
return "output " + str;
}
public String getParameter(String key) {
if (key.equals(ScriptEngine.ENGINE)) {
return getEngineName();
} else if (key.equals(ScriptEngine.ENGINE_VERSION)) {
return getEngineVersion();
} else if (key.equals(ScriptEngine.NAME)) {
return getEngineName();
} else if (key.equals(ScriptEngine.LANGUAGE)) {
return getLanguageName();
} else if (key.equals(ScriptEngine.LANGUAGE_VERSION)) {
return getLanguageVersion();
} else {
return null;
}
}
public String getProgram(String... statements) {
StringBuffer buf = new StringBuffer();
for (int i = 0; i < statements.length; i++) {
buf.append(statements[i]);
}
return buf.toString();
}
public ScriptEngine getScriptEngine() {
return new DummyNashornJSEngine();
}
private static List<String> names;
private static List<String> extensions;
private static List<String> mimeTypes;
static {
names = new ArrayList<String>(1);
names.add("dummyNashorn");
names.add("js");
names = Collections.unmodifiableList(names);
extensions = names;
mimeTypes = new ArrayList<String>(0);
mimeTypes = Collections.unmodifiableList(mimeTypes);
}
}
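/*
 * Illustrative usage sketch - not part of the original test source. It assumes
 * this factory has been registered for service discovery (e.g. through
 * META-INF/services/javax.script.ScriptEngineFactory or a module descriptor),
 * which is configured outside this file, and it makes no claim about what the
 * DummyNashornJSEngine implementation actually does with the evaluated text.
 */
class DummyNashornFactoryUsageSketch {
    public static void main(String[] args) throws ScriptException {
        // Look the engine up by one of the names the factory advertises in getNames().
        ScriptEngineManager manager = new ScriptEngineManager();
        ScriptEngine engine = manager.getEngineByName("dummyNashorn");
        if (engine == null) {
            System.err.println("dummyNashorn engine is not registered");
            return;
        }
        // Metadata comes straight from DummyNashornJSEngineFactory above.
        ScriptEngineFactory factory = engine.getFactory();
        System.out.println(factory.getEngineName() + " " + factory.getEngineVersion());
        // Evaluation is delegated to DummyNashornJSEngine (implementation not shown here).
        engine.eval("print('hello')");
    }
}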
| md-5/jdk10 | test/jdk/javax/script/multiEngines/jdk.scripting.dummyNashorn/jdk/dummyNashorn/api/scripting/DummyNashornJSEngineFactory.java | Java | gpl-2.0 | 3,594 |
<!doctype html>
<html>
<head>
<title>Code coverage report for src/map/handler/Map.Keyboard.js</title>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<meta http-equiv="content-language" content="en-gb">
<link rel='stylesheet' href='../../../prettify.css'>
<style type='text/css'>
body, html {
margin:0; padding: 0;
}
body {
font-family: "Helvetic Neue", Helvetica,Arial;
font-size: 10pt;
}
div.header, div.footer {
background: #eee;
padding: 1em;
}
div.header {
z-index: 100;
position: fixed;
top: 0;
border-bottom: 1px solid #666;
width: 100%;
}
div.footer {
border-top: 1px solid #666;
}
div.body {
margin-top: 10em;
}
div.meta {
font-size: 90%;
text-align: center;
}
h1, h2, h3 {
font-weight: normal;
}
h1 {
font-size: 12pt;
}
h2 {
font-size: 10pt;
}
pre {
font-family: consolas, menlo, monaco, monospace;
margin: 0;
padding: 0;
line-height: 14px;
font-size: 14px;
}
div.path { font-size: 110%; }
div.path a:link, div.path a:visited { color: #000; }
table.coverage { border-collapse: collapse; margin:0; padding: 0 }
table.coverage td {
margin: 0;
padding: 0;
color: #111;
vertical-align: top;
}
table.coverage td.line-count {
width: 50px;
text-align: right;
padding-right: 5px;
}
table.coverage td.line-coverage {
color: #777 !important;
text-align: right;
border-left: 1px solid #666;
border-right: 1px solid #666;
}
table.coverage td.text {
}
table.coverage td span.cline-any {
display: inline-block;
padding: 0 5px;
width: 40px;
}
table.coverage td span.cline-neutral {
background: #eee;
}
table.coverage td span.cline-yes {
background: #b5d592;
color: #999;
}
table.coverage td span.cline-no {
background: #fc8c84;
}
.cstat-yes { color: #111; }
.cstat-no { background: #fc8c84; color: #111; }
.fstat-no { background: #ffc520; color: #111 !important; }
.cbranch-no { background: yellow !important; color: #111; }
.missing-if-branch {
display: inline-block;
margin-right: 10px;
position: relative;
padding: 0 4px;
background: black;
color: yellow;
xtext-decoration: line-through;
}
.missing-if-branch .typ {
color: inherit !important;
}
.entity, .metric { font-weight: bold; }
.metric { display: inline-block; border: 1px solid #333; padding: 0.3em; background: white; }
.metric small { font-size: 80%; font-weight: normal; color: #666; }
div.coverage-summary table { border-collapse: collapse; margin: 3em; font-size: 110%; }
div.coverage-summary td, div.coverage-summary table th { margin: 0; padding: 0.25em 1em; border-top: 1px solid #666; border-bottom: 1px solid #666; }
div.coverage-summary th { text-align: left; border: 1px solid #666; background: #eee; font-weight: normal; }
div.coverage-summary th.file { border-right: none !important; }
div.coverage-summary th.pic { border-left: none !important; text-align: right; }
div.coverage-summary th.pct { border-right: none !important; }
div.coverage-summary th.abs { border-left: none !important; text-align: right; }
div.coverage-summary td.pct { text-align: right; border-left: 1px solid #666; }
div.coverage-summary td.abs { text-align: right; font-size: 90%; color: #444; border-right: 1px solid #666; }
div.coverage-summary td.file { text-align: right; border-left: 1px solid #666; white-space: nowrap; }
div.coverage-summary td.pic { min-width: 120px !important; }
div.coverage-summary a:link { text-decoration: none; color: #000; }
div.coverage-summary a:visited { text-decoration: none; color: #333; }
div.coverage-summary a:hover { text-decoration: underline; }
div.coverage-summary tfoot td { border-top: 1px solid #666; }
div.coverage-summary .yui3-datatable-sort-indicator, div.coverage-summary .dummy-sort-indicator {
height: 10px;
width: 7px;
display: inline-block;
margin-left: 0.5em;
}
div.coverage-summary .yui3-datatable-sort-indicator {
background: url("http://yui.yahooapis.com/3.6.0/build/datatable-sort/assets/skins/sam/sort-arrow-sprite.png") no-repeat scroll 0 0 transparent;
}
div.coverage-summary .yui3-datatable-sorted .yui3-datatable-sort-indicator {
background-position: 0 -20px;
}
div.coverage-summary .yui3-datatable-sorted-desc .yui3-datatable-sort-indicator {
background-position: 0 -10px;
}
.high { background: #b5d592 !important; }
.medium { background: #ffe87c !important; }
.low { background: #fc8c84 !important; }
span.cover-fill, span.cover-empty {
display:inline-block;
border:1px solid #444;
background: white;
height: 12px;
}
span.cover-fill {
background: #ccc;
border-right: 1px solid #444;
}
span.cover-empty {
background: white;
border-left: none;
}
span.cover-full {
border-right: none !important;
}
pre.prettyprint {
border: none !important;
padding: 0 !important;
margin: 0 !important;
}
.com { color: #999 !important; }
</style>
</head>
<body>
<div class='header medium'>
<h1>Code coverage report for <span class='entity'>src/map/handler/Map.Keyboard.js</span></h1>
<h2>
Statements: <span class='metric'>58.7% <small>(27 / 46)</small></span>
Branches: <span class='metric'>20% <small>(2 / 10)</small></span>
Functions: <span class='metric'>45.45% <small>(5 / 11)</small></span>
Lines: <span class='metric'>58.7% <small>(27 / 46)</small></span>
</h2>
<div class="path"><a href="../../../index.html">All files</a> » <a href="index.html">src/map/handler/</a> » Map.Keyboard.js</div>
</div>
<div class='body'>
<pre><table class="coverage">
<tr><td class="line-count">1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141</td><td class="line-coverage"><span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">1</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">1</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">1</span>
<span class="cline-any cline-yes">1</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">150</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">50</span>
<span class="cline-any cline-yes">150</span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-no"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-neutral"> </span>
<span class="cline-any cline-yes">1</span>
<span class="cline-any cline-neutral"> </span></td><td class="text"><pre class="prettyprint lang-js">/*
* L.Map.Keyboard is handling keyboard interaction with the map, enabled by default.
*/
L.Map.mergeOptions({
keyboard: true,
keyboardPanOffset: 80,
keyboardZoomOffset: 1
});
L.Map.Keyboard = L.Handler.extend({
keyCodes: {
left: [37],
right: [39],
down: [40],
up: [38],
zoomIn: [187, 107, 61],
zoomOut: [189, 109, 173]
},
initialize: function (map) {
this._map = map;
this._setPanOffset(map.options.keyboardPanOffset);
this._setZoomOffset(map.options.keyboardZoomOffset);
},
addHooks: function () {
var container = this._map._container;
// make the container focusable by tabbing
		<span class="missing-if-branch" title="else path not taken" >E</span>if (container.tabIndex === -1) {
container.tabIndex = "0";
}
L.DomEvent
.on(container, 'focus', this._onFocus, this)
.on(container, 'blur', this._onBlur, this)
.on(container, 'mousedown', this._onMouseDown, this);
this._map
.on('focus', this._addHooks, this)
.on('blur', this._removeHooks, this);
},
removeHooks: <span class="fstat-no" title="function not covered" >function () {</span>
<span class="cstat-no" title="statement not covered" > this._removeHooks();</span>
<span class="cstat-no" title="statement not covered" > var container = this._map._container;</span>
<span class="cstat-no" title="statement not covered" > L.DomEvent</span>
.off(container, 'focus', this._onFocus, this)
.off(container, 'blur', this._onBlur, this)
.off(container, 'mousedown', this._onMouseDown, this);
<span class="cstat-no" title="statement not covered" > this._map</span>
.off('focus', this._addHooks, this)
.off('blur', this._removeHooks, this);
},
_onMouseDown: function () {
		<span class="missing-if-branch" title="else path not taken" >E</span>if (!this._focused) {
this._map._container.focus();
}
},
_onFocus: <span class="fstat-no" title="function not covered" >function () {</span>
<span class="cstat-no" title="statement not covered" > this._focused = true;</span>
<span class="cstat-no" title="statement not covered" > this._map.fire('focus');</span>
},
_onBlur: <span class="fstat-no" title="function not covered" >function () {</span>
<span class="cstat-no" title="statement not covered" > this._focused = false;</span>
<span class="cstat-no" title="statement not covered" > this._map.fire('blur');</span>
},
_setPanOffset: function (pan) {
var keys = this._panKeys = {},
codes = this.keyCodes,
i, len;
for (i = 0, len = codes.left.length; i < len; i++) {
keys[codes.left[i]] = [-1 * pan, 0];
}
for (i = 0, len = codes.right.length; i < len; i++) {
keys[codes.right[i]] = [pan, 0];
}
for (i = 0, len = codes.down.length; i < len; i++) {
keys[codes.down[i]] = [0, pan];
}
for (i = 0, len = codes.up.length; i < len; i++) {
keys[codes.up[i]] = [0, -1 * pan];
}
},
_setZoomOffset: function (zoom) {
var keys = this._zoomKeys = {},
codes = this.keyCodes,
i, len;
for (i = 0, len = codes.zoomIn.length; i < len; i++) {
keys[codes.zoomIn[i]] = zoom;
}
for (i = 0, len = codes.zoomOut.length; i < len; i++) {
keys[codes.zoomOut[i]] = -zoom;
}
},
_addHooks: <span class="fstat-no" title="function not covered" >function () {</span>
<span class="cstat-no" title="statement not covered" > L.DomEvent.on(document, 'keydown', this._onKeyDown, this);</span>
},
_removeHooks: <span class="fstat-no" title="function not covered" >function () {</span>
<span class="cstat-no" title="statement not covered" > L.DomEvent.off(document, 'keydown', this._onKeyDown, this);</span>
},
_onKeyDown: <span class="fstat-no" title="function not covered" >function (e) {</span>
<span class="cstat-no" title="statement not covered" > var key = e.keyCode,</span>
map = this._map;
<span class="cstat-no" title="statement not covered" > if (this._panKeys.hasOwnProperty(key)) {</span>
<span class="cstat-no" title="statement not covered" > map.panBy(this._panKeys[key]);</span>
<span class="cstat-no" title="statement not covered" > if (map.options.maxBounds) {</span>
<span class="cstat-no" title="statement not covered" > map.panInsideBounds(map.options.maxBounds);</span>
}
} else <span class="cstat-no" title="statement not covered" >if (this._zoomKeys.hasOwnProperty(key)) {</span>
<span class="cstat-no" title="statement not covered" > map.setZoom(map.getZoom() + this._zoomKeys[key]);</span>
} else {
<span class="cstat-no" title="statement not covered" > return;</span>
}
<span class="cstat-no" title="statement not covered" > L.DomEvent.stop(e);</span>
}
});
L.Map.addInitHook('addHandler', 'keyboard', L.Map.Keyboard);
</pre></td></tr>
</table></pre>
</div>
<div class='footer'>
<div class='meta'>Generated by <a href='http://istanbul-js.org' target='_blank'>istanbul</a> at Thu Apr 04 2013 12:16:35 GMT+0300 (EEST)</div>
</div>
</body>
<script src="../../../prettify.js"></script>
<script src="http://yui.yahooapis.com/3.6.0/build/yui/yui-min.js"></script>
<script>
YUI().use('datatable', function (Y) {
var formatters = {
pct: function (o) {
o.className += o.record.get('classes')[o.column.key];
try {
return o.value.toFixed(2) + '%';
} catch (ex) { return o.value + '%'; }
},
html: function (o) {
o.className += o.record.get('classes')[o.column.key];
return o.record.get(o.column.key + '_html');
}
},
defaultFormatter = function (o) {
o.className += o.record.get('classes')[o.column.key];
return o.value;
};
function getColumns(theadNode) {
var colNodes = theadNode.all('tr th'),
cols = [],
col;
colNodes.each(function (colNode) {
col = {
key: colNode.getAttribute('data-col'),
label: colNode.get('innerHTML') || ' ',
sortable: !colNode.getAttribute('data-nosort'),
className: colNode.getAttribute('class'),
type: colNode.getAttribute('data-type'),
allowHTML: colNode.getAttribute('data-html') === 'true' || colNode.getAttribute('data-fmt') === 'html'
};
col.formatter = formatters[colNode.getAttribute('data-fmt')] || defaultFormatter;
cols.push(col);
});
return cols;
}
function getRowData(trNode, cols) {
var tdNodes = trNode.all('td'),
i,
row = { classes: {} },
node,
name;
for (i = 0; i < cols.length; i += 1) {
name = cols[i].key;
node = tdNodes.item(i);
row[name] = node.getAttribute('data-value') || node.get('innerHTML');
row[name + '_html'] = node.get('innerHTML');
row.classes[name] = node.getAttribute('class');
//Y.log('Name: ' + name + '; Value: ' + row[name]);
if (cols[i].type === 'number') { row[name] = row[name] * 1; }
}
//Y.log(row);
return row;
}
function getData(tbodyNode, cols) {
var data = [];
tbodyNode.all('tr').each(function (trNode) {
data.push(getRowData(trNode, cols));
});
return data;
}
function replaceTable(node) {
if (!node) { return; }
var cols = getColumns(node.one('thead')),
data = getData(node.one('tbody'), cols),
table,
parent = node.get('parentNode');
table = new Y.DataTable({
columns: cols,
data: data,
sortBy: 'file'
});
parent.set('innerHTML', '');
table.render(parent);
}
Y.on('domready', function () {
replaceTable(Y.one('div.coverage-summary table'));
if (typeof prettyPrint === 'function') {
prettyPrint();
}
});
});
</script>
</html>
| ingalls/Emendo | web/node_modules/leaflet/spec/coverage/PhantomJS 1.8 (Mac)/src/map/handler/Map.Keyboard.js.html | HTML | gpl-2.0 | 23,141 |
#ifndef CYGONCE_DEVS_FLASH_SYNTH_SYNTH_H
#define CYGONCE_DEVS_FLASH_SYNTH_SYNTH_H
//==========================================================================
//
// synth.h
//
// synth Flash programming - device constants, etc.
//
//==========================================================================
// ####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later
// version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with eCos; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// As a special exception, if other files instantiate templates or use
// macros or inline functions from this file, or you compile this file
// and link it with other works to produce a work based on this file,
// this file does not by itself cause the resulting work to be covered by
// the GNU General Public License. However the source code for this file
// must still be made available in accordance with section (3) of the GNU
// General Public License v2.
//
// This exception does not invalidate any other reasons why a work based
// on this file might be covered by the GNU General Public License.
// -------------------------------------------
// ####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s): [email protected]
// Contributors: andrew.lunn
// Date: 2001-10-30
// Purpose:       Device constants for the synthetic target flash driver
// Description:   Exports the flash image file descriptor and the base address
//                of the emulated flash region used by the synthetic flash
//                device driver.
//
//####DESCRIPTIONEND####
//
//==========================================================================
#include <pkgconf/system.h>
#include <cyg/infra/cyg_type.h>
typedef unsigned long flash_t;               // word type used to access the emulated flash
__externC int cyg_dev_flash_synth_flashfd;   // file descriptor of the flash image file on the host
__externC flash_t *cyg_dev_flash_synth_base; // base address of the in-memory flash image
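/*
 * Usage sketch (illustrative only - the real access routines live in the
 * driver source, not in this header). A read helper would typically copy
 * data out of the emulated flash through the exported base pointer:
 *
 *   static int synth_flash_read(void *dst, const flash_t *src, size_t len)
 *   {
 *       // bounds checking against the region at cyg_dev_flash_synth_base
 *       // is omitted in this sketch; memcpy comes from <string.h>
 *       memcpy(dst, src, len);
 *       return 0;
 *   }
 *
 * cyg_dev_flash_synth_flashfd would in turn be used to write changes back to
 * the flash image file on the host; the exact mechanism is defined by the
 * driver implementation, not by this header.
 */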
#endif // CYGONCE_DEVS_FLASH_SYNTH_SYNTH_H
// ------------------------------------------------------------------------
// EOF synth.h
| reille/proj_ecos | src/ecos/packages/devs/flash/synth/current/src/synth.h | C | gpl-2.0 | 3,013 |
--
-- Copyright 2006 University of Dundee. All rights reserved.
-- Use is subject to license terms supplied in LICENSE.txt
--
--
-- This file was generated by dsl/resources/ome/dsl/data.vm
--
begin;
set constraints all deferred;
--
-- First, we install a unique constraint so that it is only possible
-- to go from versionA/patchA to versionB/patchB once.
--
alter table dbpatch add constraint unique_dbpatch unique (currentVersion, currentPatch, previousVersion, previousPatch);
--
-- Since this is a table that we will be using in DB-specific ways, we're also going
-- to make working with it a bit simpler.
--
alter table dbpatch alter id set default nextval('seq_dbpatch');
alter table dbpatch alter permissions set default -35;
alter table dbpatch alter message set default 'Updating';
--
-- Then, we insert into the patch table the patch (initialization) which we are currently
-- running so that if anything goes wrong, we'll have some record.
--
insert into dbpatch (currentVersion, currentPatch, previousVersion, previousPatch, message)
values ('OMERO3A', 7, 'OMERO3A', 0, 'Initializing');
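--
-- For illustration only (commented out, not executed here): a later schema
-- patch would record itself against this table in the same way, relying on
-- the column defaults installed above, while the unique constraint guarantees
-- the same transition cannot be recorded twice. The version/patch numbers
-- below are hypothetical.
--
-- insert into dbpatch (currentVersion, currentPatch, previousVersion, previousPatch)
-- values ('OMERO3A', 8, 'OMERO3A', 7);
-- -- ... upgrade statements would go here ...
-- update dbpatch set message = 'Database updated.', finished = now()
-- where currentVersion = 'OMERO3A' and currentPatch = 8
-- and previousVersion = 'OMERO3A' and previousPatch = 7;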
--
-- Here we will create the root account and the necessary groups
--
insert into experimenter (id,permissions,version,omename,firstname,lastname)
values (0,0,0,'root','root','root');
insert into experimenter (id,permissions,version,omename,firstname,lastname)
values (nextval('seq_experimenter'),0,0,'guest','Guest','Account');
insert into session
(id,permissions,timetoidle,timetolive,started,closed,defaultpermissions,defaulteventtype,uuid)
select 0,-35,0,0,now(),now(),'rw----','BOOTSTRAP',0000;
insert into session
(id,permissions,timetoidle,timetolive,started,closed,defaultpermissions,defaulteventtype,uuid)
select nextval('seq_session'),-35, 0,0,now(),now(),'rw----','PREVIOUSITEMS','1111';
insert into event (id,permissions,time,status,experimenter,session) values (0,0,now(),'BOOTSTRAP',0,0);
insert into experimentergroup (id,permissions,version,owner_id,group_id,creation_id,update_id,name)
values (0,-35,0,0,0,0,0,'system');
insert into experimentergroup (id,permissions,version,owner_id,group_id,creation_id,update_id,name)
values (nextval('seq_experimentergroup'),-35,0,0,0,0,0,'user');
insert into experimentergroup (id,permissions,version,owner_id,group_id,creation_id,update_id,name)
values (nextval('seq_experimentergroup'),-35,0,0,0,0,0,'default');
insert into experimentergroup (id,permissions,version,owner_id,group_id,creation_id,update_id,name)
values (nextval('seq_experimentergroup'),-35,0,0,0,0,0,'guest');
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value) values
(0,-35,0,0,0,'Bootstrap');
insert into groupexperimentermap
(id,permissions,version,owner_id,group_id,creation_id,update_id, parent, child, child_index)
values
(0,-35,0,0,0,0,0,0,0,0);
insert into groupexperimentermap
(id,permissions,version,owner_id,group_id,creation_id,update_id, parent, child, child_index)
select nextval('seq_groupexperimentermap'),-35,0,0,0,0,0,1,0,1;
insert into groupexperimentermap
(id,permissions,version,owner_id,group_id,creation_id,update_id, parent, child, child_index)
select nextval('seq_groupexperimentermap'),-35,0,0,0,0,0,3,1,0;
update event set type = 0;
update event set experimentergroup = 0;
alter table event alter column type set not null;
alter table event alter column experimentergroup set not null;
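--
-- For illustration only (commented out): any further account created at this
-- point would follow the same pattern as the guest account above - one row in
-- experimenter plus a groupexperimentermap row linking the new experimenter id
-- to the id of its group. The account name below is hypothetical.
--
-- insert into experimenter (id,permissions,version,omename,firstname,lastname)
-- values (nextval('seq_experimenter'),0,0,'demo','Demo','Account');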
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Oil';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Water';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'WaterDipping';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Air';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Multi';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Glycerol';
insert into immersion (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_immersion'),-35,0,0,0,'Other';
insert into arctype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_arctype'),-35,0,0,0,'Hg';
insert into arctype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_arctype'),-35,0,0,0,'Xe';
insert into arctype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_arctype'),-35,0,0,0,'Hg-Xe';
insert into arctype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_arctype'),-35,0,0,0,'Other';
insert into renderingmodel (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_renderingmodel'),-35,0,0,0,'rgb';
insert into renderingmodel (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_renderingmodel'),-35,0,0,0,'hsb';
insert into renderingmodel (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_renderingmodel'),-35,0,0,0,'greyscale';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'Wide-field';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'LaserScanningMicroscopy';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'LaserScanningConfocal';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'SpinningDiskConfocal';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'SlitScanConfocal';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'MultiPhotonMicroscopy';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'StructuredIllumination';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'SingleMoleculeImaging';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'TotalInternalReflection';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'FluorescenceLifetime';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'SpectralImaging';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'FluorescenceCorrelationSpectroscopy';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'NearFieldScanningOpticalMicroscopy';
insert into acquisitionmode (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_acquisitionmode'),-35,0,0,0,'SecondHarmonicGenerationImaging';
insert into binning (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_binning'),-35,0,0,0,'1x1';
insert into binning (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_binning'),-35,0,0,0,'2x2';
insert into binning (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_binning'),-35,0,0,0,'4x4';
insert into binning (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_binning'),-35,0,0,0,'8x8';
insert into family (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_family'),-35,0,0,0,'linear';
insert into family (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_family'),-35,0,0,0,'polynomial';
insert into family (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_family'),-35,0,0,0,'exponential';
insert into family (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_family'),-35,0,0,0,'logarithmic';
insert into medium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_medium'),-35,0,0,0,'Air';
insert into medium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_medium'),-35,0,0,0,'Oil';
insert into medium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_medium'),-35,0,0,0,'Water';
insert into medium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_medium'),-35,0,0,0,'Glycerol';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'int8';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'int16';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'int32';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'uint8';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'uint16';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'uint32';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'float';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'double';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'complex';
insert into pixelstype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pixelstype'),-35,0,0,0,'double-complex';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'PNG';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'JPEG';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'PGM';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Fits';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'GIF';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'BMP';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Dicom';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'BioRad';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'IPLab';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Deltavision';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'MRC';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Gatan';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Imaris';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'OpenlabRaw';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'OMEXML';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'LIF';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'AVI';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'QT';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Pict';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'SDT';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'EPS';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Slidebook';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Alicona';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'MNG';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'NRRD';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Khoros';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Visitech';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'LIM';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'PSD';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'InCell';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ICS';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'PerkinElmer';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'TCS';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'FV1000';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ZeissZVI';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'IPW';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'LegacyND2';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ND2';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'PCI';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ImarisHDF';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Metamorph';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ZeissLSM';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'SEQ';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Gel';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ImarisTiff';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Flex';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'SVS';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Leica';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Nikon';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Fluoview';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Prairie';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Micromanager';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'ImprovisionTiff';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'OMETiff';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'MetamorphTiff';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Tiff';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'Openlab';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/csv';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/plain';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/xml';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/html';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/rtf';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'text/x-python';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'application/pdf';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'application/ms-excel';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'application/ms-powerpoint';
insert into format (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_format'),-35,0,0,0,'application/ms-word';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'Excimer';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'Gas';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'MetalVapor';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'SolidState';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'Dye';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'Semiconductor';
insert into lasertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasertype'),-35,0,0,0,'FreeElectron';
insert into pulse (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pulse'),-35,0,0,0,'CW';
insert into pulse (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pulse'),-35,0,0,0,'Single';
insert into pulse (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pulse'),-35,0,0,0,'Q-Switched';
insert into pulse (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pulse'),-35,0,0,0,'Repetitive';
insert into pulse (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_pulse'),-35,0,0,0,'Mode-Locked';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Submitted';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Resubmitted';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Queued';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Requeued';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Running';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Error';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Waiting';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Finished';
insert into jobstatus (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_jobstatus'),-35,0,0,0,'Cancelled';
insert into coating (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_coating'),-35,0,0,0,'UV';
insert into coating (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_coating'),-35,0,0,0,'PlanApo';
insert into coating (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_coating'),-35,0,0,0,'PlanFluor';
insert into coating (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_coating'),-35,0,0,0,'SuperFluor';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'CCD';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Intensified-CCD';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Analog-Video';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'PMT';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Photodiode';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Spectroscopy';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Life-time-Imaging';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'Correlation-Spectroscopy';
insert into detectortype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_detectortype'),-35,0,0,0,'FTIR';
insert into illumination (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_illumination'),-35,0,0,0,'Transmitted';
insert into illumination (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_illumination'),-35,0,0,0,'Epifluorescence';
insert into illumination (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_illumination'),-35,0,0,0,'Oblique';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Achro';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Achromat';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Fluor';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Fl';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Fluar';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Neofluar';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Fluotar';
insert into aberrationcorrection (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_aberrationcorrection'),-35,0,0,0,'Apo';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'RGB';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'ARGB';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'CMYK';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'HSV';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'Monochrome';
insert into photometricinterpretation (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_photometricinterpretation'),-35,0,0,0,'ColorMap';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Import';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Internal';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Shoola';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'User';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Task';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Test';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Processing';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'FullText';
insert into eventtype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_eventtype'),-35,0,0,0,'Sessions';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Rhodamine-5G';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Coumaring-C30';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'ArFl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'ArCl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'KrFl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'KrCl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'XeFl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'XeCl';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'XeBr';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'GaAs';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'GaAlAs';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'e-';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Cu';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Ag';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'N';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Ar';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Kr';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'Xe';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'HeNe';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'HeCd';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'CO';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'CO2';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'H2O';
insert into lasermedium (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_lasermedium'),-35,0,0,0,'HFl';
insert into microscopetype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_microscopetype'),-35,0,0,0,'Upright';
insert into microscopetype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_microscopetype'),-35,0,0,0,'Inverted';
insert into microscopetype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_microscopetype'),-35,0,0,0,'Dissection';
insert into microscopetype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_microscopetype'),-35,0,0,0,'Electrophysiology';
insert into irisdiaphragm (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_irisdiaphragm'),-35,0,0,0,'I';
insert into irisdiaphragm (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_irisdiaphragm'),-35,0,0,0,'Iris';
insert into irisdiaphragm (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_irisdiaphragm'),-35,0,0,0,'W/Iris';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYZCT';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYZTC';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYCTZ';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYCZT';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYTCZ';
insert into dimensionorder (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_dimensionorder'),-35,0,0,0,'XYTZC';
insert into frequencymultiplication (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_frequencymultiplication'),-35,0,0,0,'x1';
insert into frequencymultiplication (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_frequencymultiplication'),-35,0,0,0,'x2';
insert into frequencymultiplication (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_frequencymultiplication'),-35,0,0,0,'x3';
insert into frequencymultiplication (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_frequencymultiplication'),-35,0,0,0,'x4';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'FP';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'FRET';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Time-lapse';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'4-D+';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Screen';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Immunocytopchemistry';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Immunofluroescence';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'FISH';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Electropyhsiology';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Ion-Imaging';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Colocalization';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'PGI/Documentation';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'FRAP';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Photoablation';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Photoactivation';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Uncaging';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Optical-Trapping';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Fluorescence-Lifetime';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Spectral-Imaging';
insert into experimenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_experimenttype'),-35,0,0,0,'Other';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'Brightfield';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'Phase';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'DIC';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'HoffmanModulation';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'ObliqueIllumination';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'PolarizedLight';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'Darkfield';
insert into contrastmethod (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_contrastmethod'),-35,0,0,0,'Fluorescence';
insert into filamenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filamenttype'),-35,0,0,0,'Incandescent';
insert into filamenttype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filamenttype'),-35,0,0,0,'Halogen';
insert into filtertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filtertype'),-35,0,0,0,'LongPass';
insert into filtertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filtertype'),-35,0,0,0,'ShortPass';
insert into filtertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filtertype'),-35,0,0,0,'BandPass';
insert into filtertype (id,permissions,owner_id,group_id,creation_id,value)
select nextval('seq_filtertype'),-35,0,0,0,'MultiPass';
create table configuration ( name varchar(255) primary key, value text );
alter table pixels add column url varchar(2048);
alter table originalfile add column url varchar(2048);
alter table thumbnail add column url varchar(2048);
create table password ( experimenter_id bigint unique not null REFERENCES experimenter (id) , hash char(24), dn text );
insert into password values (0,'@ROOTPASS@');
insert into password values (1,'');
-- root can now login with omero.rootpass property value
-- and guest can login with any value
-- Here we have finished initializing this database.
update dbpatch set message = 'Database ready.', finished = now()
where currentVersion = 'OMERO3A' and
currentPatch = 7 and
previousVersion = 'OMERO3A' and
previousPatch = 0;
commit;
| stelfrich/openmicroscopy | sql/psql/OMERO3A__7/data.sql | SQL | gpl-2.0 | 38,586 |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "test_tcpbpf.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
__type(key, __u32);
__type(value, struct tcpbpf_globals);
} global_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, int);
} sockopt_results SEC(".maps");
static inline void update_event_map(int event)
{
__u32 key = 0;
struct tcpbpf_globals g, *gp;
gp = bpf_map_lookup_elem(&global_map, &key);
if (gp == NULL) {
struct tcpbpf_globals g = {0};
g.event_map |= (1 << event);
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
} else {
g = *gp;
g.event_map |= (1 << event);
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
}
}
int _version SEC("version") = 1;
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct tcphdr *thdr;
int good_call_rv = 0;
int bad_call_rv = 0;
int save_syn = 1;
int rv = -1;
int v = 0;
int op;
op = (int) skops->op;
update_event_map(op);
switch (op) {
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
/* Test failure to set largest cb flag (assumes not defined) */
bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
/* Set callback */
good_call_rv = bpf_sock_ops_cb_flags_set(skops,
BPF_SOCK_OPS_STATE_CB_FLAG);
/* Update results */
{
__u32 key = 0;
struct tcpbpf_globals g, *gp;
gp = bpf_map_lookup_elem(&global_map, &key);
if (!gp)
break;
g = *gp;
g.bad_cb_test_rv = bad_call_rv;
g.good_cb_test_rv = good_call_rv;
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
}
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
skops->sk_txhash = 0x12345f;
v = 0xff;
rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
sizeof(v));
if (skops->family == AF_INET6) {
v = bpf_getsockopt(skops, IPPROTO_TCP, TCP_SAVED_SYN,
header, (sizeof(struct ipv6hdr) +
sizeof(struct tcphdr)));
if (!v) {
int offset = sizeof(struct ipv6hdr);
thdr = (struct tcphdr *)(header + offset);
v = thdr->syn;
__u32 key = 1;
bpf_map_update_elem(&sockopt_results, &key, &v,
BPF_ANY);
}
}
break;
case BPF_SOCK_OPS_RTO_CB:
break;
case BPF_SOCK_OPS_RETRANS_CB:
break;
case BPF_SOCK_OPS_STATE_CB:
if (skops->args[1] == BPF_TCP_CLOSE) {
__u32 key = 0;
struct tcpbpf_globals g, *gp;
gp = bpf_map_lookup_elem(&global_map, &key);
if (!gp)
break;
g = *gp;
if (skops->args[0] == BPF_TCP_LISTEN) {
g.num_listen++;
} else {
g.total_retrans = skops->total_retrans;
g.data_segs_in = skops->data_segs_in;
g.data_segs_out = skops->data_segs_out;
g.bytes_received = skops->bytes_received;
g.bytes_acked = skops->bytes_acked;
}
g.num_close_events++;
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
}
break;
case BPF_SOCK_OPS_TCP_LISTEN_CB:
bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
&save_syn, sizeof(save_syn));
/* Save the result of the TCP_SAVE_SYN setsockopt in sockopt_results */
__u32 key = 0;
bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY);
break;
default:
rv = -1;
}
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| tmshlvck/omnia-linux | tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c | C | gpl-2.0 | 3,676 |
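A side note on the selftest above: update_event_map() folds every sockops callback it sees into the event_map field as a bitmask, one bit per skops->op value. A small, hypothetical user-space helper for decoding such a bitmask might look like the sketch below (plain C, not part of the selftest; the op numbers simply follow whatever the kernel assigns to the bpf_sock_ops callbacks).

#include <stdio.h>

/* Print every sockops op recorded in an event_map bitmask. */
static void print_recorded_ops(unsigned int event_map)
{
	int op;

	for (op = 0; op < 32; op++)
		if (event_map & (1u << op))
			printf("callback for op %d was seen\n", op);
}

int main(void)
{
	/* Example value only: bits 4 and 5 set. */
	print_recorded_ops((1u << 4) | (1u << 5));
	return 0;
}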
/*
* linux/drivers/i2c/chips/twl4030-power.c
*
* Handle TWL4030 Power initialization
*
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2006 Texas Instruments, Inc
*
* Written by Kalle Jokiniemi
* Peter De Schrijver <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/i2c/twl4030.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
static u8 triton_next_free_address = 0x2b;
#define PWR_P1_SW_EVENTS 0x10
#define PWR_DEVOFF (1<<0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
#define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b)
#define NUM_OF_RESOURCES 28
/* resource - hfclk */
#define R_HFCLKOUT_DEV_GRP PHY_TO_OFF_PM_RECEIVER(0xe6)
/* PM events */
#define R_P1_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x46)
#define R_P2_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x47)
#define R_P3_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x48)
#define R_CFG_P1_TRANSITION PHY_TO_OFF_PM_MASTER(0x36)
#define R_CFG_P2_TRANSITION PHY_TO_OFF_PM_MASTER(0x37)
#define R_CFG_P3_TRANSITION PHY_TO_OFF_PM_MASTER(0x38)
#define LVL_WAKEUP 0x08
#define ENABLE_WARMRESET (1<<4)
#define END_OF_SCRIPT 0x3f
#define R_SEQ_ADD_A2S PHY_TO_OFF_PM_MASTER(0x55)
#define R_SEQ_ADD_SA12 PHY_TO_OFF_PM_MASTER(0x56)
#define R_SEQ_ADD_S2A3 PHY_TO_OFF_PM_MASTER(0x57)
#define R_SEQ_ADD_WARM PHY_TO_OFF_PM_MASTER(0x58)
#define R_MEMORY_ADDRESS PHY_TO_OFF_PM_MASTER(0x59)
#define R_MEMORY_DATA PHY_TO_OFF_PM_MASTER(0x5a)
#define R_PROTECT_KEY 0x0E
#define KEY_1 0xC0
#define KEY_2 0x0C
/* resource configuration registers */
#define DEVGROUP_OFFSET 0
#define TYPE_OFFSET 1
static u8 res_config_addrs[] = {
[RES_VAUX1] = 0x17,
[RES_VAUX2] = 0x1b,
[RES_VAUX3] = 0x1f,
[RES_VAUX4] = 0x23,
[RES_VMMC1] = 0x27,
[RES_VMMC2] = 0x2b,
[RES_VPLL1] = 0x2f,
[RES_VPLL2] = 0x33,
[RES_VSIM] = 0x37,
[RES_VDAC] = 0x3b,
[RES_VINTANA1] = 0x3f,
[RES_VINTANA2] = 0x43,
[RES_VINTDIG] = 0x47,
[RES_VIO] = 0x4b,
[RES_VDD1] = 0x55,
[RES_VDD2] = 0x63,
[RES_VUSB_1V5] = 0x71,
[RES_VUSB_1V8] = 0x74,
[RES_VUSB_3V1] = 0x77,
[RES_VUSBCP] = 0x7a,
[RES_REGEN] = 0x7f,
[RES_NRES_PWRON] = 0x82,
[RES_CLKEN] = 0x85,
[RES_SYSEN] = 0x88,
[RES_HFCLKOUT] = 0x8b,
[RES_32KCLKOUT] = 0x8e,
[RES_RESET] = 0x91,
[RES_Main_Ref] = 0x94,
};
static int __init twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_MEMORY_ADDRESS);
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
R_MEMORY_DATA);
return err;
}
static int __init twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err = 0;
address *= 4;
err |= twl4030_write_script_byte(address++, pmb_message >> 8);
err |= twl4030_write_script_byte(address++, pmb_message & 0xff);
err |= twl4030_write_script_byte(address++, delay);
err |= twl4030_write_script_byte(address++, next);
return err;
}
static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
int err = 0;
for (; len; len--, address++, script++) {
if (len == 1)
err |= twl4030_write_script_ins(address,
script->pmb_message,
script->delay,
END_OF_SCRIPT);
else
err |= twl4030_write_script_ins(address,
script->pmb_message,
script->delay,
address + 1);
}
return err;
}
static int __init config_wakeup3_sequence(u8 address)
{
int err = 0;
/* Set SLEEP to ACTIVE SEQ address for P3 */
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A3);
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, LVL_WAKEUP,
R_P3_SW_EVENTS);
if (err)
printk(KERN_ERR "TWL4030 wakeup sequence for P3" \
"config error\n");
return err;
}
static int __init config_wakeup12_sequence(u8 address)
{
int err = 0;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_SA12);
/* P1/P2/P3 LVL_WAKEUP should be on LEVEL */
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, LVL_WAKEUP,
R_P1_SW_EVENTS);
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, LVL_WAKEUP,
R_P2_SW_EVENTS);
if (machine_is_omap_3430sdp() || machine_is_omap_ldp() ||
machine_is_omap_zoom2()) {
u8 data;
/* Disabling AC charger effect on sleep-active transitions */
err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_CFG_P1_TRANSITION);
data &= ~(1<<1);
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
R_CFG_P1_TRANSITION);
}
if (err)
printk(KERN_ERR "TWL4030 wakeup sequence for P1 and P2" \
"config error\n");
return err;
}
static int __init config_sleep_sequence(u8 address)
{
int err = 0;
/*
* CLKREQ is pulled high on the 2430SDP, therefore, we need to take
* it out of the HFCLKOUT DEV_GRP for P1 else HFCLKOUT can't be stopped.
*/
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
0x20, R_HFCLKOUT_DEV_GRP);
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_A2S);
if (err)
printk(KERN_ERR "TWL4030 sleep sequence config error\n");
return err;
}
static int __init config_warmreset_sequence(u8 address)
{
int err = 0;
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_WARM);
/* P1/P2/P3 enable WARMRESET */
err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P1_SW_EVENTS);
rd_data |= ENABLE_WARMRESET;
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P1_SW_EVENTS);
err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P2_SW_EVENTS);
rd_data |= ENABLE_WARMRESET;
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P2_SW_EVENTS);
err |= twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P3_SW_EVENTS);
rd_data |= ENABLE_WARMRESET;
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P3_SW_EVENTS);
if (err)
printk(KERN_ERR
"TWL4030 warmreset seq config error\n");
return err;
}
void twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
u8 type;
if (rconfig->resource > NUM_OF_RESOURCES) {
printk(KERN_ERR
"TWL4030 Resource %d does not exist\n",
rconfig->resource);
return;
}
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
if (rconfig->devgroup >= 0)
twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
rconfig->devgroup << 5,
rconfig_addr + DEVGROUP_OFFSET);
/* Set resource types */
if (twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER,
&type,
rconfig_addr + TYPE_OFFSET) < 0) {
printk(KERN_ERR
"TWL4030 Resource %d type could not read\n",
rconfig->resource);
return;
}
if (rconfig->type >= 0) {
type &= ~7;
type |= rconfig->type;
}
if (rconfig->type2 >= 0) {
type &= ~(3 << 3);
type |= rconfig->type2 << 3;
}
twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
}
static int __init load_triton_script(struct twl4030_script *tscript)
{
u8 address = triton_next_free_address;
int err;
err = twl4030_write_script(address, tscript->script, tscript->size);
if (err)
return err;
triton_next_free_address += tscript->size;
if (tscript->flags & TRITON_WRST_SCRIPT)
err |= config_warmreset_sequence(address);
if (tscript->flags & TRITON_WAKEUP12_SCRIPT)
err |= config_wakeup12_sequence(address);
if (tscript->flags & TRITON_WAKEUP3_SCRIPT)
err |= config_wakeup3_sequence(address);
if (tscript->flags & TRITON_SLEEP_SCRIPT)
err |= config_sleep_sequence(address);
return err;
}
void __init twl4030_power_init(struct twl4030_power_data *triton2_scripts)
{
int err = 0;
int i;
struct twl4030_resconfig *resconfig;
err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_1,
R_PROTECT_KEY);
err |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_2,
R_PROTECT_KEY);
if (err)
printk(KERN_ERR
"TWL4030 Unable to unlock registers\n");
for (i = 0; i < triton2_scripts->size; i++) {
err = load_triton_script(triton2_scripts->scripts[i]);
if (err)
break;
}
resconfig = triton2_scripts->resource_config;
if (resconfig) {
while (resconfig->resource) {
twl4030_configure_resource(resconfig);
resconfig++;
}
}
if (twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY))
printk(KERN_ERR
"TWL4030 Unable to relock registers\n");
}
| CyanogenModXT720/android_kernel | drivers/mfd/twl4030-power.c | C | gpl-2.0 | 9,149 |
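For orientation, here is a rough sketch of the board-side data that twl4030_power_init() above expects. It sticks to the struct fields and flag names the driver itself dereferences; everything else (the power-bus message word, the delay, the devgroup value) is a placeholder rather than a value from a real board file, and the type declarations are assumed to come from <linux/i2c/twl4030.h>.

/*
 * Hypothetical board-side sketch -- message word, delay and devgroup
 * values are placeholders only.
 */
static struct twl4030_ins board_sleep_seq[] __initdata = {
	{ .pmb_message = 0x0000 /* placeholder power-bus message */, .delay = 4 },
};

static struct twl4030_script board_sleep_script __initdata = {
	.script	= board_sleep_seq,
	.size	= ARRAY_SIZE(board_sleep_seq),
	.flags	= TRITON_SLEEP_SCRIPT,
};

static struct twl4030_script *board_scripts[] __initdata = {
	&board_sleep_script,
};

static struct twl4030_resconfig board_resconfig[] = {
	/* type/type2 of -1 write the TYPE register back unchanged */
	{ .resource = RES_HFCLKOUT, .devgroup = 1, .type = -1, .type2 = -1 },
	{ 0 },	/* terminator: twl4030_power_init() stops at resource == 0 */
};

static struct twl4030_power_data board_power_data __initdata = {
	.scripts	 = board_scripts,
	.size		 = ARRAY_SIZE(board_scripts),
	.resource_config = board_resconfig,
};

/* called once from the board's init code: */
/* twl4030_power_init(&board_power_data); */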
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RIIC driver
*
* Copyright (C) 2013 Wolfram Sang <[email protected]>
* Copyright (C) 2013 Renesas Solutions Corp.
*/
/*
* This i2c core has a lot of interrupts, namely 8. We use their chaining as
* some kind of state machine.
*
* 1) The main xfer routine kicks off a transmission by putting the start bit
* (or repeated start) on the bus and enabling the transmit interrupt (TIE)
* since we need to send the slave address + RW bit in every case.
*
* 2) TIE sends slave address + RW bit and selects how to continue.
*
* 3a) Write case: We keep utilizing TIE as long as we have data to send. If we
* are done, we switch over to the transmission done interrupt (TEIE) and mark
* the message as completed (includes sending STOP) there.
*
* 3b) Read case: We switch over to receive interrupt (RIE). One dummy read is
* needed to start clocking, then we keep receiving until we are done. Note
* that we use the RDRFS mode all the time, i.e. we ACK/NACK every byte by
* writing to the ACKBT bit. I tried using the RDRFS mode only at the end of a
* message to create the final NACK as sketched in the datasheet. This caused
* some subtle races (when byte n was processed and byte n+1 was already
* waiting), though, and I started with the safe approach.
*
* 4) If we got a NACK somewhere, we flag the error and stop the transmission
* via NAKIE.
*
* Also check the comments in the interrupt routines for some gory details.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define RIIC_ICCR1 0x00
#define RIIC_ICCR2 0x04
#define RIIC_ICMR1 0x08
#define RIIC_ICMR3 0x10
#define RIIC_ICSER 0x18
#define RIIC_ICIER 0x1c
#define RIIC_ICSR2 0x24
#define RIIC_ICBRL 0x34
#define RIIC_ICBRH 0x38
#define RIIC_ICDRT 0x3c
#define RIIC_ICDRR 0x40
#define ICCR1_ICE 0x80
#define ICCR1_IICRST 0x40
#define ICCR1_SOWP 0x10
#define ICCR2_BBSY 0x80
#define ICCR2_SP 0x08
#define ICCR2_RS 0x04
#define ICCR2_ST 0x02
#define ICMR1_CKS_MASK 0x70
#define ICMR1_BCWP 0x08
#define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP)
#define ICMR3_RDRFS 0x20
#define ICMR3_ACKWP 0x10
#define ICMR3_ACKBT 0x08
#define ICIER_TIE 0x80
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
#define ICIER_NAKIE 0x10
#define ICIER_SPIE 0x08
#define ICSR2_NACKF 0x10
#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
#define RIIC_INIT_MSG -1
struct riic_dev {
void __iomem *base;
u8 *buf;
struct i2c_msg *msg;
int bytes_left;
int err;
int is_last;
struct completion msg_done;
struct i2c_adapter adapter;
struct clk *clk;
};
struct riic_irq_desc {
int res_num;
irq_handler_t isr;
char *name;
};
static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u8 reg)
{
writeb((readb(riic->base + reg) & ~clear) | set, riic->base + reg);
}
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
unsigned long time_left;
int i, ret;
u8 start_bit;
ret = clk_prepare_enable(riic->clk);
if (ret)
return ret;
if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
goto out;
}
reinit_completion(&riic->msg_done);
riic->err = 0;
writeb(0, riic->base + RIIC_ICSR2);
for (i = 0, start_bit = ICCR2_ST; i < num; i++) {
riic->bytes_left = RIIC_INIT_MSG;
riic->buf = msgs[i].buf;
riic->msg = &msgs[i];
riic->is_last = (i == num - 1);
writeb(ICIER_NAKIE | ICIER_TIE, riic->base + RIIC_ICIER);
writeb(start_bit, riic->base + RIIC_ICCR2);
time_left = wait_for_completion_timeout(&riic->msg_done, riic->adapter.timeout);
if (time_left == 0)
riic->err = -ETIMEDOUT;
if (riic->err)
break;
start_bit = ICCR2_RS;
}
out:
clk_disable_unprepare(riic->clk);
return riic->err ?: num;
}
static irqreturn_t riic_tdre_isr(int irq, void *data)
{
struct riic_dev *riic = data;
u8 val;
if (!riic->bytes_left)
return IRQ_NONE;
if (riic->bytes_left == RIIC_INIT_MSG) {
if (riic->msg->flags & I2C_M_RD)
/* On read, switch over to receive interrupt */
riic_clear_set_bit(riic, ICIER_TIE, ICIER_RIE, RIIC_ICIER);
else
/* On write, initialize length */
riic->bytes_left = riic->msg->len;
val = i2c_8bit_addr_from_msg(riic->msg);
} else {
val = *riic->buf;
riic->buf++;
riic->bytes_left--;
}
/*
* Switch to transmission ended interrupt when done. Do check here
* after bytes_left was initialized to support SMBUS_QUICK (new msg has
* 0 length then)
*/
if (riic->bytes_left == 0)
riic_clear_set_bit(riic, ICIER_TIE, ICIER_TEIE, RIIC_ICIER);
/*
* This acks the TIE interrupt. We get another TIE immediately if our
* value could be moved to the shadow shift register right away. So
* this must be after updates to ICIER (where we want to disable TIE)!
*/
writeb(val, riic->base + RIIC_ICDRT);
return IRQ_HANDLED;
}
static irqreturn_t riic_tend_isr(int irq, void *data)
{
struct riic_dev *riic = data;
if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
/* We got a NACK (NAKIE fired) */
readb(riic->base + RIIC_ICDRR); /* dummy read */
riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
riic->err = -ENXIO;
} else if (riic->bytes_left) {
return IRQ_NONE;
}
if (riic->is_last || riic->err) {
riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
} else {
/* Transfer is complete, but do not send STOP */
riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
complete(&riic->msg_done);
}
return IRQ_HANDLED;
}
static irqreturn_t riic_rdrf_isr(int irq, void *data)
{
struct riic_dev *riic = data;
if (!riic->bytes_left)
return IRQ_NONE;
if (riic->bytes_left == RIIC_INIT_MSG) {
riic->bytes_left = riic->msg->len;
readb(riic->base + RIIC_ICDRR); /* dummy read */
return IRQ_HANDLED;
}
if (riic->bytes_left == 1) {
/* STOP must come before we set ACKBT! */
if (riic->is_last) {
riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
}
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
} else {
riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
}
/* Reading acks the RIE interrupt */
*riic->buf = readb(riic->base + RIIC_ICDRR);
riic->buf++;
riic->bytes_left--;
return IRQ_HANDLED;
}
static irqreturn_t riic_stop_isr(int irq, void *data)
{
struct riic_dev *riic = data;
/* read back registers to confirm writes have fully propagated */
writeb(0, riic->base + RIIC_ICSR2);
readb(riic->base + RIIC_ICSR2);
writeb(0, riic->base + RIIC_ICIER);
readb(riic->base + RIIC_ICIER);
complete(&riic->msg_done);
return IRQ_HANDLED;
}
static u32 riic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm riic_algo = {
.master_xfer = riic_xfer,
.functionality = riic_func,
};
static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
{
int ret;
unsigned long rate;
int total_ticks, cks, brl, brh;
ret = clk_prepare_enable(riic->clk);
if (ret)
return ret;
if (t->bus_freq_hz > 400000) {
dev_err(&riic->adapter.dev,
"unsupported bus speed (%dHz). 400000 max\n",
t->bus_freq_hz);
clk_disable_unprepare(riic->clk);
return -EINVAL;
}
rate = clk_get_rate(riic->clk);
/*
* Assume the default register settings:
* FER.SCLE = 1 (SCL sync circuit enabled, adds 2 or 3 cycles)
* FER.NFE = 1 (noise circuit enabled)
* MR3.NF = 0 (1 cycle of noise filtered out)
*
* Freq (CKS=000) = (I2CCLK + tr + tf) / ((BRH + 3 + 1) + (BRL + 3 + 1))
* Freq (CKS!=000) = (I2CCLK + tr + tf) / ((BRH + 2 + 1) + (BRL + 2 + 1))
*/
/*
* Determine reference clock rate. We must be able to get the desired
* frequency with only 62 clock ticks max (31 high, 31 low).
* Aim for a duty of 60% LOW, 40% HIGH.
*/
total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
for (cks = 0; cks < 7; cks++) {
/*
* 60% low time must be less than BRL + 2 + 1
* BRL max register value is 0x1F.
*/
brl = ((total_ticks * 6) / 10);
if (brl <= (0x1F + 3))
break;
total_ticks /= 2;
rate /= 2;
}
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
clk_disable_unprepare(riic->clk);
return -EINVAL;
}
brh = total_ticks - brl;
/* Remove automatic clock ticks for sync circuit and NF */
if (cks == 0) {
brl -= 4;
brh -= 4;
} else {
brl -= 3;
brh -= 3;
}
/*
* Remove clock ticks for rise and fall times. Convert ns to clock
* ticks.
*/
brl -= t->scl_fall_ns / (1000000000 / rate);
brh -= t->scl_rise_ns / (1000000000 / rate);
/* Adjust for min register values for when SCLE=1 and NFE=1 */
if (brl < 1)
brl = 1;
if (brh < 1)
brh = 1;
pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
t->scl_fall_ns / (1000000000 / rate),
t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
/* Changing the order of accessing IICRST and ICE may break things! */
writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
writeb(ICMR1_CKS(cks), riic->base + RIIC_ICMR1);
writeb(brh | ICBR_RESERVED, riic->base + RIIC_ICBRH);
writeb(brl | ICBR_RESERVED, riic->base + RIIC_ICBRL);
writeb(0, riic->base + RIIC_ICSER);
writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3);
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
clk_disable_unprepare(riic->clk);
return 0;
}
static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
{ .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
static int riic_i2c_probe(struct platform_device *pdev)
{
struct riic_dev *riic;
struct i2c_adapter *adap;
struct resource *res;
struct i2c_timings i2c_t;
int i, ret;
riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
if (!riic)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
riic->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
riic->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(riic->clk)) {
dev_err(&pdev->dev, "missing controller clock");
return PTR_ERR(riic->clk);
}
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
if (!res)
return -ENODEV;
ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
if (ret) {
dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
}
}
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
init_completion(&riic->msg_done);
i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
ret = riic_init_hw(riic, &i2c_t);
if (ret)
return ret;
ret = i2c_add_adapter(adap);
if (ret)
return ret;
platform_set_drvdata(pdev, riic);
dev_info(&pdev->dev, "registered with %dHz bus speed\n",
i2c_t.bus_freq_hz);
return 0;
}
static int riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
writeb(0, riic->base + RIIC_ICIER);
i2c_del_adapter(&riic->adapter);
return 0;
}
static const struct of_device_id riic_i2c_dt_ids[] = {
{ .compatible = "renesas,riic-rz" },
{ /* Sentinel */ },
};
static struct platform_driver riic_i2c_driver = {
.probe = riic_i2c_probe,
.remove = riic_i2c_remove,
.driver = {
.name = "i2c-riic",
.of_match_table = riic_i2c_dt_ids,
},
};
module_platform_driver(riic_i2c_driver);
MODULE_DESCRIPTION("Renesas RIIC adapter");
MODULE_AUTHOR("Wolfram Sang <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, riic_i2c_dt_ids);
| Fe-Pi/linux | drivers/i2c/busses/i2c-riic.c | C | gpl-2.0 | 12,347 |
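The divider search in riic_init_hw() above is easy to experiment with outside the kernel. Below is a stand-alone C sketch of the same calculation; the input clock, bus frequency and rise/fall times in main() are example figures only, and DIV_ROUND_UP is expanded by hand since it is a kernel macro.

#include <stdio.h>

/* Stand-alone rework of the divider search in riic_init_hw() above. */
static int riic_calc_dividers(unsigned long rate, unsigned long bus_hz,
			      unsigned long scl_rise_ns, unsigned long scl_fall_ns,
			      int *cks_out, int *brl_out, int *brh_out)
{
	int total_ticks, cks, brl, brh;

	total_ticks = (rate + bus_hz - 1) / bus_hz;	/* DIV_ROUND_UP(rate, bus_hz) */
	for (cks = 0; cks < 7; cks++) {
		brl = (total_ticks * 6) / 10;		/* aim for ~60% low time */
		if (brl <= (0x1F + 3))			/* fits in the 5-bit BRL field */
			break;
		total_ticks /= 2;
		rate /= 2;
	}
	if (brl > (0x1F + 3))
		return -1;				/* requested speed too slow */

	brh = total_ticks - brl;
	/* remove the automatic sync-circuit / noise-filter ticks */
	brl -= (cks == 0) ? 4 : 3;
	brh -= (cks == 0) ? 4 : 3;
	/* remove rise/fall times, converted from ns to clock ticks */
	brl -= scl_fall_ns / (1000000000 / rate);
	brh -= scl_rise_ns / (1000000000 / rate);
	if (brl < 1)
		brl = 1;
	if (brh < 1)
		brh = 1;

	*cks_out = cks;
	*brl_out = brl;
	*brh_out = brh;
	return 0;
}

int main(void)
{
	int cks, brl, brh;

	/* example: 33.325 MHz peripheral clock, 100 kHz bus, 300 ns rise/fall */
	if (riic_calc_dividers(33325000, 100000, 300, 300, &cks, &brl, &brh) == 0)
		printf("cks=%d brl=%d brh=%d\n", cks, brl, brh);
	return 0;
}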
/*****************************************************************************
Copyright (c) 1997, 2009, Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
/**************************************************//**
@file include/read0read.h
Cursor read
Created 2/16/1997 Heikki Tuuri
*******************************************************/
#ifndef read0read_h
#define read0read_h
#include "univ.i"
#include "ut0byte.h"
#include "ut0lst.h"
#include "trx0trx.h"
#include "trx0sys.h"
#include "read0types.h"
/*********************************************************************//**
Opens a read view where exactly the transactions serialized before this
point in time are seen in the view.
@return own: read view struct */
UNIV_INTERN
read_view_t*
read_view_open_now(
/*===============*/
trx_id_t cr_trx_id, /*!< in: trx_id of creating
transaction, or 0 used in purge */
read_view_t* view, /*!< in: current read view or NULL if it
doesn't exist yet */
ibool exclude_self); /*!< in: TRUE, if cr_trx_id should be
excluded from the resulting view */
/*********************************************************************//**
Makes a copy of the oldest existing read view, or opens a new. The view
must be closed with ..._close.
@return own: read view struct */
UNIV_INTERN
read_view_t*
read_view_oldest_copy_or_open_new(
/*==============================*/
trx_id_t cr_trx_id, /*!< in: trx_id of creating
transaction, or 0 used in purge */
read_view_t* view); /*!< in: pre-allocated view array or
NULL if a new one needs to be created */
/*********************************************************************//**
Closes a read view. */
UNIV_INTERN
void
read_view_close(
/*============*/
read_view_t* view); /*!< in: read view */
/*********************************************************************//**
Frees memory allocated by a read view. */
UNIV_INTERN
void
read_view_free(
/*===========*/
read_view_t* view); /*!< in: read view */
/*********************************************************************//**
Closes a consistent read view for MySQL. This function is called at an SQL
statement end if the trx isolation level is <= TRX_ISO_READ_COMMITTED. */
UNIV_INTERN
void
read_view_close_for_mysql(
/*======================*/
trx_t* trx); /*!< in: trx which has a read view */
/*********************************************************************//**
Checks if a read view sees the specified transaction.
@return TRUE if sees */
UNIV_INLINE
ibool
read_view_sees_trx_id(
/*==================*/
const read_view_t* view, /*!< in: read view */
trx_id_t trx_id);/*!< in: trx id */
/*********************************************************************//**
Prints a read view to stderr. */
UNIV_INTERN
void
read_view_print(
/*============*/
FILE* file,
const read_view_t* view); /*!< in: read view */
/*********************************************************************//**
Create a consistent cursor view for mysql to be used in cursors. In this
consistent read view modifications done by the creating transaction or future
transactions are not visible. */
UNIV_INTERN
cursor_view_t*
read_cursor_view_create_for_mysql(
/*==============================*/
trx_t* cr_trx);/*!< in: trx where cursor view is created */
/*********************************************************************//**
Close a given consistent cursor view for mysql and restore global read view
back to a transaction read view. */
UNIV_INTERN
void
read_cursor_view_close_for_mysql(
/*=============================*/
trx_t* trx, /*!< in: trx */
cursor_view_t* curview); /*!< in: cursor view to be closed */
/*********************************************************************//**
This function sets a given consistent cursor view to a transaction
read view if given consistent cursor view is not NULL. Otherwise, function
restores a global read view to a transaction read view. */
UNIV_INTERN
void
read_cursor_set_for_mysql(
/*======================*/
trx_t* trx, /*!< in: transaction where cursor is set */
cursor_view_t* curview);/*!< in: consistent cursor view to be set */
/** Read view lists the trx ids of those transactions for which a consistent
read should not see the modifications to the database. */
struct read_view_struct{
ulint type; /*!< VIEW_NORMAL, VIEW_HIGH_GRANULARITY */
undo_no_t undo_no;/*!< 0, or if type is
VIEW_HIGH_GRANULARITY, the undo_no of the
creating transaction at the time this
high-granularity consistent read view was created */
trx_id_t low_limit_no;
/*!< The view does not need to see the undo
logs for transactions whose transaction number
is strictly smaller (<) than this value: they
can be removed in purge if not needed by other
views */
trx_id_t low_limit_id;
/*!< The read should not see any transaction
with trx id >= this value. In other words,
this is the "high water mark". */
trx_id_t up_limit_id;
/*!< The read should see all trx ids which
are strictly smaller (<) than this value.
In other words,
this is the "low water mark". */
ulint n_descr;
/*!< Number of cells in the descriptors array */
ulint max_descr;
/*!< Maximum number of cells in the descriptors
array */
trx_id_t* descriptors;
/*!< Array of trx descriptors which the read
should not see: typically, these are the active
transactions at the time when the read is
serialized, except the reading transaction
itself; the trx ids in this array are in a
descending order. These trx_ids should be
between the "low" and "high" water marks, that
is, up_limit_id and low_limit_id. */
trx_id_t creator_trx_id;
/*!< trx id of creating transaction, or
0 used in purge */
UT_LIST_NODE_T(read_view_t) view_list;
/*!< List of read views in trx_sys */
};
/** Read view types @{ */
#define VIEW_NORMAL 1 /*!< Normal consistent read view
where transaction does not see changes
made by active transactions except
creating transaction. */
#define VIEW_HIGH_GRANULARITY 2 /*!< High-granularity read view where
transaction does not see changes
made by active transactions and own
changes after a point in time when this
read view was created. */
/* @} */
/** Implement InnoDB framework to support consistent read views in
cursors. This struct holds both heap where consistent read view
is allocated and pointer to a read view. */
struct cursor_view_struct{
mem_heap_t* heap;
/*!< Memory heap for the cursor view */
read_view_t* read_view;
/*!< Consistent read view of the cursor*/
ulint n_mysql_tables_in_use;
/*!< number of Innobase tables used in the
processing of this cursor */
};
#ifndef UNIV_NONINL
#include "read0read.ic"
#endif
#endif
| wzrdtales/tdb | storage/xtradb/include/read0read.h | C | gpl-2.0 | 7,411 |
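The struct comments above fully describe the visibility rule: everything below up_limit_id is visible, everything at or above low_limit_id is not, and ids in between are invisible only if they appear in the descriptors list. A compact stand-alone C sketch of that rule (not the InnoDB implementation, which lives in read0read.ic and can exploit the ordering of the array) is:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t trx_id_t;	/* stand-in for InnoDB's trx_id_t */

/* Visibility rule as documented above; the real check is read_view_sees_trx_id(). */
static bool view_sees_trx_id(trx_id_t up_limit_id, trx_id_t low_limit_id,
			     const trx_id_t *descriptors, size_t n_descr,
			     trx_id_t trx_id)
{
	size_t i;

	if (trx_id < up_limit_id)
		return true;	/* below the low water mark: always visible */
	if (trx_id >= low_limit_id)
		return false;	/* at or above the high water mark: never visible */

	/* Between the marks: invisible only if it was active when the view was made. */
	for (i = 0; i < n_descr; i++)
		if (descriptors[i] == trx_id)
			return false;
	return true;
}

int main(void)
{
	trx_id_t active[] = { 107, 103 };	/* example ids only */

	printf("%d %d %d\n",
	       view_sees_trx_id(100, 110, active, 2, 99),	/* 1 */
	       view_sees_trx_id(100, 110, active, 2, 103),	/* 0 */
	       view_sees_trx_id(100, 110, active, 2, 105));	/* 1 */
	return 0;
}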
/*
* drivers/amlogic/amports/arch/m8_m8m2/h264_enc/h264_enc_mix_sw_hdec_dblk.h
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
const u32 MicroCode[] __initconst = {
0x6810001, 0x6800000, 0xd000001, 0x7400040, 0xc000a00, 0x0000000,
0xc01b740, 0x0000000, 0xc000ec0, 0x0000000, 0x6bff840, 0x7c00000,
0x6030400, 0x0400000, 0x0000000, 0x0000000, 0xc7aee40, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000,
0x0000000, 0x0000000, 0xc799a80, 0x0000000, 0xc780a00, 0x0000000,
0xc78bec0, 0x64d3008, 0xc799ac0, 0x0000000, 0xc7abf80, 0x0000000,
0xcc00000, 0x0400000, 0x80d1a00, 0x6800008, 0x7c003c8, 0xcc00000,
0x0000000, 0x64ac808, 0x7c00108, 0x64ac908, 0x7c00148, 0x64ac608,
0x7c00188, 0x64ac508, 0x7c001c8, 0x64ac708, 0x7c00208, 0x64aca08,
0x7c00388, 0x64acb08, 0x7c00048, 0x64ace08, 0x7c01008, 0xcc00000,
0x0000000, 0xcc00000, 0x0000000, 0x80d2001, 0x64d2008, 0xbefc048,
0xcc00000, 0x0000000, 0x64f0108, 0x9808388, 0x60f0108, 0x6bfff88,
0x6030408, 0x0400000, 0x64ac008, 0xaa0c048, 0xa60c088, 0xc781d80,
0xc7808c0, 0x78003c9, 0xa60c009, 0xc0030c0, 0x0000000, 0x80d5c00,
0x80d5d00, 0x6800009, 0x7c00489, 0x7c009c9, 0x7c00a49, 0x7c00a89,
0x7c00ac9, 0x7c00b09, 0x6800016, 0x6800017, 0x7c00657, 0x7c00697,
0x7c00809, 0x7c00849, 0x7c00889, 0x7c008c9, 0x7c00909, 0x7c00949,
0x7c00989, 0xa608108, 0xc785480, 0xa6080c8, 0xc782e00, 0xa608348,
0xc79b200, 0xa608388, 0xc79d100, 0xc788200, 0x0000000, 0xc07ee80,
0x0000000, 0x6bc0008, 0x60d1f08, 0x64d1e08, 0x9c087c8, 0x60d1e08,
0x6800008, 0x6c00408, 0x60d1f08, 0x6800048, 0x6c00408, 0x60d1f08,
0x6a19c08, 0x60d1f08, 0x64d1e08, 0x98087c8, 0x60d1e08, 0x6a10808,
0x60d1f08, 0x6a00008, 0x60d1f08, 0x6a0a008, 0x60d1f08, 0xc07ed80,
0x80d2100, 0x7800148, 0x4404208, 0xc07ec80, 0x60d2108, 0xc07ec00,
0x80d2100, 0x7800108, 0x4404208, 0xc07eb00, 0x60d2108, 0xc07ea80,
0x80d2101, 0x80d1f20, 0x64d3109, 0x9010209, 0x400f208, 0x5804208,
0x4401208, 0x7c00308, 0xc07e840, 0x60d2108, 0x9210209, 0x400f208,
0x5804208, 0x4401208, 0x7c00348, 0xc07e680, 0x60d2108, 0x80d1f30,
0x80d1f30, 0x64d3108, 0x9210208, 0x9004248, 0xa60c009, 0xc7803c0,
0x80d1f20, 0x6800408, 0x6c00008, 0x2409248, 0x5801249, 0x80d1f30,
0xc07e2c0, 0x80d2100, 0xc07e240, 0x80d2100, 0xc07e1c0, 0x80d2100,
0xc07e140, 0x60d2109, 0x80d1f20, 0x80d1f30, 0x6bc0008, 0x60d1f08,
0x80ac007, 0xc786d80, 0x8007801, 0xc07da00, 0x0000000, 0x6bc0008,
0x60d1f08, 0x64d1e08, 0x9c087c8, 0x60d1e08, 0x6800008, 0x6c00408,
0x60d1f08, 0x6800048, 0x6c00408, 0x60d1f08, 0x6a1a008, 0x60d1f08,
0x64d1e08, 0x98087c8, 0x60d1e08, 0xc07da80, 0x80d2100, 0xc07da00,
0x80d2100, 0x80d1f20, 0x80d1f20, 0xc07d900, 0x80d2100, 0xc07d880,
0x80d2100, 0xc07d800, 0x80d2100, 0x80d1f20, 0x80d1f40, 0x7800048,
0xc0065c0, 0x441a208, 0xc07d640, 0x60d2108, 0xc0064c0, 0x6800008,
0xc07d540, 0x60d2108, 0xc07d4c0, 0x80d2100, 0x80d1f20, 0x80d5500,
0x80d1f20, 0x80d1f20, 0x80d1f30, 0x6bc0008, 0x60d1f08, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x64d1608,
0xbefc108, 0x0000000, 0x6800008, 0x6e00008, 0x60d1c08, 0x80d1d00,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000,
0x64d1608, 0xbefc108, 0x0000000, 0x64d1b08, 0x9808648, 0x60d1b08,
0x64d1608, 0xbefc108, 0x0000000, 0x80ac008, 0xc785940, 0x8007801,
0x64d3109, 0x9010289, 0x400f28a, 0x580428a, 0x440128a, 0x7c0030a,
0x9210289, 0x400f28a, 0x580428a, 0x440128a, 0x7c0034a, 0x6800009,
0xcc00000, 0x7c003c9, 0x64f4008, 0x9c08048, 0x60f4008, 0xc0171c0,
0x0000000, 0x6498e08, 0x9c08788, 0x6098e08, 0x6490b08, 0x9c08608,
0x9c08648, 0x6090b08, 0x6401008, 0x9808008, 0x6001008, 0xc0177c0,
0x0000000, 0xc016dc0, 0x0000000, 0xc07bd80, 0x0000000, 0x64f4908,
0x9c08048, 0x60f4908, 0x6bc0008, 0x60d1f08, 0x64d1e08, 0x9c087c8,
0x60d1e08, 0x6800008, 0x6c00408, 0x60d1f08, 0x6800048, 0x6c00408,
0x60d1f08, 0x6a19408, 0x60d1f08, 0x64d1e08, 0x98087c8, 0x60d1e08,
0x64d3308, 0x9008248, 0x7c00089, 0x9108248, 0x7c000c9, 0x60f4109,
0x9210208, 0xc07bb80, 0x60d2108, 0xc07bb00, 0x80d2107, 0x64d3109,
0x9010209, 0x400f208, 0x5804208, 0x4401208, 0x7c00308, 0x9210209,
0x400f208, 0x5804208, 0x4401208, 0x7c00348, 0x64d3308, 0x9010208,
0xa654008, 0x6800008, 0x7c00248, 0x60f1b08, 0x7800309, 0x946d209,
0x60d3208, 0x6940008, 0x6c00008, 0x20087c8, 0x60f2008, 0x6803908,
0x64d3309, 0x9008249, 0x9508209, 0x9808508, 0x9808548, 0x98087c8,
0x60f1f08, 0x6800009, 0x60f1e09, 0xc07b200, 0x80d2100, 0x7800148,
0x7800189, 0x680040a, 0x240a20a, 0x3409289, 0x9605248, 0x60d1f09,
0x78001c8, 0xc07af80, 0x60d2108, 0x7800108, 0x7800209, 0x680040a,
0x240a20a, 0x3409289, 0x9605248, 0x60d1f09, 0x80d1f20, 0x80d1f20,
0x64d3308, 0x9010208, 0xaa10008, 0x78005c8, 0xc780440, 0x7c00588,
0x68002a3, 0xc01c500, 0x0000000, 0x7804009, 0x90c1209, 0x7c007c8,
0x9006209, 0x7c00608, 0x91c1209, 0x7c00788, 0x9106209, 0x7c00588,
0xc018980, 0x7c005c8, 0x60f1c0b, 0x7800049, 0xc003640, 0x2408248,
0xc07a6c0, 0x60d2108, 0x80d3d00, 0x64acd08, 0x60f4208, 0xc003680,
0x0000000, 0x680d103, 0x681a4c4, 0x80d300f, 0x64d0008, 0x9808008,
0x9808048, 0x9808088, 0x9c080c8, 0x9c08148, 0x9c08188, 0x9c081c8,
0x60d0008, 0x80ac005, 0xc782fc0, 0x8030502, 0xc0157c0, 0x0000000,
0x64f4008, 0x9c08048, 0x60f4008, 0xc014b40, 0x0000000, 0x6498e08,
0x9808788, 0x6098e08, 0x6490b08, 0x9808608, 0x9c08648, 0x6090b08,
0x6401008, 0x9808008, 0x6001008, 0xc015140, 0x0000000, 0xc014740,
0x0000000, 0xc079700, 0x0000000, 0x64d3109, 0x9010209, 0x400f208,
0x5804208, 0x4401208, 0x7c00308, 0x9210209, 0x400f208, 0x5804208,
0x4401208, 0x7c00348, 0x7800309, 0x7800348, 0x958c248, 0x6499008,
0x9708248, 0x60f4f09, 0x80f4a00, 0x8098002, 0x8098000, 0x64f4308,
0x9c08048, 0x9c080c8, 0x60f4308, 0x64f1508, 0x9c08508, 0x9c084c8,
0x9c08748, 0x9c08448, 0x9c08488, 0x9c08348, 0x9c08308, 0x9808248,
0x9808008, 0x60f1508, 0x80d3400, 0x9808488, 0x9808448, 0x9808348,
0x9808308, 0x98085c8, 0x9808608, 0x9808648, 0x9c08688, 0x98086c8,
0x9c08248, 0x9c08008, 0x60f1508, 0x6bc0008, 0x60d1f08, 0x64d1e08,
0x9c087c8, 0x60d1e08, 0x6800008, 0x6c00408, 0x60d1f08, 0x6800048,
0x6c00408, 0x60d1f08, 0x6a10408, 0x60d1f08, 0x64d1e08, 0x98087c8,
0x60d1e08, 0x64d3308, 0x9210208, 0xc078b00, 0x60d2108, 0xc078a80,
0x80d2105, 0x6800048, 0x7c00248, 0x60f1b08, 0x7800309, 0x946d209,
0x60d3208, 0xc078880, 0x80d2100, 0x7800148, 0x7800189, 0x680040a,
0x240a20a, 0x3409289, 0x9605248, 0x60d1f09, 0x7800108, 0x7800209,
0x680040a, 0x240a20a, 0x3409289, 0x9605248, 0x60d1f09, 0x80d1f20,
0x80d1f20, 0x80d1f20, 0x64d3308, 0x9010208, 0xaa10008, 0x78005c8,
0xc780580, 0x7c00588, 0xc019c40, 0x0000000, 0x7804009, 0x90c1209,
0x7c007c8, 0x9006209, 0x7c00608, 0x91c1209, 0x7c00788, 0x9106209,
0x7c00588, 0xc0160c0, 0x7c005c8, 0x60f1d0b, 0x6940009, 0x6c00009,
0x20097c9, 0x60f2009, 0x6800009, 0x60f1e09, 0x7800049, 0xc000c00,
0x2408248, 0xc077c80, 0x60d2108, 0x80d3d00, 0x80d5300, 0x80d5501,
0x6803fc8, 0x6ffffc8, 0x60f4208, 0xc001a40, 0x0000000, 0x68113c3,
0x681b444, 0x64d3308, 0x9010208, 0xaa14008, 0x0000000, 0x80ac005,
0xc780640, 0x8030502, 0x6800008, 0x6c00448, 0x60d3008, 0x64d0008,
0x9c08008, 0x9c08048, 0x9c08088, 0x9c08188, 0x9c081c8, 0x60d0008,
0x9808008, 0x9808048, 0x9808088, 0x98080c8, 0x98081c8, 0x60d0008,
0x6800048, 0x7800309, 0x948c209, 0x60f4b08, 0x80ac005, 0xc780080,
0x8030502, 0x0800000, 0x7800008, 0xc7f6ac0, 0x6030408, 0x6800009,
0xb005248, 0x2409209, 0x5401208, 0xcc00000, 0x4401208, 0xcc00000,
0x5401209, 0x6bfffd8, 0x6c00018, 0x6800009, 0x6800019, 0x6c00019,
0x4000699, 0x680000b, 0x680c00e, 0x680100f, 0x7400398, 0x400138e,
0x7400389, 0x400138e, 0x7400399, 0x400138e, 0x740038b, 0x400138e,
0xaee004f, 0x44013cf, 0x690000e, 0x6c0000e, 0x200e7ce, 0x680000f,
0x643500d, 0x580f34d, 0xbef804d, 0x0000000, 0x603510e, 0x680400d,
0x603520d, 0x6a0c00d, 0x603500d, 0x643500d, 0x580f34d, 0xbef804d,
0x0000000, 0x40403cf, 0xb611e8f, 0x680800d, 0xc7ffc00, 0x200e34e,
0x64d330d, 0x900834d, 0x680000e, 0x680100f, 0xb8053cd, 0x0000000,
0x404038e, 0xc7fff40, 0x40403cf, 0x7c0028e, 0x44013cf, 0x7c002cf,
0x60d3718, 0x60d3919, 0x60d3818, 0x60d3a19, 0xcc00000, 0x0000000,
0x6a0001b, 0x6d0001b, 0x6a0001c, 0x6d0001c, 0x6a0001d, 0x6d0001d,
0x6a0001e, 0x6d0001e, 0x690000a, 0x6800023, 0x6800028, 0x6800029,
0x6bfffd8, 0x6c00018, 0x4000918, 0x6800019, 0x6c00019, 0x4000699,
0x6800009, 0x680c00e, 0x680064f, 0x7400399, 0x400138e, 0x7400389,
0x400138e, 0x740039b, 0x400138e, 0x740038a, 0x400138e, 0x740039c,
0x400138e, 0x740038a, 0x400138e, 0x740039d, 0x400138e, 0x740038a,
0x400138e, 0x740039e, 0x400138e, 0x740038a, 0x400138e, 0xaeb004f,
0x44013cf, 0x690000e, 0x6c0000e, 0x200e7ce, 0x680000f, 0x643500d,
0x580f34d, 0xbef804d, 0x0000000, 0x603510e, 0x6803e8d, 0x603520d,
0x6a0c00d, 0x603500d, 0x643500d, 0x580f34d, 0xbef804d, 0x0000000,
0x40193cf, 0xb611e8f, 0x6807d0d, 0xc7ffc00, 0x200e34e, 0x64d330d,
0x900834d, 0x680000e, 0x680064f, 0xb8053cd, 0x0000000, 0x401938e,
0xc7fff40, 0x40193cf, 0x7c0028e, 0x44013cf, 0x7c002cf, 0x60d3919,
0x60d4a1b, 0x60d4b1c, 0x60d4c1d, 0x60d4d1e, 0x60d4a1b, 0x60d4a1b,
0x60d3a19, 0x60d4e1b, 0x60d4f1c, 0x60d501d, 0x60d511e, 0xcc00000,
0x0000000, 0x680000e, 0xa822388, 0x64d370e, 0x78009c8, 0xaa2c008,
0xa624048, 0x6800008, 0x80d3000, 0x80d300f, 0x64d0009, 0x9c09149,
0x60d0009, 0xc7f3fc0, 0x7c009c8, 0x7c009c8, 0xb60c223, 0x4001223,
0x4401223, 0xba18188, 0x40008c8, 0xb614288, 0x6800049, 0xaa0c188,
0x6800049, 0x6800009, 0x7c00a49, 0x64d3508, 0x9004248, 0x7c00a09,
0x908c248, 0x7c006c9, 0x920c248, 0x7c00709, 0xcb80003, 0x64d390f,
0xc7f3a00, 0x0000000, 0x60d3718, 0x64d3510, 0x908c250, 0xa620009,
0x60d3919, 0x6bfffc9, 0x6c00009, 0x60d3809, 0x6800009, 0x6c00009,
0x60d3a09, 0x9004250, 0xaa10249, 0x80d3601, 0xc780140, 0x80d3603,
0x80d3602, 0x80d3603, 0x80d3604, 0x6800008, 0x7c00748, 0x7800648,
0xaa3c008, 0x80d3d00, 0x78006c9, 0x780068a, 0xa40b289, 0xb21c388,
0x0000000, 0x7c00748, 0x6800009, 0x7c00649, 0xc780180, 0x60d3d08,
0x6800009, 0x7c00649, 0x60d3d08, 0x80d3605, 0x7800748, 0xa610008,
0x0000000, 0x80d3606, 0x80d360f, 0x7800649, 0x680000a, 0xa408289,
0x64f1c09, 0x9205249, 0x780060a, 0xa804289, 0xc015680, 0x0000000,
0xc780400, 0x78005c9, 0x780058a, 0xa80d289, 0x7800a08, 0xaa24288,
0x0000000, 0x64d3e08, 0x9008208, 0xaafbfc8, 0xa610008, 0x7800588,
0xc780100, 0x7c005c8, 0x78005c9, 0x7c00589, 0x7800748, 0xaa5c008,
0x0000000, 0x64d0009, 0x9384249, 0xa6f8009, 0x0000000, 0x64d3609,
0xa6ec009, 0x0000000, 0x7800a09, 0xaa10289, 0x64d3e09, 0x9008249,
0xaa18009, 0x7800749, 0x60d2109, 0x80d2001, 0x64d2009, 0xbefc049,
0x6800009, 0x7c00749, 0x80d3606, 0x80d360f, 0x78006c9, 0x7800708,
0x780030a, 0xb804289, 0x4001249, 0x6800009, 0x4001208, 0x960f248,
0x60f1e09, 0xc015b40, 0x0000000, 0x908c210, 0x680c10a, 0x7800289,
0xa814248, 0x2409248, 0x4401249, 0x5402249, 0x680c00a, 0x200a289,
0x740028e, 0x400128a, 0x581038e, 0x740028e, 0x400128a, 0x740028f,
0x400128a, 0x58103cf, 0x740028f, 0x400528a, 0x78002c9, 0xb40d248,
0x7800309, 0xb40b248, 0x7000298, 0x400128a, 0x700028e, 0x961060e,
0x400128a, 0x7000299, 0x400128a, 0x700028f, 0xc7f1800, 0x961064f,
0x908c210, 0x7800289, 0x2409248, 0x5402249, 0x680c00a, 0x200a289,
0x64d0009, 0x9384249, 0xa6f8009, 0x0000000, 0x64d3609, 0xa6ec009,
0x0000000, 0x64d370e, 0x64d390f, 0x740028e, 0x400128a, 0x581038e,
0x740028e, 0x400128a, 0x740028f, 0x400128a, 0x58103cf, 0x740028f,
0x400128a, 0x690000e, 0x6c0000e, 0x200e7ce, 0x780028f, 0x54033cf,
0x200e3ce, 0x643500d, 0x580f34d, 0xbef804d, 0x0000000, 0x603510e,
0x680400d, 0x603520d, 0x6a0c00d, 0x603500d, 0x908c210, 0x7800309,
0xb83e248, 0x4001208, 0x920c210, 0x7800349, 0xb40e248, 0x64d330a,
0x910828a, 0x4001248, 0x2408289, 0x64ad50a, 0x901028a, 0xa433288,
0x6800008, 0xc011a00, 0x0000000, 0x80ac00d, 0xc7f0900, 0x8030502,
0x80d3000, 0x80d1f30, 0x6bc0008, 0x60d1f08, 0x0000000, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x64d1608, 0xbefc108,
0x0000000, 0x6800008, 0x6e00008, 0x60d1c08, 0x80d1d00, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x64d1608,
0xbefc108, 0x0000000, 0x64d1b08, 0x9808648, 0x60d1b08, 0x64d1608,
0xbefc108, 0x0000000, 0xc00c180, 0x0000000, 0x7800188, 0x4001208,
0x7c00188, 0x7800208, 0x4002208, 0x7c00208, 0x80d3300, 0x80ac009,
0xc7efe00, 0x8007801, 0x7c00288, 0x403f248, 0x7c002c9, 0x5403208,
0x690000e, 0x6c0000e, 0x200e7ce, 0x200e20e, 0x680400f, 0x6b0c010,
0x643500d, 0x580f34d, 0xbef804d, 0x0000000, 0x643530d, 0x900c34d,
0xa6f800d, 0x0000000, 0x603510e, 0x603520f, 0x6035010, 0x643500d,
0x580f34d, 0xbef804d, 0x0000000, 0xc7fdd40, 0x680c00a, 0x64d4a11,
0x64d4b12, 0x64d4c13, 0x64d4d14, 0x64d3725, 0x60d4a1b, 0x60d4b1c,
0x60d4c1d, 0x60d4d1e, 0x60d3718, 0x5401a28, 0x9402a29, 0x6800029,
0x78006c9, 0xa638009, 0x60d3919, 0x6a00009, 0x6d00009, 0x60d4e09,
0x60d4f09, 0x60d5009, 0x60d5109, 0x6bfffc9, 0x6c00009, 0x60d3809,
0x6800009, 0x6c00009, 0x60d3a09, 0x7800789, 0x680000a, 0xa842289,
0x7800a89, 0x60d2109, 0x80d2001, 0x80d5300, 0x6800009, 0x7c00a89,
0x7800a09, 0x680024a, 0xa826289, 0x64d3e09, 0x9008249, 0xbe14809,
0x680038a, 0xbe0c409, 0x680028a, 0x680018a, 0xa20c3c9, 0x0000000,
0x400c28a, 0x64d3c09, 0x9002249, 0x200a24a, 0x64d2009, 0xbefc049,
0x0000000, 0x60d210a, 0x80d2001, 0x64d320a, 0x980a00a, 0x60d320a,
0x9c0a00a, 0x64d2009, 0xbefc049, 0x0000000, 0x688888b, 0x64d3709,
0x941024b, 0x60d3709, 0x64d3809, 0x941024b, 0x60d3809, 0x80d3601,
0x0000000, 0x60d320a, 0xc780d40, 0x80d3603, 0x64d2009, 0xbefc049,
0x0000000, 0x80d2105, 0x80d2001, 0x64d320a, 0x980a00a, 0x60d320a,
0x9c0a00a, 0x64d2009, 0xbefc049, 0x0000000, 0x80d3601, 0x0000000,
0x60d320a, 0x80d3602, 0x80d3603, 0xc780880, 0x80d3604, 0x9004228,
0xa60c008, 0xc780640, 0x80d5201, 0x78006c9, 0xa224208, 0x780030a,
0xb807289, 0x4807208, 0xa614008, 0xc0180c0, 0x0000000, 0xc7803c0,
0x0000000, 0xa62c009, 0x0000000, 0x4c01208, 0x64d3209, 0x9c09009,
0x60d3209, 0x6bfffca, 0x6c0000a, 0x60d380a, 0x9809009, 0x60d3209,
0xc016140, 0x0000000, 0x80d3607, 0x80d3601, 0x80d3608, 0x80d3609,
0x80d3604, 0x6800008, 0x7c00748, 0x7800648, 0xaa3c008, 0x80d3d00,
0x78006c9, 0x780068a, 0xa40b289, 0xb21c388, 0x0000000, 0x7c00748,
0x6800009, 0x7c00649, 0xc780180, 0x60d3d08, 0x6800009, 0x7c00649,
0x60d3d08, 0x80d3605, 0x7800748, 0xa610008, 0x0000000, 0x80d3606,
0x80d360f, 0x7800649, 0x680000a, 0xa40d289, 0x7800789, 0x78007ca,
0xa803289, 0xc0115c0, 0x0000000, 0x64f1d09, 0x9205249, 0x780060a,
0xa804289, 0xc00fc80, 0x0000000, 0xc7804c0, 0x78005c9, 0x780058a,
0xa810289, 0x7800a08, 0xaa30288, 0x0000000, 0x64d3e08, 0x9008208,
0xaafbfc8, 0xa61c008, 0x7800588, 0x78005c9, 0xc010c00, 0x2408209,
0xc780100, 0x0000000, 0x78005c9, 0x7c00589, 0x7800748, 0xaa5c008,
0x0000000, 0x64d0009, 0x9384249, 0xa6f8009, 0x0000000, 0x64d3609,
0xa6ec009, 0x0000000, 0x7800a09, 0xaa10289, 0x64d3e09, 0x9008249,
0xaa18009, 0x7800749, 0x60d2109, 0x80d2001, 0x64d2009, 0xbefc049,
0x6800009, 0x7c00749, 0x80d3606, 0x80d360f, 0x78009c9, 0x680000a,
0xa86a289, 0x680004a, 0xa446289, 0xc010a80, 0x681a604, 0x64f1509,
0x9c09449, 0x60f1509, 0x9809449, 0x60f1509, 0x80d3609, 0x64d0009,
0x9c09049, 0x60d0009, 0x9809049, 0x60d0009, 0x6800009, 0x780030a,
0x946d24a, 0x60d3209, 0x6800223, 0x64d5309, 0x7c00a89, 0x78006e6,
0x7800327, 0xb8059e6, 0x6bfffe6, 0x6c00026, 0x60d3826, 0x6800018,
0x60d3718, 0x68888a7, 0x64d4e26, 0x93e19a6, 0x96619e6, 0x64d4f26,
0x93e19a6, 0x96419e6, 0x64d5026, 0x93e19a6, 0x96219e6, 0x64d5126,
0x93e19a6, 0x96019e6, 0x60d3827, 0x64f1509, 0x9c09589, 0x60f1509,
0xc010540, 0x0000000, 0x6490b09, 0x9c09609, 0x9c09649, 0x6090b09,
0x64f1509, 0x9c095c9, 0x9c09609, 0x9c09649, 0x9c096c9, 0x9809509,
0x98094c9, 0x64f1f08, 0x9383208, 0xa6f8008, 0x0000000, 0x6435008,
0x580f208, 0xbef8048, 0x0000000, 0x9809589, 0xc780900, 0x60f1509,
0xc00f940, 0x681b444, 0x6800049, 0x780030a, 0x946d24a, 0x60d3209,
0x68001e3, 0x9829029, 0x64d3727, 0x92619a7, 0x541f9a6, 0x60d4e26,
0x92419a7, 0x541f9a6, 0x60d4f26, 0x92219a7, 0x541f9a6, 0x60d5026,
0x92019a7, 0x541f9a6, 0x60d5126, 0x6490b09, 0x9809609, 0x9c09649,
0x6090b09, 0x64f1509, 0x98095c9, 0x9809609, 0x9809649, 0x98096c9,
0x9c09509, 0x9c094c9, 0x9c09749, 0x60f1509, 0x78007c9, 0x7c00789,
0x78006c9, 0x7800708, 0x780030a, 0xb804289, 0x4001249, 0x6800009,
0x4001208, 0x960f248, 0x60f1e09, 0xc00dfc0, 0x0000000, 0x78006c8,
0xaa10008, 0x680bfca, 0xc0016c0, 0x4401208, 0x78006c8, 0x7800309,
0xb844248, 0x0000000, 0x64d0009, 0x9384249, 0xa6f8009, 0x0000000,
0x64d3609, 0xa6ec009, 0x0000000, 0x7800709, 0x780034a, 0xb40d289,
0x64d330a, 0x910828a, 0x4001249, 0x2409289, 0x64ad50a, 0x921028a,
0xa410289, 0xc00b480, 0x0000000, 0x80ac00e, 0xc7e9d00, 0x8030502,
0x64d5309, 0xaa1c009, 0x0000000, 0x60d2109, 0x80d2001, 0x64d2009,
0xbefc049, 0x0000000, 0xc7f9200, 0x0000000, 0x64d390f, 0x64d4a11,
0x64d4b12, 0x64d4c13, 0x64d4d14, 0x64d3725, 0xb60c223, 0x4001223,
0x4401223, 0xba18188, 0x40008c8, 0xb614288, 0x6800049, 0xaa0c188,
0x6800049, 0x6800009, 0x7c00a49, 0xc000900, 0x7800308, 0xc001500,
0x6800011, 0x6a00014, 0x6d00014, 0x60d4d14, 0xc001f00, 0x680c00a,
0x60d4a1b, 0x60d4b1c, 0x60d4c1d, 0x60d4d1e, 0x9402a29, 0x6800029,
0xc780280, 0x680c28a, 0x401528a, 0x78006c8, 0x4002208, 0x78002c9,
0xb004248, 0xc001000, 0x78006d1, 0x680c50a, 0xc001a80, 0x0000000,
0x78009c8, 0xaa18008, 0xa614048, 0x0000000, 0x64f1f08, 0x93e1208,
0xa6f8008, 0xc7e8d40, 0x0000000, 0x7800289, 0x240a248, 0x540324a,
0x540128a, 0x2009289, 0x680c00a, 0x200a289, 0x740028f, 0x400128a,
0x58103cf, 0x740028f, 0x400128a, 0x7800a66, 0xaa44026, 0x68fffe6,
0x6800027, 0x960f466, 0x90109a5, 0x9410466, 0x92619a5, 0x97e1466,
0x92419a5, 0x541f4a6, 0x941f4a7, 0x92219a5, 0x541f4e6, 0x941f4e7,
0x92019a5, 0x541f526, 0x941f527, 0x7400291, 0x400128a, 0x5810451,
0x7400291, 0x400128a, 0x7400292, 0x400128a, 0x5810492, 0x7400292,
0x400128a, 0x7400293, 0x400128a, 0x58104d3, 0x7400293, 0x400128a,
0x7400294, 0x400128a, 0x5810514, 0xcc00000, 0x7400294, 0x643500e,
0x580f38e, 0xbef804e, 0x0000000, 0x690000e, 0x6c0000e, 0x200e7ce,
0x780028f, 0x540434f, 0x54023cf, 0x200f34f, 0x200e3ce, 0x603510e,
0x6803e8d, 0x603520d, 0x6a0c00d, 0x603500d, 0x7c00291, 0x4018251,
0x7c002c9, 0x5404251, 0x5402211, 0x2008248, 0x690000e, 0x6c0000e,
0x200e7ce, 0x200e20e, 0x6803e8f, 0x6b0c010, 0x643500d, 0x580f34d,
0xbef804d, 0x0000000, 0x643530d, 0x900c34d, 0xa6f800d, 0x0000000,
0x603510e, 0x603520f, 0x6035010, 0x643500d, 0x580f34d, 0xbef804d,
0xcc00000, 0x0000000, 0x400065a, 0x700029a, 0x400128a, 0x7000289,
0x9610689, 0x4000cb1, 0x4000c5b, 0x400128a, 0x700029b, 0x400128a,
0x7000289, 0x96106c9, 0x400128a, 0x700029c, 0x400128a, 0x7000289,
0x9610709, 0x400128a, 0x700029d, 0x400128a, 0x7000289, 0x9610749,
0x400128a, 0x700029e, 0x400128a, 0x7000289, 0x9610789, 0x4000624,
0x920f99b, 0x68fffe7, 0xa8109e6, 0x6900027, 0xa80b9e6, 0x68888a4,
0x93e199b, 0x9661926, 0x93e199c, 0x9641926, 0x93e199d, 0x9621926,
0x93e199e, 0xcc00000, 0x9601926, 0x6bfffe4, 0xcc00000, 0x6c00024,
0x901091b, 0x93e199b, 0x680001b, 0x9829069, 0xcc00000, 0x97e16e6,
0x64d1602, 0x9162082, 0xaa0c002, 0xc7fff40, 0x0000000, 0xc7e66c0,
0x0000000, 0xcb80004, 0x64f1f08, 0x93e1248, 0xaa38009, 0x9042248,
0xc7e6500, 0x0000000, 0x7800ac9, 0xaa10009, 0x6800049, 0xc7e63c0,
0x7c00b09, 0x93e1248, 0xaa10009, 0x9042248, 0xc7e6280, 0x0000000,
0x64f010a, 0x9c0a20a, 0x60f010a, 0xaa14009, 0x680324a, 0xaa0c049,
0x680348a, 0x680390a, 0x7800088, 0x7800309, 0xb80e248, 0x4001208,
0x6800008, 0x78000c9, 0x4001249, 0x7c000c9, 0x64d330b, 0x91082cb,
0x240c2c9, 0x64ad50b, 0x90102cb, 0xa4032cc, 0x0000000, 0x980a54a,
0x7c00088, 0x950c288, 0xa60c008, 0x7800309, 0x980a50a, 0xa403248,
0x0000000, 0x980a58a, 0x78000c8, 0x64d3309, 0x9108249, 0xa403248,
0x60f4108, 0x980a54a, 0x980a7ca, 0x60f1f0a, 0x681a60a, 0xa404284,
0x680004a, 0xc011e40, 0x7c00aca, 0xc7e56c0, 0x0000000, 0x64f4b08,
0x9384248, 0xaa10009, 0x64f4a08, 0xc7e5540, 0x0000000, 0x920c248,
0x780034a, 0xb408289, 0x4001249, 0x5410209, 0x60f4a08, 0x6800048,
0x7800309, 0x948c209, 0x60f4b08, 0xc7e5240, 0x0000000, 0x64ac11f,
0x80ac102, 0x80ac200, 0x680c008, 0x6094008, 0x6803009, 0x6804008,
0xd000009, 0x6094108, 0x6800008, 0x6094008, 0xcc00000, 0x0000000,
0xcc00000, 0x0000000, 0xc000b40, 0x0000000, 0x780034a, 0x400128a,
0x40002ca, 0x7800309, 0x4001249, 0x96102c9, 0x609520b, 0x54042ca,
0x968c2c9, 0x609080b, 0x681010a, 0x6c3454a, 0x609070a, 0x6490b0a,
0x980a00a, 0x980a0ca, 0x6090b0a, 0x9c0a00a, 0x9c0a0ca, 0x6090b0a,
0x698000a, 0x6c0000a, 0x200a7ca, 0x609440a, 0x6a0000a, 0x6c0000a,
0x200a7ca, 0x609450a, 0x780030a, 0x400128a, 0x540528a, 0x7c00bca,
0xcc00000, 0x0000000, 0xc000600, 0x0000000, 0x8095003, 0x8095000,
0x6820889, 0x6c00209, 0x6095109, 0xcc00000, 0x0000000, 0x683ffc9,
0x649090a, 0xa21004a, 0xaa0c009, 0xc7fff40, 0x4401249, 0x6490b0a,
0x980a00a, 0x980a0ca, 0x6090b0a, 0x9c0a00a, 0x9c0a0ca, 0x6090b0a,
0xcc00000, 0x0000000, 0x683ffca, 0x649530b, 0x90012cb, 0x649bb09,
0x920c249, 0x20092c9, 0xaa10009, 0xaa0c00a, 0xc7ffe40, 0x440128a,
0xcc00000, 0x0000000, 0x64f4908, 0x9c08048, 0x60f4908, 0x6401008,
0x9808008, 0xc7e8040, 0x6001008, 0x6bc0008, 0x60d1f08, 0x64d1e08,
0x9c087c8, 0x60d1e08, 0x6800008, 0x6c00408, 0x60d1f08, 0x6800048,
0x6c00408, 0x60d1f08, 0x7800248, 0xaa0c048, 0x6a10408, 0x6a19408,
0x60d1f08, 0x64d1e08, 0x98087c8, 0x60d1e08, 0x64d3308, 0x9008248,
0x7c00089, 0x9108248, 0x7c000c9, 0x60f4109, 0x9210208, 0xc063a80,
0x60d2108, 0xc063a00, 0x80d2107, 0x6800008, 0x60f1b08, 0x7800309,
0x946d209, 0x60d3208, 0x6940008, 0x6c00008, 0x20087c8, 0x60f2008,
0x6803908, 0x64d3309, 0x9008249, 0x9508209, 0x9808508, 0x9808548,
0x98087c8, 0x60f1f08, 0x7800309, 0x7800348, 0x960f248, 0x60f1e09,
0xc063440, 0x80d2100, 0x7800148, 0x7800189, 0x680040a, 0x240a20a,
0x3409289, 0x9605248, 0x60d1f09, 0x7800248, 0xa610008, 0x78001c8,
0xc063140, 0x60d2108, 0x7800108, 0x7800209, 0x680040a, 0x240a20a,
0x3409289, 0x9605248, 0x60d1f09, 0x7800249, 0xaa1c049, 0x0000000,
0x80d1f20, 0x80d1f20, 0x64f1c08, 0xc780240, 0x9206208, 0x80d1f20,
0x64ad008, 0xc000dc0, 0x9206208, 0x60f1c0b, 0x64ad008, 0x9206208,
0x7800049, 0xc06ba00, 0x2408248, 0xc062a80, 0x60d2108, 0xc06bb00,
0x0000000, 0x680d103, 0x681a4c4, 0x80d300f, 0x64d0008, 0x9c08008,
0x60d0008, 0x9808008, 0x9808048, 0x9808088, 0x9c080c8, 0x9c08148,
0x9c08188, 0x9c081c8, 0x60d0008, 0x80d3609, 0x9c08048, 0x60d0008,
0x9808048, 0x60d0008, 0x64f4008, 0x9808048, 0x60f4008, 0x80ac005,
0xc7eb1c0, 0x8030502, 0x6490b08, 0x9808008, 0x98080c8, 0x6090b08,
0x9c08008, 0x9c080c8, 0x6090b08, 0x6498e08, 0x9808788, 0x6098e08,
0x6490b08, 0x9808608, 0x9c08648, 0x6090b08, 0x6401008, 0x9808008,
0xc7e8fc0, 0x6001008, 0x5401248, 0x2009209, 0x681f3ca, 0x2009289,
0xcb80009, 0x680000b, 0xcc00000, 0x6c0000b, 0x684040b, 0xcc00000,
0x6c0104b, 0x688080b, 0xcc00000, 0x6c0208b, 0x68c0c0b, 0xcc00000,
0x6c030cb, 0x690100b, 0xcc00000, 0x6c0410b, 0x694140b, 0xcc00000,
0x6c0514b, 0x680404b, 0xcc00000, 0x6c0618b, 0x684444b, 0xcc00000,
0x6c071cb, 0x688484b, 0xcc00000, 0x6c0820b, 0x68c4c4b, 0xcc00000,
0x6c0924b, 0x690504b, 0xcc00000, 0x6c0a28b, 0x694544b, 0xcc00000,
0x6c0b2cb, 0x680808b, 0xcc00000, 0x6c0c30b, 0x684848b, 0xcc00000,
0x6c0d34b, 0x688888b, 0xcc00000, 0x6c0e38b, 0x68c8c8b, 0xcc00000,
0x6c0f3cb, 0x690908b, 0xcc00000, 0x6c1040b, 0x694948b, 0xcc00000,
0x6c1144b, 0x680c0cb, 0xcc00000, 0x6c1248b, 0x684c4cb, 0xcc00000,
0x6c134cb, 0x688c8cb, 0xcc00000, 0x6c1450b, 0x68ccccb, 0xcc00000,
0x6c1554b, 0x690d0cb, 0xcc00000, 0x6c1658b, 0x694d4cb, 0xcc00000,
0x6c175cb, 0x681010b, 0xcc00000, 0x6c1860b, 0x685050b, 0xcc00000,
0x6c1964b, 0x689090b, 0xcc00000, 0x6c1a68b, 0x68d0d0b, 0xcc00000,
0x6c1b6cb, 0x691110b, 0xcc00000, 0x6c1c70b, 0x695150b, 0xcc00000,
0x6c1d74b, 0x695014b, 0xcc00000, 0x6c1d78b, 0x681454b, 0xcc00000,
0x6c1e7cb, 0x685494b, 0xcc00000, 0x6c1f80b, 0x6894d4b, 0xcc00000,
0x6c2084b, 0x689514b, 0xcc00000, 0x6c2088b, 0x68d554b, 0xcc00000,
0x6c218cb, 0x691418b, 0xcc00000, 0x6c2290b, 0x691458b, 0xcc00000,
0x6c2294b, 0x695498b, 0xcc00000, 0x6c2398b, 0x6954d8b, 0xcc00000,
0x6c239cb, 0x681918b, 0xcc00000, 0x6c24a0b, 0x681958b, 0xcc00000,
0x6c24a4b, 0x68581cb, 0xcc00000, 0x6c25a8b, 0x68585cb, 0xcc00000,
0x6c25acb, 0x68589cb, 0xcc00000, 0x6c25b0b, 0x6898dcb, 0xcc00000,
0x6c26b4b, 0x68991cb, 0xcc00000, 0x6c26b8b, 0x68995cb, 0xcc00000,
0x6c26bcb, 0x68d820b, 0xcc00000, 0x6c27c0b, 0x68d860b, 0xcc00000,
0x6c27c4b, 0x68d8a0b, 0xcc00000, 0x6c27c8b, 0x68d8e0b, 0xcc00000,
0x6c27ccb, 0x6430008, 0x64f4008, 0x9c08048, 0x60f4008, 0x64d0009,
0x9384249, 0xa6f8009, 0x0000000, 0x64d3609, 0xa6ec009, 0x0000000,
0xc07ae00, 0x0000000, 0x7800709, 0x4001249, 0x780030a, 0x400128a,
0xe00024a, 0x80d1f30, 0x6bc0008, 0x60d1f08, 0xf000280, 0x5408249,
0x961024a, 0xcc00000, 0x60d3309, 0x6430008, 0x78006c9, 0x780070a,
0x780030b, 0xb8042c9, 0x4001249, 0x6800009, 0x400128a, 0x40012cb,
0x4000309, 0xe0002ca, 0x0000000, 0x0000000, 0x950824a, 0xf0002c0,
0x200b30b, 0x961024b, 0x60d3309, 0x64d0009, 0x9384249, 0xa6f8009,
0x0000000, 0x64d3609, 0xa6ec009, 0x0000000, 0x64d5309, 0xaa1c009,
0x0000000, 0x60d2109, 0x80d2001, 0x64d2009, 0xbefc049, 0x0000000,
0x80d5300, 0xc07a1c0, 0x0000000, 0x80d1f30, 0x6bc0008, 0xcc00000,
0x60d1f08, 0x7800708, 0x7800349, 0xb809248, 0x6804008, 0x6800409,
0x680080a, 0x740020a, 0xd07ffc9, 0x4001208, 0xcc00000, 0x0000000,
0x6435008, 0x580f208, 0xbecc048, 0x0000000, 0x64ad608, 0x6035108,
0x7800309, 0x4001249, 0x5801249, 0x401f249, 0x5805249, 0x5405249,
0x6035209, 0x5401249, 0x2008248, 0x60ad608, 0x6be4008, 0x6035008,
0x6435008, 0x580f208, 0xbef8048, 0x0000000, 0xcc00000, 0x0000000,
0xc07be80, 0x7800608, 0x64d3e08, 0x9008288, 0xaafbfca, 0x0000000,
0x60f1c0b, 0x7800a08, 0xaa10288, 0xa60c00a, 0x7800588, 0x7c005c8,
0x7800608, 0x78005c9, 0xc001500, 0x2408248, 0x78005c8, 0x7c00588,
0x7800608, 0xcc00000, 0x7c005c8, 0xc07b940, 0x7800608, 0x64f1d0a,
0x900824b, 0x9408289, 0x920624b, 0x9606289, 0x6804b09, 0x6803fce,
0xae14049, 0x80f2308, 0x7800588, 0xcc00000, 0x7c005c8, 0x64f2408,
0x91e2208, 0xaa18048, 0x64d3e08, 0x9008388, 0xa60ffce, 0xc7ffd40,
0x4401249, 0x60f1d0a, 0x80f2308, 0x64f2408, 0x9208208, 0xaa17fc8,
0x64d3e08, 0x9008208, 0xaaebfce, 0x0000000, 0x60f1d0b, 0xa60ffce,
0x64d3e08, 0x9008388, 0x7800a08, 0xaa10288, 0xa60c00e, 0x7800588,
0x7c005c8, 0x7800608, 0x78005c9, 0xc0008c0, 0x2408248, 0x78005c8,
0x7c00588, 0x7800608, 0xcc00000, 0x7c005c8, 0x78006c8, 0x4001208,
0x7800309, 0xb80d248, 0x4001208, 0x2408248, 0xb628088, 0x4401208,
0xc07e3c0, 0x0000000, 0x7804009, 0x91c1209, 0x7c007c8, 0x9106209,
0xcc00000, 0x7c00608, 0x5801248, 0x680400a, 0x2009289, 0x7000249,
0x91c1289, 0xa210048, 0x9106209, 0x90c1289, 0x9006209, 0x7c007ca,
0xcc00000, 0x7c00608, 0xaa24008, 0x680000c, 0xba14008, 0x6800009,
0x5401208, 0xc780100, 0x4401308, 0x240c209, 0x540130c, 0x7c0064c,
0x78006c8, 0x4001208, 0x7800309, 0xb003248, 0x0000000, 0x6800008,
0xcc00000, 0x7c00688, 0x78007c9, 0xaa10009, 0x6800049, 0xcc00000,
0x7c009c9, 0x6800009, 0x6c00449, 0x60d3009, 0x6800089, 0xcc00000,
0x7c009c9, 0x64d0009, 0x9384249, 0xa6f8009, 0x0000000, 0x64d3609,
0xa6ec009, 0x0000000, 0x80f2306, 0x64f2409, 0x90c2249, 0xa6f4009,
0x0000000, 0x6490909, 0xbefc049, 0x0000000, 0x6490b09, 0x9809009,
0x98090c9, 0x6090b09, 0x9c09009, 0x9c090c9, 0x6090b09, 0xcc00000,
0x0000000, 0x6803c0e, 0x78006c9, 0x780070a, 0x7800308, 0xb804209,
0x4001249, 0x6800009, 0x400128a, 0x950c389, 0xa60c009, 0x7c00089,
0x980e50e, 0x7800308, 0xb803209, 0x7c000ca, 0x980e58e, 0x64d3308,
0x9108208, 0xa403288, 0x0000000, 0x980e54e, 0x980e7ce, 0x4000c0e,
0x7800709, 0x5404209, 0x64acc0a, 0xe000288, 0x64ac326, 0x78006ca,
0x540428a, 0xf0002c0, 0x20262e6, 0x20262a6, 0x7800709, 0x5403209,
0x64acc0a, 0xe000288, 0x64ac410, 0x78006ca, 0x540428a, 0xf0002c0,
0x20102d0, 0x2010290, 0x78006ca, 0x400128a, 0x7800309, 0x4001249,
0xb80424a, 0x7800708, 0x4001208, 0x240a24a, 0x2420289, 0x64ac32d,
0x5404208, 0x64acc09, 0x242d26d, 0xe000248, 0x540428a, 0x202d2ad,
0xf0002c0, 0x202d2ed, 0x78006ca, 0x400128a, 0x7800309, 0x4001249,
0xb80424a, 0x7800708, 0x4001208, 0x240a24a, 0x2422289, 0x64ac42e,
0x5403208, 0x64acc09, 0x242e26e, 0xe000248, 0x540428a, 0x202e2ae,
0xf0002c0, 0x202e2ee, 0x694002f, 0x6c0002f, 0x202f7ef, 0x78006c9,
0x4003249, 0x7800308, 0xb003209, 0x4001208, 0x2409209, 0x5405249,
0x202f26f, 0xc075b40, 0x0000000, 0x680830a, 0x680020c, 0x6035126,
0x8035208, 0x6b0a008, 0x6035008, 0x64acc08, 0x2026226, 0x6435008,
0x580f208, 0xbef8048, 0x0000000, 0x780a1c9, 0x6035126, 0x8035208,
0x6b0a008, 0x6035008, 0x64acc08, 0x2026226, 0x6435008, 0x580f208,
0xbef8048, 0x0000000, 0x780a1cb, 0x95082c9, 0x740028b, 0xa60c14c,
0x0000000, 0x440828a, 0xd07f94c, 0x400128a, 0x680800a, 0x680010c,
0x6035110, 0x8035208, 0x6b0a008, 0x6035008, 0x64acc08, 0x2010210,
0x6435008, 0x580f208, 0xbef8048, 0x0000000, 0x780a1c9, 0x6035110,
0x8035208, 0x6b0a008, 0x6035008, 0x64acc08, 0x2010210, 0x6435008,
0x580f208, 0xbef8048, 0x0000000, 0x780a1cb, 0x9008209, 0x910834b,
0x940824d, 0x95082c8, 0x740028b, 0x400428a, 0x7400289, 0xd07f8cc,
0x440328a, 0x78006c9, 0x4003249, 0x7800308, 0xb003209, 0x4001208,
0x2409209, 0x5405249, 0x6940008, 0x6c00008, 0x20087c8, 0x2008248,
0x6035108, 0x8035210, 0x6a08008, 0x6035008, 0x6435008, 0x580f208,
0xbef8048, 0x0000000, 0x643530d, 0x900c34d, 0xa6f800d, 0x0000000,
0xc006180, 0x60f1f0e, 0x64d3c08, 0x9004208, 0xaa0c048, 0x6800049,
0x68000c9, 0x7c00b49, 0xc004900, 0x0000000, 0x6435008, 0x580f208,
0xbef8048, 0x0000000, 0xc004cc0, 0x0000000, 0x6435008, 0x580f208,
0xbef8048, 0x0000000, 0xc005380, 0x0000000, 0x6435008, 0x580f208,
0xbef8048, 0x0000000, 0x643530d, 0x900c34d, 0xa6f800d, 0x0000000,
0x7800b49, 0xaa14049, 0x6803908, 0xaa0c089, 0x6803248, 0x6803488,
0x40003b0, 0x9408388, 0xb220049, 0x60f1f0e, 0x4401249, 0x7c00b49,
0xc005700, 0x0000000, 0xc7ff700, 0x4420bef, 0xc003f80, 0x0000000,
0x6800048, 0xcc00000, 0x7c00ac8, 0xa65c188, 0x64d4e09, 0x901f249,
0x60d4a09, 0x60d4b09, 0x60d4c09, 0x60d4d09, 0x60d4a09, 0x60d4b09,
0x60d4c09, 0x60d4d09, 0xc0031c0, 0x80d5201, 0x64d4a08, 0x60d4a08,
0x60d4b08, 0x60d4c08, 0x60d4d08, 0x60d4a1b, 0x60d4b1c, 0x60d4c1d,
0xcc00000, 0x60d4d1e, 0x68000c9, 0xa420248, 0x64d4a09, 0x901f249,
0x78006c8, 0xa610008, 0x4000289, 0x6a0000a, 0x6d0000a, 0x60d4a09,
0x60d4b09, 0x60d4c09, 0x60d4d09, 0x60d4a09, 0x60d4b09, 0x60d4c09,
0x60d4d09, 0x60d4e0a, 0x60d4f0a, 0x60d500a, 0x60d510a, 0xc002980,
0x80d5201, 0x64d4a08, 0x60d4a08, 0x60d4b08, 0x60d4c08, 0x60d4d08,
0x60d4a1b, 0x60d4b1c, 0x60d4c1d, 0xcc00000, 0x60d4d1e, 0x6800149,
0xa422248, 0x901f25b, 0x68fffc9, 0x6cfffc9, 0x60d4a09, 0x60d4b09,
0x60d4c09, 0x60d4d09, 0x78006c9, 0x780030a, 0xb807289, 0x0000000,
0x60d4a32, 0x60d4b32, 0x60d4c32, 0xc780180, 0x60d4d32, 0x60d4a1b,
0x60d4b1c, 0x60d4c1d, 0x60d4d1e, 0x6b00049, 0x6d00049, 0x60d4e09,
0x60d4f09, 0x60d5009, 0x60d5109, 0x64d320a, 0x69ffe49, 0x60d3209,
0xc001ec0, 0x80d5201, 0xcc00000, 0x60d320a, 0x64d3309, 0x9108249,
0x780070a, 0xa80824a, 0x680004a, 0xa440288, 0x78006c9, 0xaa10009,
0x64d4a09, 0x901f249, 0xa60c009, 0xcc00000, 0x80d5201, 0x78006cb,
0x780030a, 0xa41628b, 0x90612a8, 0xaa0c00a, 0xc7feb00, 0x0000000,
0x64d4a09, 0x901f249, 0x60d4a09, 0x60d4b09, 0x60d4c09, 0x60d4d09,
0x60d4a32, 0x60d4b32, 0x60d4c32, 0x60d4d32, 0x64d320a, 0x69ffe49,
0x60d3209, 0xc001580, 0x80d5201, 0xcc00000, 0x60d320a, 0x9010a89,
0x9010adb, 0x91e12aa, 0x91e12eb, 0x200a2ca, 0xaa1800a, 0x6bfffec,
0xaa1008a, 0x680006c, 0x6bfffea, 0x680002b, 0x920f289, 0x960fa8a,
0x920f29b, 0x960faca, 0x93c12aa, 0x93c12eb, 0x200a2ca, 0x6bfffcb,
0xaa2400a, 0x960fb0b, 0x680004b, 0xaa1808a, 0x960fb0b, 0x6bfffcb,
0x960fa8b, 0x680000b, 0x960facb, 0xc000780, 0x0000000, 0xcc00000,
0x0000000, 0xa638108, 0x64d4e09, 0x901f249, 0xa60c009, 0xcc00000,
0x80d5201, 0x78006cb, 0x780030a, 0xa40428b, 0x90612a8, 0xaae800a,
0xc7fd580, 0xc7ff500, 0x0000000, 0xa6281c8, 0x68fffec, 0x6cfffec,
0x6b0006a, 0x6d0006a, 0x680002b, 0xc000180, 0x0000000, 0xcc00000,
0x0000000, 0xcc00000, 0x80d5201, 0x60d4a2a, 0x60d4b2a, 0x60d4c2a,
0x60d4d2a, 0x60d4a2b, 0x60d4b2b, 0x60d4c2b, 0x60d4d2b, 0x60d4e2c,
0x60d4f2c, 0x60d502c, 0x60d512c, 0xc000300, 0x80d5201, 0x64d4a0a,
0x60d4a0a, 0x60d4b0a, 0x60d4c0a, 0x60d4d0a, 0x60d4a1b, 0x60d4b1c,
0x60d4c1d, 0xcc00000, 0x60d4d1e, 0x80d5602, 0x64d5709, 0x9005249,
0xaaf4009, 0x0000000, 0xcc00000, 0x0000000, 0x8006402, 0x7800acf,
0xa60c00f, 0xc7d4040, 0x0000000, 0x6435008, 0x580f208, 0xbef8048,
0x0000000, 0xa61804f, 0x6800088, 0xc000a80, 0x7c00ac8, 0xc7d3dc0,
0x0000000, 0xa61808f, 0x68000c8, 0xc001140, 0x7c00ac8, 0xc7d3c40,
0x0000000, 0xa6240cf, 0x6435308, 0x900c208, 0xa6f8008, 0x6800008,
0x7800b09, 0xaa0c009, 0x7c00ac8, 0x8030508, 0xc7d3980, 0x0000000,
0x6435008, 0x580f208, 0xbef8048, 0x0000000, 0x603512d, 0x8035208,
0x6b08808, 0x6035008, 0x4401820, 0x4010b6d, 0xaa0c020, 0xcc00000,
0x0000000, 0x7800320, 0x4001820, 0x5404220, 0x242d22d, 0x64acc08,
0x5404208, 0xcc00000, 0x202d22d, 0x603512e, 0x8035208, 0x6b09008,
0x6035008, 0x7808908, 0x7c08208, 0x7808948, 0x7c08248, 0x7808988,
0x7c08288, 0x78089c8, 0x7c082c8, 0x7808808, 0x7c08308, 0x7808848,
0x7c08348, 0x7808888, 0x7c08388, 0x78088c8, 0x7c083c8, 0x44018a2,
0x4010bae, 0xaa0c022, 0xcc00000, 0x0000000, 0x7800322, 0x40018a2,
0x5404222, 0x242e22e, 0x64acc08, 0x5403208, 0xcc00000, 0x202e22e,
0x6435008, 0x580f208, 0xbef8048, 0x0000000, 0x6809008, 0x680800e,
0x6800109, 0x700020a, 0x90082ca, 0x4001208, 0x700020c, 0x910834c,
0x950830b, 0x740038c, 0x400438e, 0x940828d, 0x740038a, 0x440338e,
0xd07fd49, 0x4001208, 0x6940008, 0x6c00008, 0x20087c8, 0x7800309,
0x4001249, 0x5405249, 0x2008248, 0xb80322f, 0x0000000, 0x242f26f,
0x603512f, 0x8035210, 0x6a08008, 0x6035008, 0xcc00000, 0x4020bef,
0x64f1508, 0x9c08308, 0x9c085c8, 0x9c08608, 0x9c08648, 0x9c086c8,
0x9808508, 0x98084c8, 0x60f1508, 0x78006c9, 0x7800708, 0x780030a,
0xb804289, 0x4001249, 0x6800009, 0x4001208, 0x960f248, 0x60f1e09,
0x64f1f08, 0x9384208, 0xa6f8008, 0x0000000, 0x80d300f, 0x80f4003,
0x64f1508, 0x9808248, 0x9c08508, 0x9c084c8, 0x60f1508, 0x64d0009,
0x9c09009, 0x60d0009, 0x9809009, 0x60d0009, 0x9c08248, 0x9808508,
0x98084c8, 0x9808308, 0x60f1508, 0xcc00000, 0x80f4000, 0x6432434,
0xaa0c034, 0x8032400, 0x6699334, 0x64ac034, 0xaa1c074, 0xaa180b4,
0xaa14134, 0xaa100f4, 0x0000000, 0xc7ffd80, 0x0000000, 0x8030502,
0xc7ffcc0, 0x0000000, 0x0000000, 0x0000000, 0x0000000, 0x0000000,
0x0000000, 0x0000000, 0x0000000, 0x0000000,
};
| wetek-enigma/linux-wetek-3.14.y | drivers/amlogic/amports/arch/ucode/h264_enc/h264_enc_mix_sw_hdec_dblk.h | C | gpl-2.0 | 34,639 |
#undef CONFIG_FEATURE_BASH_IS_HUSH
| twobob/buildroot-kindle | output/build/busybox-1.20.2/include/config/feature/bash/is/hush.h | C | gpl-2.0 | 35 |
<?php
/*
+--------------------------------------------------------------------+
| CiviCRM version 4.4 |
+--------------------------------------------------------------------+
| Copyright CiviCRM LLC (c) 2004-2013 |
+--------------------------------------------------------------------+
| This file is a part of CiviCRM. |
| |
| CiviCRM is free software; you can copy, modify, and distribute it |
| under the terms of the GNU Affero General Public License |
| Version 3, 19 November 2007 and the CiviCRM Licensing Exception. |
| |
| CiviCRM is distributed in the hope that it will be useful, but |
| WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
| See the GNU Affero General Public License for more details. |
| |
| You should have received a copy of the GNU Affero General Public |
| License and the CiviCRM Licensing Exception along |
| with this program; if not, contact CiviCRM LLC |
| at info[AT]civicrm[DOT]org. If you have questions about the |
| GNU Affero General Public License or the licensing of CiviCRM, |
| see the CiviCRM license FAQ at http://civicrm.org/licensing |
+--------------------------------------------------------------------+
*/
/**
*
* @package CRM
* @copyright CiviCRM LLC (c) 2004-2013
* $Id$
*
*/
/**
* BAO object for civicrm_cache table. This is a database cache and is persisted across sessions. Typically we use
* this to store meta data (like profile fields, custom fields etc).
*
* The group_name column is used for grouping together all cache elements that logically belong to the same set.
* Thus all session cache entries are grouped under 'CiviCRM Session'. This allows us to delete all entries of
* a specific group if needed.
*
* The path column allows us to differentiate between items in that group. Thus for the session cache, the path is
* the unique form name for each form (per user)
*/
class CRM_Core_BAO_Cache extends CRM_Core_DAO_Cache {
/**
* @var array ($cacheKey => $cacheValue)
*/
static $_cache = NULL;
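  /*
   * Illustrative usage sketch (not part of the original class; the group and
   * path names below are examples only): cache a computed value under a
   * group/path pair and read it back later, possibly in a different request.
   *
   *   $fields = array('first_name', 'last_name');
   *   CRM_Core_BAO_Cache::setItem($fields, 'CiviCRM Profile Fields', 'profile_12');
   *   // ... later ...
   *   $cached = CRM_Core_BAO_Cache::getItem('CiviCRM Profile Fields', 'profile_12');
   *
   * CRM_Core_BAO_Cache::deleteGroup('CiviCRM Profile Fields') would then drop
   * every path stored under that group.
   */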
/**
* Retrieve an item from the DB cache
*
* @param string $group (required) The group name of the item
* @param string $path (required) The path under which this item is stored
   * @param int    $componentID The optional component ID (so components can share the same namespace)
*
* @return object The data if present in cache, else null
* @static
* @access public
*/
static function &getItem($group, $path, $componentID = NULL) {
if (self::$_cache === NULL) {
self::$_cache = array();
}
$argString = "CRM_CT_{$group}_{$path}_{$componentID}";
if (!array_key_exists($argString, self::$_cache)) {
$cache = CRM_Utils_Cache::singleton();
self::$_cache[$argString] = $cache->get($argString);
if (!self::$_cache[$argString]) {
$dao = new CRM_Core_DAO_Cache();
$dao->group_name = $group;
$dao->path = $path;
$dao->component_id = $componentID;
$data = NULL;
if ($dao->find(TRUE)) {
$data = unserialize($dao->data);
}
$dao->free();
self::$_cache[$argString] = $data;
$cache->set($argString, self::$_cache[$argString]);
}
}
return self::$_cache[$argString];
}
/**
* Retrieve all items in a group
*
* @param string $group (required) The group name of the item
   * @param int    $componentID The optional component ID (so components can share the same namespace)
*
* @return object The data if present in cache, else null
* @static
* @access public
*/
static function &getItems($group, $componentID = NULL) {
if (self::$_cache === NULL) {
self::$_cache = array();
}
$argString = "CRM_CT_CI_{$group}_{$componentID}";
if (!array_key_exists($argString, self::$_cache)) {
$cache = CRM_Utils_Cache::singleton();
self::$_cache[$argString] = $cache->get($argString);
if (!self::$_cache[$argString]) {
$dao = new CRM_Core_DAO_Cache();
$dao->group_name = $group;
$dao->component_id = $componentID;
$dao->find();
$result = array(); // array($path => $data)
while ($dao->fetch()) {
$result[$dao->path] = unserialize($dao->data);
}
$dao->free();
self::$_cache[$argString] = $result;
$cache->set($argString, self::$_cache[$argString]);
}
}
return self::$_cache[$argString];
}
/**
* Store an item in the DB cache
*
* @param object $data (required) A reference to the data that will be serialized and stored
* @param string $group (required) The group name of the item
* @param string $path (required) The path under which this item is stored
   * @param int    $componentID The optional component ID (so components can share the same namespace)
*
* @return void
* @static
* @access public
*/
static function setItem(&$data, $group, $path, $componentID = NULL) {
if (self::$_cache === NULL) {
self::$_cache = array();
}
$dao = new CRM_Core_DAO_Cache();
$dao->group_name = $group;
$dao->path = $path;
$dao->component_id = $componentID;
// get a lock so that multiple ajax requests on the same page
    // don't trample on each other
// CRM-11234
$lockName = "civicrm.cache.{$group}_{$path}._{$componentID}";
$lock = new CRM_Core_Lock($lockName);
if (!$lock->isAcquired()) {
CRM_Core_Error::fatal();
}
$dao->find(TRUE);
$dao->data = serialize($data);
$dao->created_date = date('YmdHis');
$dao->save();
$lock->release();
$dao->free();
// cache coherency - refresh or remove dependent caches
$argString = "CRM_CT_{$group}_{$path}_{$componentID}";
$cache = CRM_Utils_Cache::singleton();
$data = unserialize($dao->data);
self::$_cache[$argString] = $data;
$cache->set($argString, $data);
$argString = "CRM_CT_CI_{$group}_{$componentID}";
unset(self::$_cache[$argString]);
$cache->delete($argString);
}
/**
* Delete all the cache elements that belong to a group OR
* delete the entire cache if group is not specified
*
* @param string $group The group name of the entries to be deleted
* @param string $path path of the item that needs to be deleted
   * @param boolean $clearAll clear all caches
*
* @return void
* @static
* @access public
*/
static function deleteGroup($group = NULL, $path = NULL, $clearAll = TRUE) {
$dao = new CRM_Core_DAO_Cache();
if (!empty($group)) {
$dao->group_name = $group;
}
if (!empty($path)) {
$dao->path = $path;
}
$dao->delete();
if ($clearAll) {
// also reset ACL Cache
CRM_ACL_BAO_Cache::resetCache();
// also reset memory cache if any
CRM_Utils_System::flushCache();
}
}
/**
* The next two functions are internal functions used to store and retrieve session from
* the database cache. This keeps the session to a limited size and allows us to
* create separate session scopes for each form in a tab
*
*/
/**
* This function takes entries from the session array and stores it in the cache.
* It also deletes the entries from the $_SESSION object (for a smaller session size)
*
* @param array $names Array of session values that should be persisted
* This is either a form name + qfKey or just a form name
* (in the case of profile)
* @param boolean $resetSession Should session state be reset on completion of DB store?
*
* @return void
* @static
* @access private
*/
static function storeSessionToCache($names, $resetSession = TRUE) {
foreach ($names as $key => $sessionName) {
if (is_array($sessionName)) {
$value = null;
if (!empty($_SESSION[$sessionName[0]][$sessionName[1]])) {
$value = $_SESSION[$sessionName[0]][$sessionName[1]];
}
self::setItem($value, 'CiviCRM Session', "{$sessionName[0]}_{$sessionName[1]}");
if ($resetSession) {
$_SESSION[$sessionName[0]][$sessionName[1]] = NULL;
unset($_SESSION[$sessionName[0]][$sessionName[1]]);
}
}
else {
$value = null;
if (!empty($_SESSION[$sessionName])) {
$value = $_SESSION[$sessionName];
}
self::setItem($value, 'CiviCRM Session', $sessionName);
if ($resetSession) {
$_SESSION[$sessionName] = NULL;
unset($_SESSION[$sessionName]);
}
}
}
self::cleanup();
}
  /**
   * Retrieve the session values from the cache and populate the $_SESSION array
*
* @param array $names Array of session values that should be persisted
* This is either a form name + qfKey or just a form name
* (in the case of profile)
*
* @return void
* @static
* @access private
*/
static function restoreSessionFromCache($names) {
foreach ($names as $key => $sessionName) {
if (is_array($sessionName)) {
$value = self::getItem('CiviCRM Session',
"{$sessionName[0]}_{$sessionName[1]}"
);
if ($value) {
$_SESSION[$sessionName[0]][$sessionName[1]] = $value;
}
}
else {
$value = self::getItem('CiviCRM Session',
$sessionName
);
if ($value) {
$_SESSION[$sessionName] = $value;
}
}
}
}
/**
* Do periodic cleanup of the CiviCRM session table. Also delete all session cache entries
* which are a couple of days old. This keeps the session cache to a manageable size
*
* @return void
* @static
* @access private
*/
static function cleanup($session = false, $table = false, $prevNext = false) {
    // probabilistically clean up the caches, roughly once in every $cleanUpNumber calls
    $cleanUpNumber = 757;
    // clean up all sessions older than $timeIntervalDays days
$timeIntervalDays = 2;
$timeIntervalMins = 30;
if (mt_rand(1, 100000) % $cleanUpNumber == 0) {
$session = $table = $prevNext = true;
}
if ( ! $session && ! $table && ! $prevNext ) {
return;
}
if ( $prevNext ) {
// delete all PrevNext caches
CRM_Core_BAO_PrevNextCache::cleanupCache();
}
if ( $table ) {
// also delete all the action temp tables
// that were created the same interval ago
$dao = new CRM_Core_DAO();
$query = "
SELECT TABLE_NAME as tableName
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = %1
AND ( TABLE_NAME LIKE 'civicrm_task_action_temp_%'
OR TABLE_NAME LIKE 'civicrm_export_temp_%'
OR TABLE_NAME LIKE 'civicrm_import_job_%' )
AND CREATE_TIME < date_sub( NOW( ), INTERVAL $timeIntervalDays day )
";
$params = array(1 => array($dao->database(), 'String'));
$tableDAO = CRM_Core_DAO::executeQuery($query, $params);
$tables = array();
while ($tableDAO->fetch()) {
$tables[] = $tableDAO->tableName;
}
if (!empty($tables)) {
$table = implode(',', $tables);
// drop leftover temporary tables
CRM_Core_DAO::executeQuery("DROP TABLE $table");
}
}
if ( $session ) {
// first delete all sessions which are related to any potential transaction
// page
$transactionPages = array(
'CRM_Contribute_Controller_Contribution',
'CRM_Event_Controller_Registration',
);
$params = array(
1 => array(date('Y-m-d H:i:s', time() - $timeIntervalMins * 60), 'String'),
);
foreach ($transactionPages as $trPage) {
$params[] = array("%${trPage}%", 'String');
$where[] = 'path LIKE %' . sizeof($params);
}
$sql = "
DELETE FROM civicrm_cache
WHERE group_name = 'CiviCRM Session'
AND created_date <= %1
AND (" . implode(' OR ', $where) . ")";
CRM_Core_DAO::executeQuery($sql, $params);
$sql = "
DELETE FROM civicrm_cache
WHERE group_name = 'CiviCRM Session'
AND created_date < date_sub( NOW( ), INTERVAL $timeIntervalDays DAY )
";
CRM_Core_DAO::executeQuery($sql);
}
}
}
| ChrisChinchilla/greenrenters.org | sites/all/modules/civicrm/CRM/Core/BAO/Cache.php | PHP | gpl-2.0 | 12,743 |
<?php
/**
* @copyright Copyright © Kartik Visweswaran, Krajee.com, 2014
* @package yii2-popover-x
* @version 1.3.1
*/
namespace kartik\popover;
use Yii;
/**
* Asset bundle for PopoverX widget. Includes assets from
* bootstrap-popover-x plugin by Krajee.
*
* @see http://plugins.krajee.com/popover-x
* @see http://github.com/kartik-v/bootstrap-popover-x
* @author Kartik Visweswaran <[email protected]>
* @since 1.0
*/
class PopoverXAsset extends \kartik\base\AssetBundle
{
public $depends = [
'yii\web\YiiAsset',
'yii\bootstrap\BootstrapAsset',
'yii\bootstrap\BootstrapPluginAsset',
];
public function init()
{
$this->setSourcePath('@vendor/kartik-v/bootstrap-popover-x');
$this->setupAssets('css', ['css/bootstrap-popover-x']);
$this->setupAssets('js', ['js/bootstrap-popover-x']);
parent::init();
}
}
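/*
 * Usage sketch (illustrative, not part of this file): the bundle is normally
 * registered by the PopoverX widget itself, but it can also be registered
 * manually from a view; "$this" below is assumed to be a yii\web\View instance.
 *
 *   use kartik\popover\PopoverXAsset;
 *   PopoverXAsset::register($this);
 */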
| Hyuchiha/SAU | vendor/kartik-v/yii2-popover-x/PopoverXAsset.php | PHP | gpl-2.0 | 905 |
/* Generic write queue implementation */
/*
* (C) 2010 by Holger Hans Peter Freyther
* (C) 2010 by On-Waves
*
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#pragma once
/*! \defgroup write_queue Osmocom msgb write queues
* @{
*/
/*! \file write_queue.h
*/
#include <osmocom/core/select.h>
#include <osmocom/core/msgb.h>
/*! write queue instance */
struct osmo_wqueue {
/*! \brief osmocom file descriptor */
struct osmo_fd bfd;
/*! \brief maximum length of write queue */
unsigned int max_length;
/*! \brief current length of write queue */
unsigned int current_length;
/*! \brief actual linked list implementing the queue */
struct llist_head msg_queue;
	/*! \brief call-back in case queue is readable */
	int (*read_cb)(struct osmo_fd *fd);
	/*! \brief call-back in case queue is writable */
	int (*write_cb)(struct osmo_fd *fd, struct msgb *msg);
	/*! \brief call-back in case queue has exceptions */
int (*except_cb)(struct osmo_fd *fd);
};
void osmo_wqueue_init(struct osmo_wqueue *queue, int max_length);
void osmo_wqueue_clear(struct osmo_wqueue *queue);
int osmo_wqueue_enqueue(struct osmo_wqueue *queue, struct msgb *data);
int osmo_wqueue_bfd_cb(struct osmo_fd *fd, unsigned int what);
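/*
 * Illustrative usage sketch (not part of the original header). The callback
 * functions and the file descriptor are placeholders; error handling and
 * registration with the select loop are omitted.
 *
 *	static struct osmo_wqueue wq;
 *
 *	osmo_wqueue_init(&wq, 100);		// queue at most 100 msgbs
 *	wq.bfd.fd = sock_fd;			// descriptor to write to
 *	wq.write_cb = my_write_cb;		// called once per queued msgb
 *	wq.read_cb = my_read_cb;		// called when the fd is readable
 *	osmo_wqueue_enqueue(&wq, msg);		// queue an msgb for transmission
 */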
/*! @} */
| rubund/debian-libosmocore | include/osmocom/core/write_queue.h | C | gpl-2.0 | 1,936 |
/** @file
Provides library functions for all PEI Services.
Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef __PEI_SERVICES_LIB_H__
#define __PEI_SERVICES_LIB_H__
/**
This service enables a given PEIM to register an interface into the PEI Foundation.
@param PpiList A pointer to the list of interfaces that the caller shall install.
@retval EFI_SUCCESS The interface was successfully installed.
@retval EFI_INVALID_PARAMETER The PpiList pointer is NULL.
@retval EFI_INVALID_PARAMETER Any of the PEI PPI descriptors in the list do not have the
EFI_PEI_PPI_DESCRIPTOR_PPI bit set in the Flags field.
@retval EFI_OUT_OF_RESOURCES There is no additional space in the PPI database.
**/
EFI_STATUS
EFIAPI
PeiServicesInstallPpi (
IN CONST EFI_PEI_PPI_DESCRIPTOR *PpiList
);
/**
This service enables PEIMs to replace an entry in the PPI database with an alternate entry.
@param OldPpi Pointer to the old PEI PPI Descriptors.
@param NewPpi Pointer to the new PEI PPI Descriptors.
@retval EFI_SUCCESS The interface was successfully installed.
@retval EFI_INVALID_PARAMETER The OldPpi or NewPpi is NULL.
@retval EFI_INVALID_PARAMETER Any of the PEI PPI descriptors in the list do not have the
EFI_PEI_PPI_DESCRIPTOR_PPI bit set in the Flags field.
@retval EFI_OUT_OF_RESOURCES There is no additional space in the PPI database.
@retval EFI_NOT_FOUND The PPI for which the reinstallation was requested has not been
installed.
**/
EFI_STATUS
EFIAPI
PeiServicesReInstallPpi (
IN CONST EFI_PEI_PPI_DESCRIPTOR *OldPpi,
IN CONST EFI_PEI_PPI_DESCRIPTOR *NewPpi
);
/**
This service enables PEIMs to discover a given instance of an interface.
@param Guid A pointer to the GUID whose corresponding interface needs to be
found.
@param Instance The N-th instance of the interface that is required.
@param PpiDescriptor A pointer to instance of the EFI_PEI_PPI_DESCRIPTOR.
@param Ppi A pointer to the instance of the interface.
@retval EFI_SUCCESS The interface was successfully returned.
@retval EFI_NOT_FOUND The PPI descriptor is not found in the database.
**/
EFI_STATUS
EFIAPI
PeiServicesLocatePpi (
IN CONST EFI_GUID *Guid,
IN UINTN Instance,
IN OUT EFI_PEI_PPI_DESCRIPTOR **PpiDescriptor,
IN OUT VOID **Ppi
);
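//
// Illustrative call sketch (not part of the original header): locate the first
// instance of a PPI by its GUID. gSomePpiGuid and SomePpi are placeholder
// names; real code would use a concrete PPI GUID and interface type.
//
//   VOID        *SomePpi;
//   EFI_STATUS  Status;
//
//   Status = PeiServicesLocatePpi (&gSomePpiGuid, 0, NULL, &SomePpi);
//   if (!EFI_ERROR (Status)) {
//     // SomePpi now points to the located interface
//   }
//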
/**
This service enables PEIMs to register a given service to be invoked when another service is
installed or reinstalled.
@param NotifyList A pointer to the list of notification interfaces that the caller
shall install.
@retval EFI_SUCCESS The interface was successfully installed.
@retval EFI_INVALID_PARAMETER The NotifyList pointer is NULL.
@retval EFI_INVALID_PARAMETER Any of the PEI notify descriptors in the list do not have the
EFI_PEI_PPI_DESCRIPTOR_NOTIFY_TYPES bit set in the Flags field.
@retval EFI_OUT_OF_RESOURCES There is no additional space in the PPI database.
**/
EFI_STATUS
EFIAPI
PeiServicesNotifyPpi (
IN CONST EFI_PEI_NOTIFY_DESCRIPTOR *NotifyList
);
/**
This service enables PEIMs to ascertain the present value of the boot mode.
@param BootMode A pointer to contain the value of the boot mode.
@retval EFI_SUCCESS The boot mode was returned successfully.
@retval EFI_INVALID_PARAMETER BootMode is NULL.
**/
EFI_STATUS
EFIAPI
PeiServicesGetBootMode (
OUT EFI_BOOT_MODE *BootMode
);
/**
This service enables PEIMs to update the boot mode variable.
@param BootMode The value of the boot mode to set.
@retval EFI_SUCCESS The value was successfully updated
**/
EFI_STATUS
EFIAPI
PeiServicesSetBootMode (
IN EFI_BOOT_MODE BootMode
);
/**
This service enables a PEIM to ascertain the address of the list of HOBs in memory.
@param HobList A pointer to the list of HOBs that the PEI Foundation will initialize.
@retval EFI_SUCCESS The list was successfully returned.
@retval EFI_NOT_AVAILABLE_YET The HOB list is not yet published.
**/
EFI_STATUS
EFIAPI
PeiServicesGetHobList (
OUT VOID **HobList
);
/**
This service enables PEIMs to create various types of HOBs.
@param Type The type of HOB to be installed.
@param Length The length of the HOB to be added.
@param Hob The address of a pointer that will contain the HOB header.
@retval EFI_SUCCESS The HOB was successfully created.
@retval EFI_OUT_OF_RESOURCES There is no additional space for HOB creation.
**/
EFI_STATUS
EFIAPI
PeiServicesCreateHob (
IN UINT16 Type,
IN UINT16 Length,
OUT VOID **Hob
);
/**
This service enables PEIMs to discover additional firmware volumes.
@param Instance This instance of the firmware volume to find. The value 0 is the
Boot Firmware Volume (BFV).
@param VolumeHandle Handle of the firmware volume header of the volume to return.
@retval EFI_SUCCESS The volume was found.
@retval EFI_NOT_FOUND The volume was not found.
@retval EFI_INVALID_PARAMETER FwVolHeader is NULL.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsFindNextVolume (
IN UINTN Instance,
IN OUT EFI_PEI_FV_HANDLE *VolumeHandle
);
/**
This service enables PEIMs to discover additional firmware files.
@param SearchType A filter to find files only of this type.
@param VolumeHandle Pointer to the firmware volume header of the volume to search.
This parameter must point to a valid FFS volume.
@param FileHandle Handle of the current file from which to begin searching.
@retval EFI_SUCCESS The file was found.
@retval EFI_NOT_FOUND The file was not found.
@retval EFI_NOT_FOUND The header checksum was not zero.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsFindNextFile (
IN EFI_FV_FILETYPE SearchType,
IN EFI_PEI_FV_HANDLE VolumeHandle,
IN OUT EFI_PEI_FILE_HANDLE *FileHandle
);
/**
This service enables PEIMs to discover sections of a given type within a valid FFS file.
@param SectionType The value of the section type to find.
@param FileHandle A pointer to the file header that contains the set of sections to
be searched.
@param SectionData A pointer to the discovered section, if successful.
@retval EFI_SUCCESS The section was found.
@retval EFI_NOT_FOUND The section was not found.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsFindSectionData (
IN EFI_SECTION_TYPE SectionType,
IN EFI_PEI_FILE_HANDLE FileHandle,
OUT VOID **SectionData
);
/**
This service enables PEIMs to register the permanent memory configuration
that has been initialized with the PEI Foundation.
@param MemoryBegin The value of a region of installed memory.
@param MemoryLength The corresponding length of a region of installed memory.
@retval EFI_SUCCESS The region was successfully installed in a HOB.
@retval EFI_INVALID_PARAMETER MemoryBegin and MemoryLength are illegal for this system.
@retval EFI_OUT_OF_RESOURCES There is no additional space for HOB creation.
**/
EFI_STATUS
EFIAPI
PeiServicesInstallPeiMemory (
IN EFI_PHYSICAL_ADDRESS MemoryBegin,
IN UINT64 MemoryLength
);
/**
This service enables PEIMs to allocate memory after the permanent memory has been installed by a
PEIM.
@param MemoryType Type of memory to allocate.
@param Pages Number of pages to allocate.
@param Memory Pointer of memory allocated.
@retval EFI_SUCCESS The memory range was successfully allocated.
@retval EFI_INVALID_PARAMETER Type is not equal to AllocateAnyPages.
@retval EFI_NOT_AVAILABLE_YET Called with permanent memory not available.
@retval EFI_OUT_OF_RESOURCES The pages could not be allocated.
**/
EFI_STATUS
EFIAPI
PeiServicesAllocatePages (
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN Pages,
OUT EFI_PHYSICAL_ADDRESS *Memory
);
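//
// Illustrative call sketch (not part of the original header): allocate one page
// of boot-services data memory; the variable names are placeholders.
//
//   EFI_PHYSICAL_ADDRESS  Buffer;
//   EFI_STATUS            Status;
//
//   Status = PeiServicesAllocatePages (EfiBootServicesData, 1, &Buffer);
//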
/**
This service allocates memory from the Hand-Off Block (HOB) heap.
@param Size The number of bytes to allocate from the pool.
@param Buffer If the call succeeds, a pointer to a pointer to the allocate
buffer; undefined otherwise.
@retval EFI_SUCCESS The allocation was successful
@retval EFI_OUT_OF_RESOURCES There is not enough heap to allocate the requested size.
**/
EFI_STATUS
EFIAPI
PeiServicesAllocatePool (
IN UINTN Size,
OUT VOID **Buffer
);
/**
Resets the entire platform.
@retval EFI_SUCCESS The function completed successfully.
@retval EFI_NOT_AVAILABLE_YET The service has not been installed yet.
**/
EFI_STATUS
EFIAPI
PeiServicesResetSystem (
VOID
);
/**
This service is a wrapper for the PEI Service FfsFindByName(), except the pointer to the PEI Services
Table has been removed. See the Platform Initialization Pre-EFI Initialization Core Interface
Specification for details.
@param FileName A pointer to the name of the file to
find within the firmware volume.
  @param VolumeHandle           The firmware volume to search.
  @param FileHandle             Upon exit, points to the found file's
                                handle, or NULL if it could not be found.
@retval EFI_SUCCESS File was found.
@retval EFI_NOT_FOUND File was not found.
@retval EFI_INVALID_PARAMETER VolumeHandle or FileHandle or
FileName was NULL.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsFindFileByName (
IN CONST EFI_GUID *FileName,
IN CONST EFI_PEI_FV_HANDLE VolumeHandle,
OUT EFI_PEI_FILE_HANDLE *FileHandle
);
/**
This service is a wrapper for the PEI Service FfsGetFileInfo(), except the pointer to the PEI Services
Table has been removed. See the Platform Initialization Pre-EFI Initialization Core Interface
Specification for details.
@param FileHandle Handle of the file.
@param FileInfo Upon exit, points to the file's
information.
@retval EFI_SUCCESS File information returned.
@retval EFI_INVALID_PARAMETER If FileHandle does not
represent a valid file.
@retval EFI_INVALID_PARAMETER If FileInfo is NULL.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsGetFileInfo (
IN CONST EFI_PEI_FILE_HANDLE FileHandle,
OUT EFI_FV_FILE_INFO *FileInfo
);
/**
This service is a wrapper for the PEI Service FfsGetVolumeInfo(), except the pointer to the PEI Services
Table has been removed. See the Platform Initialization Pre-EFI Initialization Core Interface
Specification for details.
@param VolumeHandle Handle of the volume.
@param VolumeInfo Upon exit, points to the volume's
information.
  @retval EFI_SUCCESS           Volume information returned.
  @retval EFI_INVALID_PARAMETER If VolumeHandle does not
                                represent a valid volume.
  @retval EFI_INVALID_PARAMETER If VolumeInfo is NULL.
**/
EFI_STATUS
EFIAPI
PeiServicesFfsGetVolumeInfo (
IN EFI_PEI_FV_HANDLE VolumeHandle,
OUT EFI_FV_INFO *VolumeInfo
);
/**
This service is a wrapper for the PEI Service RegisterForShadow(), except the pointer to the PEI Services
Table has been removed. See the Platform Initialization Pre-EFI Initialization Core Interface
Specification for details.
@param FileHandle PEIM's file handle. Must be the currently
executing PEIM.
@retval EFI_SUCCESS The PEIM was successfully registered for
shadowing.
@retval EFI_ALREADY_STARTED The PEIM was previously
registered for shadowing.
@retval EFI_NOT_FOUND The FileHandle does not refer to a
valid file handle.
**/
EFI_STATUS
EFIAPI
PeiServicesRegisterForShadow (
IN EFI_PEI_FILE_HANDLE FileHandle
);
/**
Install a EFI_PEI_FIRMWARE_VOLUME_INFO_PPI instance so the PEI Core will be notified about a new firmware volume.
This function allocates, initializes, and installs a new EFI_PEI_FIRMWARE_VOLUME_INFO_PPI using
the parameters passed in to initialize the fields of the EFI_PEI_FIRMWARE_VOLUME_INFO_PPI instance.
If the resources can not be allocated for EFI_PEI_FIRMWARE_VOLUME_INFO_PPI, then ASSERT().
If the EFI_PEI_FIRMWARE_VOLUME_INFO_PPI can not be installed, then ASSERT().
@param FvFormat Unique identifier of the format of the memory-mapped firmware volume.
This parameter is optional and may be NULL.
If NULL is specified, the EFI_FIRMWARE_FILE_SYSTEM2_GUID format is assumed.
@param FvInfo Points to a buffer which allows the EFI_PEI_FIRMWARE_VOLUME_PPI to process the volume.
The format of this buffer is specific to the FvFormat. For memory-mapped firmware volumes,
this typically points to the first byte of the firmware volume.
@param FvInfoSize The size, in bytes, of FvInfo. For memory-mapped firmware volumes,
this is typically the size of the firmware volume.
@param ParentFvName If the new firmware volume originated from a file in a different firmware volume,
then this parameter specifies the GUID name of the originating firmware volume.
Otherwise, this parameter must be NULL.
@param ParentFileName If the new firmware volume originated from a file in a different firmware volume,
then this parameter specifies the GUID file name of the originating firmware file.
Otherwise, this parameter must be NULL.
**/
VOID
EFIAPI
PeiServicesInstallFvInfoPpi (
IN CONST EFI_GUID *FvFormat, OPTIONAL
IN CONST VOID *FvInfo,
IN UINT32 FvInfoSize,
IN CONST EFI_GUID *ParentFvName, OPTIONAL
IN CONST EFI_GUID *ParentFileName OPTIONAL
);
#endif
| carmark/vbox | src/VBox/Devices/EFI/Firmware/MdePkg/Include/Library/PeiServicesLib.h | C | gpl-2.0 | 15,515 |
#ifndef foopulsedeviceporthfoo
#define foopulsedeviceporthfoo
/***
This file is part of PulseAudio.
Copyright 2004-2006 Lennart Poettering
Copyright 2006 Pierre Ossman <[email protected]> for Cendio AB
Copyright 2011 David Henningsson, Canonical Ltd.
PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 2.1 of the License,
or (at your option) any later version.
PulseAudio is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/
typedef struct pa_device_port pa_device_port;
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <inttypes.h>
#include <pulse/def.h>
#include <pulsecore/object.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/core.h>
#include <pulsecore/card.h>
struct pa_device_port {
pa_object parent; /* Needed for reference counting */
pa_core *core;
pa_card *card;
char *name;
char *description;
unsigned priority;
pa_available_t available; /* PA_AVAILABLE_UNKNOWN, PA_AVAILABLE_NO or PA_AVAILABLE_YES */
pa_proplist *proplist;
pa_hashmap *profiles; /* Does not own the profiles */
pa_direction_t direction;
int64_t latency_offset;
/* .. followed by some implementation specific data */
};
PA_DECLARE_PUBLIC_CLASS(pa_device_port);
#define PA_DEVICE_PORT(s) (pa_device_port_cast(s))
#define PA_DEVICE_PORT_DATA(d) ((void*) ((uint8_t*) d + PA_ALIGN(sizeof(pa_device_port))))
typedef struct pa_device_port_new_data {
char *name;
char *description;
pa_available_t available;
pa_direction_t direction;
} pa_device_port_new_data;
pa_device_port_new_data *pa_device_port_new_data_init(pa_device_port_new_data *data);
void pa_device_port_new_data_set_name(pa_device_port_new_data *data, const char *name);
void pa_device_port_new_data_set_description(pa_device_port_new_data *data, const char *description);
void pa_device_port_new_data_set_available(pa_device_port_new_data *data, pa_available_t available);
void pa_device_port_new_data_set_direction(pa_device_port_new_data *data, pa_direction_t direction);
void pa_device_port_new_data_done(pa_device_port_new_data *data);
pa_device_port *pa_device_port_new(pa_core *c, pa_device_port_new_data *data, size_t extra);
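/* Illustrative sketch (not part of the original header) of how a backend might
 * fill in pa_device_port_new_data before calling pa_device_port_new(). The
 * string values and the "core" pointer are placeholders.
 *
 *   pa_device_port_new_data data;
 *   pa_device_port *port;
 *
 *   pa_device_port_new_data_init(&data);
 *   pa_device_port_new_data_set_name(&data, "analog-output");
 *   pa_device_port_new_data_set_description(&data, "Analog Output");
 *   pa_device_port_new_data_set_direction(&data, PA_DIRECTION_OUTPUT);
 *   port = pa_device_port_new(core, &data, 0);
 *   pa_device_port_new_data_done(&data);
 */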
/* The port's available status has changed */
void pa_device_port_set_available(pa_device_port *p, pa_available_t available);
void pa_device_port_set_latency_offset(pa_device_port *p, int64_t offset);
pa_device_port *pa_device_port_find_best(pa_hashmap *ports);
#endif
| marmarek/qubes-gui-agent-linux | pulse/pulsecore-7.1/device-port.h | C | gpl-2.0 | 2,925 |
/*
* linux/fs/fat/file.c
*
* Written 1992,1993 by Werner Almesberger
*
* regular file handling primitives for fat-based filesystems
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/msdos_fs.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
int fat_generic_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
u32 __user *user_attr = (u32 __user *)arg;
switch (cmd) {
case FAT_IOCTL_GET_ATTRIBUTES:
{
u32 attr;
if (inode->i_ino == MSDOS_ROOT_INO)
attr = ATTR_DIR;
else
attr = fat_attr(inode);
return put_user(attr, user_attr);
}
case FAT_IOCTL_SET_ATTRIBUTES:
{
u32 attr, oldattr;
int err, is_dir = S_ISDIR(inode->i_mode);
struct iattr ia;
err = get_user(attr, user_attr);
if (err)
return err;
mutex_lock(&inode->i_mutex);
if (IS_RDONLY(inode)) {
err = -EROFS;
goto up;
}
/*
* ATTR_VOLUME and ATTR_DIR cannot be changed; this also
* prevents the user from turning us into a VFAT
* longname entry. Also, we obviously can't set
* any of the NTFS attributes in the high 24 bits.
*/
attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
/* Merge in ATTR_VOLUME and ATTR_DIR */
attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
(is_dir ? ATTR_DIR : 0);
oldattr = fat_attr(inode);
/* Equivalent to a chmod() */
ia.ia_valid = ATTR_MODE | ATTR_CTIME;
if (is_dir) {
ia.ia_mode = MSDOS_MKMODE(attr,
S_IRWXUGO & ~sbi->options.fs_dmask)
| S_IFDIR;
} else {
ia.ia_mode = MSDOS_MKMODE(attr,
(S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO))
& ~sbi->options.fs_fmask)
| S_IFREG;
}
/* The root directory has no attributes */
if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
err = -EINVAL;
goto up;
}
if (sbi->options.sys_immutable) {
if ((attr | oldattr) & ATTR_SYS) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
err = -EPERM;
goto up;
}
}
}
/* This MUST be done before doing anything irreversible... */
err = notify_change(filp->f_dentry, &ia);
if (err)
goto up;
if (sbi->options.sys_immutable) {
if (attr & ATTR_SYS)
inode->i_flags |= S_IMMUTABLE;
else
				inode->i_flags &= ~S_IMMUTABLE;
}
MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED;
mark_inode_dirty(inode);
up:
mutex_unlock(&inode->i_mutex);
return err;
}
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
}
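/*
 * Illustrative userspace sketch (not part of this file): reading the FAT
 * attribute bits of an open file through the ioctl above. "fd" is assumed to
 * be a descriptor on a FAT mount; error handling is omitted.
 *
 *	__u32 attr;
 *	ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attr);
 *	if (attr & ATTR_RO)
 *		printf("read-only\n");
 */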
const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.readv = generic_file_readv,
.writev = generic_file_writev,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.ioctl = fat_generic_ioctl,
.fsync = file_fsync,
.sendfile = generic_file_sendfile,
};
static int fat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = inode->i_size, count = size - inode->i_size;
int err;
err = generic_cont_expand_simple(inode, size);
if (err)
goto out;
inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (IS_SYNC(inode))
err = sync_page_range_nolock(inode, mapping, start, count);
out:
return err;
}
int fat_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
struct inode *inode = dentry->d_inode;
int mask, error = 0;
lock_kernel();
/*
	 * Expand the file. inode_setattr() updates ->i_size before
	 * calling ->truncate(), but FAT needs to fill the hole before
	 * that happens, so do the expansion here first.
*/
if (attr->ia_valid & ATTR_SIZE) {
if (attr->ia_size > inode->i_size) {
error = fat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
goto out;
attr->ia_valid &= ~ATTR_SIZE;
}
}
error = inode_change_ok(inode, attr);
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
if (((attr->ia_valid & ATTR_UID) &&
(attr->ia_uid != sbi->options.fs_uid)) ||
((attr->ia_valid & ATTR_GID) &&
(attr->ia_gid != sbi->options.fs_gid)) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~MSDOS_VALID_MODE)))
error = -EPERM;
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
error = inode_setattr(inode, attr);
if (error)
goto out;
if (S_ISDIR(inode->i_mode))
mask = sbi->options.fs_dmask;
else
mask = sbi->options.fs_fmask;
inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
out:
unlock_kernel();
return error;
}
EXPORT_SYMBOL_GPL(fat_notify_change);
/* Free all clusters after the skip'th cluster. */
static int fat_free(struct inode *inode, int skip)
{
struct super_block *sb = inode->i_sb;
int err, wait, free_start, i_start, i_logstart;
if (MSDOS_I(inode)->i_start == 0)
return 0;
fat_cache_inval_inode(inode);
wait = IS_DIRSYNC(inode);
i_start = free_start = MSDOS_I(inode)->i_start;
i_logstart = MSDOS_I(inode)->i_logstart;
/* First, we write the new file size. */
if (!skip) {
MSDOS_I(inode)->i_start = 0;
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
if (wait) {
err = fat_sync_inode(inode);
if (err) {
MSDOS_I(inode)->i_start = i_start;
MSDOS_I(inode)->i_logstart = i_logstart;
return err;
}
} else
mark_inode_dirty(inode);
/* Write a new EOF, and get the remaining cluster chain for freeing. */
if (skip) {
struct fat_entry fatent;
int ret, fclus, dclus;
ret = fat_get_cluster(inode, skip - 1, &fclus, &dclus);
if (ret < 0)
return ret;
else if (ret == FAT_ENT_EOF)
return 0;
fatent_init(&fatent);
ret = fat_ent_read(inode, &fatent, dclus);
if (ret == FAT_ENT_EOF) {
fatent_brelse(&fatent);
return 0;
} else if (ret == FAT_ENT_FREE) {
fat_fs_panic(sb,
"%s: invalid cluster chain (i_pos %lld)",
__FUNCTION__, MSDOS_I(inode)->i_pos);
ret = -EIO;
} else if (ret > 0) {
err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
if (err)
ret = err;
}
fatent_brelse(&fatent);
if (ret < 0)
return ret;
free_start = ret;
}
inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9);
/* Freeing the remained cluster chain */
return fat_free_clusters(inode, free_start);
}
void fat_truncate(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
const unsigned int cluster_size = sbi->cluster_size;
int nr_clusters;
/*
* This protects against truncating a file bigger than it was then
* trying to write into the hole.
*/
if (MSDOS_I(inode)->mmu_private > inode->i_size)
MSDOS_I(inode)->mmu_private = inode->i_size;
nr_clusters = (inode->i_size + (cluster_size - 1)) >> sbi->cluster_bits;
lock_kernel();
fat_free(inode, nr_clusters);
unlock_kernel();
}
struct inode_operations fat_file_inode_operations = {
.truncate = fat_truncate,
.setattr = fat_notify_change,
};
| zhoupeng/spice4xen | linux-2.6.18-xen.hg/fs/fat/file.c | C | gpl-2.0 | 7,055 |
/*
* [The "BSD license"]
* Copyright (c) 2010 Terence Parr
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.antlr.tool;
import org.antlr.analysis.DFAState;
import org.antlr.analysis.DecisionProbe;
import org.antlr.analysis.NFAState;
import org.antlr.misc.Utils;
import org.stringtemplate.v4.ST;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/** Reports a potential parsing issue with a decision; the decision is
* nondeterministic in some way.
*/
public class GrammarNonDeterminismMessage extends Message {
public DecisionProbe probe;
public DFAState problemState;
public GrammarNonDeterminismMessage(DecisionProbe probe,
DFAState problemState)
{
super(ErrorManager.MSG_GRAMMAR_NONDETERMINISM);
this.probe = probe;
this.problemState = problemState;
// flip msg ID if alts are actually token refs in Tokens rule
if ( probe.dfa.isTokensRuleDecision() ) {
setMessageID(ErrorManager.MSG_TOKEN_NONDETERMINISM);
}
}
public String toString() {
GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
line = decisionASTNode.getLine();
column = decisionASTNode.getCharPositionInLine();
String fileName = probe.dfa.nfa.grammar.getFileName();
if ( fileName!=null ) {
file = fileName;
}
ST st = getMessageTemplate();
// Now fill template with information about problemState
List labels = probe.getSampleNonDeterministicInputSequence(problemState);
String input = probe.getInputSequenceDisplay(labels);
st.add("input", input);
if ( probe.dfa.isTokensRuleDecision() ) {
Set disabledAlts = probe.getDisabledAlternatives(problemState);
for (Iterator it = disabledAlts.iterator(); it.hasNext();) {
Integer altI = (Integer) it.next();
String tokenName =
probe.getTokenNameForTokensRuleAlt(altI.intValue());
// reset the line/col to the token definition (pick last one)
NFAState ruleStart =
probe.dfa.nfa.grammar.getRuleStartState(tokenName);
line = ruleStart.associatedASTNode.getLine();
column = ruleStart.associatedASTNode.getCharPositionInLine();
st.add("disabled", tokenName);
}
}
else {
st.add("disabled", probe.getDisabledAlternatives(problemState));
}
List nondetAlts = probe.getNonDeterministicAltsForState(problemState);
NFAState nfaStart = probe.dfa.getNFADecisionStartState();
// all state paths have to begin with same NFA state
int firstAlt = 0;
if ( nondetAlts!=null ) {
for (Iterator iter = nondetAlts.iterator(); iter.hasNext();) {
Integer displayAltI = (Integer) iter.next();
if ( DecisionProbe.verbose ) {
int tracePathAlt =
nfaStart.translateDisplayAltToWalkAlt(displayAltI.intValue());
if ( firstAlt == 0 ) {
firstAlt = tracePathAlt;
}
List path =
probe.getNFAPathStatesForAlt(firstAlt,
tracePathAlt,
labels);
st.addAggr("paths.{alt, states}", displayAltI, path);
}
else {
if ( probe.dfa.isTokensRuleDecision() ) {
// alts are token rules, convert to the names instead of numbers
String tokenName =
probe.getTokenNameForTokensRuleAlt(displayAltI.intValue());
st.add("conflictingTokens", tokenName);
}
else {
st.add("conflictingAlts", displayAltI);
}
}
}
}
st.add("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
return super.toString(st);
}
}
| raj-bhatia/grooveip-ios-public | submodules/externals/antlr3/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java | Java | gpl-2.0 | 4,820 |
/*
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package nsk.share.gc.gp.obj;
import nsk.share.gc.gp.GarbageProducer;
import nsk.share.gc.MemoryObject;
public class MemoryObjectProducer implements GarbageProducer<MemoryObject> {
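    // Creates a MemoryObject of the requested size; validate() is intentionally
    // left empty for this producer.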
public MemoryObject create(long memory) {
return new MemoryObject((int) memory);
}
public void validate(MemoryObject obj) {
}
}
| md-5/jdk10 | test/hotspot/jtreg/vmTestbase/nsk/share/gc/gp/obj/MemoryObjectProducer.java | Java | gpl-2.0 | 1,417 |
#!/usr/bin/perl
#
# DW::SiteScheme
#
# SiteScheme related functions
#
# Authors:
# Andrea Nall <[email protected]>
#
# Copyright (c) 2010-2013 by Dreamwidth Studios, LLC.
#
# This program is free software; you may redistribute it and/or modify it under
# the same terms as Perl itself. For a copy of the license, please reference
# 'perldoc perlartistic' or 'perldoc perlgpl'.
#
=head1 NAME
DW::SiteScheme - SiteScheme related functions
=head1 SYNOPSIS
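A minimal illustrative sketch; the scheme names used here are the defaults
declared in this module:
    my $ss = DW::SiteScheme->get;                  # object for the current scheme
    my @chain = DW::SiteScheme->inheritance( 'celerity' );
    # @chain is ( 'celerity', 'common', 'global' )
    DW::SiteScheme->set_for_user( 'lynx' );        # remember preference for remote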
=cut
package DW::SiteScheme;
use strict;
my %sitescheme_data = (
blueshift => { parent => 'common', title => "Blueshift" },
celerity => { parent => 'common', title => "Celerity" },
common => { parent => 'global', internal => 1 },
'gradation-horizontal' => { parent => 'common', title => "Gradation Horizontal" },
'gradation-vertical' => { parent => 'common', title => "Gradation Vertical" },
lynx => { parent => 'common', title => "Lynx (light mode)" },
global => { engine => 'current' },
tt_runner => { engine => 'bml', internal => 1 },
);
my $data_loaded = 0;
my @sitescheme_order = ();
=head2 C<< DW::SiteScheme->get( $scheme ) >>
$scheme defaults to the current sitescheme.
Returns a DW::SiteScheme object.
=cut
sub get {
my ( $class, $scheme ) = @_;
$class->__load_data;
$scheme ||= $class->current;
$scheme = $class->default unless exists $sitescheme_data{$scheme};
return $class->new($scheme);
}
# should not be called directly
sub new {
my ( $class, $scheme ) = @_;
return bless { scheme => $scheme }, $class;
}
sub name {
return $_[0]->{scheme};
}
sub tt_file {
return undef unless $_[0]->supports_tt;
return $_[0]->{scheme} . '.tt';
}
sub engine {
$_[0]->__load_data;
return $sitescheme_data{$_[0]->{scheme}}->{engine} || 'tt';
}
sub supports_tt {
return $_[0]->engine eq 'tt' || $_[0]->engine eq 'current';
}
sub supports_bml {
return $_[0]->engine eq 'bml' || $_[0]->engine eq 'current';
}
=head2 C<< DW::SiteScheme->inheritance( $scheme ) >>
Scheme defaults to the current sitescheme.
Returns the inheritance array, with the provided scheme being at the start of the list.
Also works on a DW::SiteScheme object
=cut
sub inheritance {
my ( $self, $scheme ) = @_;
$self->__load_data;
$scheme = $self->{scheme} if ref $self;
$scheme ||= $self->current;
my @scheme;
push @scheme, $scheme;
push @scheme, $scheme
while exists $sitescheme_data{$scheme}
&& ( $scheme = $sitescheme_data{$scheme}->{parent} );
return @scheme;
}
sub get_vars {
return {
remote => LJ::get_remote()
};
}
sub __load_data {
return if $data_loaded;
$data_loaded = 1;
# function to merge additional site schemes into our base site scheme data
# new site scheme row overwrites original site schemes, if there is a conflict
my $merge_data = sub {
my ( %data ) = @_;
foreach my $k ( keys %data ) {
$sitescheme_data{$k} = { %{ $sitescheme_data{$k} || {} }, %{ $data{$k} } };
}
};
my @schemes = @LJ::SCHEMES;
LJ::Hooks::run_hooks( 'modify_scheme_list', \@schemes, $merge_data );
    # take the final site scheme list (after all modifications)
foreach my $row ( @schemes ) {
my $scheme = $row->{scheme};
# copy over any information from the modified scheme list
# into the site scheme data
my $targ = ( $sitescheme_data{$scheme} ||= {} );
foreach my $k ( keys %$row ) {
$targ->{$k} = $row->{$k};
}
next if $targ->{disabled};
# and then add it to the list of site schemes
push @sitescheme_order, $scheme;
}
}
=head2 C<< DW::SiteScheme->available >>
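Returns the data hashrefs of all non-disabled siteschemes, in configuration order.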
=cut
sub available {
$_[0]->__load_data;
return map { $sitescheme_data{$_} } @sitescheme_order;
}
=head2 C<< DW::SiteScheme->current >>
Get the user's current sitescheme, using the following in order:
=over
=item bml_use_scheme note
=item skin / usescheme GET argument
=item BMLschemepref cookie
=item Default sitescheme ( first sitescheme in sitescheme_order )
=item 'global'
=back
=cut
sub current {
my $r = DW::Request->get;
$_[0]->__load_data;
my $rv;
if ( defined $r ) {
$rv = $r->note( 'bml_use_scheme' ) ||
$r->get_args->{skin} ||
$r->get_args->{usescheme} ||
$r->cookie( 'BMLschemepref' );
}
return $rv if defined $rv and defined $sitescheme_data{$rv};
return $_[0]->default;
}
=head2 C<< DW::SiteScheme->default >>
Get the default sitescheme.
=cut
sub default {
$_[0]->__load_data;
return $sitescheme_order[0] ||
'global';
}
=head2 C<< DW::SiteScheme->set_for_request( $scheme ) >>
Set the sitescheme for the request.
Note: this must be called early enough in the request
(before calling into bml_handler for BML, or before render_template for TT);
otherwise it has no effect.
=cut
sub set_for_request {
my $r = DW::Request->get;
return 0 unless exists $sitescheme_data{$_[1]};
$r->note( 'bml_use_scheme', $_[1] );
return 1;
}
=head2 C<< DW::SiteScheme->set_for_user( $scheme, $u ) >>
Set the sitescheme for the user.
If $u is not passed, this will default to remote.
If $u ( or remote ) is undef, this will only set the cookie.
Note: If done early enough in the process this will affect the current request.
See the note on set_for_request
=cut
sub set_for_user {
my $r = DW::Request->get;
my $scheme = $_[1];
my $u = exists $_[2] ? $_[2] : LJ::get_remote();
return 0 unless exists $sitescheme_data{$scheme};
my $cval = $scheme;
if ( $scheme eq $sitescheme_order[0] && !$LJ::SAVE_SCHEME_EXPLICITLY ) {
$cval = undef;
$r->delete_cookie( domain => ".$LJ::DOMAIN", name => 'BMLschemepref' );
}
my $expires = undef;
if ($u) {
# set a userprop to remember their schemepref
$u->set_prop( schemepref => $scheme );
# cookie expires when session expires
$expires = $u->{_session}->{timeexpire} if $u->{_session}->{exptype} eq "long";
}
$r->add_cookie(
name => 'BMLschemepref',
value => $cval,
expires => $expires,
domain => ".$LJ::DOMAIN",
) if $cval;
return 1;
}
1;
| endlist/dw-free | cgi-bin/DW/SiteScheme.pm | Perl | gpl-2.0 | 6,291 |
#ifndef MATRIX2F_H
#define MATRIX2F_H
#include <cstdio>
class Vector2f;
// 2x2 Matrix, stored in column major order (OpenGL style)
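//
// Illustrative usage (assumes Vector2f has an (x, y) constructor):
//   Matrix2f r = Matrix2f::rotation( 90.f );   // rotation given in degrees
//   Vector2f v = r * Vector2f( 1.f, 0.f );     // matrix-vector product
//   float    e = r( 0, 1 );                    // element at row 0, column 1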
class Matrix2f
{
public:
	// Fill a 2x2 matrix with "fill", which defaults to 0.
Matrix2f( float fill = 0.f );
Matrix2f( float m00, float m01,
float m10, float m11 );
// setColumns = true ==> sets the columns of the matrix to be [v0 v1]
// otherwise, sets the rows
Matrix2f( const Vector2f& v0, const Vector2f& v1, bool setColumns = true );
Matrix2f( const Matrix2f& rm ); // copy constructor
Matrix2f& operator = ( const Matrix2f& rm ); // assignment operator
// no destructor necessary
const float& operator () ( int i, int j ) const;
float& operator () ( int i, int j );
Vector2f getRow( int i ) const;
void setRow( int i, const Vector2f& v );
Vector2f getCol( int j ) const;
void setCol( int j, const Vector2f& v );
float determinant();
Matrix2f inverse( bool* pbIsSingular = NULL, float epsilon = 0.f );
void transpose();
Matrix2f transposed() const;
// ---- Utility ----
operator float* (); // automatic type conversion for GL
void print();
static float determinant2x2( float m00, float m01,
float m10, float m11 );
static Matrix2f ones();
static Matrix2f identity();
static Matrix2f rotation( float degrees );
private:
float m_elements[ 4 ];
};
// Scalar-Matrix multiplication
Matrix2f operator * ( float f, const Matrix2f& m );
Matrix2f operator * ( const Matrix2f& m, float f );
// Matrix-Vector multiplication
// 2x2 * 2x1 ==> 2x1
Vector2f operator * ( const Matrix2f& m, const Vector2f& v );
// Matrix-Matrix multiplication
Matrix2f operator * ( const Matrix2f& x, const Matrix2f& y );
#endif // MATRIX2F_H
| tuituji/6.837-MIT-CG | zero/include/vecmath/Matrix2f.h | C | gpl-2.0 | 1,765 |
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
int qp;
vp8::GetQp(data, size, &qp);
}
} // namespace webrtc
| golden1232004/webrtc_new | webrtc/test/fuzzers/vp8_qp_parser_fuzzer.cc | C++ | gpl-3.0 | 620 |
-----------------------------------------
-- ID: 5174
-- Item: tavnazian_taco
-- Food Effect: 30Min, All Races
-----------------------------------------
-- Health 20
-- Magic 20
-- Dexterity 4
-- Agility 4
-- Vitality 6
-- Charisma 4
-- Defense % 25
-- HP Recovered While Healing 1
-- MP Recovered While Healing 1
-- Defense Cap 150
-----------------------------------------
require("scripts/globals/status")
require("scripts/globals/msg")
-----------------------------------------
function onItemCheck(target)
local result = 0
if target:hasStatusEffect(dsp.effect.FOOD) or target:hasStatusEffect(dsp.effect.FIELD_SUPPORT_FOOD) then
result = dsp.msg.basic.IS_FULL
end
return result
end
function onItemUse(target)
target:addStatusEffect(dsp.effect.FOOD,0,0,1800,5174)
end
function onEffectGain(target, effect)
target:addMod(dsp.mod.HP, 20)
target:addMod(dsp.mod.MP, 20)
target:addMod(dsp.mod.DEX, 4)
target:addMod(dsp.mod.AGI, 4)
target:addMod(dsp.mod.VIT, 6)
target:addMod(dsp.mod.CHR, 4)
target:addMod(dsp.mod.FOOD_DEFP, 25)
target:addMod(dsp.mod.FOOD_DEF_CAP, 150)
target:addMod(dsp.mod.HPHEAL, 1)
target:addMod(dsp.mod.MPHEAL, 1)
end
function onEffectLose(target, effect)
target:delMod(dsp.mod.HP, 20)
target:delMod(dsp.mod.MP, 20)
target:delMod(dsp.mod.DEX, 4)
target:delMod(dsp.mod.AGI, 4)
target:delMod(dsp.mod.VIT, 6)
target:delMod(dsp.mod.CHR, 4)
target:delMod(dsp.mod.FOOD_DEFP, 25)
target:delMod(dsp.mod.FOOD_DEF_CAP, 150)
target:delMod(dsp.mod.HPHEAL, 1)
target:delMod(dsp.mod.MPHEAL, 1)
end
| ffxijuggalo/darkstar | scripts/globals/items/tavnazian_taco.lua | Lua | gpl-3.0 | 1,615 |
<?php
/**
*
* @package mahara
* @subpackage artefact-comment-import-leap
* @author Catalyst IT Ltd
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL version 3 or later
* @copyright For copyright information on Mahara, please see the README file distributed with this software.
*
*/
defined('INTERNAL') || die();
/**
* Implements LEAP2A import of comment entries into Mahara
*
*/
class LeapImportComment extends LeapImportArtefactPlugin {
/**
* Import an entry as a comment (with associated attachments)
* on an artefact or view
*/
const STRATEGY_IMPORT_AS_COMMENT = 1;
/**
* Hack to allow comments to be created before the thing they
* comment on (a view or another artefact) gets created.
*
* On creation, point comments at this temporary view until the
* setup_relationships stage of the import, at which time the
* correct comment relationship will be restored.
*/
private static $tempview = null;
private static $savetempview = false;
public static function create_temporary_view($user) {
$time = db_format_timestamp(time());
$viewdata = (object) array(
'owner' => $user,
'title' => '--',
'type' => 'portfolio',
'numrows' => 1,
'numcolumns' => 1,
'ctime' => $time,
'mtime' => $time,
'atime' => $time,
);
return self::$tempview = insert_record('view', $viewdata, 'id', true);
}
/**
* Delete the temporary view
*/
public static function cleanup(PluginImportLeap $importer) {
if (self::$tempview) {
if (self::$savetempview) {
$title = get_string('entriesimportedfromleapexport', 'artefact.comment');
set_field('view', 'title', $title, 'id', self::$tempview);
}
else {
delete_records('view', 'id', self::$tempview);
}
}
}
public static function get_import_strategies_for_entry(SimpleXMLElement $entry, PluginImportLeap $importer) {
$strategies = array();
if (PluginImportLeap::is_rdf_type($entry, $importer, 'entry')
&& $entry->xpath('mahara:artefactplugin[@mahara:type="comment"]')) {
// Check that the entry 'reflects_on' something
$otherentries = array();
foreach ($entry->link as $link) {
if ($importer->curie_equals($link['rel'], $importer->get_leap2a_namespace(), 'reflects_on') && isset($link['href'])) {
$otherentries[] = (string)$link['href'];
}
}
if (count($otherentries) == 1) {
$strategies[] = array(
'strategy' => self::STRATEGY_IMPORT_AS_COMMENT,
'score' => 100,
'other_required_entries' => array(),
);
}
}
return $strategies;
}
/**
* Import from entry requests for Mahara comments
*
* @param PluginImportLeap $importer
* @return updated DB
* @throw ImportException
*/
public static function import_from_requests(PluginImportLeap $importer) {
if ($entry_requests = get_records_select_array('import_entry_requests', 'importid = ? AND entrytype = ?', array($importer->get('importertransport')->get('importid'), 'comment'))) {
foreach ($entry_requests as $entry_request) {
self::create_artefact_from_request($importer, $entry_request);
}
}
}
/**
* Logic to figure out how to process an entry into a comment
* Used by import_using_strategy() and add_import_entry_request_using_strategy().
*
* @param SimpleXMLElement $entry
* @param PluginImportLeap $importer
* @param unknown_type $strategy
* @param array $otherentries
* @return array An array of config stuff to either create the comment, or store an import request.
* @throws ImportException
*/
private static function get_entry_data_using_strategy(SimpleXMLElement $entry, PluginImportLeap $importer, $strategy, array $otherentries) {
if ($strategy != self::STRATEGY_IMPORT_AS_COMMENT) {
throw new ImportException($importer, 'TODO: get_string: unknown strategy chosen for importing entry');
}
$description = PluginImportLeap::get_entry_content($entry, $importer);
$type = isset($entry->content['type']) ? (string)$entry->content['type'] : 'text';
if ($type == 'text') {
$description = format_whitespace($description);
}
if (isset($entry->author->name) && strlen($entry->author->name)) {
$authorname = (string)$entry->author->name;
}
else {
$author = $importer->get('usr');
}
return array(
'owner' => $importer->get('usr'),
'type' => 'comment',
'content' => array(
'title' => (string)$entry->title,
'description' => $description,
'ctime' => (string)$entry->published,
'mtime' => (string)$entry->updated,
'private' => (int)PluginImportLeap::is_correct_category_scheme($entry, $importer, 'audience', 'Private'),
'authorname' => isset($authorname) ? $authorname : null,
'author' => isset($author) ? $author : null,
'tags' => PluginImportLeap::get_entry_tags($entry),
),
);
}
public static function add_import_entry_request_using_strategy(SimpleXMLElement $entry, PluginImportLeap $importer, $strategy, array $otherentries) {
$config = self::get_entry_data_using_strategy($entry, $importer, $strategy, $otherentries);
return PluginImportLeap::add_import_entry_request(
$importer->get('importertransport')->get('importid'),
(string)$entry->id,
self::STRATEGY_IMPORT_AS_COMMENT,
'comment',
$config
);
}
public static function import_using_strategy(SimpleXMLElement $entry, PluginImportLeap $importer, $strategy, array $otherentries) {
$config = self::get_entry_data_using_strategy($entry, $importer, $strategy, $otherentries);
$content = $config['content'];
$comment = new ArtefactTypeComment();
$comment->set('title', $content['title']);
        $comment->set('description', $content['description']);
if ($content['ctime']) {
$comment->set('ctime', $content['ctime']);
}
if ($content['mtime']) {
$comment->set('mtime', $content['mtime']);
}
$comment->set('private', $content['private']);
$comment->set('owner', $config['owner']);
if ($content['authorname']) {
$comment->set('authorname', $content['authorname']);
}
else {
$comment->set('author', $content['author']);
}
if (empty(self::$tempview)) {
self::create_temporary_view($config['owner']);
}
$comment->set('onview', self::$tempview);
$comment->set('tags', $content['tags']);
$comment->commit();
$artefactmapping = array();
$artefactmapping[(string)$entry->id] = array($comment->get('id'));
return $artefactmapping;
}
/**
* Add an import entry request as a comment from the given entry
*
* @param SimpleXMLElement $entry The entry to create the comment from
* @param PluginImportLeap $importer The importer
*/
private static function add_import_entry_request_comment(SimpleXMLElement $entry, PluginImportLeap $importer) {
}
/**
* Get the id of the entry reflected on by a comment entry
*/
public static function get_referent_entryid(SimpleXMLElement $entry, PluginImportLeap $importer) {
foreach ($entry->link as $link) {
if ($importer->curie_equals($link['rel'], $importer->get_leap2a_namespace(), 'reflects_on') && isset($link['href'])) {
return (string)$link['href'];
}
}
// Shouldn't happen -- this was checked when offering the strategy
throw new ImportException($importer, 'TODO: get_string: cannot find an entry for a comment to comment on');
}
public static function get_comment_instance(SimpleXMLElement $entry, PluginImportLeap $importer) {
$artefactids = $importer->get_artefactids_imported_by_entryid((string)$entry->id);
return new ArtefactTypeComment($artefactids[0]);
}
/**
* Relate comments to the artefacts they comment on
* Attach comments to comments
*
*/
public static function setup_relationships_from_requests(PluginImportLeap $importer) {
if ($entry_requests = get_records_select_array('import_entry_requests', 'importid = ? AND entrytype = ?', array($importer->get('importertransport')->get('importid'), 'comment'))) {
foreach ($entry_requests as $entry_request) {
$entry = $importer->get_entry_by_id($entry_request->entryid);
self::setup_relationships($entry, $importer);
}
}
}
/**
* Relate comments to the artefacts they comment on
* Attach comments to comments
*/
public static function setup_relationships(SimpleXMLElement $entry, PluginImportLeap $importer) {
$comment = null;
$newartefacts = array(); // save any newly created extra ones (eg enclosures)
$referentid = self::get_referent_entryid($entry, $importer);
// Link artefact comments; view comments are done later
if ($artefactids = $importer->get_artefactids_imported_by_entryid($referentid)) {
$comment = self::get_comment_instance($entry, $importer);
$comment->set('onartefact', $artefactids[0]);
$comment->set('onview', null);
}
// Attachments
foreach ($entry->link as $link) {
if (!$comment) {
$comment = self::get_comment_instance($entry, $importer);
}
if ($id = $importer->create_attachment($entry, $link, $comment)) {
$newartefacts[] = $id;
}
}
if ($comment) {
$comment->commit();
}
return $newartefacts;
}
/**
* Fix comments to point to the right view. Probably more
* appropriate in setup_relationships. To do that we would have
* to change that call to happen after views are created.
*/
public static function setup_view_relationships_from_request(PluginImportLeap $importer) {
if ($entry_requests = get_records_select_array('import_entry_requests', 'importid = ? AND entrytype = ?', array($importer->get('importertransport')->get('importid'), 'comment'))) {
foreach ($entry_requests as $entry_request) {
$commentids = unserialize($entry_request->artefactmapping);
$comment = new ArtefactTypeComment($commentids[0]);
if ($comment->get('onartefact')) {
continue;
}
$entry = $importer->get_entry_by_id($entry_request->entryid);
$referentid = self::get_referent_entryid($entry, $importer);
if ($viewid = $importer->get_viewid_imported_by_entryid($referentid)) {
$comment->set('onview', $viewid);
$comment->commit();
}
else {
// Nothing to link this comment to, so leave it in the temporary view.
self::$savetempview = true;
}
}
}
}
/**
* Fix comments to point to the right view. Probably more
* appropriate in setup_relationships. To do that we would have
* to change that call to happen after views are created.
*/
public static function setup_view_relationships(SimpleXMLElement $entry, PluginImportLeap $importer) {
$comment = self::get_comment_instance($entry, $importer);
if ($comment->get('onartefact')) {
return;
}
$referentid = self::get_referent_entryid($entry, $importer);
if ($viewid = $importer->get_viewid_imported_by_entryid($referentid)) {
$comment->set('onview', $viewid);
$comment->commit();
}
else {
// Nothing to link this comment to, so leave it in the temporary view.
self::$savetempview = true;
}
}
/**
* Render import entry requests for Mahara comments
* @param PluginImportLeap $importer
* @return HTML code for displaying comments and choosing how to import them
*/
public static function render_import_entry_requests(PluginImportLeap $importer) {
$importid = $importer->get('importertransport')->get('importid');
// Get import entry requests for Mahara comments
$entrycomments = array();
if ($iercomments = get_records_select_array('import_entry_requests', 'importid = ? AND entrytype = ?', array($importid, 'comment'))) {
foreach ($iercomments as $iercomment) {
$comment = unserialize($iercomment->entrycontent);
$comment['id'] = $iercomment->id;
$comment['decision'] = $iercomment->decision;
$comment['disabled'][PluginImport::DECISION_IGNORE] = false;
$comment['disabled'][PluginImport::DECISION_ADDNEW] = false;
$comment['disabled'][PluginImport::DECISION_APPEND] = true;
$comment['disabled'][PluginImport::DECISION_REPLACE] = true;
$entrycomments[] = $comment;
}
}
$smarty = smarty_core();
$smarty->assign_by_ref('displaydecisions', $importer->get('displaydecisions'));
$smarty->assign_by_ref('entrycomments', $entrycomments);
return $smarty->fetch('artefact:comment:import/comments.tpl');
}
}
| TheCrowsJoker/mahara | htdocs/artefact/comment/import/leap/lib.php | PHP | gpl-3.0 | 14,157 |
/////////////////////////////////////////////////////////////////////////////
// Name: wx/gtk1/dcscreen.h
// Purpose:
// Author: Robert Roebling
// Copyright: (c) 1998 Robert Roebling
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
#ifndef __GTKDCSCREENH__
#define __GTKDCSCREENH__
#include "wx/gtk1/dcclient.h"
//-----------------------------------------------------------------------------
// wxScreenDCImpl
//-----------------------------------------------------------------------------
class WXDLLIMPEXP_CORE wxScreenDCImpl : public wxPaintDCImpl
{
public:
wxScreenDCImpl(wxScreenDC *owner);
virtual ~wxScreenDCImpl();
// implementation
static GdkWindow *sm_overlayWindow;
static int sm_overlayWindowX;
static int sm_overlayWindowY;
protected:
virtual void DoGetSize(int *width, int *height) const;
private:
DECLARE_DYNAMIC_CLASS(wxScreenDCImpl)
};
#endif // __GTKDCSCREENH__
| vivier/SCSI2SD | software/scsi2sd-util/wxWidgets/include/wx/gtk1/dcscreen.h | C | gpl-3.0 | 1,017 |
#!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.urls import url_argument_spec
import base64
import hashlib
import json
import os
import tempfile
import time
import urllib
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
required: false
default: jenkins
description:
- Name of the Jenkins group on the OS.
jenkins_home:
required: false
default: /var/lib/jenkins
description:
- Home directory of the Jenkins user.
mode:
required: false
    default: '0644'
description:
- File mode applied on versioned plugins.
name:
required: true
description:
- Plugin name.
owner:
required: false
default: jenkins
description:
- Name of the Jenkins user on the OS.
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options. To
remove an option, set the value of the option to C(null).
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
description:
- Desired plugin state.
      - If C(latest) is set, the check for a new version will be performed
        every time. This is suitable to keep the plugin up-to-date.
timeout:
required: false
default: 30
description:
- Server connection timeout in secs.
updates_expiration:
required: false
default: 86400
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
updates_url:
required: false
default: https://updates.jenkins-ci.org
description:
- URL of the Update Centre.
- Used as the base URL to download the plugins and the
I(update-center.json) JSON file.
url:
required: false
default: http://localhost:8080
description:
- URL of the Jenkins server.
version:
required: false
default: null
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
with_dependencies:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether to install plugin dependencies.
notes:
  - Plugin installation should be run under root or the same user which owns
    the plugin files on the disk. The API installation, which requires only
    the Web UI credentials, is performed only if the plugin is not yet
    installed and no version is specified.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin was installed.
  - Pinning works only if the plugin is installed and the Jenkins service was
    successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: 1.15
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to use the params
#
# Define a variable and specify all default parameters you want to use across
# all jenkins_plugin calls:
#
# my_jenkins_params:
# url_username: admin
# url_password: p4ssw0rd
# url: http://localhost:8888
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: 1.4.9
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_versioned.results }}"
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_unversioned.results }}"
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
if self._csrf_enabled():
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
return csrf_data["useCrumbs"]
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.load(r)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=e.message)
return json_data
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
**kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, **kwargs)
if info['status'] != 200:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception:
e = get_exception()
self.module.fail_json(msg=msg_exception, details=e.message)
return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pined plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
def install(self):
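        # Two install paths (sketch of the logic below):
        #  * plugin not yet installed and no explicit version requested:
        #    install through the Jenkins script console (update center),
        #    optionally deploying dependencies first
        #  * otherwise: download the .hpi directly from updates_url into
        #    <jenkins_home>/plugins/<name>.jpi and rewrite the file only
        #    when its checksum differs from the currently installed plugin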
changed = False
plugin_file = (
'%s/plugins/%s.jpi' % (
self.params['jenkins_home'],
self.params['name']))
if not self.is_installed and self.params['version'] is None:
if not self.module.check_mode:
# Install the plugin (with dependencies)
install_script = (
'd = Jenkins.instance.updateCenter.getPlugin("%s")'
'.deploy(); d.get();' % self.params['name'])
if self.params['with_dependencies']:
install_script = (
'Jenkins.instance.updateCenter.getPlugin("%s")'
'.getNeededDependencies().each{it.deploy()}; %s' % (
self.params['name'], install_script))
script_data = {
'script': install_script
}
script_data.update(self.crumb)
data = urllib.urlencode(script_data)
# Send the installation request
r = self._get_url_data(
"%s/scriptText" % self.url,
msg_status="Cannot install plugin.",
msg_exception="Plugin installation has failed.",
data=data)
changed = True
else:
# Check if the plugin directory exists
if not os.path.isdir(self.params['jenkins_home']):
self.module.fail_json(
msg="Jenkins home directory doesn't exist.")
md5sum_old = None
if os.path.isfile(plugin_file):
# Make the checksum of the currently installed plugin
md5sum_old = hashlib.md5(
open(plugin_file, 'rb').read()).hexdigest()
if self.params['version'] in [None, 'latest']:
# Take latest version
plugin_url = (
"%s/latest/%s.hpi" % (
self.params['updates_url'],
self.params['name']))
else:
# Take specific version
plugin_url = (
"{0}/download/plugins/"
"{1}/{2}/{1}.hpi".format(
self.params['updates_url'],
self.params['name'],
self.params['version']))
if (
self.params['updates_expiration'] == 0 or
self.params['version'] not in [None, 'latest'] or
md5sum_old is None):
# Download the plugin file directly
r = self._download_plugin(plugin_url)
# Write downloaded plugin into file if checksums don't match
if md5sum_old is None:
# No previously installed plugin
if not self.module.check_mode:
self._write_file(plugin_file, r)
changed = True
else:
# Get data for the MD5
data = r.read()
# Make new checksum
md5sum_new = hashlib.md5(data).hexdigest()
# If the checksum is different from the currently installed
# plugin, store the new plugin
if md5sum_old != md5sum_new:
if not self.module.check_mode:
self._write_file(plugin_file, data)
changed = True
else:
# Check for update from the updates JSON file
plugin_data = self._download_updates()
try:
sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot calculate SHA1 of the old plugin.",
details=e.message)
sha1sum_old = base64.b64encode(sha1_old.digest())
# If the latest version changed, download it
if sha1sum_old != plugin_data['sha1']:
if not self.module.check_mode:
r = self._download_plugin(plugin_url)
self._write_file(plugin_file, r)
changed = True
# Change file attributes if needed
if os.path.isfile(plugin_file):
params = {
'dest': plugin_file
}
params.update(self.params)
file_args = self.module.load_file_common_arguments(params)
if not self.module.check_mode:
# Not sure how to run this in the check mode
changed = self.module.set_fs_attributes_if_different(
file_args, changed)
else:
# See the comment above
changed = True
return changed
def _download_updates(self):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_file = "%s/%s" % (updates_dir, updates_filename)
download_updates = True
# Check if we need to download new updates file
if os.path.isfile(updates_file):
# Get timestamp when the file was changed last time
ts_file = os.stat(updates_file).st_mtime
ts_now = time.time()
if ts_now - ts_file < self.params['updates_expiration']:
download_updates = False
updates_file_orig = updates_file
# Download the updates file if needed
if download_updates:
url = "%s/update-center.json" % self.params['updates_url']
# Get the data
r = self._get_url_data(
url,
msg_status="Remote updates not found.",
msg_exception="Updates download failed.")
# Write the updates file
            # tempfile.mkstemp() returns a (fd, path) tuple; keep both so the
            # file can be written through the already-open descriptor
            updates_file_fd, updates_file = tempfile.mkstemp()
            try:
                fd = os.fdopen(updates_file_fd, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open the tmp updates file %s." % updates_file,
details=str(e))
fd.write(r.read())
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot close the tmp updates file %s." % updates_file,
detail=str(e))
# Open the updates file
try:
f = open(updates_file)
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open temporal updates file.",
details=str(e))
i = 0
for line in f:
            # Read only the second line: update-center.json is wrapped in a
            # JSONP-style callback, so the JSON document itself is on line 2
if i == 1:
try:
data = json.loads(line)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot load JSON data from the tmp updates file.",
details=e.message)
break
i += 1
# Move the updates file to the right place if we could read it
if download_updates:
# Make sure the destination directory exists
if not os.path.isdir(updates_dir):
try:
os.makedirs(updates_dir, int('0700', 8))
except OSError:
e = get_exception()
self.module.fail_json(
msg="Cannot create temporal directory.",
details=e.message)
self.module.atomic_move(updates_file, updates_file_orig)
# Check if we have the plugin data available
if 'plugins' not in data or self.params['name'] not in data['plugins']:
self.module.fail_json(
msg="Cannot find plugin data in the updates file.")
return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
def _write_file(self, f, data):
# Store the plugin into a temp file and then move it
        # tempfile.mkstemp() returns a (fd, path) tuple; use the descriptor
        # for writing and keep the path for the final atomic_move()
        tmp_f_fd, tmp_f = tempfile.mkstemp()
        try:
            fd = os.fdopen(tmp_f_fd, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
                msg='Cannot open the temporary plugin file %s.' % tmp_f,
details=str(e))
if isinstance(data, str):
d = data
else:
d = data.read()
fd.write(d)
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
                msg='Cannot close the temporary plugin file %s.' % tmp_f,
details=str(e))
# Move the file onto the right place
self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
def pin(self):
return self._pinning('pin')
def unpin(self):
return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
return self._enabling('enable')
def disable(self):
return self._enabling('disable')
def _enabling(self, action):
changed = False
        # Check if the plugin is enabled/disabled
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
data = urllib.urlencode(self.crumb)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
data=data)
def main():
# Module arguments
argument_spec = url_argument_spec()
argument_spec.update(
group=dict(default='jenkins'),
jenkins_home=dict(default='/var/lib/jenkins'),
mode=dict(default='0644', type='raw'),
name=dict(required=True),
owner=dict(default='jenkins'),
params=dict(type='dict'),
state=dict(
choices=[
'present',
'absent',
'pinned',
'unpinned',
'enabled',
'disabled',
'latest'],
default='present'),
timeout=dict(default=30, type="int"),
updates_expiration=dict(default=86400, type="int"),
updates_url=dict(default='https://updates.jenkins-ci.org'),
url=dict(default='http://localhost:8080'),
url_password=dict(no_log=True),
version=dict(),
with_dependencies=dict(default=True, type='bool'),
)
# Module settings
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
# Force basic authentication
module.params['force_basic_auth'] = True
# Convert timeout to float
try:
module.params['timeout'] = float(module.params['timeout'])
except ValueError:
e = get_exception()
module.fail_json(
msg='Cannot convert %s to float.' % module.params['timeout'],
details=str(e))
# Set version to latest if state is latest
if module.params['state'] == 'latest':
module.params['state'] = 'present'
module.params['version'] = 'latest'
# Create some shortcuts
name = module.params['name']
state = module.params['state']
# Initial change state of the task
changed = False
# Instantiate the JenkinsPlugin object
jp = JenkinsPlugin(module)
# Perform action depending on the requested state
if state == 'present':
changed = jp.install()
elif state == 'absent':
changed = jp.uninstall()
elif state == 'pinned':
changed = jp.pin()
elif state == 'unpinned':
changed = jp.unpin()
elif state == 'enabled':
changed = jp.enable()
elif state == 'disabled':
changed = jp.disable()
# Print status of the change
module.exit_json(changed=changed, plugin=name, state=state)
if __name__ == '__main__':
main()
| CenturylinkTechnology/ansible-modules-extras | web_infrastructure/jenkins_plugin.py | Python | gpl-3.0 | 25,547 |
// ========================================================================
// $Id: StringBufferWriter.java,v 1.4 2004/05/09 20:33:04 gregwilkins Exp $
// Copyright 2001-2004 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ========================================================================
package net.lightbody.bmp.proxy.jetty.util;
import java.io.IOException;
import java.io.Writer;
/* ------------------------------------------------------------ */
/** A Writer to a StringBuffer.
*
* @version $Id: StringBufferWriter.java,v 1.4 2004/05/09 20:33:04 gregwilkins Exp $
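 * Example (illustrative):
 * <pre>
 *   StringBufferWriter writer = new StringBufferWriter();
 *   writer.write("some text");
 *   String contents = writer.getStringBuffer().toString();
 * </pre>
 *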
* @author Greg Wilkins (gregw)
*/
public class StringBufferWriter extends Writer
{
/* ------------------------------------------------------------ */
private StringBuffer _buffer;
/* ------------------------------------------------------------ */
/** Constructor.
*/
public StringBufferWriter()
{
_buffer=new StringBuffer();
}
/* ------------------------------------------------------------ */
/** Constructor.
* @param buffer
*/
public StringBufferWriter(StringBuffer buffer)
{
_buffer=buffer;
}
/* ------------------------------------------------------------ */
public void setStringBuffer(StringBuffer buffer)
{
_buffer=buffer;
}
/* ------------------------------------------------------------ */
public StringBuffer getStringBuffer()
{
return _buffer;
}
/* ------------------------------------------------------------ */
public void write(char c)
throws IOException
{
_buffer.append(c);
}
/* ------------------------------------------------------------ */
public void write(char[] ca)
throws IOException
{
_buffer.append(ca);
}
/* ------------------------------------------------------------ */
public void write(char[] ca,int offset, int length)
throws IOException
{
_buffer.append(ca,offset,length);
}
/* ------------------------------------------------------------ */
public void write(String s)
throws IOException
{
_buffer.append(s);
}
/* ------------------------------------------------------------ */
public void write(String s,int offset, int length)
throws IOException
{
for (int i=0;i<length;i++)
_buffer.append(s.charAt(offset+i));
}
/* ------------------------------------------------------------ */
public void flush()
{}
/* ------------------------------------------------------------ */
public void reset()
{
_buffer.setLength(0);
}
/* ------------------------------------------------------------ */
public void close()
{}
}
| nagyistoce/Wilma | browsermob-proxy/src/main/java/net/lightbody/bmp/proxy/jetty/util/StringBufferWriter.java | Java | gpl-3.0 | 3,434 |
<?php
function itsec_security_check_register_sync_verbs( $api ) {
$api->register( 'itsec-do-security-check', 'Ithemes_Sync_Verb_ITSEC_Do_Security_Check', dirname( __FILE__ ) . '/sync-verbs/itsec-do-security-check.php' );
$api->register( 'itsec-get-security-check-feedback-response', 'Ithemes_Sync_Verb_ITSEC_Get_Security_Check_Feedback_Response', dirname( __FILE__ ) . '/sync-verbs/itsec-get-security-check-feedback-response.php' );
$api->register( 'itsec-get-security-check-modules', 'Ithemes_Sync_Verb_ITSEC_Get_Security_Check_Modules', dirname( __FILE__ ) . '/sync-verbs/itsec-get-security-check-modules.php' );
}
add_action( 'ithemes_sync_register_verbs', 'itsec_security_check_register_sync_verbs' );
| IDS-UK/genderhub | wp-content/plugins/better-wp-security/core/modules/security-check/active.php | PHP | gpl-3.0 | 710 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
<title>CSV Parser: Class Members</title>
<link href="doxygen.css" rel="stylesheet" type="text/css">
<link href="tabs.css" rel="stylesheet" type="text/css">
</head><body>
<!-- Generated by Doxygen 1.5.5 -->
<div class="navigation" id="top">
<div class="tabs">
<ul>
<li><a href="main.html"><span>Main Page</span></a></li>
<li><a href="pages.html"><span>Related Pages</span></a></li>
<li><a href="annotated.html"><span>Classes</span></a></li>
<li class="current"><a href="files.html"><span>Files</span></a></li>
</ul>
</div>
<div class="tabs">
<ul>
<li><a href="files.html"><span>File List</span></a></li>
<li class="current"><a href="globals.html"><span>File Members</span></a></li>
</ul>
</div>
<div class="tabs">
<ul>
<li class="current"><a href="globals.html"><span>All</span></a></li>
<li><a href="globals_func.html"><span>Functions</span></a></li>
<li><a href="globals_type.html"><span>Typedefs</span></a></li>
<li><a href="globals_enum.html"><span>Enumerations</span></a></li>
<li><a href="globals_eval.html"><span>Enumerator</span></a></li>
<li><a href="globals_defs.html"><span>Defines</span></a></li>
</ul>
</div>
</div>
<div class="contents">
Here is a list of all file members with links to the files they belong to:
<p>
<ul>
<li>CSV_PARSER_FREE_BUFFER_PTR
: <a class="el" href="csv__parser_8hpp.html#c3df174007612623c6bc8b57c49c2492">csv_parser.hpp</a>
<li>CSV_PARSER_FREE_FILE_PTR
: <a class="el" href="csv__parser_8hpp.html#aeaabc8afdaa9e7a324e9f199f853d2e">csv_parser.hpp</a>
<li>csv_row
: <a class="el" href="csv__parser_8hpp.html#9a12766078db4f683d2820079b53d14b">csv_parser.hpp</a>
<li>csv_row_ptr
: <a class="el" href="csv__parser_8hpp.html#690b24956c2f5beca69bef745f3a231b">csv_parser.hpp</a>
<li>ENCLOSURE_NONE
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e30831364230859bec9ff10e286470e26d7bf6b2d2">csv_parser.hpp</a>
<li>ENCLOSURE_OPTIONAL
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e308313642ae3ac09bf01448b744979bad975908e7">csv_parser.hpp</a>
<li>ENCLOSURE_REQUIRED
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e30831364249b17ea3f1e08d2485dca996cbfcf329">csv_parser.hpp</a>
<li>ENCLOSURE_TYPE_BEGIN
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e30831364223e6abb88dca98ddc9e0aad2dbc26ec1">csv_parser.hpp</a>
<li>ENCLOSURE_TYPE_END
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e308313642707baad6f8991c4a3c0da24031902fd7">csv_parser.hpp</a>
<li>enclosure_type_t
: <a class="el" href="csv__parser_8hpp.html#9002d1d1a53c112bba6de3e308313642">csv_parser.hpp</a>
<li>LIBCSV_PARSER_MAJOR_VERSION
: <a class="el" href="csv__parser_8hpp.html#54f2c726ba9342cd096c3c78afac9be3">csv_parser.hpp</a>
<li>LIBCSV_PARSER_MINOR_VERSION
: <a class="el" href="csv__parser_8hpp.html#e3e205d7d55f8e82acc3e0131259e04f">csv_parser.hpp</a>
<li>LIBCSV_PARSER_PATCH_VERSION
: <a class="el" href="csv__parser_8hpp.html#118eec12367de7d98ad77c3da6a27aaf">csv_parser.hpp</a>
<li>LIBCSV_PARSER_VERSION_NUMBER
: <a class="el" href="csv__parser_8hpp.html#82a48f61bbdcaa9e5db4b930077263bc">csv_parser.hpp</a>
<li>main()
: <a class="el" href="driver_8cpp.html#3c04138a5bfe5d72780bb7e82a18e627">driver.cpp</a>
</ul>
</div>
<hr size="1"><address style="text-align: right;"><small>Generated on Sun Jun 28 21:19:30 2009 for CSV Parser by
<a href="http://www.doxygen.org/index.html">
<img src="doxygen.png" alt="doxygen" align="middle" border="0"></a> 1.5.5 </small></address>
</body>
</html>
| headupinclouds/csv-parser-cplusplus | doc/html/globals.html | HTML | gpl-3.0 | 3,759 |
<!doctype html>
<html>
<head>
<title>spinbutton</title>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>
<link rel="stylesheet" href="/resources/testharness.css">
<link rel="stylesheet" href="/wai-aria/scripts/manual.css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/wai-aria/scripts/ATTAcomm.js"></script>
<script>
setup({explicit_timeout: true, explicit_done: true });
var theTest = new ATTAcomm(
{
"steps" : [
{
"element" : "test",
"test" : {
"ATK" : [
[
"property",
"role",
"is",
"ROLE_SPIN_BUTTON"
],
[
"property",
"interfaces",
"contains",
"Value"
]
],
"AXAPI" : [
[
"property",
"AXRole",
"is",
"AXIncrementor"
],
[
"property",
"AXSubrole",
"is",
"<nil>"
],
[
"property",
"AXRoleDescription",
"is",
"stepper"
]
],
"IAccessible2" : [
[
"property",
"interfaces",
"contains",
"IAccessibleValue"
]
],
"MSAA" : [
[
"property",
"role",
"is",
"ROLE_SYSTEM_SPINBUTTON"
]
],
"UIA" : [
[
"property",
"ControlType",
"is",
"Spinner"
],
[
"property",
"Control Pattern",
"is",
"RangeValue"
]
]
},
"title" : "step 1",
"type" : "test"
}
],
"title" : "spinbutton"
}
) ;
</script>
</head>
<body>
<p>This test examines the ARIA properties for spinbutton.</p>
<div role='spinbutton' id='test'>10</div>
<div id="manualMode"></div>
<div id="log"></div>
<div id="ATTAmessages"></div>
</body>
</html>
| anthgur/servo | tests/wpt/web-platform-tests/core-aam/spinbutton-manual.html | HTML | mpl-2.0 | 2,558 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from bedrock.base.urlresolvers import reverse
from mock import patch, Mock
from nose.tools import eq_
from pathlib import Path
from pyquery import PyQuery as pq
from rna.models import Release
from bedrock.firefox.firefox_details import FirefoxDesktop
from bedrock.mozorg.tests import TestCase
from bedrock.releasenotes import views
from bedrock.thunderbird.details import ThunderbirdDesktop
DATA_PATH = str(Path(__file__).parent / 'data')
firefox_desktop = FirefoxDesktop(json_dir=DATA_PATH)
thunderbird_desktop = ThunderbirdDesktop(json_dir=DATA_PATH)
class TestRNAViews(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get('/')
self.render_patch = patch('bedrock.releasenotes.views.l10n_utils.render')
self.mock_render = self.render_patch.start()
self.mock_render.return_value.has_header.return_value = False
def tearDown(self):
self.render_patch.stop()
@property
def last_ctx(self):
"""
Convenient way to access the context of the last rendered
response.
"""
return self.mock_render.call_args[0][2]
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404(self, Q, get_object_or_404):
eq_(views.get_release_or_404('version', 'product'),
get_object_or_404.return_value)
get_object_or_404.assert_called_with(
Release, Q.return_value, version='version')
Q.assert_called_with(product='product')
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404_esr(self, Q, get_object_or_404):
eq_(views.get_release_or_404('24.5.0', 'Firefox'),
get_object_or_404.return_value)
Q.assert_any_call(product='Firefox')
Q.assert_any_call(product='Firefox Extended Support Release')
Q.__or__.assert_called()
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.release_notes_template')
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.equivalent_release_url')
def test_release_notes(self, mock_equiv_rel_url, get_release_or_404,
mock_release_notes_template):
"""
Should use release returned from get_release_or_404 with the
correct params and pass the correct context variables and
template to l10n_utils.render.
"""
mock_release = get_release_or_404.return_value
mock_release.major_version.return_value = '34'
mock_release.notes.return_value = ([Release(id=1), Release(id=2)],
[Release(id=3), Release(id=4)])
views.release_notes(self.request, '27.0')
get_release_or_404.assert_called_with('27.0', 'Firefox')
mock_release.notes.assert_called_with(public_only=True)
eq_(self.last_ctx['version'], '27.0')
eq_(self.last_ctx['release'], mock_release)
eq_(self.last_ctx['new_features'], [Release(id=1), Release(id=2)])
eq_(self.last_ctx['known_issues'], [Release(id=3), Release(id=4)])
eq_(self.mock_render.call_args[0][1],
mock_release_notes_template.return_value)
mock_equiv_rel_url.assert_called_with(mock_release)
mock_release_notes_template.assert_called_with(
mock_release.channel, 'Firefox', 34)
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_release_notes_beta_redirect(self, releasenotes_url,
get_release_or_404):
"""
Should redirect to url for beta release
"""
get_release_or_404.side_effect = [Http404, 'mock release']
releasenotes_url.return_value = '/firefox/27.0beta/releasenotes/'
response = views.release_notes(self.request, '27.0')
eq_(response.status_code, 302)
eq_(response['location'], '/firefox/27.0beta/releasenotes/')
get_release_or_404.assert_called_with('27.0beta', 'Firefox')
releasenotes_url.assert_called_with('mock release')
@patch('bedrock.releasenotes.views.get_release_or_404')
def test_system_requirements(self, get_release_or_404):
"""
Should use release returned from get_release_or_404, with a
default channel of Release and default product of Firefox,
and pass the version to l10n_utils.render
"""
views.system_requirements(self.request, '27.0.1')
get_release_or_404.assert_called_with('27.0.1', 'Firefox')
eq_(self.last_ctx['release'], get_release_or_404.return_value)
eq_(self.last_ctx['version'], '27.0.1')
eq_(self.mock_render.call_args[0][1],
'firefox/releases/system_requirements.html')
def test_release_notes_template(self):
"""
Should return correct template name based on channel
and product
"""
eq_(views.release_notes_template('Nightly', 'Firefox'),
'firefox/releases/nightly-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox'),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 35),
'firefox/releases/dev-browser-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 34),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Beta', 'Firefox'),
'firefox/releases/beta-notes.html')
eq_(views.release_notes_template('Release', 'Firefox'),
'firefox/releases/release-notes.html')
eq_(views.release_notes_template('ESR', 'Firefox'),
'firefox/releases/esr-notes.html')
eq_(views.release_notes_template('Release', 'Thunderbird'),
'thunderbird/releases/release-notes.html')
eq_(views.release_notes_template('Beta', 'Thunderbird'),
'thunderbird/releases/beta-notes.html')
eq_(views.release_notes_template('', ''),
'firefox/releases/release-notes.html')
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.get_object_or_404')
def test_non_public_release(self, get_object_or_404):
"""
Should raise 404 if not release.is_public and not settings.DEV
"""
get_object_or_404.return_value = Release(is_public=False)
with self.assertRaises(Http404):
views.get_release_or_404('42', 'Firefox')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_no_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return None without calling releasenotes_url
"""
release = Mock()
release.equivalent_android_release.return_value = None
release.equivalent_desktop_release.return_value = None
eq_(views.equivalent_release_url(release), None)
eq_(mock_releasenotes_url.called, 0)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_android_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent android release
"""
release = Mock()
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_android_release.return_value)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_desktop_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent desktop release
"""
release = Mock()
release.equivalent_android_release.return_value = None
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_desktop_release.return_value)
@patch('bedrock.releasenotes.views.android_builds')
def test_get_download_url_android(self, mock_android_builds):
"""
        Should return the download link for the release.channel from
android_builds
"""
mock_android_builds.return_value = [{'download_link': '/download'}]
release = Mock(product='Firefox for Android')
link = views.get_download_url(release)
eq_(link, '/download')
mock_android_builds.assert_called_with(release.channel)
def test_get_download_url_thunderbird(self):
release = Mock(product='Thunderbird')
link = views.get_download_url(release)
eq_(link, 'https://www.mozilla.org/thunderbird/')
class TestReleaseNotesIndex(TestCase):
@patch('bedrock.releasenotes.views.l10n_utils.render')
@patch('bedrock.releasenotes.views.firefox_desktop', firefox_desktop)
def test_relnotes_index_firefox(self, render_mock):
with self.activate('en-US'):
self.client.get(reverse('firefox.releases.index'))
releases = render_mock.call_args[0][2]['releases']
eq_(len(releases), len(firefox_desktop.firefox_history_major_releases))
eq_(releases[0][0], 36.0)
eq_(releases[0][1]['major'], '36.0')
eq_(releases[0][1]['minor'], [])
eq_(releases[3][0], 33.1)
eq_(releases[3][1]['major'], '33.1')
eq_(releases[3][1]['minor'], ['33.1.1'])
eq_(releases[4][0], 33.0)
eq_(releases[4][1]['major'], '33.0')
eq_(releases[4][1]['minor'], ['33.0.1', '33.0.2', '33.0.3'])
eq_(releases[6][0], 31.0)
eq_(releases[6][1]['major'], '31.0')
eq_(releases[6][1]['minor'],
['31.1.0', '31.1.1', '31.2.0', '31.3.0', '31.4.0', '31.5.0'])
@patch('bedrock.releasenotes.views.thunderbird_desktop', thunderbird_desktop)
def test_relnotes_index_thunderbird(self):
with self.activate('en-US'):
response = self.client.get(reverse('thunderbird.releases.index'))
doc = pq(response.content)
eq_(len(doc('a[href="0.1.html"]')), 1)
eq_(len(doc('a[href="1.5.0.2.html"]')), 1)
eq_(len(doc('a[href="../2.0.0.0/releasenotes/"]')), 1)
eq_(len(doc('a[href="../3.0.1/releasenotes/"]')), 1)
class TestNotesRedirects(TestCase):
def _test(self, url_from, url_to):
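        """
        Should 302-redirect url_from to url_to (en-US locale).
        """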
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/notes/',
'/firefox/22.0/releasenotes/')
self._test('/firefox/latest/releasenotes/',
'/firefox/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/notes/',
'/firefox/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/notes/',
'/firefox/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/notes/',
'/firefox/24.2.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='22.0'))
def test_android_release_version(self):
self._test('/firefox/android/notes/',
'/firefox/android/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='23.0b1'))
def test_android_beta_version(self):
self._test('/firefox/android/beta/notes/',
'/firefox/android/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='24.0a2'))
def test_android_aurora_version(self):
self._test('/firefox/android/aurora/notes/',
'/firefox/android/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/latest/releasenotes/',
'/thunderbird/22.0/releasenotes/')
class TestSysreqRedirect(TestCase):
def _test(self, url_from, url_to):
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/system-requirements/',
'/firefox/22.0/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/system-requirements/',
'/firefox/23.0beta/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/system-requirements/',
'/firefox/24.0a2/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/system-requirements/',
'/firefox/24.0/system-requirements/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/latest/system-requirements/',
'/thunderbird/22.0/system-requirements/')
| andreadelrio/bedrock | bedrock/releasenotes/tests/test_base.py | Python | mpl-2.0 | 14,825 |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
gTestfile = 'package-001.js';
/**
File Name: package-001.js
Description:
@author [email protected]
@version 1.00
*/
var SECTION = "LiveConnect Packages";
var VERSION = "1_3";
var TITLE = "LiveConnect Packages";
startTest();
writeHeaderToLog( SECTION + " "+ TITLE);
// All packages are of the type "object"
var E_TYPE = "object";
// The JavaScript [[Class]] property for all Packages is "[JavaPackage <packagename>]"
var E_JSCLASS = "[JavaPackage ";
// Create arrays of actual results (java_array) and
// expected results (test_array).
var java_array = new Array();
var test_array = new Array();
var i = 0;
java_array[i] = new JavaValue( java );
test_array[i] = new TestValue( "java" );
i++;
java_array[i] = new JavaValue( java.awt );
test_array[i] = new TestValue( "java.awt" );
i++;
java_array[i] = new JavaValue( java.beans );
test_array[i] = new TestValue( "java.beans" );
i++;
java_array[i] = new JavaValue( java.io );
test_array[i] = new TestValue( "java.io" );
i++;
java_array[i] = new JavaValue( java.lang );
test_array[i] = new TestValue( "java.lang" );
i++;
java_array[i] = new JavaValue( java.math );
test_array[i] = new TestValue( "java.math" );
i++;
java_array[i] = new JavaValue( java.net );
test_array[i] = new TestValue( "java.net" );
i++;
java_array[i] = new JavaValue( java.rmi );
test_array[i] = new TestValue( "java.rmi" );
i++;
java_array[i] = new JavaValue( java.text );
test_array[i] = new TestValue( "java.text" );
i++;
java_array[i] = new JavaValue( java.util );
test_array[i] = new TestValue( "java.util" );
i++;
java_array[i] = new JavaValue( Packages.javax );
test_array[i] = new TestValue( "Packages.javax" );
i++;
java_array[i] = new JavaValue( Packages.javax.javascript );
test_array[i] = new TestValue( "Packages.javax.javascript" );
i++;
java_array[i] = new JavaValue( Packages.javax.javascript.examples );
test_array[i] = new TestValue( "Packages.javax.javascript.examples" );
i++;
for ( i = 0; i < java_array.length; i++ ) {
CompareValues( java_array[i], test_array[i] );
}
test();
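// For each package, two TestCases are recorded: typeof must equal E_TYPE and
// the JavaScript [[Class]] string must equal E_JSCLASS + packagename + "]".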
function CompareValues( javaval, testval ) {
// Check typeof, which should be E_TYPE
new TestCase( SECTION,
"typeof (" + testval.description +")",
testval.type,
javaval.type );
// Check JavaScript class, which should be E_JSCLASS + the package name
new TestCase( SECTION,
"(" + testval.description +").getJSClass()",
testval.jsclass,
javaval.jsclass );
}
function JavaValue( value ) {
this.value = value;
this.type = typeof value;
  this.jsclass = value + "";
return this;
}
function TestValue( description ) {
this.packagename = (description.substring(0, "Packages.".length) ==
"Packages.") ? description.substring("Packages.".length, description.length ) :
description;
this.description = description;
this.type = E_TYPE;
this.jsclass = E_JSCLASS + this.packagename +"]";
return this;
}
| sam/htmlunit-rhino-fork | testsrc/tests/lc2/Packages/package-001.js | JavaScript | mpl-2.0 | 3,248 |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
var gTestfile = 'regress-355339.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 355339;
var summary = 'Do not assert: sprop->setter != js_watch_set';
var actual = '';
var expect = '';
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
expect = actual = 'No Crash';
o = {};
o.watch("j", function(a,b,c) { print("*",a,b,c) });
o.unwatch("j");
o.watch("j", function(a,b,c) { print("*",a,b,c) });
reportCompare(expect, actual, summary);
exitFunc ('test');
}
| tejassaoji/RhinoCoarseTainting | testsrc/tests/js1_5/extensions/regress-355339.js | JavaScript | mpl-2.0 | 1,025 |
--
-- Kuali Coeus, a comprehensive research administration system for higher education.
--
-- Copyright 2005-2015 Kuali, Inc.
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU Affero General Public License for more details.
--
-- You should have received a copy of the GNU Affero General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
ALTER TABLE EPS_PROP_PERSON_EXT
ADD CONSTRAINT EPS_PROP_PERSON_EXT_FK1
FOREIGN KEY(CITIZENSHIP_TYPE_CODE)
REFERENCES CITIZENSHIP_TYPE_T(CITIZENSHIP_TYPE_CODE);
| UniversityOfHawaiiORS/kc | coeus-db/coeus-db-sql/src/main/resources/co/kuali/coeus/data/migration/sql/mysql/kc/bootstrap/V310_4_068__KC_FK_EPS_PROP_PERSON_EXT.sql | SQL | agpl-3.0 | 976 |
/**
* Shopware 4.0
* Copyright © 2012 shopware AG
*
* According to our dual licensing model, this program can be used either
* under the terms of the GNU Affero General Public License, version 3,
* or under a proprietary license.
*
* The texts of the GNU Affero General Public License with an additional
* permission and of our proprietary license can be found at and
* in the LICENSE file you have received along with this program.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* "Shopware" is a registered trademark of shopware AG.
* The licensing of the program under the AGPLv3 does not imply a
* trademark license. Therefore any rights, title and interest in
* our trademarks remain entirely with us.
*
* @category Shopware
* @package Article
* @subpackage Detail
* @copyright Copyright (c) 2012, shopware AG (http://www.shopware.de)
* @version $Id$
* @author shopware AG
*/
/**
* Shopware UI - Article detail page
* The sidebar component contains the definition of the sidebar layout.
 * The sidebar displays as an accordion. The different sidebar elements
 * are defined under the 'Shopware.apps.Article.view.detail.sidebar' namespace:
*/
//{namespace name=backend/article/view/main}
//{block name="backend/article/view/detail/sidebar"}
Ext.define('Shopware.apps.Article.view.detail.Sidebar', {
/**
     * Define that the article sidebar is an extension of the Ext.panel.Panel
* @string
*/
extend:'Ext.panel.Panel',
/**
     * The Ext.container.Container.layout for the panel's immediate child items.
* @object
*/
layout: {
type: 'accordion',
animate: Ext.isChrome
},
/**
* Enable only collapse animation in chrome.
* @boolean
*/
animCollapse: Ext.isChrome,
collapsible: true,
collapsed: true,
title: '{s name=accordion-title}Article-Options{/s}',
/**
* Defines the component region
*/
region: 'east',
/**
* List of short aliases for class names. Most useful for defining xtypes for widgets.
* @string
*/
alias:'widget.article-sidebar',
/**
* Set css class for this component
* @string
*/
cls: Ext.baseCSSPrefix + 'article-sidebar',
/**
     * Sets the container width to a fixed value.
* @integer
*/
width: 350,
/**
* The initComponent template method is an important initialization step for a Component.
* It is intended to be implemented by each subclass of Ext.Component to provide any needed constructor logic.
* The initComponent method of the class being created is called first,
* with each initComponent method up the hierarchy to Ext.Component being called thereafter.
* This makes it easy to implement and, if needed, override the constructor logic of the Component at any step in the hierarchy.
* The initComponent method must contain a call to callParent in order to ensure that the parent class' initComponent method is also called.
*
* @return void
*/
initComponent:function () {
var me = this,
mainWindow = me.subApp.articleWindow;
mainWindow.on('storesLoaded', me.onStoresLoaded, me);
me.callParent(arguments);
},
/**
* Creates the elements for the sidebar container.
* @return object
*/
createElements: function() {
var me = this;
return [
{
xtype: 'article-sidebar-option',
shopStore: me.shopStore,
article: me.article,
animCollapse: Ext.isChrome,
animate: Ext.isChrome
}, {
xtype: 'article-sidebar-link',
article: me.article,
animCollapse: Ext.isChrome,
animate: Ext.isChrome
},
{
xtype: 'article-sidebar-accessory',
article: me.article,
gridStore: me.article.getAccessory(),
animCollapse: Ext.isChrome,
animate: Ext.isChrome
},
{
xtype: 'article-sidebar-similar',
article: me.article,
gridStore: me.article.getSimilar(),
animCollapse: Ext.isChrome,
animate: Ext.isChrome
}
];
},
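    /**
     * Event listener fired by the main window once the article stores are loaded.
     * Assigns the loaded article and the shop store and adds the accordion items.
     *
     * @return void
     */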
onStoresLoaded: function(article, stores) {
var me = this;
me.article = article;
me.shopStore = stores['shops'];
me.add(me.createElements());
}
});
//{/block}
| buddhaCode/shopware | templates/_default/backend/article/view/detail/sidebar.js | JavaScript | agpl-3.0 | 4,761 |
Clazz.declarePackage ("J.console");
Clazz.load (["J.api.JmolAppConsoleInterface", "$.JmolCallbackListener", "java.util.Hashtable"], "J.console.GenericConsole", ["java.lang.Boolean", "JU.PT", "J.c.CBK", "J.i18n.GT", "JS.T", "JV.Viewer"], function () {
c$ = Clazz.decorateAsClass (function () {
this.input = null;
this.output = null;
this.vwr = null;
this.labels = null;
this.menuMap = null;
this.editButton = null;
this.runButton = null;
this.historyButton = null;
this.stateButton = null;
this.clearOutButton = null;
this.clearInButton = null;
this.loadButton = null;
this.defaultMessage = null;
this.label1 = null;
this.nTab = 0;
this.incompleteCmd = null;
Clazz.instantialize (this, arguments);
}, J.console, "GenericConsole", null, [J.api.JmolAppConsoleInterface, J.api.JmolCallbackListener]);
Clazz.prepareFields (c$, function () {
this.menuMap = new java.util.Hashtable ();
});
Clazz.defineMethod (c$, "setViewer",
function (vwr) {
this.vwr = vwr;
}, "JV.Viewer");
Clazz.defineMethod (c$, "addButton",
function (b, label) {
b.addConsoleListener (this);
this.menuMap.put (label, b);
return b;
}, "J.api.JmolAbstractButton,~S");
Clazz.defineMethod (c$, "getLabel1",
function () {
return null;
});
Clazz.defineMethod (c$, "setupLabels",
function () {
this.labels.put ("help", J.i18n.GT._ ("&Help"));
this.labels.put ("search", J.i18n.GT._ ("&Search..."));
this.labels.put ("commands", J.i18n.GT._ ("&Commands"));
this.labels.put ("functions", J.i18n.GT._ ("Math &Functions"));
this.labels.put ("parameters", J.i18n.GT._ ("Set &Parameters"));
this.labels.put ("more", J.i18n.GT._ ("&More"));
this.labels.put ("Editor", J.i18n.GT._ ("Editor"));
this.labels.put ("State", J.i18n.GT._ ("State"));
this.labels.put ("Run", J.i18n.GT._ ("Run"));
this.labels.put ("Clear Output", J.i18n.GT._ ("Clear Output"));
this.labels.put ("Clear Input", J.i18n.GT._ ("Clear Input"));
this.labels.put ("History", J.i18n.GT._ ("History"));
this.labels.put ("Load", J.i18n.GT._ ("Load"));
this.labels.put ("label1", J.i18n.GT._ ("press CTRL-ENTER for new line or paste model data and press Load"));
this.labels.put ("default", J.i18n.GT._ ("Messages will appear here. Enter commands in the box below. Click the console Help menu item for on-line help, which will appear in a new browser window."));
});
Clazz.defineMethod (c$, "setLabels",
function () {
var doTranslate = J.i18n.GT.setDoTranslate (true);
this.editButton = this.setButton ("Editor");
this.stateButton = this.setButton ("State");
this.runButton = this.setButton ("Run");
this.clearOutButton = this.setButton ("Clear Output");
this.clearInButton = this.setButton ("Clear Input");
this.historyButton = this.setButton ("History");
this.loadButton = this.setButton ("Load");
this.defaultMessage = this.getLabel ("default");
this.setTitle ();
J.i18n.GT.setDoTranslate (doTranslate);
});
Clazz.defineMethod (c$, "getLabel",
function (key) {
if (this.labels == null) {
this.labels = new java.util.Hashtable ();
this.labels.put ("title", J.i18n.GT._ ("Jmol Script Console") + " " + JV.Viewer.getJmolVersion ());
this.setupLabels ();
}return this.labels.get (key);
}, "~S");
Clazz.defineMethod (c$, "displayConsole",
function () {
this.layoutWindow (null);
this.outputMsg (this.defaultMessage);
});
Clazz.defineMethod (c$, "updateLabels",
function () {
return;
});
Clazz.defineMethod (c$, "completeCommand",
function (thisCmd) {
if (thisCmd.length == 0) return null;
var strCommand = (this.nTab <= 0 || this.incompleteCmd == null ? thisCmd : this.incompleteCmd);
this.incompleteCmd = strCommand;
var splitCmd = J.console.GenericConsole.splitCommandLine (thisCmd);
if (splitCmd == null) return null;
var asCommand = splitCmd[2] == null;
var inBrace = (splitCmd[3] != null);
var notThis = splitCmd[asCommand ? 1 : 2];
var s = splitCmd[1];
if (notThis.length == 0) return null;
var token = JS.T.getTokenFromName (s.trim ().toLowerCase ());
var cmdtok = (token == null ? 0 : token.tok);
var isSelect = JS.T.tokAttr (cmdtok, 12288);
splitCmd = J.console.GenericConsole.splitCommandLine (strCommand);
var cmd = null;
if (!asCommand && (notThis.charAt (0) == '"' || notThis.charAt (0) == '\'')) {
var q = notThis.charAt (0);
notThis = JU.PT.trim (notThis, "\"\'");
var stub = JU.PT.trim (splitCmd[2], "\"\'");
cmd = this.nextFileName (stub, this.nTab);
if (cmd != null) cmd = splitCmd[0] + splitCmd[1] + q + cmd + q;
} else {
var map = null;
if (!asCommand) {
notThis = s;
if (inBrace || splitCmd[2].startsWith ("$") || JS.T.isIDcmd (cmdtok) || isSelect) {
map = new java.util.Hashtable ();
this.vwr.getObjectMap (map, inBrace || isSelect ? '{' : splitCmd[2].startsWith ("$") ? '$' : '0');
}}cmd = JS.T.completeCommand (map, s.equalsIgnoreCase ("set "), asCommand, asCommand ? splitCmd[1] : splitCmd[2], this.nTab);
cmd = splitCmd[0] + (cmd == null ? notThis : asCommand ? cmd : splitCmd[1] + cmd);
}return (cmd == null || cmd.equals (strCommand) ? null : cmd);
}, "~S");
Clazz.defineMethod (c$, "doAction",
function (source) {
if (source === this.runButton) {
this.execute (null);
} else if (source === this.editButton) {
this.vwr.getProperty ("DATA_API", "scriptEditor", null);
} else if (source === this.historyButton) {
this.clearContent (this.vwr.getSetHistory (2147483647));
} else if (source === this.stateButton) {
this.clearContent (this.vwr.getStateInfo ());
} else if (source === this.clearInButton) {
this.input.setText ("");
return;
}if (source === this.clearOutButton) {
this.output.setText ("");
return;
}if (source === this.loadButton) {
this.vwr.loadInlineAppend (this.input.getText (), false);
return;
}if (this.isMenuItem (source)) {
this.execute ((source).getName ());
return;
}}, "~O");
Clazz.defineMethod (c$, "execute",
function (strCommand) {
var cmd = (strCommand == null ? this.input.getText () : strCommand);
if (strCommand == null) this.input.setText (null);
var strErrorMessage = this.vwr.script (cmd + "\u0001## EDITOR_IGNORE ##");
if (strErrorMessage != null && !strErrorMessage.equals ("pending")) this.outputMsg (strErrorMessage);
}, "~S");
Clazz.defineMethod (c$, "destroyConsole",
function () {
if (this.vwr.isApplet ()) this.vwr.getProperty ("DATA_API", "getAppConsole", Boolean.FALSE);
});
c$.setAbstractButtonLabels = Clazz.defineMethod (c$, "setAbstractButtonLabels",
function (menuMap, labels) {
for (var key, $key = menuMap.keySet ().iterator (); $key.hasNext () && ((key = $key.next ()) || true);) {
var m = menuMap.get (key);
var label = labels.get (key);
if (key.indexOf ("Tip") == key.length - 3) {
m.setToolTipText (labels.get (key));
} else {
var mnemonic = J.console.GenericConsole.getMnemonic (label);
if (mnemonic != ' ') m.setMnemonic (mnemonic);
label = J.console.GenericConsole.getLabelWithoutMnemonic (label);
m.setText (label);
}}
}, "java.util.Map,java.util.Map");
c$.getLabelWithoutMnemonic = Clazz.defineMethod (c$, "getLabelWithoutMnemonic",
function (label) {
if (label == null) {
return null;
}var index = label.indexOf ('&');
if (index == -1) {
return label;
}return label.substring (0, index) + ((index < label.length - 1) ? label.substring (index + 1) : "");
}, "~S");
c$.getMnemonic = Clazz.defineMethod (c$, "getMnemonic",
function (label) {
if (label == null) {
return ' ';
}var index = label.indexOf ('&');
if ((index == -1) || (index == label.length - 1)) {
return ' ';
}return label.charAt (index + 1);
}, "~S");
c$.map = Clazz.defineMethod (c$, "map",
function (button, key, label, menuMap) {
var mnemonic = J.console.GenericConsole.getMnemonic (label);
if (mnemonic != ' ') (button).setMnemonic (mnemonic);
menuMap.put (key, button);
}, "~O,~S,~S,java.util.Map");
Clazz.overrideMethod (c$, "notifyEnabled",
function (type) {
switch (type) {
case J.c.CBK.ECHO:
case J.c.CBK.MEASURE:
case J.c.CBK.MESSAGE:
case J.c.CBK.PICK:
return true;
case J.c.CBK.ANIMFRAME:
case J.c.CBK.APPLETREADY:
case J.c.CBK.ATOMMOVED:
case J.c.CBK.CLICK:
case J.c.CBK.DRAGDROP:
case J.c.CBK.ERROR:
case J.c.CBK.EVAL:
case J.c.CBK.HOVER:
case J.c.CBK.LOADSTRUCT:
case J.c.CBK.MINIMIZATION:
case J.c.CBK.RESIZE:
case J.c.CBK.SCRIPT:
case J.c.CBK.SYNC:
case J.c.CBK.STRUCTUREMODIFIED:
break;
}
return false;
}, "J.c.CBK");
Clazz.overrideMethod (c$, "getText",
function () {
return this.output.getText ();
});
Clazz.overrideMethod (c$, "sendConsoleEcho",
function (strEcho) {
if (strEcho == null) {
this.updateLabels ();
this.outputMsg (null);
strEcho = this.defaultMessage;
}this.outputMsg (strEcho);
}, "~S");
Clazz.defineMethod (c$, "outputMsg",
function (message) {
if (message == null || message.length == 0) {
this.output.setText ("");
return;
}if (message.charAt (message.length - 1) != '\n') message += "\n";
this.output.append (message);
}, "~S");
Clazz.defineMethod (c$, "clearContent",
function (text) {
this.output.setText (text);
}, "~S");
Clazz.overrideMethod (c$, "sendConsoleMessage",
function (strInfo) {
if (strInfo != null && this.output.getText ().startsWith (this.defaultMessage)) this.outputMsg (null);
this.outputMsg (strInfo);
}, "~S");
Clazz.overrideMethod (c$, "notifyCallback",
function (type, data) {
var strInfo = (data == null || data[1] == null ? null : data[1].toString ());
switch (type) {
case J.c.CBK.ECHO:
this.sendConsoleEcho (strInfo);
break;
case J.c.CBK.MEASURE:
var mystatus = data[3];
if (mystatus.indexOf ("Picked") >= 0 || mystatus.indexOf ("Sequence") >= 0) this.sendConsoleMessage (strInfo);
else if (mystatus.indexOf ("Completed") >= 0) this.sendConsoleEcho (strInfo.substring (strInfo.lastIndexOf (",") + 2, strInfo.length - 1));
break;
case J.c.CBK.MESSAGE:
this.sendConsoleMessage (data == null ? null : strInfo);
break;
case J.c.CBK.PICK:
this.sendConsoleMessage (strInfo);
break;
}
}, "J.c.CBK,~A");
Clazz.overrideMethod (c$, "setCallbackFunction",
function (callbackType, callbackFunction) {
}, "~S,~S");
Clazz.overrideMethod (c$, "zap",
function () {
});
Clazz.defineMethod (c$, "recallCommand",
function (up) {
var cmd = this.vwr.getSetHistory (up ? -1 : 1);
if (cmd == null) return;
this.input.setText (cmd);
}, "~B");
Clazz.defineMethod (c$, "processKey",
function (kcode, kid, isControlDown) {
var mode = 0;
switch (kid) {
case 401:
switch (kcode) {
case 9:
var s = this.input.getText ();
if (s.endsWith ("\n") || s.endsWith ("\t")) return 0;
mode = 1;
if (this.input.getCaretPosition () == s.length) {
var cmd = this.completeCommand (s);
if (cmd != null) this.input.setText (cmd.$replace ('\t', ' '));
this.nTab++;
return mode;
}break;
case 27:
mode = 1;
this.input.setText ("");
break;
}
this.nTab = 0;
if (kcode == 10 && !isControlDown) {
this.execute (null);
return mode;
}if (kcode == 38 || kcode == 40) {
this.recallCommand (kcode == 38);
return mode;
}break;
case 402:
if (kcode == 10 && !isControlDown) return mode;
break;
}
return mode | 2;
}, "~N,~N,~B");
c$.splitCommandLine = Clazz.defineMethod (c$, "splitCommandLine",
function (cmd) {
var sout = new Array (4);
var isEscaped1 = false;
var isEscaped2 = false;
var isEscaped = false;
if (cmd.length == 0) return null;
var ptQ = -1;
var ptCmd = 0;
var ptToken = 0;
var nBrace = 0;
var ch;
for (var i = 0; i < cmd.length; i++) {
switch (ch = cmd.charAt (i)) {
case '"':
if (!isEscaped && !isEscaped1) {
isEscaped2 = !isEscaped2;
if (isEscaped2) ptQ = ptToken = i;
}break;
case '\'':
if (!isEscaped && !isEscaped2) {
isEscaped1 = !isEscaped1;
if (isEscaped1) ptQ = ptToken = i;
}break;
case '\\':
isEscaped = !isEscaped;
continue;
case ' ':
if (!isEscaped && !isEscaped1 && !isEscaped2) {
ptToken = i + 1;
ptQ = -1;
}break;
case ';':
if (!isEscaped1 && !isEscaped2) {
ptCmd = ptToken = i + 1;
ptQ = -1;
nBrace = 0;
}break;
case '{':
case '}':
if (!isEscaped1 && !isEscaped2) {
nBrace += (ch == '{' ? 1 : -1);
ptToken = i + 1;
ptQ = -1;
}break;
default:
if (!isEscaped1 && !isEscaped2) ptQ = -1;
}
isEscaped = false;
}
sout[0] = cmd.substring (0, ptCmd);
sout[1] = (ptToken == ptCmd ? cmd.substring (ptCmd) : cmd.substring (ptCmd, (ptToken > ptQ ? ptToken : ptQ)));
sout[2] = (ptToken == ptCmd ? null : cmd.substring (ptToken));
sout[3] = (nBrace > 0 ? "{" : null);
return sout;
}, "~S");
});
| xmao/cbioportal | portal/src/main/webapp/js/lib/jsmol/j2s/J/console/GenericConsole.js | JavaScript | agpl-3.0 | 12,388 |
--
-- Kuali Coeus, a comprehensive research administration system for higher education.
--
-- Copyright 2005-2015 Kuali, Inc.
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU Affero General Public License for more details.
--
-- You should have received a copy of the GNU Affero General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
DELIMITER /
CREATE TABLE RATE_CLASS_BASE_EXCLUSION (
RATE_CLASS_BASE_EXCL_ID DECIMAL(12,0) NOT NULL,
RATE_CLASS_CODE VARCHAR(3) NOT NULL,
RATE_TYPE_CODE VARCHAR(3),
RATE_CLASS_CODE_EXCL VARCHAR(3) NOT NULL,
RATE_TYPE_CODE_EXCL VARCHAR(3),
UPDATE_TIMESTAMP DATE NOT NULL,
UPDATE_USER VARCHAR(60) NOT NULL,
VER_NBR DECIMAL(8,0) DEFAULT 1 NOT NULL,
OBJ_ID VARCHAR(36) NOT NULL) ENGINE InnoDB CHARACTER SET utf8 COLLATE utf8_bin
/
ALTER TABLE RATE_CLASS_BASE_EXCLUSION
ADD CONSTRAINT PK_RATE_CLASS_BASE_EXCLUSION
PRIMARY KEY (RATE_CLASS_BASE_EXCL_ID)
/
DELIMITER ;
| sanjupolus/KC6.oLatest | coeus-db/coeus-db-sql/src/main/resources/co/kuali/coeus/data/migration/sql/mysql/kc/bootstrap/V400_202__KC_TBL_RATE_CLASS_BASE_EXCLUSION.sql | SQL | agpl-3.0 | 1,412 |
#!/bin/sh
#
## KAAPI library source
## -----------------------------------------
##
## by Thierry Gautier
## Apache Project, IMAG, Grenoble, France
##
## use:
# m4_generator <NB_PARAM>
#
makeheader() {
cat <<ENDDDD
// **********************************************************
// WARNING! This file has been automatically generated by M4
// *********************************************************
//#define RUN_TASK
ENDDDD
}
#############################################################
## loop for file bodies
#############################################################
echo "SOFA_DIR="$SOFA_DIR
NUMBER=0
COUNTER=$1
MAX_NUMBER_PARAMS=$1
## `expr $1 + 1`
cat $SOFA_DIR/framework/copyright.txt > $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks.h
cat $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks_prologue.m4 >> $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks.h
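## Append one m4-generated block per parameter count (1..MAX_NUMBER_PARAMS);
## the first iteration (NUMBER=0) skips the m4 call.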
while [ $COUNTER -ge 0 ]
do
if [ $NUMBER -gt 0 ]
then
m4 -DKAAPI_NUMBER_PARAMS=$NUMBER -DKAAPI_MAX_NUMBER_PARAMS=$MAX_NUMBER_PARAMS $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks_macros.m4 $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks_gen.m4 >> $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks.h
fi
echo "Generating file: $file step $NUMBER :-)"
NUMBER=`expr $NUMBER + 1`
COUNTER=`expr $COUNTER - 1`
done
cat $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks_epilogue.m4 >> $SOFA_DIR/framework/sofa/core/objectmodel/BaseObjectTasks.h
| Anatoscope/sofa | scripts/base-object-tasks-generator.sh | Shell | lgpl-2.1 | 1,527 |
/***************************************************************************
* Copyright (c) 2021 Werner Mayer <wmayer[at]users.sourceforge.net> *
* *
* This file is part of the FreeCAD CAx development system. *
* *
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the GNU Library General Public *
* License as published by the Free Software Foundation; either *
* version 2 of the License, or (at your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Library General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this library; see the file COPYING.LIB. If not, *
* write to the Free Software Foundation, Inc., 59 Temple Place, *
* Suite 330, Boston, MA 02111-1307, USA *
* *
***************************************************************************/
#ifndef PARTGUI_SHAPEFROMMESH_H
#define PARTGUI_SHAPEFROMMESH_H
#include <QDialog>
#include <memory>
namespace PartGui {
class Ui_ShapeFromMesh;
class ShapeFromMesh : public QDialog
{
Q_OBJECT
public:
ShapeFromMesh(QWidget* parent = nullptr, Qt::WindowFlags fl = Qt::WindowFlags());
~ShapeFromMesh();
virtual void accept();
private:
void perform();
std::unique_ptr<Ui_ShapeFromMesh> ui;
};
} // namespace PartGui
#endif // PARTGUI_SHAPEFROMMESH_H
| sanguinariojoe/FreeCAD | src/Mod/Part/Gui/ShapeFromMesh.h | C | lgpl-2.1 | 2,098 |
<html xmlns:mso="urn:schemas-microsoft-com:office:office" xmlns:msdt="uuid:C2F41010-65B3-11d1-A29F-00AA00C14882">
<head>
<title>Default Glossary</title>
<!--[if gte mso 9]><xml>
<mso:CustomDocumentProperties>
<mso:TemplateHidden msdt:dt="string">0</mso:TemplateHidden>
<mso:MasterPageDescription msdt:dt="string">Displays the default result item template for Glossary Items.</mso:MasterPageDescription>
<mso:ContentTypeId msdt:dt="string">0x0101002039C03B61C64EC4A04F5361F385106603</mso:ContentTypeId>
<mso:TargetControlType msdt:dt="string">;#SearchResults;#</mso:TargetControlType>
<mso:HtmlDesignAssociated msdt:dt="string">1</mso:HtmlDesignAssociated>
<mso:HtmlDesignStatusAndPreview msdt:dt="string">http://intranet.contoso.com/sites/Search/_catalogs/masterpage/Display Templates/Search/DefaultGlossary.html, Conversion successful.</mso:HtmlDesignStatusAndPreview>
<mso:HtmlDesignConversionSucceeded msdt:dt="string">True</mso:HtmlDesignConversionSucceeded>
<mso:CrawlerXSLFile msdt:dt="string"></mso:CrawlerXSLFile>
<mso:HtmlDesignPreviewUrl msdt:dt="string"></mso:HtmlDesignPreviewUrl>
<mso:ManagedPropertyMapping msdt:dt="string">'Title':'Title','Path':'Path','Description':'Description','EditorOWSUSER':'EditorOWSUSER','LastModifiedTime':'LastModifiedTime','CollapsingStatus':'CollapsingStatus','DocId':'DocId','HitHighlightedSummary':'HitHighlightedSummary','HitHighlightedProperties':'HitHighlightedProperties','FileExtension':'FileExtension','ViewsLifeTime':'ViewsLifeTime','ParentLink':'ParentLink','FileType':'FileType','IsContainer':'IsContainer','SecondaryFileExtension':'SecondaryFileExtension','DisplayAuthor':'DisplayAuthor','Product':'Product','RelatedTerm1OWSTEXT':'RelatedTerm1OWSTEXT','RelatedTerm2OWSTEXT':'RelatedTerm2OWSTEXT'</mso:ManagedPropertyMapping>
</mso:CustomDocumentProperties>
</xml><![endif]-->
</head>
<body>
<div id="DefaultGlossary">
<!--#_
if(!$isNull(ctx.CurrentItem) && !$isNull(ctx.ClientControl)){
var id = ctx.ClientControl.get_nextUniqueId();
var itemId = id + Srch.U.Ids.item;
var hoverId = id + Srch.U.Ids.hover;
var hoverUrl = "~sitecollection/_catalogs/masterpage/Display Templates/Search/Item_Default_HoverPanel.js";
$setResultItem(itemId, ctx.CurrentItem);
if(ctx.CurrentItem.IsContainer){
ctx.CurrentItem.csr_Icon = Srch.U.getFolderIconUrl();
}
ctx.currentItem_ShowHoverPanelCallback = Srch.U.getShowHoverPanelCallback(itemId, hoverId, hoverUrl);
ctx.currentItem_HideHoverPanelCallback = Srch.U.getHideHoverPanelCallback();
_#-->
<!-- Paste code below this line -->
<!-- Paste code above this line -->
<!--#_
}
_#-->
</div>
</body>
</html>
| johnseto/TrainingContent | O3658/09 Using Search Capabilities with app model/Demos/SearchUICustomizations/DefaultGlossary_Orig.html | HTML | apache-2.0 | 2,702 |
/*
Copyright 2007-2009 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote.server;
import org.junit.Test;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.Platform;
import org.openqa.selenium.StubDriver;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.internal.Killable;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.remote.SessionId;
import org.openqa.selenium.remote.server.testing.StaticTestSessions;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.FutureTask;
import java.util.logging.Logger;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* @author Kristian Rosenvold
*/
public class SessionCleanerTest {
private final static Logger log = Logger.getLogger(SessionCleanerTest.class.getName());
@Test
public void testCleanup() throws Exception {
DriverSessions defaultDriverSessions = getDriverSessions();
defaultDriverSessions.newSession(DesiredCapabilities.firefox());
defaultDriverSessions.newSession(DesiredCapabilities.firefox());
assertEquals(2, defaultDriverSessions.getSessions().size());
SessionCleaner sessionCleaner = new SessionCleaner(defaultDriverSessions, log, 10, 10);
waitForAllSessionsToExpire();
sessionCleaner.checkExpiry();
assertEquals(0, defaultDriverSessions.getSessions().size());
}
@Test
public void testCleanupWithTimedOutKillableDriver() throws Exception {
Capabilities capabilities = new DesiredCapabilities("foo", "1", Platform.ANY);
DriverSessions testSessions = new StaticTestSessions(capabilities, new KillableDriver());
final Session session = testSessions.get(testSessions.newSession(capabilities));
final CountDownLatch started = new CountDownLatch(1);
final CountDownLatch testDone = new CountDownLatch(1);
Runnable runnable = getRunnableThatMakesSessionBusy(session, started, testDone);
new Thread( runnable).start();
started.await();
KillableDriver killableDriver = (KillableDriver) session.getDriver();
assertTrue(session.isInUse());
SessionCleaner sessionCleaner = new SessionCleaner(testSessions, log, 10, 10);
waitForAllSessionsToExpire();
sessionCleaner.checkExpiry();
assertEquals(0, testSessions.getSessions().size());
assertTrue(killableDriver.killed);
testDone.countDown();
}
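  // Returns a Runnable that executes a FutureTask on the session which blocks until
  // testDone is counted down, keeping the session marked as in-use for the test.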
private Runnable getRunnableThatMakesSessionBusy(final Session session,
final CountDownLatch started,
final CountDownLatch testDone) {
return new Runnable(){
public void run(){
try {
session.execute(new FutureTask<Object>(new Callable<Object>()
{
public Object call() {
try {
started.countDown();
testDone.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return "yo";
}
}));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
}
@Test
public void testCleanupWithThread() throws Exception {
DriverSessions defaultDriverSessions = getDriverSessions();
defaultDriverSessions.newSession(DesiredCapabilities.firefox());
defaultDriverSessions.newSession(DesiredCapabilities.firefox());
assertEquals(2, defaultDriverSessions.getSessions().size());
SessionCleaner sessionCleaner = new TestSessionCleaner(defaultDriverSessions, log, 10);
sessionCleaner.start();
waitForAllSessionsToExpire();
synchronized (sessionCleaner) {
sessionCleaner.wait();
}
assertEquals(0, defaultDriverSessions.getSessions().size());
sessionCleaner.stopCleaner();
}
private void waitForAllSessionsToExpire() throws InterruptedException {
Thread.sleep(11);
}
class TestSessionCleaner extends SessionCleaner {
TestSessionCleaner(DriverSessions driverSessions, Logger log, int sessionTimeOutInMs) {
super(driverSessions, log, sessionTimeOutInMs, sessionTimeOutInMs);
}
@Override
void checkExpiry() {
super.checkExpiry();
synchronized (this) {
this.notifyAll();
}
}
}
@Test
public void testCleanupWithSessionExtension() throws Exception {
DriverSessions defaultDriverSessions = getDriverSessions();
SessionId firstSession = defaultDriverSessions.newSession(DesiredCapabilities.firefox());
defaultDriverSessions.newSession(DesiredCapabilities.firefox());
SessionCleaner sessionCleaner = new SessionCleaner(defaultDriverSessions, log, 10, 10);
waitForAllSessionsToExpire();
defaultDriverSessions.get(firstSession).updateLastAccessTime();
sessionCleaner.checkExpiry();
assertEquals(1, defaultDriverSessions.getSessions().size());
waitForAllSessionsToExpire();
sessionCleaner.checkExpiry();
assertEquals(0, defaultDriverSessions.getSessions().size());
}
private DriverSessions getDriverSessions() {
DriverFactory factory = new MyDriverFactory();
return new DefaultDriverSessions(Platform.LINUX, factory);
}
class MyDriverFactory implements DriverFactory {
public void registerDriver(Capabilities capabilities, Class<? extends WebDriver> implementation) {
}
public WebDriver newInstance(Capabilities capabilities) {
return new StubDriver() {
@Override
public void quit() {
}
};
}
public boolean hasMappingFor(Capabilities capabilities) {
return true;
}
}
static class KillableDriver extends RemoteWebDriver implements Killable {
boolean killed;
public void kill() {
killed = true;
}
}
}
| krosenvold/selenium-git-release-candidate | java/server/test/org/openqa/selenium/remote/server/SessionCleanerTest.java | Java | apache-2.0 | 6,379 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig.builtin;
import static org.objectweb.asm.Opcodes.ACC_PUBLIC;
import static org.objectweb.asm.Opcodes.ACC_SUPER;
import static org.objectweb.asm.Opcodes.ALOAD;
import static org.objectweb.asm.Opcodes.ARETURN;
import static org.objectweb.asm.Opcodes.ASTORE;
import static org.objectweb.asm.Opcodes.CHECKCAST;
import static org.objectweb.asm.Opcodes.ICONST_0;
import static org.objectweb.asm.Opcodes.ICONST_1;
import static org.objectweb.asm.Opcodes.ICONST_2;
import static org.objectweb.asm.Opcodes.ICONST_3;
import static org.objectweb.asm.Opcodes.ICONST_4;
import static org.objectweb.asm.Opcodes.ICONST_5;
import static org.objectweb.asm.Opcodes.INVOKEINTERFACE;
import static org.objectweb.asm.Opcodes.INVOKESPECIAL;
import static org.objectweb.asm.Opcodes.INVOKESTATIC;
import static org.objectweb.asm.Opcodes.INVOKEVIRTUAL;
import static org.objectweb.asm.Opcodes.RETURN;
import static org.objectweb.asm.Opcodes.V1_6;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.pig.EvalFunc;
import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.MethodVisitor;
import org.python.google.common.collect.Maps;
import com.google.common.collect.Sets;
//TODO need to add support for ANY Pig type!
//TODO statically cache the generated code based on the input Strings
public class InvokerGenerator extends EvalFunc<Object> {
private String className_;
private String methodName_;
private String[] argumentTypes_;
private boolean isInitialized = false;
private InvokerFunction generatedFunction;
private Schema outputSchema;
private static int uniqueId = 0;
private static final Map<Class<?>, Byte> returnTypeMap = new HashMap<Class<?>, Byte>() {{
put(String.class, DataType.CHARARRAY);
put(Integer.class, DataType.INTEGER);
put(Long.class, DataType.LONG);
put(Float.class, DataType.FLOAT);
put(Double.class, DataType.DOUBLE);
put(Boolean.class, DataType.BOOLEAN);
//put(byte[].class, DataType.BYTEARRAY);
put(Integer.TYPE, DataType.INTEGER);
put(Long.TYPE, DataType.LONG);
put(Float.TYPE, DataType.FLOAT);
put(Double.TYPE, DataType.DOUBLE);
put(Boolean.TYPE, DataType.BOOLEAN);
}};
private static final Map<Class<?>, Class<?>> inverseTypeMap = new HashMap<Class<?>, Class<?>>() {{
put(Integer.class, Integer.TYPE);
put(Long.class, Long.TYPE);
put(Float.class, Float.TYPE);
put(Double.class, Double.TYPE);
put(Boolean.class, Boolean.TYPE);
put(Integer.TYPE, Integer.class);
put(Long.TYPE, Long.class);
put(Float.TYPE, Float.class);
put(Double.TYPE, Double.class);
put(Boolean.TYPE, Boolean.class);
}};
private static final Map<Class<?>, String> primitiveSignature = new HashMap<Class<?>, String>() {{
put(Integer.TYPE, "I");
put(Long.TYPE, "J");
put(Float.TYPE, "F");
put(Double.TYPE, "D");
put(Boolean.TYPE, "Z");
}};
private static final Map<String,Class<?>> nameToClassObjectMap = new HashMap<String,Class<?>>() {{
put("String",String.class);
put("Integer", Integer.class);
put("int", Integer.TYPE);
put("Long", Long.class);
put("long", Long.TYPE);
put("Float", Float.class);
put("float", Float.TYPE);
put("Double", Double.class);
put("double", Double.TYPE);
put("Boolean", Boolean.class);
put("boolean", Boolean.TYPE);
//put("byte[]", byte[].class);
put("java.lang.String",String.class);
put("java.lang.Integer", Integer.class);
put("java.lang.Long", Long.class);
put("java.lang.Float", Float.class);
put("java.lang.Double", Double.class);
put("java.lang.Boolean", Boolean.class);
}};
public InvokerGenerator(String className, String methodName, String argumentTypes) {
className_ = className;
methodName_ = methodName;
argumentTypes_ = argumentTypes.split(",");
if ("".equals(argumentTypes)) {
argumentTypes_ = new String[0];
}
}
@Override
public Object exec(Tuple input) throws IOException {
if (!isInitialized)
initialize();
return generatedFunction.eval(input);
}
@Override
public Schema outputSchema(Schema input) {
if (!isInitialized)
initialize();
return outputSchema;
}
private static int getUniqueId() {
return uniqueId++;
}
protected void initialize() {
Class<?> clazz;
try {
clazz = PigContext.resolveClassName(className_); //TODO I should probably be using this for all of the Class<?> resolution
} catch (IOException e) {
throw new RuntimeException("Given className not found: " + className_, e);
}
Class<?>[] arguments = getArgumentClassArray(argumentTypes_);
Method method;
try {
method = clazz.getMethod(methodName_, arguments); //must match exactly
} catch (SecurityException e) {
throw new RuntimeException("Not allowed to call given method["+methodName_+"] on class ["+className_+"] with arguments: " + Arrays.toString(argumentTypes_), e);
} catch (NoSuchMethodException e) {
throw new RuntimeException("Given method name ["+methodName_+"] does not exist on class ["+className_+"] with arguments: " + Arrays.toString(argumentTypes_), e);
}
boolean isStatic = Modifier.isStatic(method.getModifiers());
Class<?> returnClazz = method.getReturnType();
Byte type;
if (returnClazz.isPrimitive()) {
type = returnTypeMap.get(inverseTypeMap.get(returnClazz));
} else {
type = returnTypeMap.get(returnClazz);
}
        //TODO add functionality so that if the user pairs this with a cast that it will let you return object
if (type == null) {
throw new RuntimeException("Function returns invalid type: " + returnClazz);
}
outputSchema = new Schema();
outputSchema.add(new Schema.FieldSchema(null, type));
generatedFunction = generateInvokerFunction("InvokerFunction_"+getUniqueId(), method, isStatic, arguments);
isInitialized = true;
}
private Class<?>[] getArgumentClassArray(String[] argumentTypes) {
Class<?>[] arguments = new Class<?>[argumentTypes.length];
for (int i = 0; i < argumentTypes.length; i++) {
try {
arguments[i]= nameToClassObjectMap.get(argumentTypes[i]);
if (arguments[i] == null) {
arguments[i] = PigContext.resolveClassName(argumentTypes[i]);
}
} catch (IOException e) {
throw new RuntimeException("Unable to find class in PigContext: " + argumentTypes[i], e);
}
}
return arguments;
}
private InvokerFunction generateInvokerFunction(String className, Method method, boolean isStatic, Class<?>[] arguments) {
byte[] byteCode = generateInvokerFunctionBytecode(className, method, isStatic, arguments);
return ByteClassLoader.getInvokerFunction(className, byteCode);
}
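    // Emits a class implementing InvokerFunction: its eval(Tuple) pulls each argument
    // out of the tuple, casts it, invokes the target method (unboxing/boxing primitives
    // as needed) and returns the result as an Object.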
private byte[] generateInvokerFunctionBytecode(String className, Method method, boolean isStatic, Class<?>[] arguments) {
boolean isInterface = method.getDeclaringClass().isInterface();
ClassWriter cw = new ClassWriter(0);
cw.visit(V1_6, ACC_PUBLIC + ACC_SUPER, className, null, "java/lang/Object", new String[] { "org/apache/pig/builtin/InvokerFunction" });
makeConstructor(cw);
MethodVisitor mv = cw.visitMethod(ACC_PUBLIC, "eval", "(Lorg/apache/pig/data/Tuple;)Ljava/lang/Object;", null, new String[] { "java/io/IOException" });
mv.visitCode();
int next = 2;
//this will get the arguments from the Tuple, cast them, and astore them
int begin = 0;
if (!isStatic)
loadAndStoreArgument(mv, begin++, next++, getMethodStyleName(method.getDeclaringClass()));
for (int i = 0; i < arguments.length; i++)
loadAndStoreArgument(mv, i + begin, next++, getMethodStyleName(getObjectVersion(arguments[i])));
//puts the arguments on the stack
next = 2;
if (!isStatic) {
mv.visitVarInsn(ALOAD, next++); //put the method receiver on the stack
}
for (Class<?> arg : arguments) {
mv.visitVarInsn(ALOAD, next++);
unboxIfPrimitive(mv, arg);
}
String signature = buildSignatureString(arguments, method.getReturnType());
mv.visitMethodInsn(isStatic ? INVOKESTATIC : isInterface ? INVOKEINTERFACE : INVOKEVIRTUAL, getMethodStyleName(method.getDeclaringClass()), method.getName(), signature);
boxIfPrimitive(mv, method.getReturnType()); //TODO does this work?
mv.visitInsn(ARETURN);
mv.visitMaxs(2, (isStatic ? 2 : 3) + arguments.length);
mv.visitEnd();
cw.visitEnd();
return cw.toByteArray();
}
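    // Builds the JVM method descriptor for the target method, e.g. "(Ljava/lang/String;I)J".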
private String buildSignatureString(Class<?>[] arguments, Class<?> returnClazz) {
String sig = "(";
for (Class<?> arg : arguments) {
if (!arg.isPrimitive())
sig += "L" + getMethodStyleName(arg) + ";";
else
sig += getMethodStyleName(arg);
}
sig += ")";
if (!returnClazz.isPrimitive()) {
sig += "L" + getMethodStyleName(returnClazz) + ";";
} else {
sig += getMethodStyleName(returnClazz);
}
return sig;
}
private Class<?> getObjectVersion(Class<?> clazz) {
if (clazz.isPrimitive()) {
return inverseTypeMap.get(clazz);
}
return clazz;
}
private String getMethodStyleName(Class<?> clazz) {
if (!clazz.isPrimitive()) {
return clazz.getCanonicalName().replaceAll("\\.","/");
}
return primitiveSignature.get(clazz);
}
private void boxIfPrimitive(MethodVisitor mv, Class<?> clazz) {
if (!clazz.isPrimitive()) {
return;
}
String boxedClass = getMethodStyleName(inverseTypeMap.get(clazz));
mv.visitMethodInsn(INVOKESTATIC, boxedClass, "valueOf", "("+getMethodStyleName(clazz)+")L"+boxedClass+";");
}
private void unboxIfPrimitive(MethodVisitor mv, Class<?> clazz) {
if (!clazz.isPrimitive()) {
return;
}
String methodName = clazz.getSimpleName()+"Value";
mv.visitMethodInsn(INVOKEVIRTUAL, getMethodStyleName(inverseTypeMap.get(clazz)), methodName, "()"+getMethodStyleName(clazz));
}
private void loadAndStoreArgument(MethodVisitor mv, int loadIdx, int storeIdx, String castName) {
mv.visitVarInsn(ALOAD, 1); //loads the 1st argument
addConst(mv, loadIdx);
mv.visitMethodInsn(INVOKEINTERFACE, "org/apache/pig/data/Tuple", "get", "(I)Ljava/lang/Object;");
mv.visitTypeInsn(CHECKCAST, castName);
mv.visitVarInsn(ASTORE, storeIdx);
}
private void addConst(MethodVisitor mv, int idx) {
switch (idx) {
case(0): mv.visitInsn(ICONST_0); break;
case(1): mv.visitInsn(ICONST_1); break;
case(2): mv.visitInsn(ICONST_2); break;
case(3): mv.visitInsn(ICONST_3); break;
case(4): mv.visitInsn(ICONST_4); break;
case(5): mv.visitInsn(ICONST_5); break;
default:
throw new RuntimeException("Invalid index given to addConst: " + idx);
}
}
private void makeConstructor(ClassWriter cw) {
MethodVisitor mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
mv.visitCode();
mv.visitVarInsn(ALOAD, 0);
mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
mv.visitInsn(RETURN);
mv.visitMaxs(1, 1);
mv.visitEnd();
}
static class ByteClassLoader extends ClassLoader {
private byte[] buf;
public ByteClassLoader(byte[] buf) {
this.buf = buf;
}
public Class<InvokerFunction> findClass(String name) {
return (Class<InvokerFunction>)defineClass(name, buf, 0, buf.length);
}
public static InvokerFunction getInvokerFunction(String name, byte[] buf) {
try {
return new ByteClassLoader(buf).findClass(name).newInstance();
} catch (InstantiationException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
}
| siddaartha/spork | src/org/apache/pig/builtin/InvokerGenerator.java | Java | apache-2.0 | 13,997 |
<?php
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Abstract base class for output converters of RESTful API responses.
*
*/
abstract class OutputConverter {
private $boundry;
abstract function outputResponse(ResponseItem $responseItem, RestRequestItem $requestItem);
abstract function outputBatch(Array $responses, SecurityToken $token);
}
| chikim/shindig | src/social/converters/OutputConverter.php | PHP | apache-2.0 | 1,116 |
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"!
#
FROM ubuntu:trusty
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.5.3
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV RUNC_BUILDTAGS apparmor selinux
| pyotr777/docker | contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile | Dockerfile | apache-2.0 | 661 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.physical.impl.validate;
import java.util.List;
import org.apache.drill.common.exceptions.ExecutionSetupException;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.physical.base.AbstractPhysicalVisitor;
import org.apache.drill.exec.physical.base.FragmentRoot;
import org.apache.drill.exec.physical.base.PhysicalOperator;
import org.apache.drill.exec.physical.config.IteratorValidator;
import org.apache.drill.exec.physical.config.RowKeyJoinPOP;
import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
public class IteratorValidatorInjector extends
AbstractPhysicalVisitor<PhysicalOperator, FragmentContext, ExecutionSetupException> {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(IteratorValidatorInjector.class);
/* This flag when set creates all the validators as repeatable validators */
private final boolean isRepeatablePipeline;
public IteratorValidatorInjector() {
this(false);
}
public IteratorValidatorInjector(boolean repeatablePipeline) {
this.isRepeatablePipeline = repeatablePipeline;
}
public static FragmentRoot rewritePlanWithIteratorValidator(FragmentContext context, FragmentRoot root) throws ExecutionSetupException {
IteratorValidatorInjector inject = new IteratorValidatorInjector();
PhysicalOperator newOp = root.accept(inject, context);
if ( !(newOp instanceof FragmentRoot) ) {
throw new IllegalStateException("This shouldn't happen.");
}
return (FragmentRoot) newOp;
}
/**
* Traverse the physical plan and inject the IteratorValidator operator after every operator.
*
* @param op
* Physical operator under which the IteratorValidator operator will be injected
* @param context
* Fragment context
* @return same physical operator as passed in, but its child will be a IteratorValidator operator whose child will be the
* original child of this operator
* @throws ExecutionSetupException
*/
@Override
public PhysicalOperator visitOp(PhysicalOperator op, FragmentContext context) throws ExecutionSetupException {
List<PhysicalOperator> newChildren = Lists.newArrayList();
PhysicalOperator newOp = op;
if (op instanceof RowKeyJoinPOP) {
/* create a RepeatablePipeline for the left side of RowKeyJoin */
PhysicalOperator left = new IteratorValidator(((RowKeyJoinPOP) op).getLeft()
.accept(new IteratorValidatorInjector(true), context), true);
left.setOperatorId(op.getOperatorId() + 1000);
newChildren.add(left);
/* right pipeline is not repeatable pipeline */
PhysicalOperator right = new IteratorValidator(((RowKeyJoinPOP) op).getRight()
.accept(this, context));
right.setOperatorId(op.getOperatorId() + 1000);
newChildren.add(right);
} else {
/* Get the list of child operators */
for (PhysicalOperator child : op) {
PhysicalOperator validator = new IteratorValidator(child.accept(this, context), this.isRepeatablePipeline);
validator.setOperatorId(op.getOperatorId() + 1000);
newChildren.add(validator);
}
}
    /* Rebuild the operator with the validator-wrapped children */
if (newChildren.size() > 0) {
newOp = op.getNewWithChildren(newChildren);
newOp.setOperatorId(op.getOperatorId());
}
return newOp;
}
}
| johnnywale/drill | exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorInjector.java | Java | apache-2.0 | 4,218 |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.appserver.sample.ee.cdi.qualifier;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import javax.inject.Qualifier;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
@Qualifier
@Retention(RUNTIME)
@Target({METHOD, FIELD, PARAMETER, TYPE})
public @interface Entrance {
}
| kasungayan/product-as | modules/samples/JavaEE-TomEE/CDI-Servlet/cdi-qualifier/src/main/java/org/wso2/appserver/sample/ee/cdi/qualifier/Entrance.java | Java | apache-2.0 | 1,216 |
/*
* Copyright 2006 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkOSWindow_Unix_DEFINED
#define SkOSWindow_Unix_DEFINED
#include <GL/glx.h>
#include <X11/Xlib.h>
#include "SkWindow.h"
class SkEvent;
struct SkUnixWindow {
Display* fDisplay;
Window fWin;
size_t fOSWin;
GC fGc;
GLXContext fGLContext;
};
class SkOSWindow : public SkWindow {
public:
SkOSWindow(void*);
~SkOSWindow();
void* getHWND() const { return (void*)fUnixWindow.fWin; }
void* getDisplay() const { return (void*)fUnixWindow.fDisplay; }
void* getUnixWindow() const { return (void*)&fUnixWindow; }
void loop();
enum SkBackEndTypes {
kNone_BackEndType,
kNativeGL_BackEndType,
};
bool attach(SkBackEndTypes attachType, int msaaSampleCount, AttachmentInfo*);
void detach();
void present();
int getMSAASampleCount() const { return fMSAASampleCount; }
//static bool PostEvent(SkEvent* evt, SkEventSinkID, SkMSec delay);
bool makeFullscreen();
void setVsync(bool);
void closeWindow();
protected:
// Overridden from from SkWindow:
void onSetTitle(const char title[]) override;
private:
enum NextXEventResult {
kContinue_NextXEventResult,
kQuitRequest_NextXEventResult,
kPaintRequest_NextXEventResult
};
NextXEventResult nextXEvent();
void doPaint();
void mapWindowAndWait();
// Forcefully closes the window. If a graceful shutdown is desired then call the public
// closeWindow method
void internalCloseWindow();
void initWindow(int newMSAASampleCount, AttachmentInfo* info);
SkUnixWindow fUnixWindow;
// Needed for GL
XVisualInfo* fVi;
// we recreate the underlying xwindow if this changes
int fMSAASampleCount;
typedef SkWindow INHERITED;
};
#endif
| weolar/miniblink49 | third_party/skia/include/views/SkOSWindow_Unix.h | C | apache-2.0 | 1,921 |
package org.apache.samoa.evaluation;
/*
* #%L
* SAMOA
* %%
* Copyright (C) 2014 - 2015 Apache Software Foundation
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
public interface RegressionPerformanceEvaluator extends PerformanceEvaluator {
}
| GozdeBoztepe/samoa | samoa-api/src/main/java/org/apache/samoa/evaluation/RegressionPerformanceEvaluator.java | Java | apache-2.0 | 777 |
/*
* Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.docs.context.properties.bind;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DataSizeUnit;
import org.springframework.util.unit.DataSize;
import org.springframework.util.unit.DataUnit;
/**
* A {@link ConfigurationProperties @ConfigurationProperties} example that uses
* {@link DataSize}.
*
* @author Stephane Nicoll
*/
// tag::example[]
@ConfigurationProperties("app.io")
public class AppIoProperties {
@DataSizeUnit(DataUnit.MEGABYTES)
private DataSize bufferSize = DataSize.ofMegabytes(2);
private DataSize sizeThreshold = DataSize.ofBytes(512);
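	// A minimal binding sketch (the values below are illustrative, not taken from the
	// original docs). With the prefix "app.io" and the @DataSizeUnit default of megabytes,
	// properties such as
	//
	//   app.io.buffer-size=10        -> DataSize of 10 megabytes (no suffix, default unit)
	//   app.io.size-threshold=256B   -> DataSize of 256 bytes (explicit unit wins)
	//
	// would populate the two fields above.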
public DataSize getBufferSize() {
return this.bufferSize;
}
public void setBufferSize(DataSize bufferSize) {
this.bufferSize = bufferSize;
}
public DataSize getSizeThreshold() {
return this.sizeThreshold;
}
public void setSizeThreshold(DataSize sizeThreshold) {
this.sizeThreshold = sizeThreshold;
}
}
// end::example[]
| eddumelendez/spring-boot | spring-boot-project/spring-boot-docs/src/main/java/org/springframework/boot/docs/context/properties/bind/AppIoProperties.java | Java | apache-2.0 | 1,626 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.apache.kafka.common.network;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.ScatteringByteChannel;
/**
* A size delimited Receive that consists of a 4 byte network-ordered size N followed by N bytes of content
*/
public class NetworkReceive implements Receive {
public final static String UNKNOWN_SOURCE = "";
public final static int UNLIMITED = -1;
private final String source;
private final ByteBuffer size;
private final int maxSize;
private ByteBuffer buffer;
public NetworkReceive(String source, ByteBuffer buffer) {
this.source = source;
this.buffer = buffer;
this.size = null;
this.maxSize = UNLIMITED;
}
public NetworkReceive(String source) {
this.source = source;
this.size = ByteBuffer.allocate(4);
this.buffer = null;
this.maxSize = UNLIMITED;
}
public NetworkReceive(int maxSize, String source) {
this.source = source;
this.size = ByteBuffer.allocate(4);
this.buffer = null;
this.maxSize = maxSize;
}
public NetworkReceive() {
this(UNKNOWN_SOURCE);
}
@Override
public String source() {
return source;
}
@Override
public boolean complete() {
return !size.hasRemaining() && !buffer.hasRemaining();
}
public long readFrom(ScatteringByteChannel channel) throws IOException {
return readFromReadableChannel(channel);
}
// Need a method to read from ReadableByteChannel because BlockingChannel requires read with timeout
// See: http://stackoverflow.com/questions/2866557/timeout-for-socketchannel-doesnt-work
// This can go away after we get rid of BlockingChannel
@Deprecated
public long readFromReadableChannel(ReadableByteChannel channel) throws IOException {
int read = 0;
if (size.hasRemaining()) {
int bytesRead = channel.read(size);
if (bytesRead < 0)
throw new EOFException();
read += bytesRead;
if (!size.hasRemaining()) {
size.rewind();
int receiveSize = size.getInt();
if (receiveSize < 0)
throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + ")");
if (maxSize != UNLIMITED && receiveSize > maxSize)
throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + " larger than " + maxSize + ")");
this.buffer = ByteBuffer.allocate(receiveSize);
}
}
if (buffer != null) {
int bytesRead = channel.read(buffer);
if (bytesRead < 0)
throw new EOFException();
read += bytesRead;
}
return read;
}
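    /*
     * Illustrative note (not part of the original Kafka source): the peer writing to this
     * channel is expected to frame each message as a 4-byte big-endian size N followed by
     * N bytes of payload. A sender-side sketch, where "payload" is an assumed byte[]:
     *
     *   ByteBuffer framed = ByteBuffer.allocate(4 + payload.length);
     *   framed.putInt(payload.length); // size prefix, network byte order (big-endian)
     *   framed.put(payload);           // N bytes of content
     *   framed.flip();
     *
     * readFromReadableChannel() first fills the 4-byte size buffer, then allocates and fills
     * a buffer of exactly N bytes; complete() becomes true once both buffers are full.
     */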
public ByteBuffer payload() {
return this.buffer;
}
// Used only by BlockingChannel, so we may be able to get rid of this when/if we get rid of BlockingChannel
@Deprecated
public long readCompletely(ReadableByteChannel channel) throws IOException {
int totalRead = 0;
while (!complete()) {
totalRead += readFromReadableChannel(channel);
}
return totalRead;
}
}
| aditya-chaturvedi/kafka | clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java | Java | apache-2.0 | 4,145 |
MODULES=fsync_helper
PG_CONFIG=pg_config
REGRESS = setup bgwriter_checkpoint
REGRESS_OPTS = --dbname="fsync_regression"
subdir = src/test/fsync/
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
NO_PGXS = 1
include $(top_srcdir)/src/makefiles/pgxs.mk
| Quikling/gpdb | src/test/fsync/Makefile | Makefile | apache-2.0 | 273 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.config;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
/** A helper class to create {@link Config} objects */
public class ConfigBuilder {
private final Map<String, Object> primitiveProps = new HashMap<>();
  private final Optional<String> originDescription;
private Config currentConfig;
ConfigBuilder(Optional<String> originDescription) {
    this.originDescription = originDescription;
this.currentConfig =
        originDescription.isPresent() ? ConfigFactory.empty(this.originDescription.get()) : ConfigFactory.empty();
}
/**
* Loads properties which have a given name prefix into the config. The following restrictions
* apply:
* <ul>
* <li>No property can have a name that is equal to the prefix
* <li>After removal of the prefix, the remaining property name should start with a letter.
* </ul>
*
* @param props the collection from where to load the properties
* @param scopePrefix only properties with this prefix will be considered. The prefix will be
* removed from the names of the keys added to the {@link Config} object.
* The prefix can be an empty string but cannot be null.
*/
public ConfigBuilder loadProps(Properties props, String scopePrefix) {
Preconditions.checkNotNull(props);
Preconditions.checkNotNull(scopePrefix);
int scopePrefixLen = scopePrefix.length();
for (Map.Entry<Object, Object> propEntry : props.entrySet()) {
String propName = propEntry.getKey().toString();
if (propName.startsWith(scopePrefix)) {
String scopedName = propName.substring(scopePrefixLen);
if (scopedName.isEmpty()) {
throw new RuntimeException("Illegal scoped property:" + propName);
}
if (!Character.isAlphabetic(scopedName.charAt(0))) {
throw new RuntimeException(
"Scoped name for property " + propName + " should start with a character: " + scopedName);
}
this.primitiveProps.put(scopedName, propEntry.getValue());
}
}
return this;
}
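  /*
   * Illustrative usage sketch (not part of the original Gobblin source; the property names
   * below are made up for the example):
   *
   *   Properties props = new Properties();
   *   props.setProperty("writer.fs.uri", "file:///tmp");
   *   props.setProperty("writer.batchSize", "100");
   *   props.setProperty("other.key", "ignored");   // does not match the prefix
   *
   *   Config config = ConfigBuilder.create("example")
   *       .loadProps(props, "writer.")
   *       .addPrimitive("retries", 3)
   *       .build();
   *
   *   // config now holds "fs.uri", "batchSize" and "retries"; "other.key" is skipped.
   */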
public ConfigBuilder addPrimitive(String name, Object value) {
this.primitiveProps.put(name, value);
return this;
}
public ConfigBuilder addList(String name, Iterable<? extends Object> values) {
    this.currentConfig = this.originDescription.isPresent()
        ? this.currentConfig.withValue(name, ConfigValueFactory.fromIterable(values, this.originDescription.get()))
: this.currentConfig.withValue(name, ConfigValueFactory.fromIterable(values));
return this;
}
public static ConfigBuilder create() {
return new ConfigBuilder(Optional.<String> absent());
}
public static ConfigBuilder create(String originDescription) {
return new ConfigBuilder(Optional.of(originDescription));
}
public Config build() {
return ConfigFactory.parseMap(this.primitiveProps).withFallback(this.currentConfig);
}
}
| jinhyukchang/gobblin | gobblin-core/src/main/java/org/apache/gobblin/config/ConfigBuilder.java | Java | apache-2.0 | 4,015 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.client.ml.job.results;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.stream.IntStream;
public class BucketTests extends AbstractXContentTestCase<Bucket> {
@Override
public Bucket createTestInstance() {
return createTestInstance("foo");
}
public static Bucket createTestInstance(String jobId) {
Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong());
if (randomBoolean()) {
bucket.setAnomalyScore(randomDouble());
}
if (randomBoolean()) {
int size = randomInt(10);
List<BucketInfluencer> bucketInfluencers = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
BucketInfluencer bucketInfluencer = new BucketInfluencer(jobId, new Date(), 600);
bucketInfluencer.setAnomalyScore(randomDouble());
bucketInfluencer.setInfluencerFieldName(randomAlphaOfLengthBetween(1, 20));
bucketInfluencer.setInitialAnomalyScore(randomDouble());
bucketInfluencer.setProbability(randomDouble());
bucketInfluencer.setRawAnomalyScore(randomDouble());
bucketInfluencers.add(bucketInfluencer);
}
bucket.setBucketInfluencers(bucketInfluencers);
}
if (randomBoolean()) {
bucket.setEventCount(randomNonNegativeLong());
}
if (randomBoolean()) {
bucket.setInitialAnomalyScore(randomDouble());
}
if (randomBoolean()) {
bucket.setInterim(randomBoolean());
}
if (randomBoolean()) {
bucket.setProcessingTimeMs(randomLong());
}
if (randomBoolean()) {
int size = randomInt(10);
List<AnomalyRecord> records = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
AnomalyRecord anomalyRecord = AnomalyRecordTests.createTestInstance(jobId);
records.add(anomalyRecord);
}
bucket.setRecords(records);
}
if (randomBoolean()) {
int size = randomInt(10);
List<String> scheduledEvents = new ArrayList<>(size);
IntStream.range(0, size).forEach(i -> scheduledEvents.add(randomAlphaOfLength(20)));
bucket.setScheduledEvents(scheduledEvents);
}
return bucket;
}
@Override
protected Bucket doParseInstance(XContentParser parser) {
return Bucket.PARSER.apply(parser, null);
}
public void testEquals_GivenDifferentClass() {
Bucket bucket = new Bucket("foo", new Date(randomLong()), randomNonNegativeLong());
assertFalse(bucket.equals("a string"));
}
public void testEquals_GivenTwoDefaultBuckets() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
assertTrue(bucket1.equals(bucket2));
assertTrue(bucket2.equals(bucket1));
}
public void testEquals_GivenDifferentAnomalyScore() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setAnomalyScore(3.0);
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setAnomalyScore(2.0);
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenSameDates() {
Bucket b1 = new Bucket("foo", new Date(1234567890L), 123);
Bucket b2 = new Bucket("foo", new Date(1234567890L), 123);
assertTrue(b1.equals(b2));
}
public void testEquals_GivenDifferentEventCount() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setEventCount(3);
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setEventCount(100);
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenOneHasRecordsAndTheOtherDoesNot() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setRecords(Collections.singletonList(new AnomalyRecord("foo", new Date(123), 123)));
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setRecords(Collections.emptyList());
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenDifferentNumberOfRecords() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setRecords(Collections.singletonList(new AnomalyRecord("foo", new Date(123), 123)));
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setRecords(Arrays.asList(new AnomalyRecord("foo", new Date(123), 123),
new AnomalyRecord("foo", new Date(123), 123)));
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenSameNumberOfRecordsButDifferent() {
AnomalyRecord anomalyRecord1 = new AnomalyRecord("foo", new Date(123), 123);
anomalyRecord1.setRecordScore(1.0);
AnomalyRecord anomalyRecord2 = new AnomalyRecord("foo", new Date(123), 123);
anomalyRecord1.setRecordScore(2.0);
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setRecords(Collections.singletonList(anomalyRecord1));
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setRecords(Collections.singletonList(anomalyRecord2));
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenDifferentIsInterim() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
bucket1.setInterim(true);
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
bucket2.setInterim(false);
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenDifferentBucketInfluencers() {
Bucket bucket1 = new Bucket("foo", new Date(123), 123);
BucketInfluencer influencer1 = new BucketInfluencer("foo", new Date(123), 123);
influencer1.setInfluencerFieldName("foo");
bucket1.setBucketInfluencers(Collections.singletonList(influencer1));
Bucket bucket2 = new Bucket("foo", new Date(123), 123);
BucketInfluencer influencer2 = new BucketInfluencer("foo", new Date(123), 123);
influencer2.setInfluencerFieldName("bar");
bucket2.setBucketInfluencers(Collections.singletonList(influencer2));
assertFalse(bucket1.equals(bucket2));
assertFalse(bucket2.equals(bucket1));
}
public void testEquals_GivenEqualBuckets() {
AnomalyRecord record = new AnomalyRecord("job_id", new Date(123), 123);
BucketInfluencer bucketInfluencer = new BucketInfluencer("foo", new Date(123), 123);
Date date = new Date();
Bucket bucket1 = new Bucket("foo", date, 123);
bucket1.setAnomalyScore(42.0);
bucket1.setInitialAnomalyScore(92.0);
bucket1.setEventCount(134);
bucket1.setInterim(true);
bucket1.setRecords(Collections.singletonList(record));
bucket1.setBucketInfluencers(Collections.singletonList(bucketInfluencer));
Bucket bucket2 = new Bucket("foo", date, 123);
bucket2.setAnomalyScore(42.0);
bucket2.setInitialAnomalyScore(92.0);
bucket2.setEventCount(134);
bucket2.setInterim(true);
bucket2.setRecords(Collections.singletonList(record));
bucket2.setBucketInfluencers(Collections.singletonList(bucketInfluencer));
assertTrue(bucket1.equals(bucket2));
assertTrue(bucket2.equals(bucket1));
assertEquals(bucket1.hashCode(), bucket2.hashCode());
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
}
| robin13/elasticsearch | client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/BucketTests.java | Java | apache-2.0 | 8,533 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.testsuite.admin;
import java.util.ArrayList;
import java.util.List;
import org.keycloak.representations.idm.CredentialRepresentation;
import static org.keycloak.representations.idm.CredentialRepresentation.PASSWORD;
import org.keycloak.representations.idm.UserRepresentation;
/**
*
* @author Petr Mensik
* @author tkyjovsk
*/
public class Users {
public static String getPasswordOf(UserRepresentation user) {
String value = null;
CredentialRepresentation password = getPasswordCredentialOf(user);
if (password != null) {
value = password.getValue();
}
return value;
}
public static CredentialRepresentation getPasswordCredentialOf(UserRepresentation user) {
CredentialRepresentation password = null;
if (user.getCredentials() != null) {
for (CredentialRepresentation c : user.getCredentials()) {
if (CredentialRepresentation.PASSWORD.equals(c.getType())) {
password = c;
}
}
}
return password;
}
public static void setPasswordFor(UserRepresentation user, String password) {
setPasswordFor(user, password, false);
}
public static void setPasswordFor(UserRepresentation user, String password, boolean temporary) {
List<CredentialRepresentation> credentials = new ArrayList<>();
CredentialRepresentation pass = new CredentialRepresentation();
pass.setType(PASSWORD);
pass.setValue(password);
pass.setTemporary(temporary);
credentials.add(pass);
user.setCredentials(credentials);
}
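    /*
     * Illustrative usage sketch (not part of the original Keycloak source; the user name and
     * password below are made-up values):
     *
     *   UserRepresentation user = new UserRepresentation();
     *   user.setUsername("alice");
     *   Users.setPasswordFor(user, "secret");          // non-temporary password credential
     *   String plain = Users.getPasswordOf(user);      // returns "secret"
     */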
}
| lkubik/keycloak | testsuite/integration-arquillian/tests/base/src/main/java/org/keycloak/testsuite/admin/Users.java | Java | apache-2.0 | 2,342 |
// @declaration: true
export interface Foo {
preFetch: <T1 extends T2> (c: T1) => void; // Type T2 is not defined
preFetcher: new <T1 extends T2> (c: T1) => void; // Type T2 is not defined
}
| weswigham/TypeScript | tests/cases/compiler/declarationEmitLambdaWithMissingTypeParameterNoCrash.ts | TypeScript | apache-2.0 | 204 |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ide.highlighter;
import com.intellij.icons.AllIcons;
import com.intellij.openapi.fileTypes.InternalFileType;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.projectModel.ProjectModelBundle;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.nio.charset.StandardCharsets;
public final class ProjectFileType implements InternalFileType {
public static final ProjectFileType INSTANCE = new ProjectFileType();
public static final String DEFAULT_EXTENSION = "ipr";
public static final String DOT_DEFAULT_EXTENSION = ".ipr";
private ProjectFileType() { }
@Override
@NotNull
public String getName() {
return "IDEA_PROJECT";
}
@Override
@NotNull
public String getDescription() {
return ProjectModelBundle.message("filetype.idea.project.description");
}
@Nls
@Override
public @NotNull String getDisplayName() {
return ProjectModelBundle.message("filetype.idea.project.display.name");
}
@Override
@NotNull
public String getDefaultExtension() {
return DEFAULT_EXTENSION;
}
@Override
public Icon getIcon() {
return AllIcons.Nodes.IdeaModule;
}
@Override
public boolean isBinary() {
return false;
}
@Override
public String getCharset(@NotNull VirtualFile file, byte @NotNull [] content) {
return StandardCharsets.UTF_8.name();
}
} | siosio/intellij-community | platform/projectModel-api/src/com/intellij/ide/highlighter/ProjectFileType.java | Java | apache-2.0 | 1,557 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.riot.resultset;
import org.apache.jena.riot.Lang ;
import org.apache.jena.riot.LangBuilder ;
import org.apache.jena.riot.RDFLanguages ;
import org.apache.jena.riot.WebContent ;
public class ResultSetLang {
// Add SSE!
public static final Lang SPARQLResultSetXML
= LangBuilder.create("SPARQL-Results-XML", WebContent.contentTypeResultsXML)
.addAltNames("SRX")
.addFileExtensions("srx")
.build() ;
public static final Lang SPARQLResultSetJSON
= LangBuilder.create("SPARQL-Results-JSON", WebContent.contentTypeResultsJSON)
.addAltNames("SRJ")
.addFileExtensions("srj")
.build() ;
public static final Lang SPARQLResultSetCSV = Lang.CSV ;
public static final Lang SPARQLResultSetTSV
= LangBuilder.create("TSV", WebContent.contentTypeTextTSV)
.addFileExtensions("tsv")
.build() ;
public static final Lang SPARQLResultSetThrift
= LangBuilder.create("SPARQL-Results-Thrift", WebContent.contentTypeResultsThrift)
.addAltNames("SRT")
.addFileExtensions("srt")
.build() ;
public static final Lang SPARQLResultSetText
= LangBuilder.create("SPARQL-Results-Text", WebContent.contentTypeTextPlain)
.addFileExtensions("txt")
.build() ;
private static boolean initialized = false ;
public static void init() {
if ( initialized )
return ;
initialized = true ;
RDFLanguages.register(SPARQLResultSetXML) ;
RDFLanguages.register(SPARQLResultSetJSON) ;
RDFLanguages.register(SPARQLResultSetCSV) ;
RDFLanguages.register(SPARQLResultSetTSV) ;
RDFLanguages.register(SPARQLResultSetThrift) ;
ResultSetReaderRegistry.init();
ResultSetWriterRegistry.init();
}
}
| samaitra/jena | jena-arq/src/main/java/org/apache/jena/riot/resultset/ResultSetLang.java | Java | apache-2.0 | 2,835 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.aries.subsystem.itests.defect;
import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import org.apache.aries.subsystem.itests.Header;
import org.apache.aries.subsystem.itests.SubsystemTest;
import org.apache.aries.subsystem.itests.util.TestCapability;
import org.apache.aries.subsystem.itests.util.TestRepository;
import org.apache.aries.subsystem.itests.util.TestRepositoryContent;
import org.apache.aries.subsystem.itests.util.TestRequirement;
import org.junit.Before;
import org.junit.Test;
import org.osgi.framework.Bundle;
import org.osgi.framework.Constants;
import org.osgi.framework.Version;
import org.osgi.framework.namespace.IdentityNamespace;
import org.osgi.resource.Resource;
import org.osgi.service.repository.Repository;
import org.osgi.service.subsystem.Subsystem;
import org.osgi.service.subsystem.SubsystemConstants;
import org.osgi.service.subsystem.SubsystemException;
/*
* https://issues.apache.org/jira/browse/ARIES-1408
*
* The RequireCapabilityHeader currently only supports requirements defined by
* the Aries implementation
*/
public class Aries1408Test extends SubsystemTest {
/*
* Subsystem-SymbolicName: application.a.esa
* Subsystem-Content: bundle.a.jar
*/
private static final String APPLICATION_A = "application.a.esa";
/*
* Bundle-SymbolicName: bundle.a.jar
* Require-Capability: foo
*/
private static final String BUNDLE_A = "bundle.a.jar";
/*
* Bundle-SymbolicName: bundle.b.jar
* Provide-Capability: foo
*/
private static final String BUNDLE_B = "bundle.b.jar";
private static boolean createdTestFiles;
@Before
public void createTestFiles() throws Exception {
if (createdTestFiles)
return;
createBundleB();
createApplicationA();
createdTestFiles = true;
}
private void createBundleB() throws IOException {
createBundle(name(BUNDLE_B), new Header(Constants.PROVIDE_CAPABILITY, "foo;foo=bar"));
}
private static void createApplicationA() throws IOException {
createApplicationAManifest();
createSubsystem(APPLICATION_A);
}
private static void createApplicationAManifest() throws IOException {
Map<String, String> attributes = new HashMap<String, String>();
attributes.put(SubsystemConstants.SUBSYSTEM_SYMBOLICNAME, APPLICATION_A);
attributes.put(SubsystemConstants.SUBSYSTEM_CONTENT, BUNDLE_A);
createManifest(APPLICATION_A + ".mf", attributes);
}
@Test
public void testRequirementFromRemoteRepositoryConvertsToRequireCapability() throws Exception {
Bundle bundleB = installBundleFromFile(BUNDLE_B);
try {
Subsystem applicationA = installSubsystemFromFile(APPLICATION_A);
uninstallSubsystemSilently(applicationA);
}
catch (SubsystemException e) {
e.printStackTrace();
fail("Subsystem should have installed");
}
finally {
uninstallSilently(bundleB);
}
}
@Override
public void setUp() throws Exception {
super.setUp();
try {
serviceRegistrations.add(
bundleContext.registerService(
Repository.class,
createTestRepository(),
null));
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
private byte[] createBundleAContent() throws IOException {
Manifest manifest = new Manifest();
manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifest.getMainAttributes().putValue(Constants.BUNDLE_SYMBOLICNAME, BUNDLE_A);
		manifest.getMainAttributes().putValue(Constants.REQUIRE_CAPABILITY, "foo;filter:=\"(foo=bar)\"");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JarOutputStream jos = new JarOutputStream(baos, manifest);
jos.close();
return baos.toByteArray();
}
private Resource createBundleAResource() throws IOException {
return new TestRepositoryContent.Builder()
.capability(
new TestCapability.Builder()
.namespace(IdentityNamespace.IDENTITY_NAMESPACE)
.attribute(IdentityNamespace.IDENTITY_NAMESPACE, BUNDLE_A)
.attribute(IdentityNamespace.CAPABILITY_VERSION_ATTRIBUTE, Version.emptyVersion)
.attribute(IdentityNamespace.CAPABILITY_TYPE_ATTRIBUTE, IdentityNamespace.TYPE_BUNDLE))
.requirement(
new TestRequirement.Builder()
.namespace("foo")
.directive(Constants.FILTER_DIRECTIVE, "(foo=bar)"))
.content(createBundleAContent())
.build();
}
private Repository createTestRepository() throws IOException {
return new TestRepository.Builder()
.resource(createBundleAResource())
.build();
}
}
| gnodet/aries | subsystem/subsystem-itests/src/test/java/org/apache/aries/subsystem/itests/defect/Aries1408Test.java | Java | apache-2.0 | 5,722 |
/*=========================================================================
* Copyright (c) 2010-2014 Pivotal Software, Inc. All Rights Reserved.
* This product is protected by U.S. and international copyright
* and intellectual property laws. Pivotal products are covered by
* one or more patents listed at http://www.pivotal.io/patents.
*=========================================================================
*/
package com.gemstone.gemfire.management.internal.configuration.callbacks;
import org.apache.logging.log4j.Logger;
import com.gemstone.gemfire.cache.EntryEvent;
import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
import com.gemstone.gemfire.distributed.internal.SharedConfiguration;
import com.gemstone.gemfire.internal.logging.LogService;
import com.gemstone.gemfire.management.internal.configuration.domain.Configuration;
/****
 * CacheListener on the ConfigRegion that writes configuration changes to the file system.
* @author bansods
*
*/
public class ConfigurationChangeListener extends CacheListenerAdapter<String, Configuration> {
private static final Logger logger = LogService.getLogger();
private final SharedConfiguration sharedConfig;
public ConfigurationChangeListener(SharedConfiguration sharedConfig) {
this.sharedConfig = sharedConfig;
}
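  /*
   * Illustrative note (not part of the original Geode source): a listener like this is
   * attached when the shared-configuration region is created, conceptually along the
   * lines of
   *
   *   regionFactory.addCacheListener(new ConfigurationChangeListener(sharedConfig));
   *
   * where "regionFactory" stands for the factory used to build the internal config region.
   */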
@Override
public void afterUpdate(EntryEvent<String, Configuration> event) {
super.afterUpdate(event);
writeToFileSystem(event);
}
@Override
public void afterCreate(EntryEvent<String, Configuration> event) {
super.afterCreate(event);
writeToFileSystem(event);
}
private void writeToFileSystem(EntryEvent<String, Configuration> event) {
Configuration newConfig = (Configuration)event.getNewValue();
try {
sharedConfig.writeConfig(newConfig);
} catch (Exception e) {
logger.info("Exception occurred while writing the configuration changes to the filesystem: {}", e.getMessage(), e);
}
}
}
| ameybarve15/incubator-geode | gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/configuration/callbacks/ConfigurationChangeListener.java | Java | apache-2.0 | 1,955 |
package integration
import (
"reflect"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
watchapi "k8s.io/apimachinery/pkg/watch"
kclientset "k8s.io/client-go/kubernetes"
kapi "k8s.io/kubernetes/pkg/apis/core"
kapiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
buildtestutil "github.com/openshift/origin/pkg/build/admission/testutil"
buildapi "github.com/openshift/origin/pkg/build/apis/build"
defaultsapi "github.com/openshift/origin/pkg/build/controller/build/apis/defaults"
overridesapi "github.com/openshift/origin/pkg/build/controller/build/apis/overrides"
buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset"
configapi "github.com/openshift/origin/pkg/cmd/server/apis/config"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
testutil "github.com/openshift/origin/test/util"
testserver "github.com/openshift/origin/test/util/server"
)
var buildPodAdmissionTestTimeout time.Duration = 30 * time.Second
func TestBuildDefaultGitHTTPProxy(t *testing.T) {
httpProxy := "http://my.test.proxy:12345"
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
GitHTTPProxy: httpProxy,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Source.Git.HTTPProxy; actual == nil || *actual != httpProxy {
t.Errorf("Resulting build did not get expected HTTP proxy: %v", actual)
}
}
func TestBuildDefaultGitHTTPSProxy(t *testing.T) {
httpsProxy := "https://my.test.proxy:12345"
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
GitHTTPSProxy: httpsProxy,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Source.Git.HTTPSProxy; actual == nil || *actual != httpsProxy {
t.Errorf("Resulting build did not get expected HTTPS proxy: %v", actual)
}
}
func TestBuildDefaultEnvironment(t *testing.T) {
env := []kapi.EnvVar{
{
Name: "VAR1",
Value: "VALUE1",
},
{
Name: "VAR2",
Value: "VALUE2",
},
}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
Env: env,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Strategy.DockerStrategy.Env; !reflect.DeepEqual(env, actual) {
t.Errorf("Resulting build did not get expected environment: %v", actual)
}
}
func TestBuildDefaultLabels(t *testing.T) {
labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
ImageLabels: labels,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Output.ImageLabels; !reflect.DeepEqual(labels, actual) {
t.Errorf("Resulting build did not get expected labels: %v", actual)
}
}
func TestBuildDefaultNodeSelectors(t *testing.T) {
selectors := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
NodeSelector: selectors,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
t.Errorf("Resulting pod did not get expected nodeselectors: %v", actual)
}
}
func TestBuildDefaultAnnotations(t *testing.T) {
annotations := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
Annotations: annotations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Annotations; strings.Compare(actual["KEY"], annotations["KEY"]) != 0 {
t.Errorf("Resulting pod did not get expected annotations: actual: %v, expected: %v", actual["KEY"], annotations["KEY"])
}
}
func TestBuildOverrideTolerations(t *testing.T) {
tolerations := []kapi.Toleration{
{
Key: "mykey1",
Value: "myvalue1",
Effect: "NoSchedule",
Operator: "Equal",
},
{
Key: "mykey2",
Value: "myvalue2",
Effect: "NoSchedule",
Operator: "Equal",
},
}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
Tolerations: tolerations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
for i, toleration := range tolerations {
tol := v1.Toleration{}
if err := kapiv1.Convert_core_Toleration_To_v1_Toleration(&toleration, &tol, nil); err != nil {
t.Errorf("Unable to convert core.Toleration to v1.Toleration: %v", err)
}
if !reflect.DeepEqual(pod.Spec.Tolerations[i], tol) {
t.Errorf("Resulting pod did not get expected tolerations, expected: %#v, actual: %#v", toleration, pod.Spec.Tolerations[i])
}
}
}
func TestBuildOverrideForcePull(t *testing.T) {
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
ForcePull: true,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if !build.Spec.Strategy.DockerStrategy.ForcePull {
t.Errorf("ForcePull was not set on resulting build")
}
}
func TestBuildOverrideForcePullCustomStrategy(t *testing.T) {
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
ForcePull: true,
})
defer fn()
build, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestCustomBuild())
if pod.Spec.Containers[0].ImagePullPolicy != v1.PullAlways {
t.Errorf("Pod ImagePullPolicy is not PullAlways")
}
if !build.Spec.Strategy.CustomStrategy.ForcePull {
t.Errorf("ForcePull was not set on resulting build")
}
}
func TestBuildOverrideLabels(t *testing.T) {
labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
ImageLabels: labels,
})
defer fn()
build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := build.Spec.Output.ImageLabels; !reflect.DeepEqual(labels, actual) {
t.Errorf("Resulting build did not get expected labels: %v", actual)
}
}
func TestBuildOverrideNodeSelectors(t *testing.T) {
selectors := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
NodeSelector: selectors,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
t.Errorf("Resulting build did not get expected nodeselectors: %v", actual)
}
}
func TestBuildOverrideAnnotations(t *testing.T) {
annotations := map[string]string{"KEY": "VALUE"}
oclient, kclientset, fn := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
Annotations: annotations,
})
defer fn()
_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
if actual := pod.Annotations; strings.Compare(actual["KEY"], annotations["KEY"]) != 0 {
t.Errorf("Resulting build did not get expected annotations: %v", actual)
}
}
func buildPodAdmissionTestCustomBuild() *buildapi.Build {
build := &buildapi.Build{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
buildapi.BuildConfigLabel: "mock-build-config",
buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel),
},
}}
build.Name = "test-custom-build"
build.Spec.Source.Git = &buildapi.GitBuildSource{URI: "http://test/src"}
build.Spec.Strategy.CustomStrategy = &buildapi.CustomBuildStrategy{}
build.Spec.Strategy.CustomStrategy.From.Kind = "DockerImage"
build.Spec.Strategy.CustomStrategy.From.Name = "test/image"
return build
}
func buildPodAdmissionTestDockerBuild() *buildapi.Build {
build := &buildapi.Build{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
buildapi.BuildConfigLabel: "mock-build-config",
buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel),
},
}}
build.Name = "test-build"
build.Spec.Source.Git = &buildapi.GitBuildSource{URI: "http://test/src"}
build.Spec.Strategy.DockerStrategy = &buildapi.DockerBuildStrategy{}
return build
}
func runBuildPodAdmissionTest(t *testing.T, client buildclient.Interface, kclientset kclientset.Interface, build *buildapi.Build) (*buildapi.Build, *v1.Pod) {
ns := testutil.Namespace()
_, err := client.Build().Builds(ns).Create(build)
if err != nil {
t.Fatalf("%v", err)
}
watchOpt := metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector(
"metadata.name",
buildapi.GetBuildPodName(build),
).String(),
}
podWatch, err := kclientset.Core().Pods(ns).Watch(watchOpt)
if err != nil {
t.Fatalf("%v", err)
}
type resultObjs struct {
build *buildapi.Build
pod *v1.Pod
}
result := make(chan resultObjs)
defer podWatch.Stop()
go func() {
for e := range podWatch.ResultChan() {
if e.Type == watchapi.Added {
pod, ok := e.Object.(*v1.Pod)
if !ok {
t.Fatalf("unexpected object: %v", e.Object)
}
build := (*buildtestutil.TestPod)(pod).GetBuild(t)
result <- resultObjs{build: build, pod: pod}
}
}
}()
select {
case <-time.After(buildPodAdmissionTestTimeout):
t.Fatalf("timed out after %v", buildPodAdmissionTestTimeout)
case objs := <-result:
return objs.build, objs.pod
}
return nil, nil
}
func setupBuildDefaultsAdmissionTest(t *testing.T, defaultsConfig *defaultsapi.BuildDefaultsConfig) (buildclient.Interface, kclientset.Interface, func()) {
return setupBuildPodAdmissionTest(t, map[string]*configapi.AdmissionPluginConfig{
"BuildDefaults": {
Configuration: defaultsConfig,
},
})
}
func setupBuildOverridesAdmissionTest(t *testing.T, overridesConfig *overridesapi.BuildOverridesConfig) (buildclient.Interface, kclientset.Interface, func()) {
return setupBuildPodAdmissionTest(t, map[string]*configapi.AdmissionPluginConfig{
"BuildOverrides": {
Configuration: overridesConfig,
},
})
}
func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]*configapi.AdmissionPluginConfig) (buildclient.Interface, kclientset.Interface, func()) {
master, err := testserver.DefaultMasterOptions()
if err != nil {
t.Fatal(err)
}
master.AdmissionConfig.PluginConfig = pluginConfig
clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(master)
if err != nil {
t.Fatal(err)
}
internalClusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
if err != nil {
t.Fatal(err)
}
clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
if err != nil {
t.Fatal(err)
}
clusterAdminKubeClientset, err := kclientset.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&v1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: testutil.Namespace()},
})
if err != nil {
t.Fatalf("%v", err)
}
err = testserver.WaitForServiceAccounts(
internalClusterAdminKubeClientset,
testutil.Namespace(),
[]string{
bootstrappolicy.BuilderServiceAccountName,
bootstrappolicy.DefaultServiceAccountName,
})
if err != nil {
t.Fatalf("%v", err)
}
return buildclient.NewForConfigOrDie(clientConfig), clusterAdminKubeClientset, func() {
testserver.CleanupMasterEtcd(t, master)
}
}
| wjiangjay/origin | test/integration/buildpod_admission_test.go | GO | apache-2.0 | 11,768 |
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package securityhub
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/restjson"
)
const opAcceptInvitation = "AcceptInvitation"
// AcceptInvitationRequest generates a "aws/request.Request" representing the
// client's request for the AcceptInvitation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See AcceptInvitation for more information on using the AcceptInvitation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the AcceptInvitationRequest method.
// req, resp := client.AcceptInvitationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/AcceptInvitation
func (c *SecurityHub) AcceptInvitationRequest(input *AcceptInvitationInput) (req *request.Request, output *AcceptInvitationOutput) {
op := &request.Operation{
Name: opAcceptInvitation,
HTTPMethod: "POST",
HTTPPath: "/master",
}
if input == nil {
input = &AcceptInvitationInput{}
}
output = &AcceptInvitationOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// AcceptInvitation API operation for AWS SecurityHub.
//
// Accepts the invitation to be monitored by a master SecurityHub account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation AcceptInvitation for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/AcceptInvitation
func (c *SecurityHub) AcceptInvitation(input *AcceptInvitationInput) (*AcceptInvitationOutput, error) {
req, out := c.AcceptInvitationRequest(input)
return out, req.Send()
}
// AcceptInvitationWithContext is the same as AcceptInvitation with the addition of
// the ability to pass a context and additional request options.
//
// See AcceptInvitation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) AcceptInvitationWithContext(ctx aws.Context, input *AcceptInvitationInput, opts ...request.Option) (*AcceptInvitationOutput, error) {
req, out := c.AcceptInvitationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opBatchDisableStandards = "BatchDisableStandards"
// BatchDisableStandardsRequest generates a "aws/request.Request" representing the
// client's request for the BatchDisableStandards operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See BatchDisableStandards for more information on using the BatchDisableStandards
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the BatchDisableStandardsRequest method.
// req, resp := client.BatchDisableStandardsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchDisableStandards
func (c *SecurityHub) BatchDisableStandardsRequest(input *BatchDisableStandardsInput) (req *request.Request, output *BatchDisableStandardsOutput) {
op := &request.Operation{
Name: opBatchDisableStandards,
HTTPMethod: "POST",
HTTPPath: "/standards/deregister",
}
if input == nil {
input = &BatchDisableStandardsInput{}
}
output = &BatchDisableStandardsOutput{}
req = c.newRequest(op, input, output)
return
}
// BatchDisableStandards API operation for AWS SecurityHub.
//
// Disables the standards specified by the standards subscription ARNs. In the
// context of Security Hub, supported standards (for example, CIS AWS Foundations)
// are automated and continuous checks that help determine your compliance status
// against security industry (including AWS) best practices.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation BatchDisableStandards for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchDisableStandards
func (c *SecurityHub) BatchDisableStandards(input *BatchDisableStandardsInput) (*BatchDisableStandardsOutput, error) {
req, out := c.BatchDisableStandardsRequest(input)
return out, req.Send()
}
// BatchDisableStandardsWithContext is the same as BatchDisableStandards with the addition of
// the ability to pass a context and additional request options.
//
// See BatchDisableStandards for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) BatchDisableStandardsWithContext(ctx aws.Context, input *BatchDisableStandardsInput, opts ...request.Option) (*BatchDisableStandardsOutput, error) {
req, out := c.BatchDisableStandardsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opBatchEnableStandards = "BatchEnableStandards"
// BatchEnableStandardsRequest generates a "aws/request.Request" representing the
// client's request for the BatchEnableStandards operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See BatchEnableStandards for more information on using the BatchEnableStandards
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the BatchEnableStandardsRequest method.
// req, resp := client.BatchEnableStandardsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchEnableStandards
func (c *SecurityHub) BatchEnableStandardsRequest(input *BatchEnableStandardsInput) (req *request.Request, output *BatchEnableStandardsOutput) {
op := &request.Operation{
Name: opBatchEnableStandards,
HTTPMethod: "POST",
HTTPPath: "/standards/register",
}
if input == nil {
input = &BatchEnableStandardsInput{}
}
output = &BatchEnableStandardsOutput{}
req = c.newRequest(op, input, output)
return
}
// BatchEnableStandards API operation for AWS SecurityHub.
//
// Enables the standards specified by the standards ARNs. In the context of
// Security Hub, supported standards (for example, CIS AWS Foundations) are
// automated and continuous checks that help determine your compliance status
// against security industry (including AWS) best practices.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation BatchEnableStandards for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchEnableStandards
func (c *SecurityHub) BatchEnableStandards(input *BatchEnableStandardsInput) (*BatchEnableStandardsOutput, error) {
req, out := c.BatchEnableStandardsRequest(input)
return out, req.Send()
}
// BatchEnableStandardsWithContext is the same as BatchEnableStandards with the addition of
// the ability to pass a context and additional request options.
//
// See BatchEnableStandards for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) BatchEnableStandardsWithContext(ctx aws.Context, input *BatchEnableStandardsInput, opts ...request.Option) (*BatchEnableStandardsOutput, error) {
req, out := c.BatchEnableStandardsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
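// Editor's note: the following function is an illustrative sketch added for
// documentation purposes and is not part of the generated API. It shows one
// way a caller might subscribe the account to a standard. The CIS AWS
// Foundations ruleset ARN below is an assumption; confirm the exact ARN for
// your partition in the Security Hub documentation before using it.
func exampleBatchEnableStandardsSketch(svc *SecurityHub) error {
	input := &BatchEnableStandardsInput{
		StandardsSubscriptionRequests: []*StandardsSubscriptionRequest{
			{
				// Assumed/illustrative standard ARN.
				StandardsArn: aws.String("arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0"),
			},
		},
	}
	_, err := svc.BatchEnableStandardsWithContext(aws.BackgroundContext(), input)
	return err
}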
const opBatchImportFindings = "BatchImportFindings"
// BatchImportFindingsRequest generates a "aws/request.Request" representing the
// client's request for the BatchImportFindings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See BatchImportFindings for more information on using the BatchImportFindings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the BatchImportFindingsRequest method.
// req, resp := client.BatchImportFindingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchImportFindings
func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput) (req *request.Request, output *BatchImportFindingsOutput) {
op := &request.Operation{
Name: opBatchImportFindings,
HTTPMethod: "POST",
HTTPPath: "/findings/import",
}
if input == nil {
input = &BatchImportFindingsInput{}
}
output = &BatchImportFindingsOutput{}
req = c.newRequest(op, input, output)
return
}
// BatchImportFindings API operation for AWS SecurityHub.
//
// Imports security findings that are generated by integrated third-party products
// into Security Hub.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation BatchImportFindings for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchImportFindings
func (c *SecurityHub) BatchImportFindings(input *BatchImportFindingsInput) (*BatchImportFindingsOutput, error) {
req, out := c.BatchImportFindingsRequest(input)
return out, req.Send()
}
// BatchImportFindingsWithContext is the same as BatchImportFindings with the addition of
// the ability to pass a context and additional request options.
//
// See BatchImportFindings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) BatchImportFindingsWithContext(ctx aws.Context, input *BatchImportFindingsInput, opts ...request.Option) (*BatchImportFindingsOutput, error) {
req, out := c.BatchImportFindingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
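// Editor's note: illustrative sketch, not generated code. It builds one
// minimal finding in the AWS Security Finding Format and imports it. Account
// IDs, ARNs, and timestamps are placeholders; the schema version string and
// type taxonomy value are assumptions based on the 2018-10-08 finding format.
func exampleBatchImportFindingsSketch(svc *SecurityHub) (*BatchImportFindingsOutput, error) {
	finding := &AwsSecurityFinding{
		AwsAccountId:  aws.String("111122223333"),
		CreatedAt:     aws.String("2019-01-01T00:00:00Z"),
		UpdatedAt:     aws.String("2019-01-01T00:00:00Z"),
		Description:   aws.String("Example finding emitted by a custom provider."),
		GeneratorId:   aws.String("custom-generator/example"),
		Id:            aws.String("example-finding-0001"),
		ProductArn:    aws.String("arn:aws:securityhub:us-west-2:111122223333:product/111122223333/default"),
		SchemaVersion: aws.String("2018-10-08"),
		Title:         aws.String("Example finding"),
		Types:         []*string{aws.String("Software and Configuration Checks/Vulnerabilities")},
		Severity:      &Severity{Normalized: aws.Int64(30)},
		Resources: []*Resource{{
			Id:   aws.String("arn:aws:ec2:us-west-2:111122223333:instance/i-0123456789abcdef0"),
			Type: aws.String("AwsEc2Instance"),
		}},
	}
	return svc.BatchImportFindingsWithContext(aws.BackgroundContext(), &BatchImportFindingsInput{
		Findings: []*AwsSecurityFinding{finding},
	})
}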
const opCreateInsight = "CreateInsight"
// CreateInsightRequest generates a "aws/request.Request" representing the
// client's request for the CreateInsight operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateInsight for more information on using the CreateInsight
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the CreateInsightRequest method.
// req, resp := client.CreateInsightRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateInsight
func (c *SecurityHub) CreateInsightRequest(input *CreateInsightInput) (req *request.Request, output *CreateInsightOutput) {
op := &request.Operation{
Name: opCreateInsight,
HTTPMethod: "POST",
HTTPPath: "/insights",
}
if input == nil {
input = &CreateInsightInput{}
}
output = &CreateInsightOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateInsight API operation for AWS SecurityHub.
//
// Creates an insight, which is a consolidation of findings that identifies
// a security area that requires attention or intervention.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation CreateInsight for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateInsight
func (c *SecurityHub) CreateInsight(input *CreateInsightInput) (*CreateInsightOutput, error) {
req, out := c.CreateInsightRequest(input)
return out, req.Send()
}
// CreateInsightWithContext is the same as CreateInsight with the addition of
// the ability to pass a context and additional request options.
//
// See CreateInsight for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) CreateInsightWithContext(ctx aws.Context, input *CreateInsightInput, opts ...request.Option) (*CreateInsightOutput, error) {
req, out := c.CreateInsightRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
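// Editor's note: illustrative sketch, not generated code. It creates an
// insight over findings whose workflow state is NEW, grouped by resource ID.
// The "EQUALS" comparison and the "ResourceId" group-by attribute are assumed
// to be accepted values for this API revision.
func exampleCreateInsightSketch(svc *SecurityHub) (*string, error) {
	out, err := svc.CreateInsightWithContext(aws.BackgroundContext(), &CreateInsightInput{
		Name:             aws.String("example-new-findings-by-resource"),
		GroupByAttribute: aws.String("ResourceId"),
		Filters: &AwsSecurityFindingFilters{
			WorkflowState: []*StringFilter{
				{Comparison: aws.String("EQUALS"), Value: aws.String("NEW")},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	// The returned ARN is what DeleteInsight and GetInsightResults expect.
	return out.InsightArn, nil
}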
const opCreateMembers = "CreateMembers"
// CreateMembersRequest generates a "aws/request.Request" representing the
// client's request for the CreateMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateMembers for more information on using the CreateMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the CreateMembersRequest method.
// req, resp := client.CreateMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateMembers
func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *request.Request, output *CreateMembersOutput) {
op := &request.Operation{
Name: opCreateMembers,
HTTPMethod: "POST",
HTTPPath: "/members",
}
if input == nil {
input = &CreateMembersInput{}
}
output = &CreateMembersOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateMembers API operation for AWS SecurityHub.
//
// Creates Security Hub member accounts that are associated with the current
// AWS account, which must have Security Hub enabled and becomes the master
// Security Hub account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation CreateMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateMembers
func (c *SecurityHub) CreateMembers(input *CreateMembersInput) (*CreateMembersOutput, error) {
req, out := c.CreateMembersRequest(input)
return out, req.Send()
}
// CreateMembersWithContext is the same as CreateMembers with the addition of
// the ability to pass a context and additional request options.
//
// See CreateMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) CreateMembersWithContext(ctx aws.Context, input *CreateMembersInput, opts ...request.Option) (*CreateMembersOutput, error) {
req, out := c.CreateMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
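// Editor's note: illustrative sketch, not generated code. The account ID and
// email address are placeholders. Creating a member record is only the first
// step; the member account must still be invited and must accept the
// invitation before findings are shared with the master account.
func exampleCreateMembersSketch(svc *SecurityHub) (*CreateMembersOutput, error) {
	return svc.CreateMembersWithContext(aws.BackgroundContext(), &CreateMembersInput{
		AccountDetails: []*AccountDetails{
			{AccountId: aws.String("444455556666"), Email: aws.String("[email protected]")},
		},
	})
}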
const opDeclineInvitations = "DeclineInvitations"
// DeclineInvitationsRequest generates a "aws/request.Request" representing the
// client's request for the DeclineInvitations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeclineInvitations for more information on using the DeclineInvitations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeclineInvitationsRequest method.
// req, resp := client.DeclineInvitationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeclineInvitations
func (c *SecurityHub) DeclineInvitationsRequest(input *DeclineInvitationsInput) (req *request.Request, output *DeclineInvitationsOutput) {
op := &request.Operation{
Name: opDeclineInvitations,
HTTPMethod: "POST",
HTTPPath: "/invitations/decline",
}
if input == nil {
input = &DeclineInvitationsInput{}
}
output = &DeclineInvitationsOutput{}
req = c.newRequest(op, input, output)
return
}
// DeclineInvitations API operation for AWS SecurityHub.
//
// Declines invitations that are sent to this AWS account (invitee) by the AWS
// accounts (inviters) that are specified by the account IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DeclineInvitations for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeclineInvitations
func (c *SecurityHub) DeclineInvitations(input *DeclineInvitationsInput) (*DeclineInvitationsOutput, error) {
req, out := c.DeclineInvitationsRequest(input)
return out, req.Send()
}
// DeclineInvitationsWithContext is the same as DeclineInvitations with the addition of
// the ability to pass a context and additional request options.
//
// See DeclineInvitations for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DeclineInvitationsWithContext(ctx aws.Context, input *DeclineInvitationsInput, opts ...request.Option) (*DeclineInvitationsOutput, error) {
req, out := c.DeclineInvitationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
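// Editor's note: illustrative sketch, not generated code. The same
// []*string account-ID shape is used by DeleteInvitations, DeleteMembers,
// and DisassociateMembers below; only the operation being called changes.
func exampleDeclineInvitationsSketch(svc *SecurityHub, inviterAccountIDs []string) (*DeclineInvitationsOutput, error) {
	return svc.DeclineInvitationsWithContext(aws.BackgroundContext(), &DeclineInvitationsInput{
		AccountIds: aws.StringSlice(inviterAccountIDs),
	})
}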
const opDeleteInsight = "DeleteInsight"
// DeleteInsightRequest generates a "aws/request.Request" representing the
// client's request for the DeleteInsight operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteInsight for more information on using the DeleteInsight
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteInsightRequest method.
// req, resp := client.DeleteInsightRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInsight
func (c *SecurityHub) DeleteInsightRequest(input *DeleteInsightInput) (req *request.Request, output *DeleteInsightOutput) {
op := &request.Operation{
Name: opDeleteInsight,
HTTPMethod: "DELETE",
HTTPPath: "/insights/{InsightArn+}",
}
if input == nil {
input = &DeleteInsightInput{}
}
output = &DeleteInsightOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteInsight API operation for AWS SecurityHub.
//
// Deletes an insight that is specified by the insight ARN.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DeleteInsight for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInsight
func (c *SecurityHub) DeleteInsight(input *DeleteInsightInput) (*DeleteInsightOutput, error) {
req, out := c.DeleteInsightRequest(input)
return out, req.Send()
}
// DeleteInsightWithContext is the same as DeleteInsight with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteInsight for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DeleteInsightWithContext(ctx aws.Context, input *DeleteInsightInput, opts ...request.Option) (*DeleteInsightOutput, error) {
req, out := c.DeleteInsightRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteInvitations = "DeleteInvitations"
// DeleteInvitationsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteInvitations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteInvitations for more information on using the DeleteInvitations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteInvitationsRequest method.
// req, resp := client.DeleteInvitationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInvitations
func (c *SecurityHub) DeleteInvitationsRequest(input *DeleteInvitationsInput) (req *request.Request, output *DeleteInvitationsOutput) {
op := &request.Operation{
Name: opDeleteInvitations,
HTTPMethod: "POST",
HTTPPath: "/invitations/delete",
}
if input == nil {
input = &DeleteInvitationsInput{}
}
output = &DeleteInvitationsOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteInvitations API operation for AWS SecurityHub.
//
// Deletes invitations that are sent to this AWS account (invitee) by the AWS
// accounts (inviters) that are specified by their account IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DeleteInvitations for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInvitations
func (c *SecurityHub) DeleteInvitations(input *DeleteInvitationsInput) (*DeleteInvitationsOutput, error) {
req, out := c.DeleteInvitationsRequest(input)
return out, req.Send()
}
// DeleteInvitationsWithContext is the same as DeleteInvitations with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteInvitations for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DeleteInvitationsWithContext(ctx aws.Context, input *DeleteInvitationsInput, opts ...request.Option) (*DeleteInvitationsOutput, error) {
req, out := c.DeleteInvitationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteMembers = "DeleteMembers"
// DeleteMembersRequest generates a "aws/request.Request" representing the
// client's request for the DeleteMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteMembers for more information on using the DeleteMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteMembersRequest method.
// req, resp := client.DeleteMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteMembers
func (c *SecurityHub) DeleteMembersRequest(input *DeleteMembersInput) (req *request.Request, output *DeleteMembersOutput) {
op := &request.Operation{
Name: opDeleteMembers,
HTTPMethod: "POST",
HTTPPath: "/members/delete",
}
if input == nil {
input = &DeleteMembersInput{}
}
output = &DeleteMembersOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteMembers API operation for AWS SecurityHub.
//
// Deletes the Security Hub member accounts that are specified by the account
// IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DeleteMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteMembers
func (c *SecurityHub) DeleteMembers(input *DeleteMembersInput) (*DeleteMembersOutput, error) {
req, out := c.DeleteMembersRequest(input)
return out, req.Send()
}
// DeleteMembersWithContext is the same as DeleteMembers with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DeleteMembersWithContext(ctx aws.Context, input *DeleteMembersInput, opts ...request.Option) (*DeleteMembersOutput, error) {
req, out := c.DeleteMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDisableImportFindingsForProduct = "DisableImportFindingsForProduct"
// DisableImportFindingsForProductRequest generates a "aws/request.Request" representing the
// client's request for the DisableImportFindingsForProduct operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DisableImportFindingsForProduct for more information on using the DisableImportFindingsForProduct
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DisableImportFindingsForProductRequest method.
// req, resp := client.DisableImportFindingsForProductRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct
func (c *SecurityHub) DisableImportFindingsForProductRequest(input *DisableImportFindingsForProductInput) (req *request.Request, output *DisableImportFindingsForProductOutput) {
op := &request.Operation{
Name: opDisableImportFindingsForProduct,
HTTPMethod: "DELETE",
HTTPPath: "/productSubscriptions/{ProductSubscriptionArn+}",
}
if input == nil {
input = &DisableImportFindingsForProductInput{}
}
output = &DisableImportFindingsForProductOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DisableImportFindingsForProduct API operation for AWS SecurityHub.
//
// Stops the import of findings that are generated by integrated third-party
// providers into Security Hub.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DisableImportFindingsForProduct for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableImportFindingsForProduct
func (c *SecurityHub) DisableImportFindingsForProduct(input *DisableImportFindingsForProductInput) (*DisableImportFindingsForProductOutput, error) {
req, out := c.DisableImportFindingsForProductRequest(input)
return out, req.Send()
}
// DisableImportFindingsForProductWithContext is the same as DisableImportFindingsForProduct with the addition of
// the ability to pass a context and additional request options.
//
// See DisableImportFindingsForProduct for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DisableImportFindingsForProductWithContext(ctx aws.Context, input *DisableImportFindingsForProductInput, opts ...request.Option) (*DisableImportFindingsForProductOutput, error) {
req, out := c.DisableImportFindingsForProductRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
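// Editor's note: illustrative sketch, not generated code. Note that the
// argument is the product *subscription* ARN returned by
// EnableImportFindingsForProduct, not the product ARN itself.
func exampleDisableImportFindingsForProductSketch(svc *SecurityHub, productSubscriptionArn string) error {
	_, err := svc.DisableImportFindingsForProductWithContext(aws.BackgroundContext(), &DisableImportFindingsForProductInput{
		ProductSubscriptionArn: aws.String(productSubscriptionArn),
	})
	return err
}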
const opDisableSecurityHub = "DisableSecurityHub"
// DisableSecurityHubRequest generates a "aws/request.Request" representing the
// client's request for the DisableSecurityHub operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DisableSecurityHub for more information on using the DisableSecurityHub
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DisableSecurityHubRequest method.
// req, resp := client.DisableSecurityHubRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub
func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) (req *request.Request, output *DisableSecurityHubOutput) {
op := &request.Operation{
Name: opDisableSecurityHub,
HTTPMethod: "DELETE",
HTTPPath: "/accounts",
}
if input == nil {
input = &DisableSecurityHubInput{}
}
output = &DisableSecurityHubOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DisableSecurityHub API operation for AWS SecurityHub.
//
// Disables the AWS Security Hub service.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DisableSecurityHub for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub
func (c *SecurityHub) DisableSecurityHub(input *DisableSecurityHubInput) (*DisableSecurityHubOutput, error) {
req, out := c.DisableSecurityHubRequest(input)
return out, req.Send()
}
// DisableSecurityHubWithContext is the same as DisableSecurityHub with the addition of
// the ability to pass a context and additional request options.
//
// See DisableSecurityHub for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DisableSecurityHubWithContext(ctx aws.Context, input *DisableSecurityHubInput, opts ...request.Option) (*DisableSecurityHubOutput, error) {
req, out := c.DisableSecurityHubRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDisassociateFromMasterAccount = "DisassociateFromMasterAccount"
// DisassociateFromMasterAccountRequest generates a "aws/request.Request" representing the
// client's request for the DisassociateFromMasterAccount operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DisassociateFromMasterAccount for more information on using the DisassociateFromMasterAccount
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DisassociateFromMasterAccountRequest method.
// req, resp := client.DisassociateFromMasterAccountRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount
func (c *SecurityHub) DisassociateFromMasterAccountRequest(input *DisassociateFromMasterAccountInput) (req *request.Request, output *DisassociateFromMasterAccountOutput) {
op := &request.Operation{
Name: opDisassociateFromMasterAccount,
HTTPMethod: "POST",
HTTPPath: "/master/disassociate",
}
if input == nil {
input = &DisassociateFromMasterAccountInput{}
}
output = &DisassociateFromMasterAccountOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DisassociateFromMasterAccount API operation for AWS SecurityHub.
//
// Disassociates the current Security Hub member account from its master account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DisassociateFromMasterAccount for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount
func (c *SecurityHub) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) {
req, out := c.DisassociateFromMasterAccountRequest(input)
return out, req.Send()
}
// DisassociateFromMasterAccountWithContext is the same as DisassociateFromMasterAccount with the addition of
// the ability to pass a context and additional request options.
//
// See DisassociateFromMasterAccount for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DisassociateFromMasterAccountWithContext(ctx aws.Context, input *DisassociateFromMasterAccountInput, opts ...request.Option) (*DisassociateFromMasterAccountOutput, error) {
req, out := c.DisassociateFromMasterAccountRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDisassociateMembers = "DisassociateMembers"
// DisassociateMembersRequest generates a "aws/request.Request" representing the
// client's request for the DisassociateMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DisassociateMembers for more information on using the DisassociateMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DisassociateMembersRequest method.
// req, resp := client.DisassociateMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers
func (c *SecurityHub) DisassociateMembersRequest(input *DisassociateMembersInput) (req *request.Request, output *DisassociateMembersOutput) {
op := &request.Operation{
Name: opDisassociateMembers,
HTTPMethod: "POST",
HTTPPath: "/members/disassociate",
}
if input == nil {
input = &DisassociateMembersInput{}
}
output = &DisassociateMembersOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DisassociateMembers API operation for AWS SecurityHub.
//
// Disassociates the Security Hub member accounts that are specified by the
// account IDs from their master account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation DisassociateMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers
func (c *SecurityHub) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) {
req, out := c.DisassociateMembersRequest(input)
return out, req.Send()
}
// DisassociateMembersWithContext is the same as DisassociateMembers with the addition of
// the ability to pass a context and additional request options.
//
// See DisassociateMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) DisassociateMembersWithContext(ctx aws.Context, input *DisassociateMembersInput, opts ...request.Option) (*DisassociateMembersOutput, error) {
req, out := c.DisassociateMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opEnableImportFindingsForProduct = "EnableImportFindingsForProduct"
// EnableImportFindingsForProductRequest generates a "aws/request.Request" representing the
// client's request for the EnableImportFindingsForProduct operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See EnableImportFindingsForProduct for more information on using the EnableImportFindingsForProduct
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the EnableImportFindingsForProductRequest method.
// req, resp := client.EnableImportFindingsForProductRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
func (c *SecurityHub) EnableImportFindingsForProductRequest(input *EnableImportFindingsForProductInput) (req *request.Request, output *EnableImportFindingsForProductOutput) {
op := &request.Operation{
Name: opEnableImportFindingsForProduct,
HTTPMethod: "POST",
HTTPPath: "/productSubscriptions",
}
if input == nil {
input = &EnableImportFindingsForProductInput{}
}
output = &EnableImportFindingsForProductOutput{}
req = c.newRequest(op, input, output)
return
}
// EnableImportFindingsForProduct API operation for AWS SecurityHub.
//
// Enables you to import findings generated by integrated third-party providers
// into Security Hub.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation EnableImportFindingsForProduct for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableImportFindingsForProduct
func (c *SecurityHub) EnableImportFindingsForProduct(input *EnableImportFindingsForProductInput) (*EnableImportFindingsForProductOutput, error) {
req, out := c.EnableImportFindingsForProductRequest(input)
return out, req.Send()
}
// EnableImportFindingsForProductWithContext is the same as EnableImportFindingsForProduct with the addition of
// the ability to pass a context and additional request options.
//
// See EnableImportFindingsForProduct for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) EnableImportFindingsForProductWithContext(ctx aws.Context, input *EnableImportFindingsForProductInput, opts ...request.Option) (*EnableImportFindingsForProductOutput, error) {
req, out := c.EnableImportFindingsForProductRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
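// Editor's note: illustrative sketch, not generated code. The product ARN is
// a placeholder; real values are published in the provider's Security Hub
// integration documentation.
func exampleEnableImportFindingsForProductSketch(svc *SecurityHub) (*string, error) {
	out, err := svc.EnableImportFindingsForProductWithContext(aws.BackgroundContext(), &EnableImportFindingsForProductInput{
		ProductArn: aws.String("arn:aws:securityhub:us-west-2::product/example-provider/example-product"),
	})
	if err != nil {
		return nil, err
	}
	// Keep the returned subscription ARN; DisableImportFindingsForProduct expects it.
	return out.ProductSubscriptionArn, nil
}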
const opEnableSecurityHub = "EnableSecurityHub"
// EnableSecurityHubRequest generates a "aws/request.Request" representing the
// client's request for the EnableSecurityHub operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See EnableSecurityHub for more information on using the EnableSecurityHub
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the EnableSecurityHubRequest method.
// req, resp := client.EnableSecurityHubRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (req *request.Request, output *EnableSecurityHubOutput) {
op := &request.Operation{
Name: opEnableSecurityHub,
HTTPMethod: "POST",
HTTPPath: "/accounts",
}
if input == nil {
input = &EnableSecurityHubInput{}
}
output = &EnableSecurityHubOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// EnableSecurityHub API operation for AWS SecurityHub.
//
// Enables the AWS Security Hub service.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation EnableSecurityHub for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub
func (c *SecurityHub) EnableSecurityHub(input *EnableSecurityHubInput) (*EnableSecurityHubOutput, error) {
req, out := c.EnableSecurityHubRequest(input)
return out, req.Send()
}
// EnableSecurityHubWithContext is the same as EnableSecurityHub with the addition of
// the ability to pass a context and additional request options.
//
// See EnableSecurityHub for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) EnableSecurityHubWithContext(ctx aws.Context, input *EnableSecurityHubInput, opts ...request.Option) (*EnableSecurityHubOutput, error) {
req, out := c.EnableSecurityHubRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetEnabledStandards = "GetEnabledStandards"
// GetEnabledStandardsRequest generates a "aws/request.Request" representing the
// client's request for the GetEnabledStandards operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetEnabledStandards for more information on using the GetEnabledStandards
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the GetEnabledStandardsRequest method.
// req, resp := client.GetEnabledStandardsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards
func (c *SecurityHub) GetEnabledStandardsRequest(input *GetEnabledStandardsInput) (req *request.Request, output *GetEnabledStandardsOutput) {
op := &request.Operation{
Name: opGetEnabledStandards,
HTTPMethod: "POST",
HTTPPath: "/standards/get",
}
if input == nil {
input = &GetEnabledStandardsInput{}
}
output = &GetEnabledStandardsOutput{}
req = c.newRequest(op, input, output)
return
}
// GetEnabledStandards API operation for AWS SecurityHub.
//
// Lists and describes enabled standards.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetEnabledStandards for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetEnabledStandards
func (c *SecurityHub) GetEnabledStandards(input *GetEnabledStandardsInput) (*GetEnabledStandardsOutput, error) {
req, out := c.GetEnabledStandardsRequest(input)
return out, req.Send()
}
// GetEnabledStandardsWithContext is the same as GetEnabledStandards with the addition of
// the ability to pass a context and additional request options.
//
// See GetEnabledStandards for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetEnabledStandardsWithContext(ctx aws.Context, input *GetEnabledStandardsInput, opts ...request.Option) (*GetEnabledStandardsOutput, error) {
req, out := c.GetEnabledStandardsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
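// Editor's note: illustrative sketch, not generated code. Passing no
// subscription ARNs returns every enabled standard for the account;
// MaxResults is optional and shown only to illustrate the paging fields.
func exampleGetEnabledStandardsSketch(svc *SecurityHub) ([]*StandardsSubscription, error) {
	out, err := svc.GetEnabledStandardsWithContext(aws.BackgroundContext(), &GetEnabledStandardsInput{
		MaxResults: aws.Int64(25),
	})
	if err != nil {
		return nil, err
	}
	return out.StandardsSubscriptions, nil
}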
const opGetFindings = "GetFindings"
// GetFindingsRequest generates a "aws/request.Request" representing the
// client's request for the GetFindings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetFindings for more information on using the GetFindings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the GetFindingsRequest method.
// req, resp := client.GetFindingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings
func (c *SecurityHub) GetFindingsRequest(input *GetFindingsInput) (req *request.Request, output *GetFindingsOutput) {
op := &request.Operation{
Name: opGetFindings,
HTTPMethod: "POST",
HTTPPath: "/findings",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &GetFindingsInput{}
}
output = &GetFindingsOutput{}
req = c.newRequest(op, input, output)
return
}
// GetFindings API operation for AWS SecurityHub.
//
// Lists and describes Security Hub-aggregated findings that are specified by
// filter attributes.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetFindings for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetFindings
func (c *SecurityHub) GetFindings(input *GetFindingsInput) (*GetFindingsOutput, error) {
req, out := c.GetFindingsRequest(input)
return out, req.Send()
}
// GetFindingsWithContext is the same as GetFindings with the addition of
// the ability to pass a context and additional request options.
//
// See GetFindings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetFindingsWithContext(ctx aws.Context, input *GetFindingsInput, opts ...request.Option) (*GetFindingsOutput, error) {
req, out := c.GetFindingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// GetFindingsPages iterates over the pages of a GetFindings operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See GetFindings method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a GetFindings operation.
// pageNum := 0
// err := client.GetFindingsPages(params,
// func(page *GetFindingsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *SecurityHub) GetFindingsPages(input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool) error {
return c.GetFindingsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// GetFindingsPagesWithContext same as GetFindingsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetFindingsPagesWithContext(ctx aws.Context, input *GetFindingsInput, fn func(*GetFindingsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *GetFindingsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.GetFindingsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*GetFindingsOutput), !p.HasNextPage())
}
return p.Err()
}
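// Editor's note: illustrative sketch, not generated code. It collects
// findings with a normalized severity of at least 70 across all pages using
// GetFindingsPagesWithContext; the threshold and MaxResults value are
// arbitrary examples.
func exampleCollectHighSeverityFindingsSketch(ctx aws.Context, svc *SecurityHub) ([]*AwsSecurityFinding, error) {
	var findings []*AwsSecurityFinding
	input := &GetFindingsInput{
		Filters: &AwsSecurityFindingFilters{
			SeverityNormalized: []*NumberFilter{{Gte: aws.Float64(70)}},
		},
		MaxResults: aws.Int64(100),
	}
	err := svc.GetFindingsPagesWithContext(ctx, input, func(page *GetFindingsOutput, lastPage bool) bool {
		findings = append(findings, page.Findings...)
		return true // keep paging until NextToken is exhausted
	})
	return findings, err
}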
const opGetInsightResults = "GetInsightResults"
// GetInsightResultsRequest generates a "aws/request.Request" representing the
// client's request for the GetInsightResults operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetInsightResults for more information on using the GetInsightResults
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the GetInsightResultsRequest method.
// req, resp := client.GetInsightResultsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (req *request.Request, output *GetInsightResultsOutput) {
op := &request.Operation{
Name: opGetInsightResults,
HTTPMethod: "GET",
HTTPPath: "/insights/results/{InsightArn+}",
}
if input == nil {
input = &GetInsightResultsInput{}
}
output = &GetInsightResultsOutput{}
req = c.newRequest(op, input, output)
return
}
// GetInsightResults API operation for AWS SecurityHub.
//
// Lists the results of the Security Hub insight specified by the insight ARN.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetInsightResults for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults
func (c *SecurityHub) GetInsightResults(input *GetInsightResultsInput) (*GetInsightResultsOutput, error) {
req, out := c.GetInsightResultsRequest(input)
return out, req.Send()
}
// GetInsightResultsWithContext is the same as GetInsightResults with the addition of
// the ability to pass a context and additional request options.
//
// See GetInsightResults for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetInsightResultsWithContext(ctx aws.Context, input *GetInsightResultsInput, opts ...request.Option) (*GetInsightResultsOutput, error) {
req, out := c.GetInsightResultsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
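// A hedged usage sketch for the operation above: it assumes a configured
// *SecurityHub client named "client", that InsightArn is the required input
// field backing the {InsightArn+} path parameter, and that InsightResults is
// the output field; the ARN value is hypothetical.
//
//    out, err := client.GetInsightResults(&GetInsightResultsInput{
//        InsightArn: aws.String("arn:aws:securityhub:us-west-2:123456789012:insight/123456789012/custom/example"),
//    })
//    if err != nil {
//        // handle the error (for example, ResourceNotFoundException)
//    } else {
//        fmt.Println(out.InsightResults) // assumed output field
//    }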
const opGetInsights = "GetInsights"
// GetInsightsRequest generates a "aws/request.Request" representing the
// client's request for the GetInsights operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetInsights for more information on using the GetInsights
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetInsightsRequest method.
// req, resp := client.GetInsightsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights
func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request.Request, output *GetInsightsOutput) {
op := &request.Operation{
Name: opGetInsights,
HTTPMethod: "POST",
HTTPPath: "/insights/get",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &GetInsightsInput{}
}
output = &GetInsightsOutput{}
req = c.newRequest(op, input, output)
return
}
// GetInsights API operation for AWS SecurityHub.
//
// Lists and describes insights that are specified by insight ARNs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetInsights for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights
func (c *SecurityHub) GetInsights(input *GetInsightsInput) (*GetInsightsOutput, error) {
req, out := c.GetInsightsRequest(input)
return out, req.Send()
}
// GetInsightsWithContext is the same as GetInsights with the addition of
// the ability to pass a context and additional request options.
//
// See GetInsights for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetInsightsWithContext(ctx aws.Context, input *GetInsightsInput, opts ...request.Option) (*GetInsightsOutput, error) {
req, out := c.GetInsightsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// GetInsightsPages iterates over the pages of a GetInsights operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See GetInsights method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a GetInsights operation.
// pageNum := 0
// err := client.GetInsightsPages(params,
// func(page *GetInsightsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *SecurityHub) GetInsightsPages(input *GetInsightsInput, fn func(*GetInsightsOutput, bool) bool) error {
return c.GetInsightsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// GetInsightsPagesWithContext same as GetInsightsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetInsightsPagesWithContext(ctx aws.Context, input *GetInsightsInput, fn func(*GetInsightsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *GetInsightsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.GetInsightsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*GetInsightsOutput), !p.HasNextPage())
}
return p.Err()
}
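// A hedged sketch of paging through insights with the helpers above. It assumes
// a configured *SecurityHub client named "client", that MaxResults matches the
// paginator's LimitToken declared in GetInsightsRequest, and that Insights is
// the output field holding each page's insight records.
//
//    err := client.GetInsightsPages(&GetInsightsInput{
//        MaxResults: aws.Int64(25), // assumed per-page limit field
//    }, func(page *GetInsightsOutput, lastPage bool) bool {
//        for _, insight := range page.Insights { // assumed output field
//            fmt.Println(insight)
//        }
//        return true // keep iterating; paging stops automatically after the last page
//    })
//    if err != nil {
//        // handle error
//    }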
const opGetInvitationsCount = "GetInvitationsCount"
// GetInvitationsCountRequest generates a "aws/request.Request" representing the
// client's request for the GetInvitationsCount operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetInvitationsCount for more information on using the GetInvitationsCount
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetInvitationsCountRequest method.
// req, resp := client.GetInvitationsCountRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInvitationsCount
func (c *SecurityHub) GetInvitationsCountRequest(input *GetInvitationsCountInput) (req *request.Request, output *GetInvitationsCountOutput) {
op := &request.Operation{
Name: opGetInvitationsCount,
HTTPMethod: "GET",
HTTPPath: "/invitations/count",
}
if input == nil {
input = &GetInvitationsCountInput{}
}
output = &GetInvitationsCountOutput{}
req = c.newRequest(op, input, output)
return
}
// GetInvitationsCount API operation for AWS SecurityHub.
//
// Returns the count of all Security Hub membership invitations that were sent
// to the current member account, not including the currently accepted invitation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetInvitationsCount for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInvitationsCount
func (c *SecurityHub) GetInvitationsCount(input *GetInvitationsCountInput) (*GetInvitationsCountOutput, error) {
req, out := c.GetInvitationsCountRequest(input)
return out, req.Send()
}
// GetInvitationsCountWithContext is the same as GetInvitationsCount with the addition of
// the ability to pass a context and additional request options.
//
// See GetInvitationsCount for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetInvitationsCountWithContext(ctx aws.Context, input *GetInvitationsCountInput, opts ...request.Option) (*GetInvitationsCountOutput, error) {
req, out := c.GetInvitationsCountRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
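// A hedged sketch of the call above: the input has no required fields, and
// InvitationsCount is assumed to be the *int64 output field. "client" is a
// configured *SecurityHub.
//
//    out, err := client.GetInvitationsCount(&GetInvitationsCountInput{})
//    if err == nil {
//        fmt.Println(aws.Int64Value(out.InvitationsCount)) // assumed output field
//    }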
const opGetMasterAccount = "GetMasterAccount"
// GetMasterAccountRequest generates a "aws/request.Request" representing the
// client's request for the GetMasterAccount operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetMasterAccount for more information on using the GetMasterAccount
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetMasterAccountRequest method.
// req, resp := client.GetMasterAccountRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount
func (c *SecurityHub) GetMasterAccountRequest(input *GetMasterAccountInput) (req *request.Request, output *GetMasterAccountOutput) {
op := &request.Operation{
Name: opGetMasterAccount,
HTTPMethod: "GET",
HTTPPath: "/master",
}
if input == nil {
input = &GetMasterAccountInput{}
}
output = &GetMasterAccountOutput{}
req = c.newRequest(op, input, output)
return
}
// GetMasterAccount API operation for AWS SecurityHub.
//
// Provides the details for the Security Hub master account to the current member
// account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetMasterAccount for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount
func (c *SecurityHub) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) {
req, out := c.GetMasterAccountRequest(input)
return out, req.Send()
}
// GetMasterAccountWithContext is the same as GetMasterAccount with the addition of
// the ability to pass a context and additional request options.
//
// See GetMasterAccount for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetMasterAccountWithContext(ctx aws.Context, input *GetMasterAccountInput, opts ...request.Option) (*GetMasterAccountOutput, error) {
req, out := c.GetMasterAccountRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetMembers = "GetMembers"
// GetMembersRequest generates a "aws/request.Request" representing the
// client's request for the GetMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetMembers for more information on using the GetMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetMembersRequest method.
// req, resp := client.GetMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers
func (c *SecurityHub) GetMembersRequest(input *GetMembersInput) (req *request.Request, output *GetMembersOutput) {
op := &request.Operation{
Name: opGetMembers,
HTTPMethod: "POST",
HTTPPath: "/members/get",
}
if input == nil {
input = &GetMembersInput{}
}
output = &GetMembersOutput{}
req = c.newRequest(op, input, output)
return
}
// GetMembers API operation for AWS SecurityHub.
//
// Returns the details on the Security Hub member accounts that are specified
// by the account IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation GetMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers
func (c *SecurityHub) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) {
req, out := c.GetMembersRequest(input)
return out, req.Send()
}
// GetMembersWithContext is the same as GetMembers with the addition of
// the ability to pass a context and additional request options.
//
// See GetMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) GetMembersWithContext(ctx aws.Context, input *GetMembersInput, opts ...request.Option) (*GetMembersOutput, error) {
req, out := c.GetMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
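// A hedged sketch of looking up member details with the operation above. It
// assumes a configured *SecurityHub client named "client", that AccountIds is
// the input field carrying member account IDs, and that Members and
// UnprocessedAccounts are the output fields; the account ID is hypothetical.
//
//    out, err := client.GetMembers(&GetMembersInput{
//        AccountIds: []*string{aws.String("111122223333")},
//    })
//    if err != nil {
//        // handle error
//    } else {
//        fmt.Println(out.Members)             // assumed output field
//        fmt.Println(out.UnprocessedAccounts) // assumed output field
//    }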
const opInviteMembers = "InviteMembers"
// InviteMembersRequest generates a "aws/request.Request" representing the
// client's request for the InviteMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See InviteMembers for more information on using the InviteMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the InviteMembersRequest method.
// req, resp := client.InviteMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers
func (c *SecurityHub) InviteMembersRequest(input *InviteMembersInput) (req *request.Request, output *InviteMembersOutput) {
op := &request.Operation{
Name: opInviteMembers,
HTTPMethod: "POST",
HTTPPath: "/members/invite",
}
if input == nil {
input = &InviteMembersInput{}
}
output = &InviteMembersOutput{}
req = c.newRequest(op, input, output)
return
}
// InviteMembers API operation for AWS SecurityHub.
//
// Invites other AWS accounts to enable Security Hub and become Security Hub
// member accounts. When an account accepts the invitation and becomes a member
// account, the master account can view Security Hub findings of the member
// account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation InviteMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers
func (c *SecurityHub) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) {
req, out := c.InviteMembersRequest(input)
return out, req.Send()
}
// InviteMembersWithContext is the same as InviteMembers with the addition of
// the ability to pass a context and additional request options.
//
// See InviteMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) InviteMembersWithContext(ctx aws.Context, input *InviteMembersInput, opts ...request.Option) (*InviteMembersOutput, error) {
req, out := c.InviteMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
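// A hedged sketch of inviting member accounts from the master account using
// the operation above. AccountIds is assumed to be the input field, and
// UnprocessedAccounts the output field listing accounts that could not be
// invited; the account IDs are hypothetical and "client" is a configured
// *SecurityHub.
//
//    out, err := client.InviteMembers(&InviteMembersInput{
//        AccountIds: []*string{aws.String("111122223333"), aws.String("444455556666")},
//    })
//    if err == nil {
//        fmt.Println(out.UnprocessedAccounts) // assumed output field
//    }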
const opListEnabledProductsForImport = "ListEnabledProductsForImport"
// ListEnabledProductsForImportRequest generates a "aws/request.Request" representing the
// client's request for the ListEnabledProductsForImport operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListEnabledProductsForImport for more information on using the ListEnabledProductsForImport
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListEnabledProductsForImportRequest method.
// req, resp := client.ListEnabledProductsForImportRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport
func (c *SecurityHub) ListEnabledProductsForImportRequest(input *ListEnabledProductsForImportInput) (req *request.Request, output *ListEnabledProductsForImportOutput) {
op := &request.Operation{
Name: opListEnabledProductsForImport,
HTTPMethod: "GET",
HTTPPath: "/productSubscriptions",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListEnabledProductsForImportInput{}
}
output = &ListEnabledProductsForImportOutput{}
req = c.newRequest(op, input, output)
return
}
// ListEnabledProductsForImport API operation for AWS SecurityHub.
//
// Lists all Security Hub-integrated third-party findings providers.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation ListEnabledProductsForImport for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport
func (c *SecurityHub) ListEnabledProductsForImport(input *ListEnabledProductsForImportInput) (*ListEnabledProductsForImportOutput, error) {
req, out := c.ListEnabledProductsForImportRequest(input)
return out, req.Send()
}
// ListEnabledProductsForImportWithContext is the same as ListEnabledProductsForImport with the addition of
// the ability to pass a context and additional request options.
//
// See ListEnabledProductsForImport for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) ListEnabledProductsForImportWithContext(ctx aws.Context, input *ListEnabledProductsForImportInput, opts ...request.Option) (*ListEnabledProductsForImportOutput, error) {
req, out := c.ListEnabledProductsForImportRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListEnabledProductsForImportPages iterates over the pages of a ListEnabledProductsForImport operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListEnabledProductsForImport method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListEnabledProductsForImport operation.
// pageNum := 0
// err := client.ListEnabledProductsForImportPages(params,
// func(page *ListEnabledProductsForImportOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *SecurityHub) ListEnabledProductsForImportPages(input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool) error {
return c.ListEnabledProductsForImportPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListEnabledProductsForImportPagesWithContext same as ListEnabledProductsForImportPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) ListEnabledProductsForImportPagesWithContext(ctx aws.Context, input *ListEnabledProductsForImportInput, fn func(*ListEnabledProductsForImportOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListEnabledProductsForImportInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListEnabledProductsForImportRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*ListEnabledProductsForImportOutput), !p.HasNextPage())
}
return p.Err()
}
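// A hedged sketch of collecting every enabled product subscription ARN with the
// pagination helpers above. ProductSubscriptions is assumed to be the []*string
// output field; "client" is a configured *SecurityHub.
//
//    var productArns []*string
//    err := client.ListEnabledProductsForImportPages(&ListEnabledProductsForImportInput{},
//        func(page *ListEnabledProductsForImportOutput, lastPage bool) bool {
//            productArns = append(productArns, page.ProductSubscriptions...) // assumed output field
//            return true
//        })
//    if err != nil {
//        // handle error
//    }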
const opListInvitations = "ListInvitations"
// ListInvitationsRequest generates a "aws/request.Request" representing the
// client's request for the ListInvitations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListInvitations for more information on using the ListInvitations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListInvitationsRequest method.
// req, resp := client.ListInvitationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations
func (c *SecurityHub) ListInvitationsRequest(input *ListInvitationsInput) (req *request.Request, output *ListInvitationsOutput) {
op := &request.Operation{
Name: opListInvitations,
HTTPMethod: "GET",
HTTPPath: "/invitations",
}
if input == nil {
input = &ListInvitationsInput{}
}
output = &ListInvitationsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListInvitations API operation for AWS SecurityHub.
//
// Lists all Security Hub membership invitations that were sent to the current
// AWS account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation ListInvitations for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListInvitations
func (c *SecurityHub) ListInvitations(input *ListInvitationsInput) (*ListInvitationsOutput, error) {
req, out := c.ListInvitationsRequest(input)
return out, req.Send()
}
// ListInvitationsWithContext is the same as ListInvitations with the addition of
// the ability to pass a context and additional request options.
//
// See ListInvitations for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) ListInvitationsWithContext(ctx aws.Context, input *ListInvitationsInput, opts ...request.Option) (*ListInvitationsOutput, error) {
req, out := c.ListInvitationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opListMembers = "ListMembers"
// ListMembersRequest generates a "aws/request.Request" representing the
// client's request for the ListMembers operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListMembers for more information on using the ListMembers
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListMembersRequest method.
// req, resp := client.ListMembersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers
func (c *SecurityHub) ListMembersRequest(input *ListMembersInput) (req *request.Request, output *ListMembersOutput) {
op := &request.Operation{
Name: opListMembers,
HTTPMethod: "GET",
HTTPPath: "/members",
}
if input == nil {
input = &ListMembersInput{}
}
output = &ListMembersOutput{}
req = c.newRequest(op, input, output)
return
}
// ListMembers API operation for AWS SecurityHub.
//
// Lists details about all member accounts for the current Security Hub master
// account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation ListMembers for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListMembers
func (c *SecurityHub) ListMembers(input *ListMembersInput) (*ListMembersOutput, error) {
req, out := c.ListMembersRequest(input)
return out, req.Send()
}
// ListMembersWithContext is the same as ListMembers with the addition of
// the ability to pass a context and additional request options.
//
// See ListMembers for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) ListMembersWithContext(ctx aws.Context, input *ListMembersInput, opts ...request.Option) (*ListMembersOutput, error) {
req, out := c.ListMembersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
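// A hedged sketch of the call above, restricted to accounts still associated
// with this master account. OnlyAssociated is assumed to be an optional input
// field and Members the output field; "client" is a configured *SecurityHub.
//
//    out, err := client.ListMembers(&ListMembersInput{
//        OnlyAssociated: aws.Bool(true), // assumed filter field
//    })
//    if err == nil {
//        for _, member := range out.Members { // assumed output field
//            fmt.Println(member)
//        }
//    }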
const opUpdateFindings = "UpdateFindings"
// UpdateFindingsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateFindings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateFindings for more information on using the UpdateFindings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateFindingsRequest method.
// req, resp := client.UpdateFindingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateFindings
func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *request.Request, output *UpdateFindingsOutput) {
op := &request.Operation{
Name: opUpdateFindings,
HTTPMethod: "PATCH",
HTTPPath: "/findings",
}
if input == nil {
input = &UpdateFindingsInput{}
}
output = &UpdateFindingsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateFindings API operation for AWS SecurityHub.
//
// Updates the AWS Security Hub-aggregated findings specified by the filter
// attributes.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation UpdateFindings for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateFindings
func (c *SecurityHub) UpdateFindings(input *UpdateFindingsInput) (*UpdateFindingsOutput, error) {
req, out := c.UpdateFindingsRequest(input)
return out, req.Send()
}
// UpdateFindingsWithContext is the same as UpdateFindings with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateFindings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) UpdateFindingsWithContext(ctx aws.Context, input *UpdateFindingsInput, opts ...request.Option) (*UpdateFindingsOutput, error) {
req, out := c.UpdateFindingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateInsight = "UpdateInsight"
// UpdateInsightRequest generates a "aws/request.Request" representing the
// client's request for the UpdateInsight operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateInsight for more information on using the UpdateInsight
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateInsightRequest method.
// req, resp := client.UpdateInsightRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateInsight
func (c *SecurityHub) UpdateInsightRequest(input *UpdateInsightInput) (req *request.Request, output *UpdateInsightOutput) {
op := &request.Operation{
Name: opUpdateInsight,
HTTPMethod: "PATCH",
HTTPPath: "/insights/{InsightArn+}",
}
if input == nil {
input = &UpdateInsightInput{}
}
output = &UpdateInsightOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateInsight API operation for AWS SecurityHub.
//
// Updates the AWS Security Hub insight specified by the insight ARN.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SecurityHub's
// API operation UpdateInsight for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalException "InternalException"
// Internal server error.
//
// * ErrCodeInvalidInputException "InvalidInputException"
// The request was rejected because an invalid or out-of-range value was supplied
// for an input parameter.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// The request was rejected because it attempted to create resources beyond
// the current AWS account limits. The error code describes the limit exceeded.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The request was rejected because the specified resource cannot be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateInsight
func (c *SecurityHub) UpdateInsight(input *UpdateInsightInput) (*UpdateInsightOutput, error) {
req, out := c.UpdateInsightRequest(input)
return out, req.Send()
}
// UpdateInsightWithContext is the same as UpdateInsight with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateInsight for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SecurityHub) UpdateInsightWithContext(ctx aws.Context, input *UpdateInsightInput, opts ...request.Option) (*UpdateInsightOutput, error) {
req, out := c.UpdateInsightRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
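// A hedged sketch of renaming a custom insight with the operation above.
// InsightArn (the path parameter) and Name are assumed input fields, and the
// ARN value is hypothetical; "client" is a configured *SecurityHub.
//
//    _, err := client.UpdateInsight(&UpdateInsightInput{
//        InsightArn: aws.String("arn:aws:securityhub:us-west-2:123456789012:insight/123456789012/custom/example"),
//        Name:       aws.String("Critical role findings"), // assumed field
//    })
//    if err != nil {
//        // handle error
//    }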
type AcceptInvitationInput struct {
_ struct{} `type:"structure"`
// The ID of the invitation that is sent to the AWS account by the Security
// Hub master account.
InvitationId *string `type:"string"`
// The account ID of the master Security Hub account whose invitation you're
// accepting.
MasterId *string `type:"string"`
}
// String returns the string representation
func (s AcceptInvitationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AcceptInvitationInput) GoString() string {
return s.String()
}
// SetInvitationId sets the InvitationId field's value.
func (s *AcceptInvitationInput) SetInvitationId(v string) *AcceptInvitationInput {
s.InvitationId = &v
return s
}
// SetMasterId sets the MasterId field's value.
func (s *AcceptInvitationInput) SetMasterId(v string) *AcceptInvitationInput {
s.MasterId = &v
return s
}
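// A hedged sketch of building this input with the fluent setters defined above;
// the invitation ID and master account ID values are hypothetical.
//
//    input := &AcceptInvitationInput{}
//    input.SetMasterId("111122223333").
//        SetInvitationId("7ab938c5d52d7904ad09f9e7c20cc4eb")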
type AcceptInvitationOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s AcceptInvitationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AcceptInvitationOutput) GoString() string {
return s.String()
}
// The details of an AWS account.
type AccountDetails struct {
_ struct{} `type:"structure"`
// The ID of an AWS account.
AccountId *string `type:"string"`
// The email of an AWS account.
Email *string `type:"string"`
}
// String returns the string representation
func (s AccountDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AccountDetails) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *AccountDetails) SetAccountId(v string) *AccountDetails {
s.AccountId = &v
return s
}
// SetEmail sets the Email field's value.
func (s *AccountDetails) SetEmail(v string) *AccountDetails {
s.Email = &v
return s
}
// The details of an AWS EC2 instance.
type AwsEc2InstanceDetails struct {
_ struct{} `type:"structure"`
// The IAM profile ARN of the instance.
IamInstanceProfileArn *string `type:"string"`
// The Amazon Machine Image (AMI) ID of the instance.
ImageId *string `type:"string"`
// The IPv4 addresses associated with the instance.
IpV4Addresses []*string `type:"list"`
// The IPv6 addresses associated with the instance.
IpV6Addresses []*string `type:"list"`
// The key name associated with the instance.
KeyName *string `type:"string"`
// The date/time the instance was launched.
LaunchedAt *string `type:"string"`
// The identifier of the subnet in which the instance was launched.
SubnetId *string `type:"string"`
// The instance type of the instance.
Type *string `type:"string"`
// The identifier of the VPC in which the instance was launched.
VpcId *string `type:"string"`
}
// String returns the string representation
func (s AwsEc2InstanceDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AwsEc2InstanceDetails) GoString() string {
return s.String()
}
// SetIamInstanceProfileArn sets the IamInstanceProfileArn field's value.
func (s *AwsEc2InstanceDetails) SetIamInstanceProfileArn(v string) *AwsEc2InstanceDetails {
s.IamInstanceProfileArn = &v
return s
}
// SetImageId sets the ImageId field's value.
func (s *AwsEc2InstanceDetails) SetImageId(v string) *AwsEc2InstanceDetails {
s.ImageId = &v
return s
}
// SetIpV4Addresses sets the IpV4Addresses field's value.
func (s *AwsEc2InstanceDetails) SetIpV4Addresses(v []*string) *AwsEc2InstanceDetails {
s.IpV4Addresses = v
return s
}
// SetIpV6Addresses sets the IpV6Addresses field's value.
func (s *AwsEc2InstanceDetails) SetIpV6Addresses(v []*string) *AwsEc2InstanceDetails {
s.IpV6Addresses = v
return s
}
// SetKeyName sets the KeyName field's value.
func (s *AwsEc2InstanceDetails) SetKeyName(v string) *AwsEc2InstanceDetails {
s.KeyName = &v
return s
}
// SetLaunchedAt sets the LaunchedAt field's value.
func (s *AwsEc2InstanceDetails) SetLaunchedAt(v string) *AwsEc2InstanceDetails {
s.LaunchedAt = &v
return s
}
// SetSubnetId sets the SubnetId field's value.
func (s *AwsEc2InstanceDetails) SetSubnetId(v string) *AwsEc2InstanceDetails {
s.SubnetId = &v
return s
}
// SetType sets the Type field's value.
func (s *AwsEc2InstanceDetails) SetType(v string) *AwsEc2InstanceDetails {
s.Type = &v
return s
}
// SetVpcId sets the VpcId field's value.
func (s *AwsEc2InstanceDetails) SetVpcId(v string) *AwsEc2InstanceDetails {
s.VpcId = &v
return s
}
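// A hedged sketch of populating EC2 instance details through the setters above;
// all identifier values are hypothetical.
//
//    instance := (&AwsEc2InstanceDetails{}).
//        SetType("t2.micro").
//        SetImageId("ami-0123456789abcdef0").
//        SetVpcId("vpc-0123456789abcdef0").
//        SetSubnetId("subnet-0123456789abcdef0").
//        SetLaunchedAt("2018-10-26T00:00:00Z")
//    fmt.Println(instance)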
// AWS IAM access key details related to a finding.
type AwsIamAccessKeyDetails struct {
_ struct{} `type:"structure"`
// The creation date/time of the IAM access key related to a finding.
CreatedAt *string `type:"string"`
// The status of the IAM access key related to a finding.
Status *string `type:"string" enum:"AwsIamAccessKeyStatus"`
// The user associated with the IAM access key related to a finding.
UserName *string `type:"string"`
}
// String returns the string representation
func (s AwsIamAccessKeyDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AwsIamAccessKeyDetails) GoString() string {
return s.String()
}
// SetCreatedAt sets the CreatedAt field's value.
func (s *AwsIamAccessKeyDetails) SetCreatedAt(v string) *AwsIamAccessKeyDetails {
s.CreatedAt = &v
return s
}
// SetStatus sets the Status field's value.
func (s *AwsIamAccessKeyDetails) SetStatus(v string) *AwsIamAccessKeyDetails {
s.Status = &v
return s
}
// SetUserName sets the UserName field's value.
func (s *AwsIamAccessKeyDetails) SetUserName(v string) *AwsIamAccessKeyDetails {
s.UserName = &v
return s
}
// The details of an AWS S3 Bucket.
type AwsS3BucketDetails struct {
_ struct{} `type:"structure"`
// The canonical user ID of the owner of the S3 bucket.
OwnerId *string `type:"string"`
// The display name of the owner of the S3 bucket.
OwnerName *string `type:"string"`
}
// String returns the string representation
func (s AwsS3BucketDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AwsS3BucketDetails) GoString() string {
return s.String()
}
// SetOwnerId sets the OwnerId field's value.
func (s *AwsS3BucketDetails) SetOwnerId(v string) *AwsS3BucketDetails {
s.OwnerId = &v
return s
}
// SetOwnerName sets the OwnerName field's value.
func (s *AwsS3BucketDetails) SetOwnerName(v string) *AwsS3BucketDetails {
s.OwnerName = &v
return s
}
// Provides a consistent format for the contents of the Security Hub-aggregated
// findings. The AwsSecurityFinding format enables you to share findings among
// AWS security services, third-party solutions, and compliance checks.
//
// A finding is a potential security issue generated either by AWS services
// (GuardDuty, Inspector, Macie) or by the integrated third-party solutions
// and compliance checks.
type AwsSecurityFinding struct {
_ struct{} `type:"structure"`
// The AWS account ID in which a finding is generated.
//
// AwsAccountId is a required field
AwsAccountId *string `type:"string" required:"true"`
// This data type is exclusive to findings that are generated as the result
// of a check run against a specific rule in a supported standard (for example,
// AWS CIS Foundations). Contains compliance-related finding details.
Compliance *Compliance `type:"structure"`
// A finding's confidence. Confidence is defined as the likelihood that a finding
// accurately identifies the behavior or issue that it was intended to identify.
// Confidence is scored on a 0-100 basis using a ratio scale, where 0 means
// zero percent confidence and 100 means 100 percent confidence.
Confidence *int64 `type:"integer"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was created by the security findings provider.
//
// CreatedAt is a required field
CreatedAt *string `type:"string" required:"true"`
// The level of importance assigned to the resources associated with the finding.
// A score of 0 means the underlying resources have no criticality, and a score
// of 100 is reserved for the most critical resources.
Criticality *int64 `type:"integer"`
// A finding's description.
Description *string `type:"string"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was first observed by the security findings provider.
FirstObservedAt *string `type:"string"`
// The identifier for the solution-specific component (a discrete unit of logic)
// that generated a finding. In various security findings providers' solutions,
// this generator can be called a rule, a check, a detector, a plug-in, and so
// on.
//
// GeneratorId is a required field
GeneratorId *string `type:"string" required:"true"`
// The security findings provider-specific identifier for a finding.
//
// Id is a required field
Id *string `type:"string" required:"true"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was most recently observed by the security findings
// provider.
LastObservedAt *string `type:"string"`
// A list of malware related to a finding.
Malware []*Malware `type:"list"`
// The details of network-related information about a finding.
Network *Network `type:"structure"`
// A user-defined note added to a finding.
Note *Note `type:"structure"`
// The details of process-related information about a finding.
Process *ProcessDetails `type:"structure"`
// The ARN generated by Security Hub that uniquely identifies a third-party
// company (security findings provider) once this provider's product (solution
// that generates findings) is registered with Security Hub.
//
// ProductArn is a required field
ProductArn *string `type:"string" required:"true"`
// A data type where security findings providers can include additional solution-specific
// details that are not part of the defined AwsSecurityFinding format.
ProductFields map[string]*string `type:"map"`
// The record state of a finding.
RecordState *string `type:"string" enum:"RecordState"`
// A list of related findings.
RelatedFindings []*RelatedFinding `type:"list"`
// A data type that describes the remediation options for a finding.
Remediation *Remediation `type:"structure"`
// A set of resource data types that describe the resources to which the finding
// refers.
//
// Resources is a required field
Resources []*Resource `type:"list" required:"true"`
// The schema version for which a finding is formatted.
//
// SchemaVersion is a required field
SchemaVersion *string `type:"string" required:"true"`
// A finding's severity.
//
// Severity is a required field
Severity *Severity `type:"structure" required:"true"`
// A URL that links to a page about the current finding in the security findings
// provider's solution.
SourceUrl *string `type:"string"`
// Threat intel details related to a finding.
ThreatIntelIndicators []*ThreatIntelIndicator `type:"list"`
// A finding's title.
Title *string `type:"string"`
// One or more finding types in the format of 'namespace/category/classifier'
// that classify a finding.
//
// Valid namespace values are: Software and Configuration Checks | TTPs | Effects
// | Unusual Behaviors | Sensitive Data Identifications
//
// Types is a required field
Types []*string `type:"list" required:"true"`
// An ISO8601-formatted timestamp that indicates when the finding record was
// last updated by the security findings provider.
//
// UpdatedAt is a required field
UpdatedAt *string `type:"string" required:"true"`
// A list of name/value string pairs associated with the finding. These are
// custom, user-defined fields added to a finding.
UserDefinedFields map[string]*string `type:"map"`
// Indicates the veracity of a finding.
VerificationState *string `type:"string" enum:"VerificationState"`
// The workflow state of a finding.
WorkflowState *string `type:"string" enum:"WorkflowState"`
}
// String returns the string representation
func (s AwsSecurityFinding) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AwsSecurityFinding) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *AwsSecurityFinding) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "AwsSecurityFinding"}
if s.AwsAccountId == nil {
invalidParams.Add(request.NewErrParamRequired("AwsAccountId"))
}
if s.CreatedAt == nil {
invalidParams.Add(request.NewErrParamRequired("CreatedAt"))
}
if s.GeneratorId == nil {
invalidParams.Add(request.NewErrParamRequired("GeneratorId"))
}
if s.Id == nil {
invalidParams.Add(request.NewErrParamRequired("Id"))
}
if s.ProductArn == nil {
invalidParams.Add(request.NewErrParamRequired("ProductArn"))
}
if s.Resources == nil {
invalidParams.Add(request.NewErrParamRequired("Resources"))
}
if s.SchemaVersion == nil {
invalidParams.Add(request.NewErrParamRequired("SchemaVersion"))
}
if s.Severity == nil {
invalidParams.Add(request.NewErrParamRequired("Severity"))
}
if s.Types == nil {
invalidParams.Add(request.NewErrParamRequired("Types"))
}
if s.UpdatedAt == nil {
invalidParams.Add(request.NewErrParamRequired("UpdatedAt"))
}
if s.Malware != nil {
for i, v := range s.Malware {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Malware", i), err.(request.ErrInvalidParams))
}
}
}
if s.Note != nil {
if err := s.Note.Validate(); err != nil {
invalidParams.AddNested("Note", err.(request.ErrInvalidParams))
}
}
if s.RelatedFindings != nil {
for i, v := range s.RelatedFindings {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedFindings", i), err.(request.ErrInvalidParams))
}
}
}
if s.Resources != nil {
for i, v := range s.Resources {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Resources", i), err.(request.ErrInvalidParams))
}
}
}
if s.Severity != nil {
if err := s.Severity.Validate(); err != nil {
invalidParams.AddNested("Severity", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAwsAccountId sets the AwsAccountId field's value.
func (s *AwsSecurityFinding) SetAwsAccountId(v string) *AwsSecurityFinding {
s.AwsAccountId = &v
return s
}
// SetCompliance sets the Compliance field's value.
func (s *AwsSecurityFinding) SetCompliance(v *Compliance) *AwsSecurityFinding {
s.Compliance = v
return s
}
// SetConfidence sets the Confidence field's value.
func (s *AwsSecurityFinding) SetConfidence(v int64) *AwsSecurityFinding {
s.Confidence = &v
return s
}
// SetCreatedAt sets the CreatedAt field's value.
func (s *AwsSecurityFinding) SetCreatedAt(v string) *AwsSecurityFinding {
s.CreatedAt = &v
return s
}
// SetCriticality sets the Criticality field's value.
func (s *AwsSecurityFinding) SetCriticality(v int64) *AwsSecurityFinding {
s.Criticality = &v
return s
}
// SetDescription sets the Description field's value.
func (s *AwsSecurityFinding) SetDescription(v string) *AwsSecurityFinding {
s.Description = &v
return s
}
// SetFirstObservedAt sets the FirstObservedAt field's value.
func (s *AwsSecurityFinding) SetFirstObservedAt(v string) *AwsSecurityFinding {
s.FirstObservedAt = &v
return s
}
// SetGeneratorId sets the GeneratorId field's value.
func (s *AwsSecurityFinding) SetGeneratorId(v string) *AwsSecurityFinding {
s.GeneratorId = &v
return s
}
// SetId sets the Id field's value.
func (s *AwsSecurityFinding) SetId(v string) *AwsSecurityFinding {
s.Id = &v
return s
}
// SetLastObservedAt sets the LastObservedAt field's value.
func (s *AwsSecurityFinding) SetLastObservedAt(v string) *AwsSecurityFinding {
s.LastObservedAt = &v
return s
}
// SetMalware sets the Malware field's value.
func (s *AwsSecurityFinding) SetMalware(v []*Malware) *AwsSecurityFinding {
s.Malware = v
return s
}
// SetNetwork sets the Network field's value.
func (s *AwsSecurityFinding) SetNetwork(v *Network) *AwsSecurityFinding {
s.Network = v
return s
}
// SetNote sets the Note field's value.
func (s *AwsSecurityFinding) SetNote(v *Note) *AwsSecurityFinding {
s.Note = v
return s
}
// SetProcess sets the Process field's value.
func (s *AwsSecurityFinding) SetProcess(v *ProcessDetails) *AwsSecurityFinding {
s.Process = v
return s
}
// SetProductArn sets the ProductArn field's value.
func (s *AwsSecurityFinding) SetProductArn(v string) *AwsSecurityFinding {
s.ProductArn = &v
return s
}
// SetProductFields sets the ProductFields field's value.
func (s *AwsSecurityFinding) SetProductFields(v map[string]*string) *AwsSecurityFinding {
s.ProductFields = v
return s
}
// SetRecordState sets the RecordState field's value.
func (s *AwsSecurityFinding) SetRecordState(v string) *AwsSecurityFinding {
s.RecordState = &v
return s
}
// SetRelatedFindings sets the RelatedFindings field's value.
func (s *AwsSecurityFinding) SetRelatedFindings(v []*RelatedFinding) *AwsSecurityFinding {
s.RelatedFindings = v
return s
}
// SetRemediation sets the Remediation field's value.
func (s *AwsSecurityFinding) SetRemediation(v *Remediation) *AwsSecurityFinding {
s.Remediation = v
return s
}
// SetResources sets the Resources field's value.
func (s *AwsSecurityFinding) SetResources(v []*Resource) *AwsSecurityFinding {
s.Resources = v
return s
}
// SetSchemaVersion sets the SchemaVersion field's value.
func (s *AwsSecurityFinding) SetSchemaVersion(v string) *AwsSecurityFinding {
s.SchemaVersion = &v
return s
}
// SetSeverity sets the Severity field's value.
func (s *AwsSecurityFinding) SetSeverity(v *Severity) *AwsSecurityFinding {
s.Severity = v
return s
}
// SetSourceUrl sets the SourceUrl field's value.
func (s *AwsSecurityFinding) SetSourceUrl(v string) *AwsSecurityFinding {
s.SourceUrl = &v
return s
}
// SetThreatIntelIndicators sets the ThreatIntelIndicators field's value.
func (s *AwsSecurityFinding) SetThreatIntelIndicators(v []*ThreatIntelIndicator) *AwsSecurityFinding {
s.ThreatIntelIndicators = v
return s
}
// SetTitle sets the Title field's value.
func (s *AwsSecurityFinding) SetTitle(v string) *AwsSecurityFinding {
s.Title = &v
return s
}
// SetTypes sets the Types field's value.
func (s *AwsSecurityFinding) SetTypes(v []*string) *AwsSecurityFinding {
s.Types = v
return s
}
// SetUpdatedAt sets the UpdatedAt field's value.
func (s *AwsSecurityFinding) SetUpdatedAt(v string) *AwsSecurityFinding {
s.UpdatedAt = &v
return s
}
// SetUserDefinedFields sets the UserDefinedFields field's value.
func (s *AwsSecurityFinding) SetUserDefinedFields(v map[string]*string) *AwsSecurityFinding {
s.UserDefinedFields = v
return s
}
// SetVerificationState sets the VerificationState field's value.
func (s *AwsSecurityFinding) SetVerificationState(v string) *AwsSecurityFinding {
s.VerificationState = &v
return s
}
// SetWorkflowState sets the WorkflowState field's value.
func (s *AwsSecurityFinding) SetWorkflowState(v string) *AwsSecurityFinding {
s.WorkflowState = &v
return s
}
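// Example (illustrative sketch, not generated code): a minimal use of the
// fluent setters above together with Validate. The finding below is
// deliberately incomplete -- Severity and Resources are left unset -- so
// Validate returns a request.ErrInvalidParams error naming the missing
// required parameters. All account IDs, ARNs, and identifiers are
// hypothetical placeholders.
func exampleValidateAwsSecurityFinding() error {
	findingType := "Software and Configuration Checks/Vulnerabilities/CVE"
	finding := (&AwsSecurityFinding{}).
		SetAwsAccountId("111122223333").
		SetCreatedAt("2019-01-01T00:00:00Z").
		SetUpdatedAt("2019-01-01T00:00:00Z").
		SetGeneratorId("example-generator").
		SetId("example-finding-0001").
		SetProductArn("arn:aws:securityhub:us-west-2:111122223333:product/example/default").
		SetSchemaVersion("2018-10-08").
		SetTitle("Example finding").
		SetDescription("Hypothetical finding used to illustrate Validate.").
		SetTypes([]*string{&findingType})

	// Severity and Resources are required but unset, so this returns an
	// error listing both missing fields.
	return finding.Validate()
}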
// A collection of attributes that are applied to all active Security Hub-aggregated
// findings and that result in a subset of findings that are included in this
// insight.
type AwsSecurityFindingFilters struct {
_ struct{} `type:"structure"`
// The AWS account ID in which a finding is generated.
AwsAccountId []*StringFilter `type:"list"`
// The name of the findings provider (company) that owns the solution (product)
// that generates findings.
CompanyName []*StringFilter `type:"list"`
// The compliance status of findings generated as the result of a check run
// against a specific rule in a supported standard (for example, AWS CIS Foundations).
ComplianceStatus []*StringFilter `type:"list"`
// A finding's confidence. Confidence is defined as the likelihood that a finding
// accurately identifies the behavior or issue that it was intended to identify.
// Confidence is scored on a 0-100 basis using a ratio scale, where 0 equates
// to zero percent confidence and 100 equates to 100 percent confidence.
Confidence []*NumberFilter `type:"list"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was created by the security findings provider.
CreatedAt []*DateFilter `type:"list"`
// The level of importance assigned to the resources associated with the finding.
// A score of 0 means the underlying resources have no criticality, and a score
// of 100 is reserved for the most critical resources.
Criticality []*NumberFilter `type:"list"`
// A finding's description.
Description []*StringFilter `type:"list"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was first observed by the security findings provider.
FirstObservedAt []*DateFilter `type:"list"`
// This is the identifier for the solution-specific component (a discrete unit
// of logic) that generated a finding. In various security findings providers'
// solutions, this generator can be called a rule, a check, a detector, a plug-in,
// etc.
GeneratorId []*StringFilter `type:"list"`
// The security findings provider-specific identifier for a finding.
Id []*StringFilter `type:"list"`
// A keyword for a finding.
Keyword []*KeywordFilter `type:"list"`
// An ISO8601-formatted timestamp that indicates when the potential security
// issue captured by a finding was most recently observed by the security findings
// provider.
LastObservedAt []*DateFilter `type:"list"`
// The name of the malware that was observed.
MalwareName []*StringFilter `type:"list"`
// The filesystem path of the malware that was observed.
MalwarePath []*StringFilter `type:"list"`
// The state of the malware that was observed.
MalwareState []*StringFilter `type:"list"`
// The type of the malware that was observed.
MalwareType []*StringFilter `type:"list"`
// The destination domain of network-related information about a finding.
NetworkDestinationDomain []*StringFilter `type:"list"`
// The destination IPv4 address of network-related information about a finding.
NetworkDestinationIpV4 []*IpFilter `type:"list"`
// The destination IPv6 address of network-related information about a finding.
NetworkDestinationIpV6 []*IpFilter `type:"list"`
// The destination port of network-related information about a finding.
NetworkDestinationPort []*NumberFilter `type:"list"`
// Indicates the direction of network traffic associated with a finding.
NetworkDirection []*StringFilter `type:"list"`
// The protocol of network-related information about a finding.
NetworkProtocol []*StringFilter `type:"list"`
// The source domain of network-related information about a finding.
NetworkSourceDomain []*StringFilter `type:"list"`
// The source IPv4 address of network-related information about a finding.
NetworkSourceIpV4 []*IpFilter `type:"list"`
// The source IPv6 address of network-related information about a finding.
NetworkSourceIpV6 []*IpFilter `type:"list"`
// The source media access control (MAC) address of network-related information
// about a finding.
NetworkSourceMac []*StringFilter `type:"list"`
// The source port of network-related information about a finding.
NetworkSourcePort []*NumberFilter `type:"list"`
// The text of a note.
NoteText []*StringFilter `type:"list"`
// The timestamp of when the note was updated.
NoteUpdatedAt []*DateFilter `type:"list"`
// The principal that created a note.
NoteUpdatedBy []*StringFilter `type:"list"`
// The date/time that the process was launched.
ProcessLaunchedAt []*DateFilter `type:"list"`
// The name of the process.
ProcessName []*StringFilter `type:"list"`
// The parent process ID.
ProcessParentPid []*NumberFilter `type:"list"`
// The path to the process executable.
ProcessPath []*StringFilter `type:"list"`
// The process ID.
ProcessPid []*NumberFilter `type:"list"`
// The date/time that the process was terminated.
ProcessTerminatedAt []*DateFilter `type:"list"`
// The ARN generated by Security Hub that uniquely identifies a third-party
// company (security findings provider) once this provider's product (solution
// that generates findings) is registered with Security Hub.
ProductArn []*StringFilter `type:"list"`
// A data type where security findings providers can include additional solution-specific
// details that are not part of the defined AwsSecurityFinding format.
ProductFields []*MapFilter `type:"list"`
// The name of the solution (product) that generates findings.
ProductName []*StringFilter `type:"list"`
// The recommendation of what to do about the issue described in a finding.
RecommendationText []*StringFilter `type:"list"`
// The updated record state for the finding.
RecordState []*StringFilter `type:"list"`
// The solution-generated identifier for a related finding.
RelatedFindingsId []*StringFilter `type:"list"`
// The ARN of the solution that generated a related finding.
RelatedFindingsProductArn []*StringFilter `type:"list"`
// The IAM profile ARN of the instance.
ResourceAwsEc2InstanceIamInstanceProfileArn []*StringFilter `type:"list"`
// The Amazon Machine Image (AMI) ID of the instance.
ResourceAwsEc2InstanceImageId []*StringFilter `type:"list"`
// The IPv4 addresses associated with the instance.
ResourceAwsEc2InstanceIpV4Addresses []*IpFilter `type:"list"`
// The IPv6 addresses associated with the instance.
ResourceAwsEc2InstanceIpV6Addresses []*IpFilter `type:"list"`
// The key name associated with the instance.
ResourceAwsEc2InstanceKeyName []*StringFilter `type:"list"`
// The date/time the instance was launched.
ResourceAwsEc2InstanceLaunchedAt []*DateFilter `type:"list"`
// The identifier of the subnet in which the instance was launched.
ResourceAwsEc2InstanceSubnetId []*StringFilter `type:"list"`
// The instance type of the instance.
ResourceAwsEc2InstanceType []*StringFilter `type:"list"`
// The identifier of the VPC in which the instance was launched.
ResourceAwsEc2InstanceVpcId []*StringFilter `type:"list"`
// The creation date/time of the IAM access key related to a finding.
ResourceAwsIamAccessKeyCreatedAt []*DateFilter `type:"list"`
// The status of the IAM access key related to a finding.
ResourceAwsIamAccessKeyStatus []*StringFilter `type:"list"`
// The user associated with the IAM access key related to a finding.
ResourceAwsIamAccessKeyUserName []*StringFilter `type:"list"`
// The canonical user ID of the owner of the S3 bucket.
ResourceAwsS3BucketOwnerId []*StringFilter `type:"list"`
// The display name of the owner of the S3 bucket.
ResourceAwsS3BucketOwnerName []*StringFilter `type:"list"`
// The identifier of the image related to a finding.
ResourceContainerImageId []*StringFilter `type:"list"`
// The name of the image related to a finding.
ResourceContainerImageName []*StringFilter `type:"list"`
// The date/time that the container was started.
ResourceContainerLaunchedAt []*DateFilter `type:"list"`
// The name of the container related to a finding.
ResourceContainerName []*StringFilter `type:"list"`
// The details of a resource that does not have a specific sub-field for the
// resource type defined.
ResourceDetailsOther []*MapFilter `type:"list"`
// The canonical identifier for the given resource type.
ResourceId []*StringFilter `type:"list"`
// The canonical AWS partition name to which the region is assigned.
ResourcePartition []*StringFilter `type:"list"`
// The canonical AWS external region name where this resource is located.
ResourceRegion []*StringFilter `type:"list"`
// A list of AWS tags associated with a resource at the time the finding was
// processed.
ResourceTags []*MapFilter `type:"list"`
// Specifies the type of the resource for which details are provided.
ResourceType []*StringFilter `type:"list"`
// The label of a finding's severity.
SeverityLabel []*StringFilter `type:"list"`
// The normalized severity of a finding.
SeverityNormalized []*NumberFilter `type:"list"`
// The native severity as defined by the security findings provider's solution
// that generated the finding.
SeverityProduct []*NumberFilter `type:"list"`
// A URL that links to a page about the current finding in the security findings
// provider's solution.
SourceUrl []*StringFilter `type:"list"`
// The category of a threat intel indicator.
ThreatIntelIndicatorCategory []*StringFilter `type:"list"`
// The date/time of the last observation of a threat intel indicator.
ThreatIntelIndicatorLastObservedAt []*DateFilter `type:"list"`
// The source of the threat intel.
ThreatIntelIndicatorSource []*StringFilter `type:"list"`
// The URL for more details from the source of the threat intel.
ThreatIntelIndicatorSourceUrl []*StringFilter `type:"list"`
// The type of a threat intel indicator.
ThreatIntelIndicatorType []*StringFilter `type:"list"`
// The value of a threat intel indicator.
ThreatIntelIndicatorValue []*StringFilter `type:"list"`
// A finding's title.
Title []*StringFilter `type:"list"`
// A finding type in the format of 'namespace/category/classifier' that classifies
// a finding.
Type []*StringFilter `type:"list"`
// An ISO8601-formatted timestamp that indicates when the finding record was
// last updated by the security findings provider.
UpdatedAt []*DateFilter `type:"list"`
// A list of name/value string pairs associated with the finding. These are
// custom, user-defined fields added to a finding.
UserDefinedFields []*MapFilter `type:"list"`
// Indicates the veracity of a finding.
VerificationState []*StringFilter `type:"list"`
// The workflow state of a finding.
WorkflowState []*StringFilter `type:"list"`
}
// String returns the string representation
func (s AwsSecurityFindingFilters) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AwsSecurityFindingFilters) GoString() string {
return s.String()
}
// SetAwsAccountId sets the AwsAccountId field's value.
func (s *AwsSecurityFindingFilters) SetAwsAccountId(v []*StringFilter) *AwsSecurityFindingFilters {
s.AwsAccountId = v
return s
}
// SetCompanyName sets the CompanyName field's value.
func (s *AwsSecurityFindingFilters) SetCompanyName(v []*StringFilter) *AwsSecurityFindingFilters {
s.CompanyName = v
return s
}
// SetComplianceStatus sets the ComplianceStatus field's value.
func (s *AwsSecurityFindingFilters) SetComplianceStatus(v []*StringFilter) *AwsSecurityFindingFilters {
s.ComplianceStatus = v
return s
}
// SetConfidence sets the Confidence field's value.
func (s *AwsSecurityFindingFilters) SetConfidence(v []*NumberFilter) *AwsSecurityFindingFilters {
s.Confidence = v
return s
}
// SetCreatedAt sets the CreatedAt field's value.
func (s *AwsSecurityFindingFilters) SetCreatedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.CreatedAt = v
return s
}
// SetCriticality sets the Criticality field's value.
func (s *AwsSecurityFindingFilters) SetCriticality(v []*NumberFilter) *AwsSecurityFindingFilters {
s.Criticality = v
return s
}
// SetDescription sets the Description field's value.
func (s *AwsSecurityFindingFilters) SetDescription(v []*StringFilter) *AwsSecurityFindingFilters {
s.Description = v
return s
}
// SetFirstObservedAt sets the FirstObservedAt field's value.
func (s *AwsSecurityFindingFilters) SetFirstObservedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.FirstObservedAt = v
return s
}
// SetGeneratorId sets the GeneratorId field's value.
func (s *AwsSecurityFindingFilters) SetGeneratorId(v []*StringFilter) *AwsSecurityFindingFilters {
s.GeneratorId = v
return s
}
// SetId sets the Id field's value.
func (s *AwsSecurityFindingFilters) SetId(v []*StringFilter) *AwsSecurityFindingFilters {
s.Id = v
return s
}
// SetKeyword sets the Keyword field's value.
func (s *AwsSecurityFindingFilters) SetKeyword(v []*KeywordFilter) *AwsSecurityFindingFilters {
s.Keyword = v
return s
}
// SetLastObservedAt sets the LastObservedAt field's value.
func (s *AwsSecurityFindingFilters) SetLastObservedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.LastObservedAt = v
return s
}
// SetMalwareName sets the MalwareName field's value.
func (s *AwsSecurityFindingFilters) SetMalwareName(v []*StringFilter) *AwsSecurityFindingFilters {
s.MalwareName = v
return s
}
// SetMalwarePath sets the MalwarePath field's value.
func (s *AwsSecurityFindingFilters) SetMalwarePath(v []*StringFilter) *AwsSecurityFindingFilters {
s.MalwarePath = v
return s
}
// SetMalwareState sets the MalwareState field's value.
func (s *AwsSecurityFindingFilters) SetMalwareState(v []*StringFilter) *AwsSecurityFindingFilters {
s.MalwareState = v
return s
}
// SetMalwareType sets the MalwareType field's value.
func (s *AwsSecurityFindingFilters) SetMalwareType(v []*StringFilter) *AwsSecurityFindingFilters {
s.MalwareType = v
return s
}
// SetNetworkDestinationDomain sets the NetworkDestinationDomain field's value.
func (s *AwsSecurityFindingFilters) SetNetworkDestinationDomain(v []*StringFilter) *AwsSecurityFindingFilters {
s.NetworkDestinationDomain = v
return s
}
// SetNetworkDestinationIpV4 sets the NetworkDestinationIpV4 field's value.
func (s *AwsSecurityFindingFilters) SetNetworkDestinationIpV4(v []*IpFilter) *AwsSecurityFindingFilters {
s.NetworkDestinationIpV4 = v
return s
}
// SetNetworkDestinationIpV6 sets the NetworkDestinationIpV6 field's value.
func (s *AwsSecurityFindingFilters) SetNetworkDestinationIpV6(v []*IpFilter) *AwsSecurityFindingFilters {
s.NetworkDestinationIpV6 = v
return s
}
// SetNetworkDestinationPort sets the NetworkDestinationPort field's value.
func (s *AwsSecurityFindingFilters) SetNetworkDestinationPort(v []*NumberFilter) *AwsSecurityFindingFilters {
s.NetworkDestinationPort = v
return s
}
// SetNetworkDirection sets the NetworkDirection field's value.
func (s *AwsSecurityFindingFilters) SetNetworkDirection(v []*StringFilter) *AwsSecurityFindingFilters {
s.NetworkDirection = v
return s
}
// SetNetworkProtocol sets the NetworkProtocol field's value.
func (s *AwsSecurityFindingFilters) SetNetworkProtocol(v []*StringFilter) *AwsSecurityFindingFilters {
s.NetworkProtocol = v
return s
}
// SetNetworkSourceDomain sets the NetworkSourceDomain field's value.
func (s *AwsSecurityFindingFilters) SetNetworkSourceDomain(v []*StringFilter) *AwsSecurityFindingFilters {
s.NetworkSourceDomain = v
return s
}
// SetNetworkSourceIpV4 sets the NetworkSourceIpV4 field's value.
func (s *AwsSecurityFindingFilters) SetNetworkSourceIpV4(v []*IpFilter) *AwsSecurityFindingFilters {
s.NetworkSourceIpV4 = v
return s
}
// SetNetworkSourceIpV6 sets the NetworkSourceIpV6 field's value.
func (s *AwsSecurityFindingFilters) SetNetworkSourceIpV6(v []*IpFilter) *AwsSecurityFindingFilters {
s.NetworkSourceIpV6 = v
return s
}
// SetNetworkSourceMac sets the NetworkSourceMac field's value.
func (s *AwsSecurityFindingFilters) SetNetworkSourceMac(v []*StringFilter) *AwsSecurityFindingFilters {
s.NetworkSourceMac = v
return s
}
// SetNetworkSourcePort sets the NetworkSourcePort field's value.
func (s *AwsSecurityFindingFilters) SetNetworkSourcePort(v []*NumberFilter) *AwsSecurityFindingFilters {
s.NetworkSourcePort = v
return s
}
// SetNoteText sets the NoteText field's value.
func (s *AwsSecurityFindingFilters) SetNoteText(v []*StringFilter) *AwsSecurityFindingFilters {
s.NoteText = v
return s
}
// SetNoteUpdatedAt sets the NoteUpdatedAt field's value.
func (s *AwsSecurityFindingFilters) SetNoteUpdatedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.NoteUpdatedAt = v
return s
}
// SetNoteUpdatedBy sets the NoteUpdatedBy field's value.
func (s *AwsSecurityFindingFilters) SetNoteUpdatedBy(v []*StringFilter) *AwsSecurityFindingFilters {
s.NoteUpdatedBy = v
return s
}
// SetProcessLaunchedAt sets the ProcessLaunchedAt field's value.
func (s *AwsSecurityFindingFilters) SetProcessLaunchedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ProcessLaunchedAt = v
return s
}
// SetProcessName sets the ProcessName field's value.
func (s *AwsSecurityFindingFilters) SetProcessName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ProcessName = v
return s
}
// SetProcessParentPid sets the ProcessParentPid field's value.
func (s *AwsSecurityFindingFilters) SetProcessParentPid(v []*NumberFilter) *AwsSecurityFindingFilters {
s.ProcessParentPid = v
return s
}
// SetProcessPath sets the ProcessPath field's value.
func (s *AwsSecurityFindingFilters) SetProcessPath(v []*StringFilter) *AwsSecurityFindingFilters {
s.ProcessPath = v
return s
}
// SetProcessPid sets the ProcessPid field's value.
func (s *AwsSecurityFindingFilters) SetProcessPid(v []*NumberFilter) *AwsSecurityFindingFilters {
s.ProcessPid = v
return s
}
// SetProcessTerminatedAt sets the ProcessTerminatedAt field's value.
func (s *AwsSecurityFindingFilters) SetProcessTerminatedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ProcessTerminatedAt = v
return s
}
// SetProductArn sets the ProductArn field's value.
func (s *AwsSecurityFindingFilters) SetProductArn(v []*StringFilter) *AwsSecurityFindingFilters {
s.ProductArn = v
return s
}
// SetProductFields sets the ProductFields field's value.
func (s *AwsSecurityFindingFilters) SetProductFields(v []*MapFilter) *AwsSecurityFindingFilters {
s.ProductFields = v
return s
}
// SetProductName sets the ProductName field's value.
func (s *AwsSecurityFindingFilters) SetProductName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ProductName = v
return s
}
// SetRecommendationText sets the RecommendationText field's value.
func (s *AwsSecurityFindingFilters) SetRecommendationText(v []*StringFilter) *AwsSecurityFindingFilters {
s.RecommendationText = v
return s
}
// SetRecordState sets the RecordState field's value.
func (s *AwsSecurityFindingFilters) SetRecordState(v []*StringFilter) *AwsSecurityFindingFilters {
s.RecordState = v
return s
}
// SetRelatedFindingsId sets the RelatedFindingsId field's value.
func (s *AwsSecurityFindingFilters) SetRelatedFindingsId(v []*StringFilter) *AwsSecurityFindingFilters {
s.RelatedFindingsId = v
return s
}
// SetRelatedFindingsProductArn sets the RelatedFindingsProductArn field's value.
func (s *AwsSecurityFindingFilters) SetRelatedFindingsProductArn(v []*StringFilter) *AwsSecurityFindingFilters {
s.RelatedFindingsProductArn = v
return s
}
// SetResourceAwsEc2InstanceIamInstanceProfileArn sets the ResourceAwsEc2InstanceIamInstanceProfileArn field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceIamInstanceProfileArn(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceIamInstanceProfileArn = v
return s
}
// SetResourceAwsEc2InstanceImageId sets the ResourceAwsEc2InstanceImageId field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceImageId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceImageId = v
return s
}
// SetResourceAwsEc2InstanceIpV4Addresses sets the ResourceAwsEc2InstanceIpV4Addresses field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceIpV4Addresses(v []*IpFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceIpV4Addresses = v
return s
}
// SetResourceAwsEc2InstanceIpV6Addresses sets the ResourceAwsEc2InstanceIpV6Addresses field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceIpV6Addresses(v []*IpFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceIpV6Addresses = v
return s
}
// SetResourceAwsEc2InstanceKeyName sets the ResourceAwsEc2InstanceKeyName field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceKeyName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceKeyName = v
return s
}
// SetResourceAwsEc2InstanceLaunchedAt sets the ResourceAwsEc2InstanceLaunchedAt field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceLaunchedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceLaunchedAt = v
return s
}
// SetResourceAwsEc2InstanceSubnetId sets the ResourceAwsEc2InstanceSubnetId field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceSubnetId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceSubnetId = v
return s
}
// SetResourceAwsEc2InstanceType sets the ResourceAwsEc2InstanceType field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceType(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceType = v
return s
}
// SetResourceAwsEc2InstanceVpcId sets the ResourceAwsEc2InstanceVpcId field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsEc2InstanceVpcId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsEc2InstanceVpcId = v
return s
}
// SetResourceAwsIamAccessKeyCreatedAt sets the ResourceAwsIamAccessKeyCreatedAt field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsIamAccessKeyCreatedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ResourceAwsIamAccessKeyCreatedAt = v
return s
}
// SetResourceAwsIamAccessKeyStatus sets the ResourceAwsIamAccessKeyStatus field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsIamAccessKeyStatus(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsIamAccessKeyStatus = v
return s
}
// SetResourceAwsIamAccessKeyUserName sets the ResourceAwsIamAccessKeyUserName field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsIamAccessKeyUserName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsIamAccessKeyUserName = v
return s
}
// SetResourceAwsS3BucketOwnerId sets the ResourceAwsS3BucketOwnerId field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsS3BucketOwnerId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsS3BucketOwnerId = v
return s
}
// SetResourceAwsS3BucketOwnerName sets the ResourceAwsS3BucketOwnerName field's value.
func (s *AwsSecurityFindingFilters) SetResourceAwsS3BucketOwnerName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceAwsS3BucketOwnerName = v
return s
}
// SetResourceContainerImageId sets the ResourceContainerImageId field's value.
func (s *AwsSecurityFindingFilters) SetResourceContainerImageId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceContainerImageId = v
return s
}
// SetResourceContainerImageName sets the ResourceContainerImageName field's value.
func (s *AwsSecurityFindingFilters) SetResourceContainerImageName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceContainerImageName = v
return s
}
// SetResourceContainerLaunchedAt sets the ResourceContainerLaunchedAt field's value.
func (s *AwsSecurityFindingFilters) SetResourceContainerLaunchedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ResourceContainerLaunchedAt = v
return s
}
// SetResourceContainerName sets the ResourceContainerName field's value.
func (s *AwsSecurityFindingFilters) SetResourceContainerName(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceContainerName = v
return s
}
// SetResourceDetailsOther sets the ResourceDetailsOther field's value.
func (s *AwsSecurityFindingFilters) SetResourceDetailsOther(v []*MapFilter) *AwsSecurityFindingFilters {
s.ResourceDetailsOther = v
return s
}
// SetResourceId sets the ResourceId field's value.
func (s *AwsSecurityFindingFilters) SetResourceId(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceId = v
return s
}
// SetResourcePartition sets the ResourcePartition field's value.
func (s *AwsSecurityFindingFilters) SetResourcePartition(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourcePartition = v
return s
}
// SetResourceRegion sets the ResourceRegion field's value.
func (s *AwsSecurityFindingFilters) SetResourceRegion(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceRegion = v
return s
}
// SetResourceTags sets the ResourceTags field's value.
func (s *AwsSecurityFindingFilters) SetResourceTags(v []*MapFilter) *AwsSecurityFindingFilters {
s.ResourceTags = v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *AwsSecurityFindingFilters) SetResourceType(v []*StringFilter) *AwsSecurityFindingFilters {
s.ResourceType = v
return s
}
// SetSeverityLabel sets the SeverityLabel field's value.
func (s *AwsSecurityFindingFilters) SetSeverityLabel(v []*StringFilter) *AwsSecurityFindingFilters {
s.SeverityLabel = v
return s
}
// SetSeverityNormalized sets the SeverityNormalized field's value.
func (s *AwsSecurityFindingFilters) SetSeverityNormalized(v []*NumberFilter) *AwsSecurityFindingFilters {
s.SeverityNormalized = v
return s
}
// SetSeverityProduct sets the SeverityProduct field's value.
func (s *AwsSecurityFindingFilters) SetSeverityProduct(v []*NumberFilter) *AwsSecurityFindingFilters {
s.SeverityProduct = v
return s
}
// SetSourceUrl sets the SourceUrl field's value.
func (s *AwsSecurityFindingFilters) SetSourceUrl(v []*StringFilter) *AwsSecurityFindingFilters {
s.SourceUrl = v
return s
}
// SetThreatIntelIndicatorCategory sets the ThreatIntelIndicatorCategory field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorCategory(v []*StringFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorCategory = v
return s
}
// SetThreatIntelIndicatorLastObservedAt sets the ThreatIntelIndicatorLastObservedAt field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorLastObservedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorLastObservedAt = v
return s
}
// SetThreatIntelIndicatorSource sets the ThreatIntelIndicatorSource field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorSource(v []*StringFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorSource = v
return s
}
// SetThreatIntelIndicatorSourceUrl sets the ThreatIntelIndicatorSourceUrl field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorSourceUrl(v []*StringFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorSourceUrl = v
return s
}
// SetThreatIntelIndicatorType sets the ThreatIntelIndicatorType field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorType(v []*StringFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorType = v
return s
}
// SetThreatIntelIndicatorValue sets the ThreatIntelIndicatorValue field's value.
func (s *AwsSecurityFindingFilters) SetThreatIntelIndicatorValue(v []*StringFilter) *AwsSecurityFindingFilters {
s.ThreatIntelIndicatorValue = v
return s
}
// SetTitle sets the Title field's value.
func (s *AwsSecurityFindingFilters) SetTitle(v []*StringFilter) *AwsSecurityFindingFilters {
s.Title = v
return s
}
// SetType sets the Type field's value.
func (s *AwsSecurityFindingFilters) SetType(v []*StringFilter) *AwsSecurityFindingFilters {
s.Type = v
return s
}
// SetUpdatedAt sets the UpdatedAt field's value.
func (s *AwsSecurityFindingFilters) SetUpdatedAt(v []*DateFilter) *AwsSecurityFindingFilters {
s.UpdatedAt = v
return s
}
// SetUserDefinedFields sets the UserDefinedFields field's value.
func (s *AwsSecurityFindingFilters) SetUserDefinedFields(v []*MapFilter) *AwsSecurityFindingFilters {
s.UserDefinedFields = v
return s
}
// SetVerificationState sets the VerificationState field's value.
func (s *AwsSecurityFindingFilters) SetVerificationState(v []*StringFilter) *AwsSecurityFindingFilters {
s.VerificationState = v
return s
}
// SetWorkflowState sets the WorkflowState field's value.
func (s *AwsSecurityFindingFilters) SetWorkflowState(v []*StringFilter) *AwsSecurityFindingFilters {
s.WorkflowState = v
return s
}
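// Example (illustrative sketch, not generated code): assembling a filter set
// from the setters above. This narrows findings to those created in the last
// 30 days using the DateFilter and DateRange types defined later in this
// file. The "DAYS" unit string is an assumption about the DateRangeUnit enum
// constants defined elsewhere in this package.
func exampleRecentFindingsFilters() *AwsSecurityFindingFilters {
	createdInLast30Days := (&DateFilter{}).
		SetDateRange((&DateRange{}).SetUnit("DAYS").SetValue(30))

	return (&AwsSecurityFindingFilters{}).
		SetCreatedAt([]*DateFilter{createdInLast30Days})
}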
type BatchDisableStandardsInput struct {
_ struct{} `type:"structure"`
// The ARNs of the standards subscriptions that you want to disable.
//
// StandardsSubscriptionArns is a required field
StandardsSubscriptionArns []*string `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s BatchDisableStandardsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchDisableStandardsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *BatchDisableStandardsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "BatchDisableStandardsInput"}
if s.StandardsSubscriptionArns == nil {
invalidParams.Add(request.NewErrParamRequired("StandardsSubscriptionArns"))
}
if s.StandardsSubscriptionArns != nil && len(s.StandardsSubscriptionArns) < 1 {
invalidParams.Add(request.NewErrParamMinLen("StandardsSubscriptionArns", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetStandardsSubscriptionArns sets the StandardsSubscriptionArns field's value.
func (s *BatchDisableStandardsInput) SetStandardsSubscriptionArns(v []*string) *BatchDisableStandardsInput {
s.StandardsSubscriptionArns = v
return s
}
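// Example (illustrative sketch, not generated code): the two validation
// paths above. A nil StandardsSubscriptionArns slice fails the required-field
// check, a non-nil empty slice fails the minimum-length check, and a
// single-element slice passes. The subscription ARN is a hypothetical
// placeholder.
func exampleBatchDisableStandardsValidation() (nilErr, emptyErr, okErr error) {
	nilErr = (&BatchDisableStandardsInput{}).Validate()

	emptyErr = (&BatchDisableStandardsInput{}).
		SetStandardsSubscriptionArns([]*string{}).
		Validate()

	arn := "arn:aws:securityhub:us-west-2:111122223333:subscription/example-standard/v/1.0.0"
	okErr = (&BatchDisableStandardsInput{}).
		SetStandardsSubscriptionArns([]*string{&arn}).
		Validate()

	return nilErr, emptyErr, okErr
}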
type BatchDisableStandardsOutput struct {
_ struct{} `type:"structure"`
// The details of the standards subscriptions that were disabled.
StandardsSubscriptions []*StandardsSubscription `type:"list"`
}
// String returns the string representation
func (s BatchDisableStandardsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchDisableStandardsOutput) GoString() string {
return s.String()
}
// SetStandardsSubscriptions sets the StandardsSubscriptions field's value.
func (s *BatchDisableStandardsOutput) SetStandardsSubscriptions(v []*StandardsSubscription) *BatchDisableStandardsOutput {
s.StandardsSubscriptions = v
return s
}
type BatchEnableStandardsInput struct {
_ struct{} `type:"structure"`
// The list of standards that you want to enable.
//
// StandardsSubscriptionRequests is a required field
StandardsSubscriptionRequests []*StandardsSubscriptionRequest `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s BatchEnableStandardsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchEnableStandardsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *BatchEnableStandardsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "BatchEnableStandardsInput"}
if s.StandardsSubscriptionRequests == nil {
invalidParams.Add(request.NewErrParamRequired("StandardsSubscriptionRequests"))
}
if s.StandardsSubscriptionRequests != nil && len(s.StandardsSubscriptionRequests) < 1 {
invalidParams.Add(request.NewErrParamMinLen("StandardsSubscriptionRequests", 1))
}
if s.StandardsSubscriptionRequests != nil {
for i, v := range s.StandardsSubscriptionRequests {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StandardsSubscriptionRequests", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetStandardsSubscriptionRequests sets the StandardsSubscriptionRequests field's value.
func (s *BatchEnableStandardsInput) SetStandardsSubscriptionRequests(v []*StandardsSubscriptionRequest) *BatchEnableStandardsInput {
s.StandardsSubscriptionRequests = v
return s
}
type BatchEnableStandardsOutput struct {
_ struct{} `type:"structure"`
// The details of the standards subscriptions that were enabled.
StandardsSubscriptions []*StandardsSubscription `type:"list"`
}
// String returns the string representation
func (s BatchEnableStandardsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchEnableStandardsOutput) GoString() string {
return s.String()
}
// SetStandardsSubscriptions sets the StandardsSubscriptions field's value.
func (s *BatchEnableStandardsOutput) SetStandardsSubscriptions(v []*StandardsSubscription) *BatchEnableStandardsOutput {
s.StandardsSubscriptions = v
return s
}
type BatchImportFindingsInput struct {
_ struct{} `type:"structure"`
// A list of findings that you want to import. Findings must be submitted in
// the AwsSecurityFinding format.
//
// Findings is a required field
Findings []*AwsSecurityFinding `type:"list" required:"true"`
}
// String returns the string representation
func (s BatchImportFindingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchImportFindingsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *BatchImportFindingsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "BatchImportFindingsInput"}
if s.Findings == nil {
invalidParams.Add(request.NewErrParamRequired("Findings"))
}
if s.Findings != nil {
for i, v := range s.Findings {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Findings", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFindings sets the Findings field's value.
func (s *BatchImportFindingsInput) SetFindings(v []*AwsSecurityFinding) *BatchImportFindingsInput {
s.Findings = v
return s
}
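// Example (illustrative sketch, not generated code): BatchImportFindingsInput.Validate
// walks every element of Findings, so an invalid finding surfaces as a nested
// parameter error. The account ID and finding ID are hypothetical placeholders.
func exampleBatchImportFindingsValidation() error {
	incomplete := (&AwsSecurityFinding{}).
		SetAwsAccountId("111122223333").
		SetId("example-finding-0001")

	input := (&BatchImportFindingsInput{}).
		SetFindings([]*AwsSecurityFinding{incomplete})

	// Validate reports the element's missing required fields (CreatedAt,
	// GeneratorId, ProductArn, and so on) nested under "Findings[0]".
	return input.Validate()
}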
type BatchImportFindingsOutput struct {
_ struct{} `type:"structure"`
// The number of findings that cannot be imported.
//
// FailedCount is a required field
FailedCount *int64 `type:"integer" required:"true"`
// The list of the findings that cannot be imported.
FailedFindings []*ImportFindingsError `type:"list"`
// The number of findings that were successfully imported.
//
// SuccessCount is a required field
SuccessCount *int64 `type:"integer" required:"true"`
}
// String returns the string representation
func (s BatchImportFindingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BatchImportFindingsOutput) GoString() string {
return s.String()
}
// SetFailedCount sets the FailedCount field's value.
func (s *BatchImportFindingsOutput) SetFailedCount(v int64) *BatchImportFindingsOutput {
s.FailedCount = &v
return s
}
// SetFailedFindings sets the FailedFindings field's value.
func (s *BatchImportFindingsOutput) SetFailedFindings(v []*ImportFindingsError) *BatchImportFindingsOutput {
s.FailedFindings = v
return s
}
// SetSuccessCount sets the SuccessCount field's value.
func (s *BatchImportFindingsOutput) SetSuccessCount(v int64) *BatchImportFindingsOutput {
s.SuccessCount = &v
return s
}
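// Example (illustrative sketch, not generated code): reading the counters on
// a BatchImportFindingsOutput. FailedCount and SuccessCount are required in
// responses, but the nil checks keep this helper safe for partially populated
// values, for instance in tests.
func exampleSummarizeBatchImport(out *BatchImportFindingsOutput) string {
	if out == nil {
		return "no response"
	}
	var succeeded, failed int64
	if out.SuccessCount != nil {
		succeeded = *out.SuccessCount
	}
	if out.FailedCount != nil {
		failed = *out.FailedCount
	}
	return fmt.Sprintf("imported %d findings, %d failed (%d error entries)",
		succeeded, failed, len(out.FailedFindings))
}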
// Exclusive to findings that are generated as the result of a check run against
// a specific rule in a supported standard (for example, AWS CIS Foundations).
// Contains compliance-related finding details.
type Compliance struct {
_ struct{} `type:"structure"`
// Indicates the result of a compliance check.
Status *string `type:"string" enum:"ComplianceStatus"`
}
// String returns the string representation
func (s Compliance) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Compliance) GoString() string {
return s.String()
}
// SetStatus sets the Status field's value.
func (s *Compliance) SetStatus(v string) *Compliance {
s.Status = &v
return s
}
// Container details related to a finding.
type ContainerDetails struct {
_ struct{} `type:"structure"`
// The identifier of the image related to a finding.
ImageId *string `type:"string"`
// The name of the image related to a finding.
ImageName *string `type:"string"`
// The date/time that the container was started.
LaunchedAt *string `type:"string"`
// The name of the container related to a finding.
Name *string `type:"string"`
}
// String returns the string representation
func (s ContainerDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ContainerDetails) GoString() string {
return s.String()
}
// SetImageId sets the ImageId field's value.
func (s *ContainerDetails) SetImageId(v string) *ContainerDetails {
s.ImageId = &v
return s
}
// SetImageName sets the ImageName field's value.
func (s *ContainerDetails) SetImageName(v string) *ContainerDetails {
s.ImageName = &v
return s
}
// SetLaunchedAt sets the LaunchedAt field's value.
func (s *ContainerDetails) SetLaunchedAt(v string) *ContainerDetails {
s.LaunchedAt = &v
return s
}
// SetName sets the Name field's value.
func (s *ContainerDetails) SetName(v string) *ContainerDetails {
s.Name = &v
return s
}
type CreateInsightInput struct {
_ struct{} `type:"structure"`
// A collection of attributes that are applied to all active Security Hub-aggregated
// findings and that result in a subset of findings that are included in this
// insight.
//
// Filters is a required field
Filters *AwsSecurityFindingFilters `type:"structure" required:"true"`
// The attribute by which the insight's findings are grouped. This attribute
// is used as a findings aggregator for the purposes of viewing and managing
// multiple related findings under a single operand.
//
// GroupByAttribute is a required field
GroupByAttribute *string `type:"string" required:"true"`
// The user-defined name that identifies the insight that you want to create.
//
// Name is a required field
Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateInsightInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateInsightInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateInsightInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateInsightInput"}
if s.Filters == nil {
invalidParams.Add(request.NewErrParamRequired("Filters"))
}
if s.GroupByAttribute == nil {
invalidParams.Add(request.NewErrParamRequired("GroupByAttribute"))
}
if s.Name == nil {
invalidParams.Add(request.NewErrParamRequired("Name"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFilters sets the Filters field's value.
func (s *CreateInsightInput) SetFilters(v *AwsSecurityFindingFilters) *CreateInsightInput {
s.Filters = v
return s
}
// SetGroupByAttribute sets the GroupByAttribute field's value.
func (s *CreateInsightInput) SetGroupByAttribute(v string) *CreateInsightInput {
s.GroupByAttribute = &v
return s
}
// SetName sets the Name field's value.
func (s *CreateInsightInput) SetName(v string) *CreateInsightInput {
s.Name = &v
return s
}
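// Example (illustrative sketch, not generated code): assembling a
// CreateInsightInput from the setters above and validating it. The insight
// name and the "ResourceType" GroupByAttribute value are hypothetical
// placeholders, and the "DAYS" unit is an assumption about the DateRangeUnit
// enum constants defined elsewhere in this package.
func exampleCreateInsightInput() (*CreateInsightInput, error) {
	filters := (&AwsSecurityFindingFilters{}).
		SetCreatedAt([]*DateFilter{
			(&DateFilter{}).SetDateRange((&DateRange{}).SetUnit("DAYS").SetValue(30)),
		})

	input := (&CreateInsightInput{}).
		SetName("Findings created in the last 30 days").
		SetGroupByAttribute("ResourceType").
		SetFilters(filters)

	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}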
type CreateInsightOutput struct {
_ struct{} `type:"structure"`
// The ARN of the created insight.
//
// InsightArn is a required field
InsightArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateInsightOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateInsightOutput) GoString() string {
return s.String()
}
// SetInsightArn sets the InsightArn field's value.
func (s *CreateInsightOutput) SetInsightArn(v string) *CreateInsightOutput {
s.InsightArn = &v
return s
}
type CreateMembersInput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the accounts that you want
// to associate with the master Security Hub account.
AccountDetails []*AccountDetails `type:"list"`
}
// String returns the string representation
func (s CreateMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateMembersInput) GoString() string {
return s.String()
}
// SetAccountDetails sets the AccountDetails field's value.
func (s *CreateMembersInput) SetAccountDetails(v []*AccountDetails) *CreateMembersInput {
s.AccountDetails = v
return s
}
type CreateMembersOutput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s CreateMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateMembersOutput) GoString() string {
return s.String()
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *CreateMembersOutput) SetUnprocessedAccounts(v []*Result) *CreateMembersOutput {
s.UnprocessedAccounts = v
return s
}
// A date filter for querying findings.
type DateFilter struct {
_ struct{} `type:"structure"`
// A date range for the date filter.
DateRange *DateRange `type:"structure"`
// An end date for the date filter.
End *string `type:"string"`
// A start date for the date filter.
Start *string `type:"string"`
}
// String returns the string representation
func (s DateFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DateFilter) GoString() string {
return s.String()
}
// SetDateRange sets the DateRange field's value.
func (s *DateFilter) SetDateRange(v *DateRange) *DateFilter {
s.DateRange = v
return s
}
// SetEnd sets the End field's value.
func (s *DateFilter) SetEnd(v string) *DateFilter {
s.End = &v
return s
}
// SetStart sets the Start field's value.
func (s *DateFilter) SetStart(v string) *DateFilter {
s.Start = &v
return s
}
// A date range for the date filter.
type DateRange struct {
_ struct{} `type:"structure"`
// A date range unit for the date filter.
Unit *string `type:"string" enum:"DateRangeUnit"`
// A date range value for the date filter.
Value *int64 `type:"integer"`
}
// String returns the string representation
func (s DateRange) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DateRange) GoString() string {
return s.String()
}
// SetUnit sets the Unit field's value.
func (s *DateRange) SetUnit(v string) *DateRange {
s.Unit = &v
return s
}
// SetValue sets the Value field's value.
func (s *DateRange) SetValue(v int64) *DateRange {
s.Value = &v
return s
}
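// Example (illustrative sketch, not generated code): the two ways a
// DateFilter can be expressed -- a fixed Start/End window given as ISO8601
// strings, or a relative DateRange in whole units. The "DAYS" unit is an
// assumption about the DateRangeUnit enum constants defined elsewhere in this
// package.
func exampleDateFilters() (fixedWindow, relativeWindow *DateFilter) {
	fixedWindow = (&DateFilter{}).
		SetStart("2019-01-01T00:00:00Z").
		SetEnd("2019-01-31T23:59:59Z")

	relativeWindow = (&DateFilter{}).
		SetDateRange((&DateRange{}).SetUnit("DAYS").SetValue(7))

	return fixedWindow, relativeWindow
}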
type DeclineInvitationsInput struct {
_ struct{} `type:"structure"`
// A list of account IDs specifying accounts whose invitations to Security Hub
// you want to decline.
AccountIds []*string `type:"list"`
}
// String returns the string representation
func (s DeclineInvitationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeclineInvitationsInput) GoString() string {
return s.String()
}
// SetAccountIds sets the AccountIds field's value.
func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitationsInput {
s.AccountIds = v
return s
}
type DeclineInvitationsOutput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s DeclineInvitationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeclineInvitationsOutput) GoString() string {
return s.String()
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeclineInvitationsOutput {
s.UnprocessedAccounts = v
return s
}
type DeleteInsightInput struct {
_ struct{} `type:"structure"`
// The ARN of the insight that you want to delete.
//
// InsightArn is a required field
InsightArn *string `location:"uri" locationName:"InsightArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteInsightInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteInsightInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteInsightInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteInsightInput"}
if s.InsightArn == nil {
invalidParams.Add(request.NewErrParamRequired("InsightArn"))
}
if s.InsightArn != nil && len(*s.InsightArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("InsightArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetInsightArn sets the InsightArn field's value.
func (s *DeleteInsightInput) SetInsightArn(v string) *DeleteInsightInput {
s.InsightArn = &v
return s
}
type DeleteInsightOutput struct {
_ struct{} `type:"structure"`
// The ARN of the insight that was deleted.
//
// InsightArn is a required field
InsightArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteInsightOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteInsightOutput) GoString() string {
return s.String()
}
// SetInsightArn sets the InsightArn field's value.
func (s *DeleteInsightOutput) SetInsightArn(v string) *DeleteInsightOutput {
s.InsightArn = &v
return s
}
type DeleteInvitationsInput struct {
_ struct{} `type:"structure"`
// A list of account IDs specifying accounts whose invitations to Security Hub
// you want to delete.
AccountIds []*string `type:"list"`
}
// String returns the string representation
func (s DeleteInvitationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteInvitationsInput) GoString() string {
return s.String()
}
// SetAccountIds sets the AccountIds field's value.
func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsInput {
s.AccountIds = v
return s
}
type DeleteInvitationsOutput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s DeleteInvitationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteInvitationsOutput) GoString() string {
return s.String()
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *DeleteInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeleteInvitationsOutput {
s.UnprocessedAccounts = v
return s
}
type DeleteMembersInput struct {
_ struct{} `type:"structure"`
// A list of account IDs of the Security Hub member accounts that you want to
// delete.
AccountIds []*string `type:"list"`
}
// String returns the string representation
func (s DeleteMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteMembersInput) GoString() string {
return s.String()
}
// SetAccountIds sets the AccountIds field's value.
func (s *DeleteMembersInput) SetAccountIds(v []*string) *DeleteMembersInput {
s.AccountIds = v
return s
}
type DeleteMembersOutput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s DeleteMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteMembersOutput) GoString() string {
return s.String()
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *DeleteMembersOutput) SetUnprocessedAccounts(v []*Result) *DeleteMembersOutput {
s.UnprocessedAccounts = v
return s
}
type DisableImportFindingsForProductInput struct {
_ struct{} `type:"structure"`
// The ARN of a resource that represents your subscription to a supported product.
//
// ProductSubscriptionArn is a required field
ProductSubscriptionArn *string `location:"uri" locationName:"ProductSubscriptionArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DisableImportFindingsForProductInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisableImportFindingsForProductInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DisableImportFindingsForProductInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DisableImportFindingsForProductInput"}
if s.ProductSubscriptionArn == nil {
invalidParams.Add(request.NewErrParamRequired("ProductSubscriptionArn"))
}
if s.ProductSubscriptionArn != nil && len(*s.ProductSubscriptionArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ProductSubscriptionArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetProductSubscriptionArn sets the ProductSubscriptionArn field's value.
func (s *DisableImportFindingsForProductInput) SetProductSubscriptionArn(v string) *DisableImportFindingsForProductInput {
s.ProductSubscriptionArn = &v
return s
}
type DisableImportFindingsForProductOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisableImportFindingsForProductOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisableImportFindingsForProductOutput) GoString() string {
return s.String()
}
type DisableSecurityHubInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisableSecurityHubInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisableSecurityHubInput) GoString() string {
return s.String()
}
type DisableSecurityHubOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisableSecurityHubOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisableSecurityHubOutput) GoString() string {
return s.String()
}
type DisassociateFromMasterAccountInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisassociateFromMasterAccountInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateFromMasterAccountInput) GoString() string {
return s.String()
}
type DisassociateFromMasterAccountOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisassociateFromMasterAccountOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateFromMasterAccountOutput) GoString() string {
return s.String()
}
type DisassociateMembersInput struct {
_ struct{} `type:"structure"`
// The account IDs of the member accounts that you want to disassociate from
// the master account.
AccountIds []*string `type:"list"`
}
// String returns the string representation
func (s DisassociateMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateMembersInput) GoString() string {
return s.String()
}
// SetAccountIds sets the AccountIds field's value.
func (s *DisassociateMembersInput) SetAccountIds(v []*string) *DisassociateMembersInput {
s.AccountIds = v
return s
}
type DisassociateMembersOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisassociateMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateMembersOutput) GoString() string {
return s.String()
}
type EnableImportFindingsForProductInput struct {
_ struct{} `type:"structure"`
// The ARN of the product that generates findings that you want to import into
// Security Hub.
//
// ProductArn is a required field
ProductArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s EnableImportFindingsForProductInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EnableImportFindingsForProductInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *EnableImportFindingsForProductInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "EnableImportFindingsForProductInput"}
if s.ProductArn == nil {
invalidParams.Add(request.NewErrParamRequired("ProductArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetProductArn sets the ProductArn field's value.
func (s *EnableImportFindingsForProductInput) SetProductArn(v string) *EnableImportFindingsForProductInput {
s.ProductArn = &v
return s
}
type EnableImportFindingsForProductOutput struct {
_ struct{} `type:"structure"`
// The ARN of a resource that represents your subscription to the product that
// generates the findings that you want to import into Security Hub.
ProductSubscriptionArn *string `type:"string"`
}
// String returns the string representation
func (s EnableImportFindingsForProductOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EnableImportFindingsForProductOutput) GoString() string {
return s.String()
}
// SetProductSubscriptionArn sets the ProductSubscriptionArn field's value.
func (s *EnableImportFindingsForProductOutput) SetProductSubscriptionArn(v string) *EnableImportFindingsForProductOutput {
s.ProductSubscriptionArn = &v
return s
}
type EnableSecurityHubInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s EnableSecurityHubInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EnableSecurityHubInput) GoString() string {
return s.String()
}
type EnableSecurityHubOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s EnableSecurityHubOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EnableSecurityHubOutput) GoString() string {
return s.String()
}
type GetEnabledStandardsInput struct {
_ struct{} `type:"structure"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the GetEnabledStandards operation. For subsequent calls to the operation,
// fill nextToken in the request with the value of nextToken from the previous
// response to continue listing data.
NextToken *string `type:"string"`
	// The list of standards subscription ARNs that you want to list and describe.
StandardsSubscriptionArns []*string `min:"1" type:"list"`
}
// String returns the string representation
func (s GetEnabledStandardsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetEnabledStandardsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetEnabledStandardsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetEnabledStandardsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if s.StandardsSubscriptionArns != nil && len(s.StandardsSubscriptionArns) < 1 {
invalidParams.Add(request.NewErrParamMinLen("StandardsSubscriptionArns", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *GetEnabledStandardsInput) SetMaxResults(v int64) *GetEnabledStandardsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *GetEnabledStandardsInput) SetNextToken(v string) *GetEnabledStandardsInput {
s.NextToken = &v
return s
}
// SetStandardsSubscriptionArns sets the StandardsSubscriptionArns field's value.
func (s *GetEnabledStandardsInput) SetStandardsSubscriptionArns(v []*string) *GetEnabledStandardsInput {
s.StandardsSubscriptionArns = v
return s
}
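// Editor's sketch (not generated code): demonstrates the NextToken pagination pattern
// described in the GetEnabledStandardsInput comments, using the setters defined above.
// The client variable and page size are placeholders.
func exampleListEnabledStandards(svc *SecurityHub) ([]*StandardsSubscription, error) {
	var subs []*StandardsSubscription
	input := &GetEnabledStandardsInput{}
	input.SetMaxResults(25) // MaxResults must be at least 1 (see Validate above)
	for {
		out, err := svc.GetEnabledStandards(input)
		if err != nil {
			return nil, err
		}
		subs = append(subs, out.StandardsSubscriptions...)
		if out.NextToken == nil || *out.NextToken == "" {
			return subs, nil
		}
		input.SetNextToken(*out.NextToken) // continue from the previous response
	}
}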
type GetEnabledStandardsOutput struct {
_ struct{} `type:"structure"`
// The token that is required for pagination.
NextToken *string `type:"string"`
// The standards subscription details returned by the operation.
StandardsSubscriptions []*StandardsSubscription `type:"list"`
}
// String returns the string representation
func (s GetEnabledStandardsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetEnabledStandardsOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *GetEnabledStandardsOutput) SetNextToken(v string) *GetEnabledStandardsOutput {
s.NextToken = &v
return s
}
// SetStandardsSubscriptions sets the StandardsSubscriptions field's value.
func (s *GetEnabledStandardsOutput) SetStandardsSubscriptions(v []*StandardsSubscription) *GetEnabledStandardsOutput {
s.StandardsSubscriptions = v
return s
}
type GetFindingsInput struct {
_ struct{} `type:"structure"`
	// A collection of attributes that is used for querying findings.
Filters *AwsSecurityFindingFilters `type:"structure"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the GetFindings operation. For subsequent calls to the operation,
// fill nextToken in the request with the value of nextToken from the previous
// response to continue listing data.
NextToken *string `type:"string"`
// A collection of attributes used for sorting findings.
SortCriteria []*SortCriterion `type:"list"`
}
// String returns the string representation
func (s GetFindingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetFindingsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetFindingsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetFindingsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFilters sets the Filters field's value.
func (s *GetFindingsInput) SetFilters(v *AwsSecurityFindingFilters) *GetFindingsInput {
s.Filters = v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *GetFindingsInput) SetMaxResults(v int64) *GetFindingsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *GetFindingsInput) SetNextToken(v string) *GetFindingsInput {
s.NextToken = &v
return s
}
// SetSortCriteria sets the SortCriteria field's value.
func (s *GetFindingsInput) SetSortCriteria(v []*SortCriterion) *GetFindingsInput {
s.SortCriteria = v
return s
}
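// Editor's sketch (not generated code): builds a GetFindingsInput with a sort criterion
// and pages through results with NextToken, as the field comments above describe. The
// sort field name "UpdatedAt" and the client variable are assumptions, not values taken
// from this API definition; see the SortOrder enum values in this package.
func exampleGetRecentFindings(svc *SecurityHub) ([]*AwsSecurityFinding, error) {
	sc := &SortCriterion{}
	sc.SetField("UpdatedAt") // assumed sortable finding attribute
	sc.SetSortOrder("desc")  // assumed SortOrder enum value
	input := &GetFindingsInput{}
	input.SetMaxResults(50)
	input.SetSortCriteria([]*SortCriterion{sc})
	var findings []*AwsSecurityFinding
	for {
		out, err := svc.GetFindings(input)
		if err != nil {
			return nil, err
		}
		findings = append(findings, out.Findings...)
		if out.NextToken == nil || *out.NextToken == "" {
			return findings, nil
		}
		input.SetNextToken(*out.NextToken)
	}
}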
type GetFindingsOutput struct {
_ struct{} `type:"structure"`
// Findings details returned by the operation.
//
// Findings is a required field
Findings []*AwsSecurityFinding `type:"list" required:"true"`
// The token that is required for pagination.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s GetFindingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetFindingsOutput) GoString() string {
return s.String()
}
// SetFindings sets the Findings field's value.
func (s *GetFindingsOutput) SetFindings(v []*AwsSecurityFinding) *GetFindingsOutput {
s.Findings = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *GetFindingsOutput) SetNextToken(v string) *GetFindingsOutput {
s.NextToken = &v
return s
}
type GetInsightResultsInput struct {
_ struct{} `type:"structure"`
// The ARN of the insight whose results you want to see.
//
// InsightArn is a required field
InsightArn *string `location:"uri" locationName:"InsightArn" type:"string" required:"true"`
}
// String returns the string representation
func (s GetInsightResultsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInsightResultsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInsightResultsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetInsightResultsInput"}
if s.InsightArn == nil {
invalidParams.Add(request.NewErrParamRequired("InsightArn"))
}
if s.InsightArn != nil && len(*s.InsightArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("InsightArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetInsightArn sets the InsightArn field's value.
func (s *GetInsightResultsInput) SetInsightArn(v string) *GetInsightResultsInput {
s.InsightArn = &v
return s
}
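// Editor's sketch (not generated code): calls GetInsightResults for a single insight ARN
// and reads the grouped result values into a map. The insight ARN is a caller-supplied
// placeholder.
func exampleInsightResultCounts(svc *SecurityHub, insightArn string) (map[string]int64, error) {
	input := &GetInsightResultsInput{}
	input.SetInsightArn(insightArn) // required; minimum length checked by Validate above
	out, err := svc.GetInsightResults(input)
	if err != nil {
		return nil, err
	}
	counts := make(map[string]int64)
	for _, rv := range out.InsightResults.ResultValues {
		if rv.GroupByAttributeValue != nil && rv.Count != nil {
			counts[*rv.GroupByAttributeValue] = *rv.Count
		}
	}
	return counts, nil
}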
type GetInsightResultsOutput struct {
_ struct{} `type:"structure"`
// The insight results returned by the operation.
//
// InsightResults is a required field
InsightResults *InsightResults `type:"structure" required:"true"`
}
// String returns the string representation
func (s GetInsightResultsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInsightResultsOutput) GoString() string {
return s.String()
}
// SetInsightResults sets the InsightResults field's value.
func (s *GetInsightResultsOutput) SetInsightResults(v *InsightResults) *GetInsightResultsOutput {
s.InsightResults = v
return s
}
type GetInsightsInput struct {
_ struct{} `type:"structure"`
	// The ARNs of the insights that you want to describe.
InsightArns []*string `type:"list"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the GetInsights operation. For subsequent calls to the operation,
// fill nextToken in the request with the value of nextToken from the previous
// response to continue listing data.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s GetInsightsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInsightsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetInsightsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetInsightsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetInsightArns sets the InsightArns field's value.
func (s *GetInsightsInput) SetInsightArns(v []*string) *GetInsightsInput {
s.InsightArns = v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *GetInsightsInput) SetMaxResults(v int64) *GetInsightsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *GetInsightsInput) SetNextToken(v string) *GetInsightsInput {
s.NextToken = &v
return s
}
type GetInsightsOutput struct {
_ struct{} `type:"structure"`
// The insights returned by the operation.
//
// Insights is a required field
Insights []*Insight `type:"list" required:"true"`
// The token that is required for pagination.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s GetInsightsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInsightsOutput) GoString() string {
return s.String()
}
// SetInsights sets the Insights field's value.
func (s *GetInsightsOutput) SetInsights(v []*Insight) *GetInsightsOutput {
s.Insights = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *GetInsightsOutput) SetNextToken(v string) *GetInsightsOutput {
s.NextToken = &v
return s
}
type GetInvitationsCountInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s GetInvitationsCountInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInvitationsCountInput) GoString() string {
return s.String()
}
type GetInvitationsCountOutput struct {
_ struct{} `type:"structure"`
// The number of all membership invitations sent to this Security Hub member
// account, not including the currently accepted invitation.
InvitationsCount *int64 `type:"integer"`
}
// String returns the string representation
func (s GetInvitationsCountOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetInvitationsCountOutput) GoString() string {
return s.String()
}
// SetInvitationsCount sets the InvitationsCount field's value.
func (s *GetInvitationsCountOutput) SetInvitationsCount(v int64) *GetInvitationsCountOutput {
s.InvitationsCount = &v
return s
}
type GetMasterAccountInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s GetMasterAccountInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetMasterAccountInput) GoString() string {
return s.String()
}
type GetMasterAccountOutput struct {
_ struct{} `type:"structure"`
	// Details about the Security Hub master account for the current member account.
Master *Invitation `type:"structure"`
}
// String returns the string representation
func (s GetMasterAccountOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetMasterAccountOutput) GoString() string {
return s.String()
}
// SetMaster sets the Master field's value.
func (s *GetMasterAccountOutput) SetMaster(v *Invitation) *GetMasterAccountOutput {
s.Master = v
return s
}
type GetMembersInput struct {
_ struct{} `type:"structure"`
	// A list of account IDs for the Security Hub member accounts whose details you
	// want to return.
//
// AccountIds is a required field
AccountIds []*string `type:"list" required:"true"`
}
// String returns the string representation
func (s GetMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetMembersInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetMembersInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetMembersInput"}
if s.AccountIds == nil {
invalidParams.Add(request.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAccountIds sets the AccountIds field's value.
func (s *GetMembersInput) SetAccountIds(v []*string) *GetMembersInput {
s.AccountIds = v
return s
}
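// Editor's sketch (not generated code): looks up member account details and separates
// the accounts that could not be processed, mirroring the GetMembersOutput fields below.
// The account IDs are caller-supplied placeholders.
func exampleDescribeMembers(svc *SecurityHub, accountIDs []*string) ([]*Member, []*Result, error) {
	input := &GetMembersInput{}
	input.SetAccountIds(accountIDs) // required field
	out, err := svc.GetMembers(input)
	if err != nil {
		return nil, nil, err
	}
	return out.Members, out.UnprocessedAccounts, nil
}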
type GetMembersOutput struct {
_ struct{} `type:"structure"`
// A list of details about the Security Hub member accounts.
Members []*Member `type:"list"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s GetMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetMembersOutput) GoString() string {
return s.String()
}
// SetMembers sets the Members field's value.
func (s *GetMembersOutput) SetMembers(v []*Member) *GetMembersOutput {
s.Members = v
return s
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *GetMembersOutput) SetUnprocessedAccounts(v []*Result) *GetMembersOutput {
s.UnprocessedAccounts = v
return s
}
// Includes details about a finding that could not be imported.
type ImportFindingsError struct {
_ struct{} `type:"structure"`
// The code of the error made during the BatchImportFindings operation.
//
// ErrorCode is a required field
ErrorCode *string `type:"string" required:"true"`
// The message of the error made during the BatchImportFindings operation.
//
// ErrorMessage is a required field
ErrorMessage *string `type:"string" required:"true"`
	// The ID of the error made during the BatchImportFindings operation.
//
// Id is a required field
Id *string `type:"string" required:"true"`
}
// String returns the string representation
func (s ImportFindingsError) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ImportFindingsError) GoString() string {
return s.String()
}
// SetErrorCode sets the ErrorCode field's value.
func (s *ImportFindingsError) SetErrorCode(v string) *ImportFindingsError {
s.ErrorCode = &v
return s
}
// SetErrorMessage sets the ErrorMessage field's value.
func (s *ImportFindingsError) SetErrorMessage(v string) *ImportFindingsError {
s.ErrorMessage = &v
return s
}
// SetId sets the Id field's value.
func (s *ImportFindingsError) SetId(v string) *ImportFindingsError {
s.Id = &v
return s
}
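// Editor's sketch (not generated code): shows how ImportFindingsError values are typically
// inspected after a BatchImportFindings call. It assumes the BatchImportFindingsOutput
// defined earlier in this file exposes the failed entries in a FailedFindings field; verify
// the field name against that type before relying on this.
func exampleReportImportErrors(out *BatchImportFindingsOutput) []string {
	var msgs []string
	for _, e := range out.FailedFindings { // assumed field name on BatchImportFindingsOutput
		if e == nil {
			continue
		}
		msg := ""
		if e.Id != nil {
			msg += *e.Id + ": "
		}
		if e.ErrorCode != nil {
			msg += *e.ErrorCode
		}
		if e.ErrorMessage != nil {
			msg += " - " + *e.ErrorMessage
		}
		msgs = append(msgs, msg)
	}
	return msgs
}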
// Contains information about a Security Hub insight.
type Insight struct {
_ struct{} `type:"structure"`
// A collection of attributes that are applied to all active Security Hub-aggregated
// findings and that result in a subset of findings that are included in this
// insight.
//
// Filters is a required field
Filters *AwsSecurityFindingFilters `type:"structure" required:"true"`
// The attribute by which the insight's findings are grouped. This attribute
// is used as a findings aggregator for the purposes of viewing and managing
// multiple related findings under a single operand.
//
// GroupByAttribute is a required field
GroupByAttribute *string `type:"string" required:"true"`
// The ARN of a Security Hub insight.
//
// InsightArn is a required field
InsightArn *string `type:"string" required:"true"`
// The name of a Security Hub insight.
//
// Name is a required field
Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Insight) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Insight) GoString() string {
return s.String()
}
// SetFilters sets the Filters field's value.
func (s *Insight) SetFilters(v *AwsSecurityFindingFilters) *Insight {
s.Filters = v
return s
}
// SetGroupByAttribute sets the GroupByAttribute field's value.
func (s *Insight) SetGroupByAttribute(v string) *Insight {
s.GroupByAttribute = &v
return s
}
// SetInsightArn sets the InsightArn field's value.
func (s *Insight) SetInsightArn(v string) *Insight {
s.InsightArn = &v
return s
}
// SetName sets the Name field's value.
func (s *Insight) SetName(v string) *Insight {
s.Name = &v
return s
}
// The insight result values returned by the GetInsightResults operation.
type InsightResultValue struct {
_ struct{} `type:"structure"`
// The number of findings returned for each GroupByAttributeValue.
//
// Count is a required field
Count *int64 `type:"integer" required:"true"`
	// The value of the attribute by which the findings are grouped for the insight
	// whose results are returned by the GetInsightResults operation.
//
// GroupByAttributeValue is a required field
GroupByAttributeValue *string `type:"string" required:"true"`
}
// String returns the string representation
func (s InsightResultValue) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InsightResultValue) GoString() string {
return s.String()
}
// SetCount sets the Count field's value.
func (s *InsightResultValue) SetCount(v int64) *InsightResultValue {
s.Count = &v
return s
}
// SetGroupByAttributeValue sets the GroupByAttributeValue field's value.
func (s *InsightResultValue) SetGroupByAttributeValue(v string) *InsightResultValue {
s.GroupByAttributeValue = &v
return s
}
// The insight results returned by the GetInsightResults operation.
type InsightResults struct {
_ struct{} `type:"structure"`
	// The attribute by which the findings are grouped for the insight whose results
	// are returned by the GetInsightResults operation.
//
// GroupByAttribute is a required field
GroupByAttribute *string `type:"string" required:"true"`
// The ARN of the insight whose results are returned by the GetInsightResults
// operation.
//
// InsightArn is a required field
InsightArn *string `type:"string" required:"true"`
// The list of insight result values returned by the GetInsightResults operation.
//
// ResultValues is a required field
ResultValues []*InsightResultValue `type:"list" required:"true"`
}
// String returns the string representation
func (s InsightResults) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InsightResults) GoString() string {
return s.String()
}
// SetGroupByAttribute sets the GroupByAttribute field's value.
func (s *InsightResults) SetGroupByAttribute(v string) *InsightResults {
s.GroupByAttribute = &v
return s
}
// SetInsightArn sets the InsightArn field's value.
func (s *InsightResults) SetInsightArn(v string) *InsightResults {
s.InsightArn = &v
return s
}
// SetResultValues sets the ResultValues field's value.
func (s *InsightResults) SetResultValues(v []*InsightResultValue) *InsightResults {
s.ResultValues = v
return s
}
// The details of an invitation sent to an AWS account by the Security Hub master
// account.
type Invitation struct {
_ struct{} `type:"structure"`
	// The account ID of the master Security Hub account that sent the invitation.
AccountId *string `type:"string"`
// The ID of the invitation sent by the master Security Hub account.
InvitationId *string `type:"string"`
// The timestamp of when the invitation was sent.
InvitedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The current relationship status between the inviter and invitee accounts.
MemberStatus *string `type:"string"`
}
// String returns the string representation
func (s Invitation) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Invitation) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *Invitation) SetAccountId(v string) *Invitation {
s.AccountId = &v
return s
}
// SetInvitationId sets the InvitationId field's value.
func (s *Invitation) SetInvitationId(v string) *Invitation {
s.InvitationId = &v
return s
}
// SetInvitedAt sets the InvitedAt field's value.
func (s *Invitation) SetInvitedAt(v time.Time) *Invitation {
s.InvitedAt = &v
return s
}
// SetMemberStatus sets the MemberStatus field's value.
func (s *Invitation) SetMemberStatus(v string) *Invitation {
s.MemberStatus = &v
return s
}
type InviteMembersInput struct {
_ struct{} `type:"structure"`
// A list of IDs of the AWS accounts that you want to invite to Security Hub
// as members.
AccountIds []*string `type:"list"`
}
// String returns the string representation
func (s InviteMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InviteMembersInput) GoString() string {
return s.String()
}
// SetAccountIds sets the AccountIds field's value.
func (s *InviteMembersInput) SetAccountIds(v []*string) *InviteMembersInput {
s.AccountIds = v
return s
}
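// Editor's sketch (not generated code): invites member accounts and returns the accounts
// that could not be processed. The account IDs are caller-supplied placeholders; accounts
// normally must be added with CreateMembers before they can be invited.
func exampleInviteMembers(svc *SecurityHub, accountIDs []*string) ([]*Result, error) {
	input := &InviteMembersInput{}
	input.SetAccountIds(accountIDs)
	out, err := svc.InviteMembers(input)
	if err != nil {
		return nil, err
	}
	return out.UnprocessedAccounts, nil
}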
type InviteMembersOutput struct {
_ struct{} `type:"structure"`
// A list of account ID and email address pairs of the AWS accounts that could
// not be processed.
UnprocessedAccounts []*Result `type:"list"`
}
// String returns the string representation
func (s InviteMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InviteMembersOutput) GoString() string {
return s.String()
}
// SetUnprocessedAccounts sets the UnprocessedAccounts field's value.
func (s *InviteMembersOutput) SetUnprocessedAccounts(v []*Result) *InviteMembersOutput {
s.UnprocessedAccounts = v
return s
}
// The IP filter for querying findings.
type IpFilter struct {
_ struct{} `type:"structure"`
// Finding's CIDR value.
Cidr *string `type:"string"`
}
// String returns the string representation
func (s IpFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IpFilter) GoString() string {
return s.String()
}
// SetCidr sets the Cidr field's value.
func (s *IpFilter) SetCidr(v string) *IpFilter {
s.Cidr = &v
return s
}
// A keyword filter for querying findings.
type KeywordFilter struct {
_ struct{} `type:"structure"`
// A value for the keyword.
Value *string `type:"string"`
}
// String returns the string representation
func (s KeywordFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s KeywordFilter) GoString() string {
return s.String()
}
// SetValue sets the Value field's value.
func (s *KeywordFilter) SetValue(v string) *KeywordFilter {
s.Value = &v
return s
}
type ListEnabledProductsForImportInput struct {
_ struct{} `type:"structure"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the ListEnabledProductsForImport operation. For subsequent calls
// to the operation, fill nextToken in the request with the value of NextToken
// from the previous response to continue listing data.
NextToken *string `location:"querystring" locationName:"NextToken" type:"string"`
}
// String returns the string representation
func (s ListEnabledProductsForImportInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListEnabledProductsForImportInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListEnabledProductsForImportInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListEnabledProductsForImportInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListEnabledProductsForImportInput) SetMaxResults(v int64) *ListEnabledProductsForImportInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListEnabledProductsForImportInput) SetNextToken(v string) *ListEnabledProductsForImportInput {
s.NextToken = &v
return s
}
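// Editor's sketch (not generated code): pages through the product subscription ARNs
// returned by ListEnabledProductsForImport, following the NextToken contract described
// in the input comments above. The client variable is a placeholder.
func exampleListProductSubscriptions(svc *SecurityHub) ([]*string, error) {
	var arns []*string
	input := &ListEnabledProductsForImportInput{}
	for {
		out, err := svc.ListEnabledProductsForImport(input)
		if err != nil {
			return nil, err
		}
		arns = append(arns, out.ProductSubscriptions...)
		if out.NextToken == nil || *out.NextToken == "" {
			return arns, nil
		}
		input.SetNextToken(*out.NextToken)
	}
}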
type ListEnabledProductsForImportOutput struct {
_ struct{} `type:"structure"`
// The token that is required for pagination.
NextToken *string `type:"string"`
// A list of ARNs for the resources that represent your subscriptions to products.
ProductSubscriptions []*string `type:"list"`
}
// String returns the string representation
func (s ListEnabledProductsForImportOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListEnabledProductsForImportOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListEnabledProductsForImportOutput) SetNextToken(v string) *ListEnabledProductsForImportOutput {
s.NextToken = &v
return s
}
// SetProductSubscriptions sets the ProductSubscriptions field's value.
func (s *ListEnabledProductsForImportOutput) SetProductSubscriptions(v []*string) *ListEnabledProductsForImportOutput {
s.ProductSubscriptions = v
return s
}
type ListInvitationsInput struct {
_ struct{} `type:"structure"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the ListInvitations operation. For subsequent calls to the operation,
// fill nextToken in the request with the value of NextToken from the previous
// response to continue listing data.
NextToken *string `location:"querystring" locationName:"NextToken" type:"string"`
}
// String returns the string representation
func (s ListInvitationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListInvitationsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListInvitationsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListInvitationsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListInvitationsInput) SetMaxResults(v int64) *ListInvitationsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListInvitationsInput) SetNextToken(v string) *ListInvitationsInput {
s.NextToken = &v
return s
}
type ListInvitationsOutput struct {
_ struct{} `type:"structure"`
// The details of the invitations returned by the operation.
Invitations []*Invitation `type:"list"`
// The token that is required for pagination.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListInvitationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListInvitationsOutput) GoString() string {
return s.String()
}
// SetInvitations sets the Invitations field's value.
func (s *ListInvitationsOutput) SetInvitations(v []*Invitation) *ListInvitationsOutput {
s.Invitations = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListInvitationsOutput) SetNextToken(v string) *ListInvitationsOutput {
s.NextToken = &v
return s
}
type ListMembersInput struct {
_ struct{} `type:"structure"`
// Indicates the maximum number of items that you want in the response.
MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"`
// Paginates results. Set the value of this parameter to NULL on your first
// call to the ListMembers operation. For subsequent calls to the operation,
// fill nextToken in the request with the value of NextToken from the previous
// response to continue listing data.
NextToken *string `location:"querystring" locationName:"NextToken" type:"string"`
	// Specifies which member accounts the response includes based on their relationship
// status with the master account. The default value is TRUE. If onlyAssociated
// is set to TRUE, the response includes member accounts whose relationship
// status with the master is set to ENABLED or DISABLED. If onlyAssociated is
// set to FALSE, the response includes all existing member accounts.
OnlyAssociated *bool `location:"querystring" locationName:"OnlyAssociated" type:"boolean"`
}
// String returns the string representation
func (s ListMembersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListMembersInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListMembersInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListMembersInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListMembersInput) SetMaxResults(v int64) *ListMembersInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListMembersInput) SetNextToken(v string) *ListMembersInput {
s.NextToken = &v
return s
}
// SetOnlyAssociated sets the OnlyAssociated field's value.
func (s *ListMembersInput) SetOnlyAssociated(v bool) *ListMembersInput {
s.OnlyAssociated = &v
return s
}
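// Editor's sketch (not generated code): lists all member accounts, including those whose
// relationship with the master account is not yet established, by setting OnlyAssociated
// to false as described in the field comment above. The client variable is a placeholder.
func exampleListAllMembers(svc *SecurityHub) ([]*Member, error) {
	var members []*Member
	input := &ListMembersInput{}
	input.SetOnlyAssociated(false) // default is true, which returns only ENABLED or DISABLED members
	for {
		out, err := svc.ListMembers(input)
		if err != nil {
			return nil, err
		}
		members = append(members, out.Members...)
		if out.NextToken == nil || *out.NextToken == "" {
			return members, nil
		}
		input.SetNextToken(*out.NextToken)
	}
}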
type ListMembersOutput struct {
_ struct{} `type:"structure"`
// Member details returned by the operation.
Members []*Member `type:"list"`
// The token that is required for pagination.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListMembersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListMembersOutput) GoString() string {
return s.String()
}
// SetMembers sets the Members field's value.
func (s *ListMembersOutput) SetMembers(v []*Member) *ListMembersOutput {
s.Members = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListMembersOutput) SetNextToken(v string) *ListMembersOutput {
s.NextToken = &v
return s
}
// A list of malware related to a finding.
type Malware struct {
_ struct{} `type:"structure"`
// The name of the malware that was observed.
//
// Name is a required field
Name *string `type:"string" required:"true"`
// The filesystem path of the malware that was observed.
Path *string `type:"string"`
// The state of the malware that was observed.
State *string `type:"string" enum:"MalwareState"`
// The type of the malware that was observed.
Type *string `type:"string" enum:"MalwareType"`
}
// String returns the string representation
func (s Malware) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Malware) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Malware) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Malware"}
if s.Name == nil {
invalidParams.Add(request.NewErrParamRequired("Name"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetName sets the Name field's value.
func (s *Malware) SetName(v string) *Malware {
s.Name = &v
return s
}
// SetPath sets the Path field's value.
func (s *Malware) SetPath(v string) *Malware {
s.Path = &v
return s
}
// SetState sets the State field's value.
func (s *Malware) SetState(v string) *Malware {
s.State = &v
return s
}
// SetType sets the Type field's value.
func (s *Malware) SetType(v string) *Malware {
s.Type = &v
return s
}
// The map filter for querying findings.
type MapFilter struct {
_ struct{} `type:"structure"`
// Represents the condition to be applied to a key value when querying for findings
// with a map filter.
Comparison *string `type:"string" enum:"MapFilterComparison"`
// The key of the map filter.
Key *string `type:"string"`
// The value for the key in the map filter.
Value *string `type:"string"`
}
// String returns the string representation
func (s MapFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s MapFilter) GoString() string {
return s.String()
}
// SetComparison sets the Comparison field's value.
func (s *MapFilter) SetComparison(v string) *MapFilter {
s.Comparison = &v
return s
}
// SetKey sets the Key field's value.
func (s *MapFilter) SetKey(v string) *MapFilter {
s.Key = &v
return s
}
// SetValue sets the Value field's value.
func (s *MapFilter) SetValue(v string) *MapFilter {
s.Value = &v
return s
}
// The details for a Security Hub member account.
type Member struct {
_ struct{} `type:"structure"`
// The AWS account ID of a Security Hub member account.
AccountId *string `type:"string"`
// The email of a Security Hub member account.
Email *string `type:"string"`
// Time stamp at which the member account was invited to Security Hub.
InvitedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
	// The AWS account ID of the master Security Hub account for this member account.
MasterId *string `type:"string"`
// The status of the relationship between the member account and its master
// account.
MemberStatus *string `type:"string"`
// Time stamp at which this member account was updated.
UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
// String returns the string representation
func (s Member) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Member) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *Member) SetAccountId(v string) *Member {
s.AccountId = &v
return s
}
// SetEmail sets the Email field's value.
func (s *Member) SetEmail(v string) *Member {
s.Email = &v
return s
}
// SetInvitedAt sets the InvitedAt field's value.
func (s *Member) SetInvitedAt(v time.Time) *Member {
s.InvitedAt = &v
return s
}
// SetMasterId sets the MasterId field's value.
func (s *Member) SetMasterId(v string) *Member {
s.MasterId = &v
return s
}
// SetMemberStatus sets the MemberStatus field's value.
func (s *Member) SetMemberStatus(v string) *Member {
s.MemberStatus = &v
return s
}
// SetUpdatedAt sets the UpdatedAt field's value.
func (s *Member) SetUpdatedAt(v time.Time) *Member {
s.UpdatedAt = &v
return s
}
// The details of network-related information about a finding.
type Network struct {
_ struct{} `type:"structure"`
// The destination domain of network-related information about a finding.
DestinationDomain *string `type:"string"`
// The destination IPv4 address of network-related information about a finding.
DestinationIpV4 *string `type:"string"`
// The destination IPv6 address of network-related information about a finding.
DestinationIpV6 *string `type:"string"`
// The destination port of network-related information about a finding.
DestinationPort *int64 `type:"integer"`
// Indicates the direction of network traffic associated with a finding.
Direction *string `type:"string" enum:"NetworkDirection"`
// The protocol of network-related information about a finding.
Protocol *string `type:"string"`
// The source domain of network-related information about a finding.
SourceDomain *string `type:"string"`
// The source IPv4 address of network-related information about a finding.
SourceIpV4 *string `type:"string"`
// The source IPv6 address of network-related information about a finding.
SourceIpV6 *string `type:"string"`
// The source media access control (MAC) address of network-related information
// about a finding.
SourceMac *string `type:"string"`
// The source port of network-related information about a finding.
SourcePort *int64 `type:"integer"`
}
// String returns the string representation
func (s Network) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Network) GoString() string {
return s.String()
}
// SetDestinationDomain sets the DestinationDomain field's value.
func (s *Network) SetDestinationDomain(v string) *Network {
s.DestinationDomain = &v
return s
}
// SetDestinationIpV4 sets the DestinationIpV4 field's value.
func (s *Network) SetDestinationIpV4(v string) *Network {
s.DestinationIpV4 = &v
return s
}
// SetDestinationIpV6 sets the DestinationIpV6 field's value.
func (s *Network) SetDestinationIpV6(v string) *Network {
s.DestinationIpV6 = &v
return s
}
// SetDestinationPort sets the DestinationPort field's value.
func (s *Network) SetDestinationPort(v int64) *Network {
s.DestinationPort = &v
return s
}
// SetDirection sets the Direction field's value.
func (s *Network) SetDirection(v string) *Network {
s.Direction = &v
return s
}
// SetProtocol sets the Protocol field's value.
func (s *Network) SetProtocol(v string) *Network {
s.Protocol = &v
return s
}
// SetSourceDomain sets the SourceDomain field's value.
func (s *Network) SetSourceDomain(v string) *Network {
s.SourceDomain = &v
return s
}
// SetSourceIpV4 sets the SourceIpV4 field's value.
func (s *Network) SetSourceIpV4(v string) *Network {
s.SourceIpV4 = &v
return s
}
// SetSourceIpV6 sets the SourceIpV6 field's value.
func (s *Network) SetSourceIpV6(v string) *Network {
s.SourceIpV6 = &v
return s
}
// SetSourceMac sets the SourceMac field's value.
func (s *Network) SetSourceMac(v string) *Network {
s.SourceMac = &v
return s
}
// SetSourcePort sets the SourcePort field's value.
func (s *Network) SetSourcePort(v int64) *Network {
s.SourcePort = &v
return s
}
// A user-defined note added to a finding.
type Note struct {
_ struct{} `type:"structure"`
// The text of a note.
//
// Text is a required field
Text *string `type:"string" required:"true"`
// The timestamp of when the note was updated.
//
// UpdatedAt is a required field
UpdatedAt *string `type:"string" required:"true"`
// The principal that created a note.
//
// UpdatedBy is a required field
UpdatedBy *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Note) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Note) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Note) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Note"}
if s.Text == nil {
invalidParams.Add(request.NewErrParamRequired("Text"))
}
if s.UpdatedAt == nil {
invalidParams.Add(request.NewErrParamRequired("UpdatedAt"))
}
if s.UpdatedBy == nil {
invalidParams.Add(request.NewErrParamRequired("UpdatedBy"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetText sets the Text field's value.
func (s *Note) SetText(v string) *Note {
s.Text = &v
return s
}
// SetUpdatedAt sets the UpdatedAt field's value.
func (s *Note) SetUpdatedAt(v string) *Note {
s.UpdatedAt = &v
return s
}
// SetUpdatedBy sets the UpdatedBy field's value.
func (s *Note) SetUpdatedBy(v string) *Note {
s.UpdatedBy = &v
return s
}
// The updated note.
type NoteUpdate struct {
_ struct{} `type:"structure"`
// The updated note text.
//
// Text is a required field
Text *string `type:"string" required:"true"`
// The principal that updated the note.
//
// UpdatedBy is a required field
UpdatedBy *string `type:"string" required:"true"`
}
// String returns the string representation
func (s NoteUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NoteUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *NoteUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "NoteUpdate"}
if s.Text == nil {
invalidParams.Add(request.NewErrParamRequired("Text"))
}
if s.UpdatedBy == nil {
invalidParams.Add(request.NewErrParamRequired("UpdatedBy"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetText sets the Text field's value.
func (s *NoteUpdate) SetText(v string) *NoteUpdate {
s.Text = &v
return s
}
// SetUpdatedBy sets the UpdatedBy field's value.
func (s *NoteUpdate) SetUpdatedBy(v string) *NoteUpdate {
s.UpdatedBy = &v
return s
}
// A number filter for querying findings.
type NumberFilter struct {
_ struct{} `type:"structure"`
// Represents the "equal to" condition to be applied to a single field when
// querying for findings.
Eq *float64 `type:"double"`
// Represents the "greater than equal" condition to be applied to a single field
// when querying for findings.
Gte *float64 `type:"double"`
// Represents the "less than equal" condition to be applied to a single field
// when querying for findings.
Lte *float64 `type:"double"`
}
// String returns the string representation
func (s NumberFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NumberFilter) GoString() string {
return s.String()
}
// SetEq sets the Eq field's value.
func (s *NumberFilter) SetEq(v float64) *NumberFilter {
s.Eq = &v
return s
}
// SetGte sets the Gte field's value.
func (s *NumberFilter) SetGte(v float64) *NumberFilter {
s.Gte = &v
return s
}
// SetLte sets the Lte field's value.
func (s *NumberFilter) SetLte(v float64) *NumberFilter {
s.Lte = &v
return s
}
// The details of process-related information about a finding.
type ProcessDetails struct {
_ struct{} `type:"structure"`
// The date/time that the process was launched.
LaunchedAt *string `type:"string"`
// The name of the process.
Name *string `type:"string"`
// The parent process ID.
ParentPid *int64 `type:"integer"`
// The path to the process executable.
Path *string `type:"string"`
// The process ID.
Pid *int64 `type:"integer"`
// The date/time that the process was terminated.
TerminatedAt *string `type:"string"`
}
// String returns the string representation
func (s ProcessDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ProcessDetails) GoString() string {
return s.String()
}
// SetLaunchedAt sets the LaunchedAt field's value.
func (s *ProcessDetails) SetLaunchedAt(v string) *ProcessDetails {
s.LaunchedAt = &v
return s
}
// SetName sets the Name field's value.
func (s *ProcessDetails) SetName(v string) *ProcessDetails {
s.Name = &v
return s
}
// SetParentPid sets the ParentPid field's value.
func (s *ProcessDetails) SetParentPid(v int64) *ProcessDetails {
s.ParentPid = &v
return s
}
// SetPath sets the Path field's value.
func (s *ProcessDetails) SetPath(v string) *ProcessDetails {
s.Path = &v
return s
}
// SetPid sets the Pid field's value.
func (s *ProcessDetails) SetPid(v int64) *ProcessDetails {
s.Pid = &v
return s
}
// SetTerminatedAt sets the TerminatedAt field's value.
func (s *ProcessDetails) SetTerminatedAt(v string) *ProcessDetails {
s.TerminatedAt = &v
return s
}
// Provides a recommendation on how to remediate the issue identified within
// a finding.
type Recommendation struct {
_ struct{} `type:"structure"`
// The recommendation of what to do about the issue described in a finding.
Text *string `type:"string"`
// A URL to link to general remediation information for the finding type of
// a finding.
Url *string `type:"string"`
}
// String returns the string representation
func (s Recommendation) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Recommendation) GoString() string {
return s.String()
}
// SetText sets the Text field's value.
func (s *Recommendation) SetText(v string) *Recommendation {
s.Text = &v
return s
}
// SetUrl sets the Url field's value.
func (s *Recommendation) SetUrl(v string) *Recommendation {
s.Url = &v
return s
}
// Details about a related finding.
type RelatedFinding struct {
_ struct{} `type:"structure"`
// The solution-generated identifier for a related finding.
//
// Id is a required field
Id *string `type:"string" required:"true"`
// The ARN of the solution that generated a related finding.
//
// ProductArn is a required field
ProductArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s RelatedFinding) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RelatedFinding) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RelatedFinding) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RelatedFinding"}
if s.Id == nil {
invalidParams.Add(request.NewErrParamRequired("Id"))
}
if s.ProductArn == nil {
invalidParams.Add(request.NewErrParamRequired("ProductArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetId sets the Id field's value.
func (s *RelatedFinding) SetId(v string) *RelatedFinding {
s.Id = &v
return s
}
// SetProductArn sets the ProductArn field's value.
func (s *RelatedFinding) SetProductArn(v string) *RelatedFinding {
s.ProductArn = &v
return s
}
// The remediation options for a finding.
type Remediation struct {
_ struct{} `type:"structure"`
// Provides a recommendation on how to remediate the issue identified within
// a finding.
Recommendation *Recommendation `type:"structure"`
}
// String returns the string representation
func (s Remediation) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Remediation) GoString() string {
return s.String()
}
// SetRecommendation sets the Recommendation field's value.
func (s *Remediation) SetRecommendation(v *Recommendation) *Remediation {
s.Recommendation = v
return s
}
// A resource data type that describes a resource to which the finding refers.
type Resource struct {
_ struct{} `type:"structure"`
// Provides additional details about the resource.
Details *ResourceDetails `type:"structure"`
// The canonical identifier for the given resource type.
//
// Id is a required field
Id *string `type:"string" required:"true"`
// The canonical AWS partition name to which the region is assigned.
Partition *string `type:"string" enum:"Partition"`
// The canonical AWS external region name where this resource is located.
Region *string `type:"string"`
// A list of AWS tags associated with a resource at the time the finding was
// processed.
Tags map[string]*string `type:"map"`
// Specifies the type of the resource for which details are provided.
//
// Type is a required field
Type *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Resource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Resource) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Resource) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Resource"}
if s.Id == nil {
invalidParams.Add(request.NewErrParamRequired("Id"))
}
if s.Type == nil {
invalidParams.Add(request.NewErrParamRequired("Type"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDetails sets the Details field's value.
func (s *Resource) SetDetails(v *ResourceDetails) *Resource {
s.Details = v
return s
}
// SetId sets the Id field's value.
func (s *Resource) SetId(v string) *Resource {
s.Id = &v
return s
}
// SetPartition sets the Partition field's value.
func (s *Resource) SetPartition(v string) *Resource {
s.Partition = &v
return s
}
// SetRegion sets the Region field's value.
func (s *Resource) SetRegion(v string) *Resource {
s.Region = &v
return s
}
// SetTags sets the Tags field's value.
func (s *Resource) SetTags(v map[string]*string) *Resource {
s.Tags = v
return s
}
// SetType sets the Type field's value.
func (s *Resource) SetType(v string) *Resource {
s.Type = &v
return s
}
// Provides additional details about the resource.
type ResourceDetails struct {
_ struct{} `type:"structure"`
// The details of an AWS EC2 instance.
AwsEc2Instance *AwsEc2InstanceDetails `type:"structure"`
// AWS IAM access key details related to a finding.
AwsIamAccessKey *AwsIamAccessKeyDetails `type:"structure"`
// The details of an AWS S3 Bucket.
AwsS3Bucket *AwsS3BucketDetails `type:"structure"`
// Container details related to a finding.
Container *ContainerDetails `type:"structure"`
// The details of a resource that does not have a specific sub-field for the
// resource type defined.
Other map[string]*string `type:"map"`
}
// String returns the string representation
func (s ResourceDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ResourceDetails) GoString() string {
return s.String()
}
// SetAwsEc2Instance sets the AwsEc2Instance field's value.
func (s *ResourceDetails) SetAwsEc2Instance(v *AwsEc2InstanceDetails) *ResourceDetails {
s.AwsEc2Instance = v
return s
}
// SetAwsIamAccessKey sets the AwsIamAccessKey field's value.
func (s *ResourceDetails) SetAwsIamAccessKey(v *AwsIamAccessKeyDetails) *ResourceDetails {
s.AwsIamAccessKey = v
return s
}
// SetAwsS3Bucket sets the AwsS3Bucket field's value.
func (s *ResourceDetails) SetAwsS3Bucket(v *AwsS3BucketDetails) *ResourceDetails {
s.AwsS3Bucket = v
return s
}
// SetContainer sets the Container field's value.
func (s *ResourceDetails) SetContainer(v *ContainerDetails) *ResourceDetails {
s.Container = v
return s
}
// SetOther sets the Other field's value.
func (s *ResourceDetails) SetOther(v map[string]*string) *ResourceDetails {
s.Other = v
return s
}
// The account details that could not be processed.
type Result struct {
_ struct{} `type:"structure"`
// An ID of the AWS account that could not be processed.
AccountId *string `type:"string"`
	// The reason why an account could not be processed.
ProcessingResult *string `type:"string"`
}
// String returns the string representation
func (s Result) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Result) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *Result) SetAccountId(v string) *Result {
s.AccountId = &v
return s
}
// SetProcessingResult sets the ProcessingResult field's value.
func (s *Result) SetProcessingResult(v string) *Result {
s.ProcessingResult = &v
return s
}
// A finding's severity.
type Severity struct {
_ struct{} `type:"structure"`
// The normalized severity of a finding.
//
// Normalized is a required field
Normalized *int64 `type:"integer" required:"true"`
// The native severity as defined by the security findings provider's solution
// that generated the finding.
Product *float64 `type:"double"`
}
// String returns the string representation
func (s Severity) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Severity) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Severity) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Severity"}
if s.Normalized == nil {
invalidParams.Add(request.NewErrParamRequired("Normalized"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetNormalized sets the Normalized field's value.
func (s *Severity) SetNormalized(v int64) *Severity {
s.Normalized = &v
return s
}
// SetProduct sets the Product field's value.
func (s *Severity) SetProduct(v float64) *Severity {
s.Product = &v
return s
}
// A collection of attributes used for sorting findings.
type SortCriterion struct {
_ struct{} `type:"structure"`
// The finding attribute used for sorting findings.
Field *string `type:"string"`
// The order used for sorting findings.
SortOrder *string `type:"string" enum:"SortOrder"`
}
// String returns the string representation
func (s SortCriterion) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SortCriterion) GoString() string {
return s.String()
}
// SetField sets the Field field's value.
func (s *SortCriterion) SetField(v string) *SortCriterion {
s.Field = &v
return s
}
// SetSortOrder sets the SortOrder field's value.
func (s *SortCriterion) SetSortOrder(v string) *SortCriterion {
s.SortOrder = &v
return s
}
// A resource that represents your subscription to a supported standard.
type StandardsSubscription struct {
_ struct{} `type:"structure"`
// The ARN of a standard.
//
// StandardsArn is a required field
StandardsArn *string `type:"string" required:"true"`
// StandardsInput is a required field
StandardsInput map[string]*string `type:"map" required:"true"`
// The standard's status.
//
// StandardsStatus is a required field
StandardsStatus *string `type:"string" required:"true" enum:"StandardsStatus"`
// The ARN of a resource that represents your subscription to a supported standard.
//
// StandardsSubscriptionArn is a required field
StandardsSubscriptionArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s StandardsSubscription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StandardsSubscription) GoString() string {
return s.String()
}
// SetStandardsArn sets the StandardsArn field's value.
func (s *StandardsSubscription) SetStandardsArn(v string) *StandardsSubscription {
s.StandardsArn = &v
return s
}
// SetStandardsInput sets the StandardsInput field's value.
func (s *StandardsSubscription) SetStandardsInput(v map[string]*string) *StandardsSubscription {
s.StandardsInput = v
return s
}
// SetStandardsStatus sets the StandardsStatus field's value.
func (s *StandardsSubscription) SetStandardsStatus(v string) *StandardsSubscription {
s.StandardsStatus = &v
return s
}
// SetStandardsSubscriptionArn sets the StandardsSubscriptionArn field's value.
func (s *StandardsSubscription) SetStandardsSubscriptionArn(v string) *StandardsSubscription {
s.StandardsSubscriptionArn = &v
return s
}
// The standard that you want to enable.
type StandardsSubscriptionRequest struct {
_ struct{} `type:"structure"`
// The ARN of the standard that you want to enable.
//
// StandardsArn is a required field
StandardsArn *string `type:"string" required:"true"`
StandardsInput map[string]*string `type:"map"`
}
// String returns the string representation
func (s StandardsSubscriptionRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StandardsSubscriptionRequest) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StandardsSubscriptionRequest) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StandardsSubscriptionRequest"}
if s.StandardsArn == nil {
invalidParams.Add(request.NewErrParamRequired("StandardsArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetStandardsArn sets the StandardsArn field's value.
func (s *StandardsSubscriptionRequest) SetStandardsArn(v string) *StandardsSubscriptionRequest {
s.StandardsArn = &v
return s
}
// SetStandardsInput sets the StandardsInput field's value.
func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) *StandardsSubscriptionRequest {
s.StandardsInput = v
return s
}
// A string filter for querying findings.
type StringFilter struct {
_ struct{} `type:"structure"`
// Represents the condition to be applied to a string value when querying for
// findings.
Comparison *string `type:"string" enum:"StringFilterComparison"`
// The string filter value.
Value *string `type:"string"`
}
// String returns the string representation
func (s StringFilter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StringFilter) GoString() string {
return s.String()
}
// SetComparison sets the Comparison field's value.
func (s *StringFilter) SetComparison(v string) *StringFilter {
s.Comparison = &v
return s
}
// SetValue sets the Value field's value.
func (s *StringFilter) SetValue(v string) *StringFilter {
s.Value = &v
return s
}
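// Editor's sketch, not part of the generated SDK surface: a string filter that
// matches findings whose field value exactly equals the given string, using
// the StringFilterComparison enum defined at the end of this file. The value
// is a placeholder.
func exampleStringFilter() *StringFilter {
	return (&StringFilter{}).
		SetComparison(StringFilterComparisonEquals).
		SetValue("sample-value")
}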
// Threat intel details related to a finding.
type ThreatIntelIndicator struct {
_ struct{} `type:"structure"`
// The category of a threat intel indicator.
Category *string `type:"string" enum:"ThreatIntelIndicatorCategory"`
// The date/time of the last observation of a threat intel indicator.
LastObservedAt *string `type:"string"`
// The source of the threat intel.
Source *string `type:"string"`
// The URL for more details from the source of the threat intel.
SourceUrl *string `type:"string"`
// The type of a threat intel indicator.
Type *string `type:"string" enum:"ThreatIntelIndicatorType"`
// The value of a threat intel indicator.
Value *string `type:"string"`
}
// String returns the string representation
func (s ThreatIntelIndicator) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ThreatIntelIndicator) GoString() string {
return s.String()
}
// SetCategory sets the Category field's value.
func (s *ThreatIntelIndicator) SetCategory(v string) *ThreatIntelIndicator {
s.Category = &v
return s
}
// SetLastObservedAt sets the LastObservedAt field's value.
func (s *ThreatIntelIndicator) SetLastObservedAt(v string) *ThreatIntelIndicator {
s.LastObservedAt = &v
return s
}
// SetSource sets the Source field's value.
func (s *ThreatIntelIndicator) SetSource(v string) *ThreatIntelIndicator {
s.Source = &v
return s
}
// SetSourceUrl sets the SourceUrl field's value.
func (s *ThreatIntelIndicator) SetSourceUrl(v string) *ThreatIntelIndicator {
s.SourceUrl = &v
return s
}
// SetType sets the Type field's value.
func (s *ThreatIntelIndicator) SetType(v string) *ThreatIntelIndicator {
s.Type = &v
return s
}
// SetValue sets the Value field's value.
func (s *ThreatIntelIndicator) SetValue(v string) *ThreatIntelIndicator {
s.Value = &v
return s
}
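// Editor's sketch, not part of the generated SDK surface: populating a threat
// intel indicator with the ThreatIntelIndicatorType and
// ThreatIntelIndicatorCategory enums defined at the end of this file. The
// address and source name are placeholders.
func exampleThreatIntelIndicator() *ThreatIntelIndicator {
	return (&ThreatIntelIndicator{}).
		SetType(ThreatIntelIndicatorTypeIpv4Address).
		SetCategory(ThreatIntelIndicatorCategoryCommandAndControl).
		SetValue("198.51.100.1").
		SetSource("example-threat-feed")
}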
type UpdateFindingsInput struct {
_ struct{} `type:"structure"`
// A collection of attributes that specify what findings you want to update.
//
// Filters is a required field
Filters *AwsSecurityFindingFilters `type:"structure" required:"true"`
// The updated note for the finding.
Note *NoteUpdate `type:"structure"`
// The updated record state for the finding.
RecordState *string `type:"string" enum:"RecordState"`
}
// String returns the string representation
func (s UpdateFindingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateFindingsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateFindingsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateFindingsInput"}
if s.Filters == nil {
invalidParams.Add(request.NewErrParamRequired("Filters"))
}
if s.Note != nil {
if err := s.Note.Validate(); err != nil {
invalidParams.AddNested("Note", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFilters sets the Filters field's value.
func (s *UpdateFindingsInput) SetFilters(v *AwsSecurityFindingFilters) *UpdateFindingsInput {
s.Filters = v
return s
}
// SetNote sets the Note field's value.
func (s *UpdateFindingsInput) SetNote(v *NoteUpdate) *UpdateFindingsInput {
s.Note = v
return s
}
// SetRecordState sets the RecordState field's value.
func (s *UpdateFindingsInput) SetRecordState(v string) *UpdateFindingsInput {
s.RecordState = &v
return s
}
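// Editor's sketch, not part of the generated SDK surface: archiving the
// findings matched by a filter. An empty AwsSecurityFindingFilters value is
// used purely as a placeholder; real callers would populate it with the
// attributes they want to match.
func exampleUpdateFindingsInput() error {
	input := (&UpdateFindingsInput{}).
		SetFilters(&AwsSecurityFindingFilters{}).
		SetRecordState(RecordStateArchived)
	// Validate enforces the required Filters field and any nested Note update.
	return input.Validate()
}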
type UpdateFindingsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateFindingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateFindingsOutput) GoString() string {
return s.String()
}
type UpdateInsightInput struct {
_ struct{} `type:"structure"`
// The updated filters that define this insight.
Filters *AwsSecurityFindingFilters `type:"structure"`
// The updated GroupBy attribute that defines this insight.
GroupByAttribute *string `type:"string"`
// The ARN of the insight that you want to update.
//
// InsightArn is a required field
InsightArn *string `location:"uri" locationName:"InsightArn" type:"string" required:"true"`
// The updated name for the insight.
Name *string `type:"string"`
}
// String returns the string representation
func (s UpdateInsightInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateInsightInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateInsightInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateInsightInput"}
if s.InsightArn == nil {
invalidParams.Add(request.NewErrParamRequired("InsightArn"))
}
if s.InsightArn != nil && len(*s.InsightArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("InsightArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFilters sets the Filters field's value.
func (s *UpdateInsightInput) SetFilters(v *AwsSecurityFindingFilters) *UpdateInsightInput {
s.Filters = v
return s
}
// SetGroupByAttribute sets the GroupByAttribute field's value.
func (s *UpdateInsightInput) SetGroupByAttribute(v string) *UpdateInsightInput {
s.GroupByAttribute = &v
return s
}
// SetInsightArn sets the InsightArn field's value.
func (s *UpdateInsightInput) SetInsightArn(v string) *UpdateInsightInput {
s.InsightArn = &v
return s
}
// SetName sets the Name field's value.
func (s *UpdateInsightInput) SetName(v string) *UpdateInsightInput {
s.Name = &v
return s
}
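// Editor's sketch, not part of the generated SDK surface: renaming an existing
// insight. The insight ARN is a placeholder.
func exampleUpdateInsightInput() error {
	input := (&UpdateInsightInput{}).
		SetInsightArn("arn:aws:securityhub:us-west-2:123456789012:insight/example").
		SetName("Renamed insight")
	// Validate enforces the required, non-empty InsightArn.
	return input.Validate()
}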
type UpdateInsightOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateInsightOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateInsightOutput) GoString() string {
return s.String()
}
const (
// AwsIamAccessKeyStatusActive is a AwsIamAccessKeyStatus enum value
AwsIamAccessKeyStatusActive = "Active"
// AwsIamAccessKeyStatusInactive is a AwsIamAccessKeyStatus enum value
AwsIamAccessKeyStatusInactive = "Inactive"
)
const (
// ComplianceStatusPassed is a ComplianceStatus enum value
ComplianceStatusPassed = "PASSED"
// ComplianceStatusWarning is a ComplianceStatus enum value
ComplianceStatusWarning = "WARNING"
// ComplianceStatusFailed is a ComplianceStatus enum value
ComplianceStatusFailed = "FAILED"
// ComplianceStatusNotAvailable is a ComplianceStatus enum value
ComplianceStatusNotAvailable = "NOT_AVAILABLE"
)
const (
// DateRangeUnitDays is a DateRangeUnit enum value
DateRangeUnitDays = "DAYS"
)
const (
// MalwareStateObserved is a MalwareState enum value
MalwareStateObserved = "OBSERVED"
// MalwareStateRemovalFailed is a MalwareState enum value
MalwareStateRemovalFailed = "REMOVAL_FAILED"
// MalwareStateRemoved is a MalwareState enum value
MalwareStateRemoved = "REMOVED"
)
const (
// MalwareTypeAdware is a MalwareType enum value
MalwareTypeAdware = "ADWARE"
// MalwareTypeBlendedThreat is a MalwareType enum value
MalwareTypeBlendedThreat = "BLENDED_THREAT"
// MalwareTypeBotnetAgent is a MalwareType enum value
MalwareTypeBotnetAgent = "BOTNET_AGENT"
// MalwareTypeCoinMiner is a MalwareType enum value
MalwareTypeCoinMiner = "COIN_MINER"
// MalwareTypeExploitKit is a MalwareType enum value
MalwareTypeExploitKit = "EXPLOIT_KIT"
// MalwareTypeKeylogger is a MalwareType enum value
MalwareTypeKeylogger = "KEYLOGGER"
// MalwareTypeMacro is a MalwareType enum value
MalwareTypeMacro = "MACRO"
// MalwareTypePotentiallyUnwanted is a MalwareType enum value
MalwareTypePotentiallyUnwanted = "POTENTIALLY_UNWANTED"
// MalwareTypeSpyware is a MalwareType enum value
MalwareTypeSpyware = "SPYWARE"
// MalwareTypeRansomware is a MalwareType enum value
MalwareTypeRansomware = "RANSOMWARE"
// MalwareTypeRemoteAccess is a MalwareType enum value
MalwareTypeRemoteAccess = "REMOTE_ACCESS"
// MalwareTypeRootkit is a MalwareType enum value
MalwareTypeRootkit = "ROOTKIT"
// MalwareTypeTrojan is a MalwareType enum value
MalwareTypeTrojan = "TROJAN"
// MalwareTypeVirus is a MalwareType enum value
MalwareTypeVirus = "VIRUS"
// MalwareTypeWorm is a MalwareType enum value
MalwareTypeWorm = "WORM"
)
const (
// MapFilterComparisonContains is a MapFilterComparison enum value
MapFilterComparisonContains = "CONTAINS"
)
const (
// NetworkDirectionIn is a NetworkDirection enum value
NetworkDirectionIn = "IN"
// NetworkDirectionOut is a NetworkDirection enum value
NetworkDirectionOut = "OUT"
)
const (
// PartitionAws is a Partition enum value
PartitionAws = "aws"
// PartitionAwsCn is a Partition enum value
PartitionAwsCn = "aws-cn"
// PartitionAwsUsGov is a Partition enum value
PartitionAwsUsGov = "aws-us-gov"
)
const (
// RecordStateActive is a RecordState enum value
RecordStateActive = "ACTIVE"
// RecordStateArchived is a RecordState enum value
RecordStateArchived = "ARCHIVED"
)
const (
// SortOrderAsc is a SortOrder enum value
SortOrderAsc = "asc"
// SortOrderDesc is a SortOrder enum value
SortOrderDesc = "desc"
)
const (
// StandardsStatusPending is a StandardsStatus enum value
StandardsStatusPending = "PENDING"
// StandardsStatusReady is a StandardsStatus enum value
StandardsStatusReady = "READY"
// StandardsStatusFailed is a StandardsStatus enum value
StandardsStatusFailed = "FAILED"
// StandardsStatusDeleting is a StandardsStatus enum value
StandardsStatusDeleting = "DELETING"
)
const (
// StringFilterComparisonEquals is a StringFilterComparison enum value
StringFilterComparisonEquals = "EQUALS"
// StringFilterComparisonContains is a StringFilterComparison enum value
StringFilterComparisonContains = "CONTAINS"
// StringFilterComparisonPrefix is a StringFilterComparison enum value
StringFilterComparisonPrefix = "PREFIX"
)
const (
// ThreatIntelIndicatorCategoryBackdoor is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryBackdoor = "BACKDOOR"
// ThreatIntelIndicatorCategoryCardStealer is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryCardStealer = "CARD_STEALER"
// ThreatIntelIndicatorCategoryCommandAndControl is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryCommandAndControl = "COMMAND_AND_CONTROL"
// ThreatIntelIndicatorCategoryDropSite is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryDropSite = "DROP_SITE"
// ThreatIntelIndicatorCategoryExploitSite is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryExploitSite = "EXPLOIT_SITE"
// ThreatIntelIndicatorCategoryKeylogger is a ThreatIntelIndicatorCategory enum value
ThreatIntelIndicatorCategoryKeylogger = "KEYLOGGER"
)
const (
// ThreatIntelIndicatorTypeDomain is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeDomain = "DOMAIN"
// ThreatIntelIndicatorTypeEmailAddress is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeEmailAddress = "EMAIL_ADDRESS"
// ThreatIntelIndicatorTypeHashMd5 is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeHashMd5 = "HASH_MD5"
// ThreatIntelIndicatorTypeHashSha1 is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeHashSha1 = "HASH_SHA1"
// ThreatIntelIndicatorTypeHashSha256 is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeHashSha256 = "HASH_SHA256"
// ThreatIntelIndicatorTypeHashSha512 is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeHashSha512 = "HASH_SHA512"
// ThreatIntelIndicatorTypeIpv4Address is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeIpv4Address = "IPV4_ADDRESS"
// ThreatIntelIndicatorTypeIpv6Address is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeIpv6Address = "IPV6_ADDRESS"
// ThreatIntelIndicatorTypeMutex is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeMutex = "MUTEX"
// ThreatIntelIndicatorTypeProcess is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeProcess = "PROCESS"
// ThreatIntelIndicatorTypeUrl is a ThreatIntelIndicatorType enum value
ThreatIntelIndicatorTypeUrl = "URL"
)
const (
// VerificationStateUnknown is a VerificationState enum value
VerificationStateUnknown = "UNKNOWN"
// VerificationStateTruePositive is a VerificationState enum value
VerificationStateTruePositive = "TRUE_POSITIVE"
// VerificationStateFalsePositive is a VerificationState enum value
VerificationStateFalsePositive = "FALSE_POSITIVE"
// VerificationStateBenignPositive is a VerificationState enum value
VerificationStateBenignPositive = "BENIGN_POSITIVE"
)
const (
// WorkflowStateNew is a WorkflowState enum value
WorkflowStateNew = "NEW"
// WorkflowStateAssigned is a WorkflowState enum value
WorkflowStateAssigned = "ASSIGNED"
// WorkflowStateInProgress is a WorkflowState enum value
WorkflowStateInProgress = "IN_PROGRESS"
// WorkflowStateDeferred is a WorkflowState enum value
WorkflowStateDeferred = "DEFERRED"
// WorkflowStateResolved is a WorkflowState enum value
WorkflowStateResolved = "RESOLVED"
)
| pweil-/origin | vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go | GO | apache-2.0 | 258,064 |
#!/bin/bash
# Copy the local h2o.py to the EC2 Hadoop node's hduser home directory as
# h2o.py.new, authenticating with the Jenkins test key.
scp -i /home/hduser/.ec2/keys/mrjenkins_test.pem /home/kevin/h2o/py/h2o.py [email protected]:/home/hduser/h2o.py.new
| janezhango/BigDataMachineLearning | py/ec2_stuff/to_ec2_copy.sh | Shell | apache-2.0 | 121 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.sql.validate;
import org.apache.calcite.util.CalciteValidatorException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// NOTE: This class gets compiled independently of everything else so that
// resource generation can use reflection. That means it must have no
// dependencies on other Calcite code.
/**
* Exception thrown while validating a SQL statement.
*
* <p>Unlike {@link org.apache.calcite.runtime.CalciteException}, this is a
* checked exception, which reminds code authors to wrap it in another exception
* containing the line/column context.
*/
public class SqlValidatorException extends Exception
implements CalciteValidatorException {
//~ Static fields/initializers ---------------------------------------------
private static final Logger LOGGER =
LoggerFactory.getLogger("org.apache.calcite.runtime.CalciteException");
static final long serialVersionUID = -831683113957131387L;
//~ Constructors -----------------------------------------------------------
/**
* Creates a new SqlValidatorException object.
*
* @param message error message
* @param cause underlying cause
*/
public SqlValidatorException(
String message,
Throwable cause) {
super(message, cause);
// TODO: see note in CalciteException constructor
LOGGER.trace("SqlValidatorException", this);
LOGGER.error(toString());
}
}
// End SqlValidatorException.java
| minji-kim/calcite | core/src/main/java/org/apache/calcite/sql/validate/SqlValidatorException.java | Java | apache-2.0 | 2,259 |