text
stringlengths 2
100k
| meta
dict |
---|---|
// Enonic XP test script: exercises portalLib.serviceUrl() and captures the
// parameters it was invoked with.
var portalLib = require('/lib/xp/portal');
var assert = require('/lib/xp/testing');
// BEGIN
// Build a URL to the 'myservice' service, passing two query parameters.
var url = portalLib.serviceUrl({
    service: 'myservice',
    params: {
        a: 1,
        b: 2
    }
});
// END
// NOTE(review): the expected value is a ServiceUrlParams debug string rather
// than a real URL — presumably the test harness stubs serviceUrl() to echo
// the parameters it received; confirm against the mock implementation.
assert.assertEquals('ServiceUrlParams{type=server, params={a=[1], b=[2]}, service=myservice}', url);
| {
"pile_set_name": "Github"
} |
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
// Developed by Minigraph
//
// Author(s): Alex Nankervis
// James Stanard
//
#include "ForwardPlusLighting.h"
#include "PipelineState.h"
#include "RootSignature.h"
#include "CommandContext.h"
#include "Camera.h"
#include "BufferManager.h"
#include "CompiledShaders/FillLightGridCS_8.h"
#include "CompiledShaders/FillLightGridCS_16.h"
#include "CompiledShaders/FillLightGridCS_24.h"
#include "CompiledShaders/FillLightGridCS_32.h"
using namespace Math;
using namespace Graphics;
// Per-light constants uploaded to the GPU in m_LightBuffer.
// must keep in sync with HLSL
struct LightData
{
    float pos[3];                   // light position
    float radiusSq;                 // squared radius of influence (see radiusSq assignment in CreateRandomLights)
    float color[3];                 // RGB intensity (cone lights are scaled up 5x at creation)
    uint32_t type;                  // 0 = sphere, 1 = cone, 2 = shadowed cone (values assigned in CreateRandomLights)
    float coneDir[3];               // normalized cone axis (meaningful for types 1 and 2)
    float coneAngles[2];            // [0] = 1/(cos(inner)-cos(outer)), [1] = cos(outer)
    float shadowTextureMatrix[16];  // clip-to-texture shadow matrix, memcpy'd from a Matrix4
};

// Smallest supported light-grid tile dimension in pixels; also used to size
// the grid buffers for the worst case (most cells).
enum { kMinLightGridDim = 8 };
// Forward+ light management: light generation, the tiled light grid, and the
// compute pass that fills it.
namespace Lighting
{
    // Tile size (in pixels) of the light grid; tunable at runtime from
    // kMinLightGridDim up to 32 in steps of 8 (one PSO per step).
    IntVar LightGridDim("Application/Forward+/Light Grid Dim", 16, kMinLightGridDim, 32, 8 );

    RootSignature m_FillLightRootSig;   // shared by all FillLightGrid PSO variants
    ComputePSO m_FillLightGridCS_8;     // one PSO per supported tile dimension
    ComputePSO m_FillLightGridCS_16;
    ComputePSO m_FillLightGridCS_24;
    ComputePSO m_FillLightGridCS_32;

    LightData m_LightData[MaxLights];   // CPU-side light constants
    StructuredBuffer m_LightBuffer;     // GPU copy of m_LightData
    ByteAddressBuffer m_LightGrid;      // per-tile light lists written by the compute pass
    ByteAddressBuffer m_LightGridBitMask; // per-tile light bit masks (16 bytes per cell)

    // Lights are generated grouped by type; these mark where each group starts.
    uint32_t m_FirstConeLight;          // index of the first type-1 light
    uint32_t m_FirstConeShadowedLight;  // index of the first type-2 light

    enum {shadowDim = 512};             // per-light shadow map resolution (square)
    ColorBuffer m_LightShadowArray;     // texture array of per-light shadow maps
    ShadowBuffer m_LightShadowTempBuffer; // scratch target — presumably used while rendering each shadow slice (rendering code not in this file)
    Matrix4 m_LightShadowMatrix[MaxLights]; // per-light view-projection matrices

    void InitializeResources(void);
    void CreateRandomLights(const Vector3 minBound, const Vector3 maxBound);
    void FillLightGrid(GraphicsContext& gfxContext, const Camera& camera);
    void Shutdown(void);
}
void Lighting::InitializeResources( void )
{
m_FillLightRootSig.Reset(3, 0);
m_FillLightRootSig[0].InitAsConstantBuffer(0);
m_FillLightRootSig[1].InitAsDescriptorRange(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 0, 2);
m_FillLightRootSig[2].InitAsDescriptorRange(D3D12_DESCRIPTOR_RANGE_TYPE_UAV, 0, 2);
m_FillLightRootSig.Finalize(L"FillLightRS");
m_FillLightGridCS_8.SetRootSignature(m_FillLightRootSig);
m_FillLightGridCS_8.SetComputeShader(g_pFillLightGridCS_8, sizeof(g_pFillLightGridCS_8));
m_FillLightGridCS_8.Finalize();
m_FillLightGridCS_16.SetRootSignature(m_FillLightRootSig);
m_FillLightGridCS_16.SetComputeShader(g_pFillLightGridCS_16, sizeof(g_pFillLightGridCS_16));
m_FillLightGridCS_16.Finalize();
m_FillLightGridCS_24.SetRootSignature(m_FillLightRootSig);
m_FillLightGridCS_24.SetComputeShader(g_pFillLightGridCS_24, sizeof(g_pFillLightGridCS_24));
m_FillLightGridCS_24.Finalize();
m_FillLightGridCS_32.SetRootSignature(m_FillLightRootSig);
m_FillLightGridCS_32.SetComputeShader(g_pFillLightGridCS_32, sizeof(g_pFillLightGridCS_32));
m_FillLightGridCS_32.Finalize();
}
//------------------------------------------------------------------------------
// Fills m_LightData / m_LightShadowMatrix with MaxLights procedurally
// generated lights inside the [minBound, maxBound] box, then creates the GPU
// buffers that FillLightGrid() and the forward+ shading pass consume.
// Uses a fixed srand seed, so the generated scene is deterministic.
//------------------------------------------------------------------------------
void Lighting::CreateRandomLights( const Vector3 minBound, const Vector3 maxBound )
{
    Vector3 posScale = maxBound - minBound;
    Vector3 posBias = minBound;

    // todo: replace this with MT
    srand(12645);
    auto randUint = []() -> uint32_t
    {
        return rand(); // [0, RAND_MAX]
    };
    auto randFloat = [randUint]() -> float
    {
        return randUint() * (1.0f / RAND_MAX); // convert [0, RAND_MAX] to [0, 1]
    };
    auto randVecUniform = [randFloat]() -> Vector3
    {
        return Vector3(randFloat(), randFloat(), randFloat());
    };
    auto randGaussian = [randFloat]() -> float
    {
        // polar box-muller: each rejection-sampled point in the unit disk
        // yields two independent normal deviates; the second (y2) is cached
        // in a static and returned on the next call.
        static bool gaussianPair = true;
        static float y2;
        if (gaussianPair)
        {
            gaussianPair = false;
            float x1, x2, w;
            do
            {
                x1 = 2 * randFloat() - 1;
                x2 = 2 * randFloat() - 1;
                w = x1 * x1 + x2 * x2;
            } while (w >= 1);
            w = sqrtf(-2 * logf(w) / w);
            y2 = x2 * w;
            return x1 * w;
        }
        else
        {
            gaussianPair = true;
            return y2;
        }
    };
    auto randVecGaussian = [randGaussian]() -> Vector3
    {
        // normalized gaussian vector = uniformly distributed direction
        return Normalize(Vector3(randGaussian(), randGaussian(), randGaussian()));
    };

    const float pi = 3.14159265359f;
    for (uint32_t n = 0; n < MaxLights; n++)
    {
        Vector3 pos = randVecUniform() * posScale + posBias;
        float lightRadius = randFloat() * 800.0f + 200.0f;   // [200, 1000)
        Vector3 color = randVecUniform();
        float colorScale = randFloat() * .3f + .3f;          // [0.3, 0.6)
        color = color * colorScale;

        uint32_t type;
        // force types to match 32-bit boundaries for the BIT_MASK_SORTED case:
        // lights [0,32) are type 0 (sphere), [32,96) type 1 (cone),
        // [96, MaxLights) type 2 (shadowed cone)
        if (n < 32 * 1)
            type = 0;
        else if (n < 32 * 3)
            type = 1;
        else
            type = 2;

        Vector3 coneDir = randVecGaussian();
        float coneInner = (randFloat() * .2f + .025f) * pi;
        float coneOuter = coneInner + randFloat() * .1f * pi;
        if (type == 1 || type == 2)
        {
            // emphasize cone lights
            color = color * 5.0f;
        }

        // Build the shadow view-projection for this light: camera at the
        // light position looking down the cone axis, FOV = 2 * outer angle,
        // near/far proportional to the light radius.
        Math::Camera shadowCamera;
        shadowCamera.SetEyeAtUp(pos, pos + coneDir, Vector3(0, 1, 0));
        shadowCamera.SetPerspectiveMatrix(coneOuter * 2, 1.0f, lightRadius * .05f, lightRadius * 1.0f);
        shadowCamera.Update();
        m_LightShadowMatrix[n] = shadowCamera.GetViewProjMatrix();
        // Remap clip space [-1,1] to texture space [0,1], with Y flipped.
        Matrix4 shadowTextureMatrix = Matrix4(AffineTransform(Matrix3::MakeScale( 0.5f, -0.5f, 1.0f ), Vector3(0.5f, 0.5f, 0.0f))) * m_LightShadowMatrix[n];

        m_LightData[n].pos[0] = pos.GetX();
        m_LightData[n].pos[1] = pos.GetY();
        m_LightData[n].pos[2] = pos.GetZ();
        m_LightData[n].radiusSq = lightRadius * lightRadius;
        m_LightData[n].color[0] = color.GetX();
        m_LightData[n].color[1] = color.GetY();
        m_LightData[n].color[2] = color.GetZ();
        m_LightData[n].type = type;
        m_LightData[n].coneDir[0] = coneDir.GetX();
        m_LightData[n].coneDir[1] = coneDir.GetY();
        m_LightData[n].coneDir[2] = coneDir.GetZ();
        // Precompute 1/(cos(inner)-cos(outer)) and cos(outer) so the shader
        // can evaluate the cone falloff without trig.
        m_LightData[n].coneAngles[0] = 1.0f / (cosf(coneInner) - cosf(coneOuter));
        m_LightData[n].coneAngles[1] = cosf(coneOuter);
        std::memcpy(m_LightData[n].shadowTextureMatrix, &shadowTextureMatrix, sizeof(shadowTextureMatrix));
        //*(Matrix4*)(m_LightData[n].shadowTextureMatrix) = shadowTextureMatrix;
    }
    // sort lights by type, needed for efficiency in the BIT_MASK approach
    // (disabled: the generation loop above already emits lights in type order)
    /* {
    Matrix4 copyLightShadowMatrix[MaxLights];
    memcpy(copyLightShadowMatrix, m_LightShadowMatrix, sizeof(Matrix4) * MaxLights);
    LightData copyLightData[MaxLights];
    memcpy(copyLightData, m_LightData, sizeof(LightData) * MaxLights);
    uint32_t sortArray[MaxLights];
    for (uint32_t n = 0; n < MaxLights; n++)
    {
    sortArray[n] = n;
    }
    std::sort(sortArray, sortArray + MaxLights,
    [this](const uint32_t &a, const uint32_t &b) -> bool
    {
    return this->m_LightData[a].type < this->m_LightData[b].type;
    });
    for (uint32_t n = 0; n < MaxLights; n++)
    {
    m_LightShadowMatrix[n] = copyLightShadowMatrix[sortArray[n]];
    m_LightData[n] = copyLightData[sortArray[n]];
    }
    }*/

    // Record where each light-type group begins.
    // NOTE(review): m_FirstConeLight / m_FirstConeShadowedLight remain
    // uninitialized if no light of the matching type exists (MaxLights <= 32
    // or <= 96 respectively) — confirm MaxLights > 96.
    for (uint32_t n = 0; n < MaxLights; n++)
    {
        if (m_LightData[n].type == 1)
        {
            m_FirstConeLight = n;
            break;
        }
    }
    for (uint32_t n = 0; n < MaxLights; n++)
    {
        if (m_LightData[n].type == 2)
        {
            m_FirstConeShadowedLight = n;
            break;
        }
    }

    m_LightBuffer.Create(L"m_LightBuffer", MaxLights, sizeof(LightData), m_LightData);

    // todo: assumes max resolution of 1920x1080
    // Sized for the smallest tile dimension, i.e. the largest cell count.
    uint32_t lightGridCells = Math::DivideByMultiple(1920, kMinLightGridDim) * Math::DivideByMultiple(1080, kMinLightGridDim);
    // 4 + MaxLights*4 bytes per cell — presumably a light count followed by
    // up to MaxLights light indices; confirm against FillLightGridCS HLSL.
    uint32_t lightGridSizeBytes = lightGridCells * (4 + MaxLights * 4);
    m_LightGrid.Create(L"m_LightGrid", lightGridSizeBytes, 1, nullptr);

    // 16 bytes (4 x uint32) of light bit mask per cell.
    uint32_t lightGridBitMaskSizeBytes = lightGridCells * 4 * 4;
    m_LightGridBitMask.Create(L"m_LightGridBitMask", lightGridBitMaskSizeBytes, 1, nullptr);

    m_LightShadowArray.CreateArray(L"m_LightShadowArray", shadowDim, shadowDim, MaxLights, DXGI_FORMAT_R16_UNORM);
    m_LightShadowTempBuffer.Create(L"m_LightShadowTempBuffer", shadowDim, shadowDim);
}
// Releases the GPU resources created by CreateRandomLights().
void Lighting::Shutdown(void)
{
    m_LightBuffer.Destroy();
    m_LightGrid.Destroy();
    m_LightGridBitMask.Destroy();
    m_LightShadowArray.Destroy();
    m_LightShadowTempBuffer.Destroy();
}
//------------------------------------------------------------------------------
// Dispatches the FillLightGrid compute shader, which culls the light list
// against screen tiles and writes the per-tile light grid and bit mask
// consumed by the forward+ shading pass.  One thread group is dispatched per
// LightGridDim x LightGridDim tile.
//------------------------------------------------------------------------------
void Lighting::FillLightGrid(GraphicsContext& gfxContext, const Camera& camera)
{
    ScopedTimer _prof(L"FillLightGrid", gfxContext);

    ComputeContext& Context = gfxContext.GetComputeContext();
    Context.SetRootSignature(m_FillLightRootSig);

    // Select the PSO variant matching the current (tunable) tile dimension.
    switch ((int)LightGridDim)
    {
    case  8: Context.SetPipelineState(m_FillLightGridCS_8 ); break;
    case 16: Context.SetPipelineState(m_FillLightGridCS_16); break;
    case 24: Context.SetPipelineState(m_FillLightGridCS_24); break;
    case 32: Context.SetPipelineState(m_FillLightGridCS_32); break;
    default: ASSERT(false); break;
    }

    // The linear depth buffer is double-buffered; pick this frame's copy.
    ColorBuffer& LinearDepth = g_LinearDepth[ Graphics::GetFrameCount() % 2 ];

    // Inputs to SRV state, outputs to UAV state, before binding.
    Context.TransitionResource(m_LightBuffer, D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
    Context.TransitionResource(LinearDepth, D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
    Context.TransitionResource(g_SceneDepthBuffer, D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
    Context.TransitionResource(m_LightGrid, D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
    Context.TransitionResource(m_LightGridBitMask, D3D12_RESOURCE_STATE_UNORDERED_ACCESS);

    // Root param 1: SRVs t0 (lights), t1 (linear depth).
    Context.SetDynamicDescriptor(1, 0, m_LightBuffer.GetSRV());
    Context.SetDynamicDescriptor(1, 1, LinearDepth.GetSRV());
    //Context.SetDynamicDescriptor(1, 1, g_SceneDepthBuffer.GetDepthSRV());
    // Root param 2: UAVs u0 (light grid), u1 (bit mask).
    Context.SetDynamicDescriptor(2, 0, m_LightGrid.GetUAV());
    Context.SetDynamicDescriptor(2, 1, m_LightGridBitMask.GetUAV());

    // todo: assumes 1920x1080 resolution
    uint32_t tileCountX = Math::DivideByMultiple(g_SceneColorBuffer.GetWidth(), LightGridDim);
    uint32_t tileCountY = Math::DivideByMultiple(g_SceneColorBuffer.GetHeight(), LightGridDim);

    float FarClipDist = camera.GetFarClip();
    float NearClipDist = camera.GetNearClip();
    // near / (far - near) — NOTE(review): presumably used by the shader to
    // rescale linear depth; confirm against FillLightGridCS HLSL.
    const float RcpZMagic = NearClipDist / (FarClipDist - NearClipDist);

    // Presumably mirrors the HLSL cbuffer layout — keep in sync.
    struct CSConstants
    {
        uint32_t ViewportWidth, ViewportHeight;
        float InvTileDim;
        float RcpZMagic;
        uint32_t TileCount;   // tiles per row (tileCountX)
        Matrix4 ViewProjMatrix;
    } csConstants;
    // todo: assumes 1920x1080 resolution
    csConstants.ViewportWidth = g_SceneColorBuffer.GetWidth();
    csConstants.ViewportHeight = g_SceneColorBuffer.GetHeight();
    csConstants.InvTileDim = 1.0f / LightGridDim;
    csConstants.RcpZMagic = RcpZMagic;
    csConstants.TileCount = tileCountX;
    csConstants.ViewProjMatrix = camera.GetViewProjMatrix();
    Context.SetDynamicConstantBufferView(0, sizeof(CSConstants), &csConstants);

    Context.Dispatch(tileCountX, tileCountY, 1);

    // Hand the results back to the pixel-shader stage.
    Context.TransitionResource(m_LightGrid, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
    Context.TransitionResource(m_LightGridBitMask, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
}
| {
"pile_set_name": "Github"
} |
/* ----------------------------------------------------------------------------
* SAM Software Package License
* ----------------------------------------------------------------------------
* Copyright (c) 2012, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following condition is met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
#ifndef _SAM3XA_DMAC_INSTANCE_
#define _SAM3XA_DMAC_INSTANCE_
/* ========== Register definition for DMAC peripheral ========== */
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
#define REG_DMAC_GCFG (0x400C4000U) /**< \brief (DMAC) DMAC Global Configuration Register */
#define REG_DMAC_EN (0x400C4004U) /**< \brief (DMAC) DMAC Enable Register */
#define REG_DMAC_SREQ (0x400C4008U) /**< \brief (DMAC) DMAC Software Single Request Register */
#define REG_DMAC_CREQ (0x400C400CU) /**< \brief (DMAC) DMAC Software Chunk Transfer Request Register */
#define REG_DMAC_LAST (0x400C4010U) /**< \brief (DMAC) DMAC Software Last Transfer Flag Register */
#define REG_DMAC_EBCIER (0x400C4018U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer Transfer Completed Interrupt Enable register. */
#define REG_DMAC_EBCIDR (0x400C401CU) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer Transfer Completed Interrupt Disable register. */
#define REG_DMAC_EBCIMR (0x400C4020U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer transfer completed Mask Register. */
#define REG_DMAC_EBCISR (0x400C4024U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer transfer completed Status Register. */
#define REG_DMAC_CHER (0x400C4028U) /**< \brief (DMAC) DMAC Channel Handler Enable Register */
#define REG_DMAC_CHDR (0x400C402CU) /**< \brief (DMAC) DMAC Channel Handler Disable Register */
#define REG_DMAC_CHSR (0x400C4030U) /**< \brief (DMAC) DMAC Channel Handler Status Register */
#define REG_DMAC_SADDR0 (0x400C403CU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 0) */
#define REG_DMAC_DADDR0 (0x400C4040U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 0) */
#define REG_DMAC_DSCR0 (0x400C4044U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 0) */
#define REG_DMAC_CTRLA0 (0x400C4048U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 0) */
#define REG_DMAC_CTRLB0 (0x400C404CU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 0) */
#define REG_DMAC_CFG0 (0x400C4050U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 0) */
#define REG_DMAC_SADDR1 (0x400C4064U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 1) */
#define REG_DMAC_DADDR1 (0x400C4068U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 1) */
#define REG_DMAC_DSCR1 (0x400C406CU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 1) */
#define REG_DMAC_CTRLA1 (0x400C4070U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 1) */
#define REG_DMAC_CTRLB1 (0x400C4074U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 1) */
#define REG_DMAC_CFG1 (0x400C4078U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 1) */
#define REG_DMAC_SADDR2 (0x400C408CU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 2) */
#define REG_DMAC_DADDR2 (0x400C4090U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 2) */
#define REG_DMAC_DSCR2 (0x400C4094U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 2) */
#define REG_DMAC_CTRLA2 (0x400C4098U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 2) */
#define REG_DMAC_CTRLB2 (0x400C409CU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 2) */
#define REG_DMAC_CFG2 (0x400C40A0U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 2) */
#define REG_DMAC_SADDR3 (0x400C40B4U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 3) */
#define REG_DMAC_DADDR3 (0x400C40B8U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 3) */
#define REG_DMAC_DSCR3 (0x400C40BCU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 3) */
#define REG_DMAC_CTRLA3 (0x400C40C0U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 3) */
#define REG_DMAC_CTRLB3 (0x400C40C4U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 3) */
#define REG_DMAC_CFG3 (0x400C40C8U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 3) */
#define REG_DMAC_SADDR4 (0x400C40DCU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 4) */
#define REG_DMAC_DADDR4 (0x400C40E0U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 4) */
#define REG_DMAC_DSCR4 (0x400C40E4U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 4) */
#define REG_DMAC_CTRLA4 (0x400C40E8U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 4) */
#define REG_DMAC_CTRLB4 (0x400C40ECU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 4) */
#define REG_DMAC_CFG4 (0x400C40F0U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 4) */
#define REG_DMAC_SADDR5 (0x400C4104U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 5) */
#define REG_DMAC_DADDR5 (0x400C4108U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 5) */
#define REG_DMAC_DSCR5 (0x400C410CU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 5) */
#define REG_DMAC_CTRLA5 (0x400C4110U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 5) */
#define REG_DMAC_CTRLB5 (0x400C4114U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 5) */
#define REG_DMAC_CFG5 (0x400C4118U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 5) */
#define REG_DMAC_WPMR (0x400C41E4U) /**< \brief (DMAC) DMAC Write Protect Mode Register */
#define REG_DMAC_WPSR (0x400C41E8U) /**< \brief (DMAC) DMAC Write Protect Status Register */
#else
#define REG_DMAC_GCFG (*(RwReg*)0x400C4000U) /**< \brief (DMAC) DMAC Global Configuration Register */
#define REG_DMAC_EN (*(RwReg*)0x400C4004U) /**< \brief (DMAC) DMAC Enable Register */
#define REG_DMAC_SREQ (*(RwReg*)0x400C4008U) /**< \brief (DMAC) DMAC Software Single Request Register */
#define REG_DMAC_CREQ (*(RwReg*)0x400C400CU) /**< \brief (DMAC) DMAC Software Chunk Transfer Request Register */
#define REG_DMAC_LAST (*(RwReg*)0x400C4010U) /**< \brief (DMAC) DMAC Software Last Transfer Flag Register */
#define REG_DMAC_EBCIER (*(WoReg*)0x400C4018U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer Transfer Completed Interrupt Enable register. */
#define REG_DMAC_EBCIDR (*(WoReg*)0x400C401CU) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer Transfer Completed Interrupt Disable register. */
#define REG_DMAC_EBCIMR (*(RoReg*)0x400C4020U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer transfer completed Mask Register. */
#define REG_DMAC_EBCISR (*(RoReg*)0x400C4024U) /**< \brief (DMAC) DMAC Error, Chained Buffer Transfer Completed Interrupt and Buffer transfer completed Status Register. */
#define REG_DMAC_CHER (*(WoReg*)0x400C4028U) /**< \brief (DMAC) DMAC Channel Handler Enable Register */
#define REG_DMAC_CHDR (*(WoReg*)0x400C402CU) /**< \brief (DMAC) DMAC Channel Handler Disable Register */
#define REG_DMAC_CHSR (*(RoReg*)0x400C4030U) /**< \brief (DMAC) DMAC Channel Handler Status Register */
#define REG_DMAC_SADDR0 (*(RwReg*)0x400C403CU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 0) */
#define REG_DMAC_DADDR0 (*(RwReg*)0x400C4040U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 0) */
#define REG_DMAC_DSCR0 (*(RwReg*)0x400C4044U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 0) */
#define REG_DMAC_CTRLA0 (*(RwReg*)0x400C4048U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 0) */
#define REG_DMAC_CTRLB0 (*(RwReg*)0x400C404CU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 0) */
#define REG_DMAC_CFG0 (*(RwReg*)0x400C4050U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 0) */
#define REG_DMAC_SADDR1 (*(RwReg*)0x400C4064U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 1) */
#define REG_DMAC_DADDR1 (*(RwReg*)0x400C4068U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 1) */
#define REG_DMAC_DSCR1 (*(RwReg*)0x400C406CU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 1) */
#define REG_DMAC_CTRLA1 (*(RwReg*)0x400C4070U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 1) */
#define REG_DMAC_CTRLB1 (*(RwReg*)0x400C4074U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 1) */
#define REG_DMAC_CFG1 (*(RwReg*)0x400C4078U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 1) */
#define REG_DMAC_SADDR2 (*(RwReg*)0x400C408CU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 2) */
#define REG_DMAC_DADDR2 (*(RwReg*)0x400C4090U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 2) */
#define REG_DMAC_DSCR2 (*(RwReg*)0x400C4094U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 2) */
#define REG_DMAC_CTRLA2 (*(RwReg*)0x400C4098U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 2) */
#define REG_DMAC_CTRLB2 (*(RwReg*)0x400C409CU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 2) */
#define REG_DMAC_CFG2 (*(RwReg*)0x400C40A0U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 2) */
#define REG_DMAC_SADDR3 (*(RwReg*)0x400C40B4U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 3) */
#define REG_DMAC_DADDR3 (*(RwReg*)0x400C40B8U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 3) */
#define REG_DMAC_DSCR3 (*(RwReg*)0x400C40BCU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 3) */
#define REG_DMAC_CTRLA3 (*(RwReg*)0x400C40C0U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 3) */
#define REG_DMAC_CTRLB3 (*(RwReg*)0x400C40C4U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 3) */
#define REG_DMAC_CFG3 (*(RwReg*)0x400C40C8U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 3) */
#define REG_DMAC_SADDR4 (*(RwReg*)0x400C40DCU) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 4) */
#define REG_DMAC_DADDR4 (*(RwReg*)0x400C40E0U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 4) */
#define REG_DMAC_DSCR4 (*(RwReg*)0x400C40E4U) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 4) */
#define REG_DMAC_CTRLA4 (*(RwReg*)0x400C40E8U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 4) */
#define REG_DMAC_CTRLB4 (*(RwReg*)0x400C40ECU) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 4) */
#define REG_DMAC_CFG4 (*(RwReg*)0x400C40F0U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 4) */
#define REG_DMAC_SADDR5 (*(RwReg*)0x400C4104U) /**< \brief (DMAC) DMAC Channel Source Address Register (ch_num = 5) */
#define REG_DMAC_DADDR5 (*(RwReg*)0x400C4108U) /**< \brief (DMAC) DMAC Channel Destination Address Register (ch_num = 5) */
#define REG_DMAC_DSCR5 (*(RwReg*)0x400C410CU) /**< \brief (DMAC) DMAC Channel Descriptor Address Register (ch_num = 5) */
#define REG_DMAC_CTRLA5 (*(RwReg*)0x400C4110U) /**< \brief (DMAC) DMAC Channel Control A Register (ch_num = 5) */
#define REG_DMAC_CTRLB5 (*(RwReg*)0x400C4114U) /**< \brief (DMAC) DMAC Channel Control B Register (ch_num = 5) */
#define REG_DMAC_CFG5 (*(RwReg*)0x400C4118U) /**< \brief (DMAC) DMAC Channel Configuration Register (ch_num = 5) */
#define REG_DMAC_WPMR (*(RwReg*)0x400C41E4U) /**< \brief (DMAC) DMAC Write Protect Mode Register */
#define REG_DMAC_WPSR (*(RoReg*)0x400C41E8U) /**< \brief (DMAC) DMAC Write Protect Status Register */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#endif /* _SAM3XA_DMAC_INSTANCE_ */
| {
"pile_set_name": "Github"
} |
[build_ext]
inplace=1
| {
"pile_set_name": "Github"
} |
/**
* @file
* @brief
*
* @date 27.12.12
* @author Alexander Kalmuk
*/
#include <javacall_memory.h>
#include <stdlib.h>
/*
 * Allocate a memory heap of the requested size.
 *
 * @param size     number of bytes to allocate
 * @param outSize  receives the number of bytes actually allocated:
 *                 equal to size on success, 0 on failure
 * @return pointer to the heap, or NULL if the allocation failed
 */
void* javacall_memory_heap_allocate(long size, /*OUT*/ long* outSize) {
    void* heap = malloc(size);
    *outSize = (heap != NULL) ? size : 0;
    return heap;
}
/*
 * Release a heap previously obtained from javacall_memory_heap_allocate().
 *
 * @param heap  the heap pointer to free (NULL is a harmless no-op for free())
 */
void javacall_memory_heap_deallocate(void* heap) {
    free(heap);
}
/*
 * Optional allocation hook: forwards directly to the C runtime allocator.
 *
 * @param size  number of bytes to allocate
 * @return pointer to the allocated block, or NULL on failure
 */
void* /*OPTIONAL*/ javacall_malloc(unsigned int size){
    return malloc(size);
}
/*
 * Optional deallocation hook: forwards directly to the C runtime.
 *
 * @param ptr  block previously returned by javacall_malloc()
 *             (NULL is a harmless no-op for free())
 */
void /*OPTIONAL*/ javacall_free(void* ptr) {
    free(ptr);
}
| {
"pile_set_name": "Github"
} |
# Three ARM simulator testcases for the ORR instruction, one per operand-2
# form.  NOTE(review): start/pass/orr00 appear to be macros supplied by
# testutils.inc — confirm their definitions there.

# arm testcase for orr$cond${set-cc?} $rd,$rn,$imm12
# mach: unfinished
.include "testutils.inc"
start
.global orr_imm
orr_imm:
# ORR with a 12-bit immediate operand
orr00 pc,pc,0
pass
# arm testcase for orr$cond${set-cc?} $rd,$rn,$rm,${operand2-shifttype} ${operand2-shiftimm}
# mach: unfinished
.include "testutils.inc"
start
.global orr_reg_imm_shift
orr_reg_imm_shift:
# ORR with a register operand shifted by an immediate amount
orr00 pc,pc,pc,lsl 0
pass
# arm testcase for orr$cond${set-cc?} $rd,$rn,$rm,${operand2-shifttype} ${operand2-shiftreg}
# mach: unfinished
.include "testutils.inc"
start
.global orr_reg_reg_shift
orr_reg_reg_shift:
# ORR with a register operand shifted by a register-specified amount
orr00 pc,pc,pc,lsl pc
pass
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<!-- this file is auto-generated. DO NOT EDIT.
/*
** Copyright (c) 2012 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
-->
<html>
<head>
<meta charset="utf-8">
<title>WebGL GLSL conformance test: faceforward_001_to_006.html</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css" />
<link rel="stylesheet" href="../../../../resources/ogles-tests.css" />
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../ogles-utils.js"></script>
</head>
<body>
<canvas id="example" width="500" height="500" style="width: 16px; height: 16px;"></canvas>
<div id="description"></div>
<div id="console"></div>
</body>
<script>
"use strict";
OpenGLESTestRunner.run({
"tests": [
{
"referenceProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_float_frag_nvaryiconst_ref.frag"
},
"model": null,
"testProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_float_frag_nvaryiconst.frag"
},
"name": "faceforward_float_frag_nvaryiconst.test.html",
"pattern": "compare"
},
{
"referenceProgram": {
"vertexShader": "faceforward_float_vert_nvaryiconst_ref.vert",
"fragmentShader": "../default/default.frag"
},
"model": "grid",
"testProgram": {
"vertexShader": "faceforward_float_vert_nvaryiconst.vert",
"fragmentShader": "../default/default.frag"
},
"name": "faceforward_float_vert_nvaryiconst.test.html",
"pattern": "compare"
},
{
"referenceProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_vec2_frag_nvaryiconst_ref.frag"
},
"model": null,
"testProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_vec2_frag_nvaryiconst.frag"
},
"name": "faceforward_vec2_frag_nvaryiconst.test.html",
"pattern": "compare"
},
{
"referenceProgram": {
"vertexShader": "faceforward_vec2_vert_nvaryiconst_ref.vert",
"fragmentShader": "../default/default.frag"
},
"model": "grid",
"testProgram": {
"vertexShader": "faceforward_vec2_vert_nvaryiconst.vert",
"fragmentShader": "../default/default.frag"
},
"name": "faceforward_vec2_vert_nvaryiconst.test.html",
"pattern": "compare"
},
{
"referenceProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_vec3_frag_nvaryiconst_ref.frag"
},
"model": null,
"testProgram": {
"vertexShader": "../default/default.vert",
"fragmentShader": "faceforward_vec3_frag_nvaryiconst.frag"
},
"name": "faceforward_vec3_frag_nvaryiconst.test.html",
"pattern": "compare"
},
{
"referenceProgram": {
"vertexShader": "faceforward_vec3_vert_nvaryiconst_ref.vert",
"fragmentShader": "../default/default.frag"
},
"model": "grid",
"testProgram": {
"vertexShader": "faceforward_vec3_vert_nvaryiconst.vert",
"fragmentShader": "../default/default.frag"
},
"name": "faceforward_vec3_vert_nvaryiconst.test.html",
"pattern": "compare"
}
]
});
var successfullyParsed = true;
</script>
</html>
| {
"pile_set_name": "Github"
} |
# -----------------------------------------------------------------------------
# Copyright (c) 2017 Leandro T. C. Melo ([email protected])
#
# All rights reserved. Unauthorized copying of this file, through any
# medium, is strictly prohibited.
#
# This software is provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, explicit or implicit. In no event shall the
# author be liable for any claim or damages.
# -----------------------------------------------------------------------------
import os
import sys
from Diagnostics import DiagnosticReporter, ERROR_CREATING_CONFIG_DIRECTORY
from LicenseFile import LicenseFile
from Logger import debug
class EnvironmentController:
    """
    Controls environment data: the per-user `.cnippet` configuration
    directory and (for commercial use) the license file stored in it.
    """

    # Tag used when emitting debug log messages.
    _id = 'env'

    def __init__(self, home_dir_path):
        """
        :param home_dir_path: the user's home directory; the configuration
                              directory lives at `<home>/.cnippet`.
        """
        self.home_dir_path = home_dir_path
        self.config_dir_path = os.path.join(home_dir_path, '.cnippet')

    def _ensure_config_dir_exists(self):
        """
        Ensure that the application directory exists.

        Exits the process with a fatal diagnostic if it cannot be created.
        """
        if not os.path.isdir(self.config_dir_path):
            debug(EnvironmentController._id,
                  'create config directory in %s' % self.home_dir_path)
        try:
            # exist_ok=True makes concurrent creation by another process a
            # no-op, replacing the previous manual isdir() re-check.
            os.makedirs(self.config_dir_path, exist_ok=True)
        except OSError:
            sys.exit(
                DiagnosticReporter.fatal(ERROR_CREATING_CONFIG_DIRECTORY))

    def _verify_license(self):
        """
        Verify whether a license file exists and check its content.
        """
        lic_path = os.path.join(self.config_dir_path, 'license.lic')
        LicenseFile(lic_path).verify()

    def check_all(self, non_commercial_use):
        """
        Perform all environment checks.

        :param non_commercial_use: when truthy, the license check is skipped.
        """
        self._ensure_config_dir_exists()
        if not non_commercial_use:
            self._verify_license()
| {
"pile_set_name": "Github"
} |
const createConsumer = require('../index')
const {
secureRandom,
createCluster,
createTopic,
newLogger,
waitForConsumerToJoinGroup,
flakyTest,
} = require('testHelpers')
describe('Consumer', () => {
let topicNames, groupId, consumer1, consumer2
// Fresh topics, group id, and primary consumer for every test so no state
// leaks between cases.
beforeEach(async () => {
  topicNames = [`test-topic-${secureRandom()}`, `test-topic-${secureRandom()}`]
  groupId = `consumer-group-id-${secureRandom()}`
  // Two partitions per topic so assignments can be split between members.
  await Promise.all(topicNames.map(topicName => createTopic({ topic: topicName, partitions: 2 })))
  consumer1 = createConsumer({
    cluster: createCluster({ metadataMaxAge: 50 }),
    groupId,
    heartbeatInterval: 100,
    maxWaitTimeInMs: 100,
    rebalanceTimeout: 1000,
    logger: newLogger(),
  })
})

// Disconnect whichever consumers a test created (consumer2 is optional).
afterEach(async () => {
  consumer1 && (await consumer1.disconnect())
  consumer2 && (await consumer2.disconnect())
})

it('handles receiving assignments for unsubscribed topics', async () => {
  await consumer1.connect()
  await Promise.all(
    topicNames.map(topicName => consumer1.subscribe({ topic: topicName, fromBeginning: true }))
  )
  consumer1.run({ eachMessage: () => {} })
  await waitForConsumerToJoinGroup(consumer1, { label: 'consumer1' })
  // Second consumer re-uses group id but only subscribes to one of the topics
  consumer2 = createConsumer({
    cluster: createCluster({ metadataMaxAge: 50 }),
    groupId,
    heartbeatInterval: 100,
    maxWaitTimeInMs: 1000,
    rebalanceTimeout: 1000,
    logger: newLogger(),
  })
  await consumer2.connect()
  await consumer2.subscribe({ topic: topicNames[0], fromBeginning: true })
  consumer2.run({ eachMessage: () => {} })
  const event = await waitForConsumerToJoinGroup(consumer2, { label: 'consumer2' })
  // verify that the assignment does not contain the unsubscribed topic
  expect(event.payload.memberAssignment[topicNames[1]]).toBeUndefined()
})
flakyTest('starts consuming from new topics after already having assignments', async () => {
consumer2 = createConsumer({
cluster: createCluster({ metadataMaxAge: 50 }),
groupId,
heartbeatInterval: 100,
maxWaitTimeInMs: 100,
rebalanceTimeout: 1000,
logger: newLogger(),
})
// Both consumers receive assignments for one topic
let assignments = await Promise.all(
[consumer1, consumer2].map(async consumer => {
await consumer.connect()
await consumer.subscribe({ topic: topicNames[0] })
consumer.run({ eachMessage: () => {} })
return waitForConsumerToJoinGroup(consumer)
})
)
assignments.forEach(assignment =>
expect(Object.keys(assignment.payload.memberAssignment)).toEqual([topicNames[0]])
)
// One consumer is replaced with a new one, subscribing to the old topic as well as a new one
await consumer1.disconnect()
consumer1 = createConsumer({
cluster: createCluster({ metadataMaxAge: 50 }),
groupId,
heartbeatInterval: 100,
maxWaitTimeInMs: 100,
rebalanceTimeout: 1000,
logger: newLogger(),
})
await consumer1.connect()
await Promise.all(topicNames.map(topic => consumer1.subscribe({ topic })))
consumer1.run({ eachMessage: () => {} })
await waitForConsumerToJoinGroup(consumer1)
// Second consumer is also replaced, subscribing to both topics
await consumer2.disconnect()
consumer2 = createConsumer({
cluster: createCluster({ metadataMaxAge: 50 }),
groupId,
heartbeatInterval: 100,
maxWaitTimeInMs: 100,
rebalanceTimeout: 1000,
logger: newLogger(),
})
await consumer2.connect()
await Promise.all(topicNames.map(topic => consumer2.subscribe({ topic })))
consumer2.run({ eachMessage: () => {} })
// Both consumers are assigned to both topics
assignments = await Promise.all(
[consumer1, consumer2].map(consumer => waitForConsumerToJoinGroup(consumer))
)
assignments.forEach(assignment =>
expect(Object.keys(assignment.payload.memberAssignment)).toEqual(topicNames)
)
})
})
| {
"pile_set_name": "Github"
} |
/*
* PS3 AV backend support.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_POWERPC_PS3AV_H_
#define _ASM_POWERPC_PS3AV_H_
/** command for ioctl() **/
#define PS3AV_VERSION 0x205 /* version of ps3av command */
#define PS3AV_CID_AV_INIT 0x00000001
#define PS3AV_CID_AV_FIN 0x00000002
#define PS3AV_CID_AV_GET_HW_CONF 0x00000003
#define PS3AV_CID_AV_GET_MONITOR_INFO 0x00000004
#define PS3AV_CID_AV_ENABLE_EVENT 0x00000006
#define PS3AV_CID_AV_DISABLE_EVENT 0x00000007
#define PS3AV_CID_AV_TV_MUTE 0x0000000a
#define PS3AV_CID_AV_VIDEO_CS 0x00010001
#define PS3AV_CID_AV_VIDEO_MUTE 0x00010002
#define PS3AV_CID_AV_VIDEO_DISABLE_SIG 0x00010003
#define PS3AV_CID_AV_AUDIO_PARAM 0x00020001
#define PS3AV_CID_AV_AUDIO_MUTE 0x00020002
#define PS3AV_CID_AV_HDMI_MODE 0x00040001
#define PS3AV_CID_VIDEO_INIT 0x01000001
#define PS3AV_CID_VIDEO_MODE 0x01000002
#define PS3AV_CID_VIDEO_FORMAT 0x01000004
#define PS3AV_CID_VIDEO_PITCH 0x01000005
#define PS3AV_CID_AUDIO_INIT 0x02000001
#define PS3AV_CID_AUDIO_MODE 0x02000002
#define PS3AV_CID_AUDIO_MUTE 0x02000003
#define PS3AV_CID_AUDIO_ACTIVE 0x02000004
#define PS3AV_CID_AUDIO_INACTIVE 0x02000005
#define PS3AV_CID_AUDIO_SPDIF_BIT 0x02000006
#define PS3AV_CID_AUDIO_CTRL 0x02000007
#define PS3AV_CID_EVENT_UNPLUGGED 0x10000001
#define PS3AV_CID_EVENT_PLUGGED 0x10000002
#define PS3AV_CID_EVENT_HDCP_DONE 0x10000003
#define PS3AV_CID_EVENT_HDCP_FAIL 0x10000004
#define PS3AV_CID_EVENT_HDCP_AUTH 0x10000005
#define PS3AV_CID_EVENT_HDCP_ERROR 0x10000006
#define PS3AV_CID_AVB_PARAM 0x04000001
/* max backend ports */
#define PS3AV_HDMI_MAX 2 /* HDMI_0 HDMI_1 */
#define PS3AV_AVMULTI_MAX 1 /* AVMULTI_0 */
#define PS3AV_AV_PORT_MAX (PS3AV_HDMI_MAX + PS3AV_AVMULTI_MAX)
#define PS3AV_OPT_PORT_MAX 1 /* SPDIF0 */
#define PS3AV_HEAD_MAX 2 /* HEAD_A HEAD_B */
/* num of pkt for PS3AV_CID_AVB_PARAM */
#define PS3AV_AVB_NUM_VIDEO PS3AV_HEAD_MAX
#define PS3AV_AVB_NUM_AUDIO 0 /* not supported */
#define PS3AV_AVB_NUM_AV_VIDEO PS3AV_AV_PORT_MAX
#define PS3AV_AVB_NUM_AV_AUDIO PS3AV_HDMI_MAX
#define PS3AV_MUTE_PORT_MAX 1 /* num of ports in mute pkt */
/* event_bit */
#define PS3AV_CMD_EVENT_BIT_UNPLUGGED (1 << 0)
#define PS3AV_CMD_EVENT_BIT_PLUGGED (1 << 1)
#define PS3AV_CMD_EVENT_BIT_HDCP_DONE (1 << 2)
#define PS3AV_CMD_EVENT_BIT_HDCP_FAIL (1 << 3)
#define PS3AV_CMD_EVENT_BIT_HDCP_REAUTH (1 << 4)
#define PS3AV_CMD_EVENT_BIT_HDCP_TOPOLOGY (1 << 5)
/* common params */
/* mute */
#define PS3AV_CMD_MUTE_OFF 0x0000
#define PS3AV_CMD_MUTE_ON 0x0001
/* avport */
#define PS3AV_CMD_AVPORT_HDMI_0 0x0000
#define PS3AV_CMD_AVPORT_HDMI_1 0x0001
#define PS3AV_CMD_AVPORT_AVMULTI_0 0x0010
#define PS3AV_CMD_AVPORT_SPDIF_0 0x0020
#define PS3AV_CMD_AVPORT_SPDIF_1 0x0021
/* for av backend */
/* av_mclk */
#define PS3AV_CMD_AV_MCLK_128 0x0000
#define PS3AV_CMD_AV_MCLK_256 0x0001
#define PS3AV_CMD_AV_MCLK_512 0x0003
/* av_inputlen */
#define PS3AV_CMD_AV_INPUTLEN_16 0x02
#define PS3AV_CMD_AV_INPUTLEN_20 0x0a
#define PS3AV_CMD_AV_INPUTLEN_24 0x0b
/* alayout */
#define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
#define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
#define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
#define PS3AV_CMD_AV_LAYOUT_88 (1 << 3)
#define PS3AV_CMD_AV_LAYOUT_96 (1 << 4)
#define PS3AV_CMD_AV_LAYOUT_176 (1 << 5)
#define PS3AV_CMD_AV_LAYOUT_192 (1 << 6)
/* hdmi_mode */
#define PS3AV_CMD_AV_HDMI_MODE_NORMAL 0xff
#define PS3AV_CMD_AV_HDMI_HDCP_OFF 0x01
#define PS3AV_CMD_AV_HDMI_EDID_PASS 0x80
#define PS3AV_CMD_AV_HDMI_DVI 0x40
/* for video module */
/* video_head */
#define PS3AV_CMD_VIDEO_HEAD_A 0x0000
#define PS3AV_CMD_VIDEO_HEAD_B 0x0001
/* video_cs_out video_cs_in */
#define PS3AV_CMD_VIDEO_CS_NONE 0x0000
#define PS3AV_CMD_VIDEO_CS_RGB_8 0x0001
#define PS3AV_CMD_VIDEO_CS_YUV444_8 0x0002
#define PS3AV_CMD_VIDEO_CS_YUV422_8 0x0003
#define PS3AV_CMD_VIDEO_CS_XVYCC_8 0x0004
#define PS3AV_CMD_VIDEO_CS_RGB_10 0x0005
#define PS3AV_CMD_VIDEO_CS_YUV444_10 0x0006
#define PS3AV_CMD_VIDEO_CS_YUV422_10 0x0007
#define PS3AV_CMD_VIDEO_CS_XVYCC_10 0x0008
#define PS3AV_CMD_VIDEO_CS_RGB_12 0x0009
#define PS3AV_CMD_VIDEO_CS_YUV444_12 0x000a
#define PS3AV_CMD_VIDEO_CS_YUV422_12 0x000b
#define PS3AV_CMD_VIDEO_CS_XVYCC_12 0x000c
/* video_vid */
#define PS3AV_CMD_VIDEO_VID_NONE 0x0000
#define PS3AV_CMD_VIDEO_VID_480I 0x0001
#define PS3AV_CMD_VIDEO_VID_576I 0x0003
#define PS3AV_CMD_VIDEO_VID_480P 0x0005
#define PS3AV_CMD_VIDEO_VID_576P 0x0006
#define PS3AV_CMD_VIDEO_VID_1080I_60HZ 0x0007
#define PS3AV_CMD_VIDEO_VID_1080I_50HZ 0x0008
#define PS3AV_CMD_VIDEO_VID_720P_60HZ 0x0009
#define PS3AV_CMD_VIDEO_VID_720P_50HZ 0x000a
#define PS3AV_CMD_VIDEO_VID_1080P_60HZ 0x000b
#define PS3AV_CMD_VIDEO_VID_1080P_50HZ 0x000c
#define PS3AV_CMD_VIDEO_VID_WXGA 0x000d
#define PS3AV_CMD_VIDEO_VID_SXGA 0x000e
#define PS3AV_CMD_VIDEO_VID_WUXGA 0x000f
#define PS3AV_CMD_VIDEO_VID_480I_A 0x0010
/* video_format */
#define PS3AV_CMD_VIDEO_FORMAT_BLACK 0x0000
#define PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT 0x0007
/* video_order */
#define PS3AV_CMD_VIDEO_ORDER_RGB 0x0000
#define PS3AV_CMD_VIDEO_ORDER_BGR 0x0001
/* video_fmt */
#define PS3AV_CMD_VIDEO_FMT_X8R8G8B8 0x0000
/* video_out_format */
#define PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT 0x0000
/* video_cl_cnv */
#define PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT 0x0000
#define PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT 0x0010
/* video_sync */
#define PS3AV_CMD_VIDEO_SYNC_VSYNC 0x0001
#define PS3AV_CMD_VIDEO_SYNC_CSYNC 0x0004
#define PS3AV_CMD_VIDEO_SYNC_HSYNC 0x0010
/* for audio module */
/* num_of_ch */
#define PS3AV_CMD_AUDIO_NUM_OF_CH_2 0x0000
#define PS3AV_CMD_AUDIO_NUM_OF_CH_3 0x0001
#define PS3AV_CMD_AUDIO_NUM_OF_CH_4 0x0002
#define PS3AV_CMD_AUDIO_NUM_OF_CH_5 0x0003
#define PS3AV_CMD_AUDIO_NUM_OF_CH_6 0x0004
#define PS3AV_CMD_AUDIO_NUM_OF_CH_7 0x0005
#define PS3AV_CMD_AUDIO_NUM_OF_CH_8 0x0006
/* audio_fs */
#define PS3AV_CMD_AUDIO_FS_32K 0x0001
#define PS3AV_CMD_AUDIO_FS_44K 0x0002
#define PS3AV_CMD_AUDIO_FS_48K 0x0003
#define PS3AV_CMD_AUDIO_FS_88K 0x0004
#define PS3AV_CMD_AUDIO_FS_96K 0x0005
#define PS3AV_CMD_AUDIO_FS_176K 0x0006
#define PS3AV_CMD_AUDIO_FS_192K 0x0007
/* audio_word_bits */
#define PS3AV_CMD_AUDIO_WORD_BITS_16 0x0001
#define PS3AV_CMD_AUDIO_WORD_BITS_20 0x0002
#define PS3AV_CMD_AUDIO_WORD_BITS_24 0x0003
/* audio_format */
#define PS3AV_CMD_AUDIO_FORMAT_PCM 0x0001
#define PS3AV_CMD_AUDIO_FORMAT_BITSTREAM 0x00ff
/* audio_source */
#define PS3AV_CMD_AUDIO_SOURCE_SERIAL 0x0000
#define PS3AV_CMD_AUDIO_SOURCE_SPDIF 0x0001
/* audio_swap */
#define PS3AV_CMD_AUDIO_SWAP_0 0x0000
#define PS3AV_CMD_AUDIO_SWAP_1 0x0000 /* NOTE(review): same value as SWAP_0 -- confirm intentional */
/* audio_map */
#define PS3AV_CMD_AUDIO_MAP_OUTPUT_0 0x0000
#define PS3AV_CMD_AUDIO_MAP_OUTPUT_1 0x0001
#define PS3AV_CMD_AUDIO_MAP_OUTPUT_2 0x0002
#define PS3AV_CMD_AUDIO_MAP_OUTPUT_3 0x0003
/* audio_layout */
#define PS3AV_CMD_AUDIO_LAYOUT_2CH 0x0000
#define PS3AV_CMD_AUDIO_LAYOUT_6CH 0x000b /* LREClr */
#define PS3AV_CMD_AUDIO_LAYOUT_8CH 0x001f /* LREClrXY */
/* audio_downmix */
#define PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED 0x0000
#define PS3AV_CMD_AUDIO_DOWNMIX_PROHIBITED 0x0001
/* audio_port */
#define PS3AV_CMD_AUDIO_PORT_HDMI_0 ( 1 << 0 )
#define PS3AV_CMD_AUDIO_PORT_HDMI_1 ( 1 << 1 )
#define PS3AV_CMD_AUDIO_PORT_AVMULTI_0 ( 1 << 10 )
#define PS3AV_CMD_AUDIO_PORT_SPDIF_0 ( 1 << 20 )
#define PS3AV_CMD_AUDIO_PORT_SPDIF_1 ( 1 << 21 )
/* audio_ctrl_id */
#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_RESET 0x0000
#define PS3AV_CMD_AUDIO_CTRL_ID_DAC_DE_EMPHASIS 0x0001
#define PS3AV_CMD_AUDIO_CTRL_ID_AVCLK 0x0002
/* audio_ctrl_data[0] reset */
#define PS3AV_CMD_AUDIO_CTRL_RESET_NEGATE 0x0000
#define PS3AV_CMD_AUDIO_CTRL_RESET_ASSERT 0x0001
/* audio_ctrl_data[0] de-emphasis */
#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_OFF 0x0000
#define PS3AV_CMD_AUDIO_CTRL_DE_EMPHASIS_ON 0x0001
/* audio_ctrl_data[0] avclk */
#define PS3AV_CMD_AUDIO_CTRL_AVCLK_22 0x0000
#define PS3AV_CMD_AUDIO_CTRL_AVCLK_18 0x0001
/* av_vid */
/* do not use these params directly, use vid_video2av */
#define PS3AV_CMD_AV_VID_480I 0x0000
#define PS3AV_CMD_AV_VID_480P 0x0001
#define PS3AV_CMD_AV_VID_720P_60HZ 0x0002
#define PS3AV_CMD_AV_VID_1080I_60HZ 0x0003
#define PS3AV_CMD_AV_VID_1080P_60HZ 0x0004
#define PS3AV_CMD_AV_VID_576I 0x0005
#define PS3AV_CMD_AV_VID_576P 0x0006
#define PS3AV_CMD_AV_VID_720P_50HZ 0x0007
#define PS3AV_CMD_AV_VID_1080I_50HZ 0x0008
#define PS3AV_CMD_AV_VID_1080P_50HZ 0x0009
#define PS3AV_CMD_AV_VID_WXGA 0x000a
#define PS3AV_CMD_AV_VID_SXGA 0x000b
#define PS3AV_CMD_AV_VID_WUXGA 0x000c
/* av_cs_out av_cs_in */
/* use cs_video2av() */
#define PS3AV_CMD_AV_CS_RGB_8 0x0000
#define PS3AV_CMD_AV_CS_YUV444_8 0x0001
#define PS3AV_CMD_AV_CS_YUV422_8 0x0002
#define PS3AV_CMD_AV_CS_XVYCC_8 0x0003
#define PS3AV_CMD_AV_CS_RGB_10 0x0004
#define PS3AV_CMD_AV_CS_YUV444_10 0x0005
#define PS3AV_CMD_AV_CS_YUV422_10 0x0006
#define PS3AV_CMD_AV_CS_XVYCC_10 0x0007
#define PS3AV_CMD_AV_CS_RGB_12 0x0008
#define PS3AV_CMD_AV_CS_YUV444_12 0x0009
#define PS3AV_CMD_AV_CS_YUV422_12 0x000a
#define PS3AV_CMD_AV_CS_XVYCC_12 0x000b
#define PS3AV_CMD_AV_CS_8 0x0000
#define PS3AV_CMD_AV_CS_10 0x0001
#define PS3AV_CMD_AV_CS_12 0x0002
/* dither */
#define PS3AV_CMD_AV_DITHER_OFF 0x0000
#define PS3AV_CMD_AV_DITHER_ON 0x0001
#define PS3AV_CMD_AV_DITHER_8BIT 0x0000
#define PS3AV_CMD_AV_DITHER_10BIT 0x0002
#define PS3AV_CMD_AV_DITHER_12BIT 0x0004
/* super_white */
#define PS3AV_CMD_AV_SUPER_WHITE_OFF 0x0000
#define PS3AV_CMD_AV_SUPER_WHITE_ON 0x0001
/* aspect */
#define PS3AV_CMD_AV_ASPECT_16_9 0x0000
#define PS3AV_CMD_AV_ASPECT_4_3 0x0001
/* video_cs_cnv() */
#define PS3AV_CMD_VIDEO_CS_RGB 0x0001
#define PS3AV_CMD_VIDEO_CS_YUV422 0x0002
#define PS3AV_CMD_VIDEO_CS_YUV444 0x0003
/* for broadcast automode */
#define PS3AV_RESBIT_720x480P 0x0003 /* 0x0001 | 0x0002 */
#define PS3AV_RESBIT_720x576P 0x0003 /* 0x0001 | 0x0002 */
#define PS3AV_RESBIT_1280x720P 0x0004
#define PS3AV_RESBIT_1920x1080I 0x0008
#define PS3AV_RESBIT_1920x1080P 0x4000
#define PS3AV_RES_MASK_60 (PS3AV_RESBIT_720x480P \
| PS3AV_RESBIT_1280x720P \
| PS3AV_RESBIT_1920x1080I \
| PS3AV_RESBIT_1920x1080P)
#define PS3AV_RES_MASK_50 (PS3AV_RESBIT_720x576P \
| PS3AV_RESBIT_1280x720P \
| PS3AV_RESBIT_1920x1080I \
| PS3AV_RESBIT_1920x1080P)
/* for VESA automode */
#define PS3AV_RESBIT_VGA 0x0001
#define PS3AV_RESBIT_WXGA 0x0002
#define PS3AV_RESBIT_SXGA 0x0004
#define PS3AV_RESBIT_WUXGA 0x0008
#define PS3AV_RES_MASK_VESA (PS3AV_RESBIT_WXGA |\
PS3AV_RESBIT_SXGA |\
PS3AV_RESBIT_WUXGA)
#define PS3AV_MONITOR_TYPE_HDMI 1 /* HDMI */
#define PS3AV_MONITOR_TYPE_DVI 2 /* DVI */
/* for video mode */
enum ps3av_mode_num {
PS3AV_MODE_AUTO = 0,
PS3AV_MODE_480I = 1,
PS3AV_MODE_480P = 2,
PS3AV_MODE_720P60 = 3,
PS3AV_MODE_1080I60 = 4,
PS3AV_MODE_1080P60 = 5,
PS3AV_MODE_576I = 6,
PS3AV_MODE_576P = 7,
PS3AV_MODE_720P50 = 8,
PS3AV_MODE_1080I50 = 9,
PS3AV_MODE_1080P50 = 10,
PS3AV_MODE_WXGA = 11,
PS3AV_MODE_SXGA = 12,
PS3AV_MODE_WUXGA = 13,
};
#define PS3AV_MODE_MASK 0x000F
#define PS3AV_MODE_HDCP_OFF 0x1000 /* Retail PS3 product doesn't support this */
#define PS3AV_MODE_DITHER 0x0800
#define PS3AV_MODE_COLOR 0x0400
#define PS3AV_MODE_WHITE 0x0200
#define PS3AV_MODE_FULL 0x0080
#define PS3AV_MODE_DVI 0x0040
#define PS3AV_MODE_RGB 0x0020
#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60 PS3AV_MODE_480P
#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60 PS3AV_MODE_480I
#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50 PS3AV_MODE_576P
#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50 PS3AV_MODE_576I
#define PS3AV_REGION_60 0x01
#define PS3AV_REGION_50 0x02
#define PS3AV_REGION_RGB 0x10
#define get_status(buf) (((__u32 *)buf)[2])
#define PS3AV_HDR_SIZE 4 /* version + size */
/** command packet structure **/
struct ps3av_send_hdr {
u16 version;
u16 size; /* size of command packet */
u32 cid; /* command id */
};
struct ps3av_reply_hdr {
u16 version;
u16 size;
u32 cid;
u32 status;
};
/* backend: initialization */
struct ps3av_pkt_av_init {
struct ps3av_send_hdr send_hdr;
u32 event_bit;
};
/* backend: finalize */
struct ps3av_pkt_av_fin {
struct ps3av_send_hdr send_hdr;
/* recv */
u32 reserved;
};
/* backend: get hardware (port) configuration */
struct ps3av_pkt_av_get_hw_conf {
	struct ps3av_send_hdr send_hdr;
	/* recv */
	u32 status;
	u16 num_of_hdmi;	/* out: number of hdmi ports */
	u16 num_of_avmulti;	/* out: number of avmulti ports */
	u16 num_of_spdif;	/* out: number of spdif ports */
	u16 reserved;
};
/* backend: get monitor info */
struct ps3av_info_resolution {
u32 res_bits;
u32 native;
};
struct ps3av_info_cs {
u8 rgb;
u8 yuv444;
u8 yuv422;
u8 reserved;
};
struct ps3av_info_color {
u16 red_x;
u16 red_y;
u16 green_x;
u16 green_y;
u16 blue_x;
u16 blue_y;
u16 white_x;
u16 white_y;
u32 gamma;
};
struct ps3av_info_audio {
u8 type;
u8 max_num_of_ch;
u8 fs;
u8 sbit;
};
/* Monitor/EDID information returned by PS3AV_CID_AV_GET_MONITOR_INFO. */
struct ps3av_info_monitor {
	u8 avport;
	u8 monitor_id[10];
	u8 monitor_type;
	u8 monitor_name[16];
	struct ps3av_info_resolution res_60;
	struct ps3av_info_resolution res_50;
	struct ps3av_info_resolution res_other;
	struct ps3av_info_resolution res_vesa;
	struct ps3av_info_cs cs;
	struct ps3av_info_color color;
	u8 supported_ai;
	u8 speaker_info;
	u8 num_of_audio_block;
	/* Zero-length array: the num_of_audio_block audio entries occupy
	 * the leading bytes of reserved[] below. */
	struct ps3av_info_audio audio[0]; /* 0 or more audio blocks */
	u8 reserved[169];
} __attribute__ ((packed));
struct ps3av_pkt_av_get_monitor_info {
struct ps3av_send_hdr send_hdr;
u16 avport; /* in: avport */
u16 reserved;
/* recv */
struct ps3av_info_monitor info; /* out: monitor info */
};
/* backend: enable/disable event */
struct ps3av_pkt_av_event {
struct ps3av_send_hdr send_hdr;
u32 event_bit; /* in */
};
/* backend: video cs param */
struct ps3av_pkt_av_video_cs {
struct ps3av_send_hdr send_hdr;
u16 avport; /* in: avport */
u16 av_vid; /* in: video resolution */
u16 av_cs_out; /* in: output color space */
u16 av_cs_in; /* in: input color space */
u8 dither; /* in: dither bit length */
u8 bitlen_out; /* in: bit length */
u8 super_white; /* in: super white */
u8 aspect; /* in: aspect ratio */
};
/* backend: video mute */
struct ps3av_av_mute {
u16 avport; /* in: avport */
u16 mute; /* in: mute on/off */
};
struct ps3av_pkt_av_video_mute {
struct ps3av_send_hdr send_hdr;
struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
};
/* backend: video disable signal */
struct ps3av_pkt_av_video_disable_sig {
struct ps3av_send_hdr send_hdr;
u16 avport; /* in: avport */
u16 reserved;
};
/* backend: audio param */
struct ps3av_audio_info_frame {
struct pb1_bit {
u8 ct:4;
u8 rsv:1;
u8 cc:3;
} pb1;
struct pb2_bit {
u8 rsv:3;
u8 sf:3;
u8 ss:2;
} pb2;
u8 pb3;
u8 pb4;
struct pb5_bit {
u8 dm:1;
u8 lsv:4;
u8 rsv:3;
} pb5;
};
struct ps3av_pkt_av_audio_param {
struct ps3av_send_hdr send_hdr;
u16 avport; /* in: avport */
u16 reserved;
u8 mclk; /* in: audio mclk */
u8 ns[3]; /* in: audio ns val */
u8 enable; /* in: audio enable */
u8 swaplr; /* in: audio swap */
u8 fifomap; /* in: audio fifomap */
u8 inputctrl; /* in: audio input ctrl */
u8 inputlen; /* in: sample bit size */
u8 layout; /* in: speaker layout param */
struct ps3av_audio_info_frame info; /* in: info */
u8 chstat[5]; /* in: ch stat */
};
/* backend: audio_mute */
struct ps3av_pkt_av_audio_mute {
struct ps3av_send_hdr send_hdr;
struct ps3av_av_mute mute[PS3AV_MUTE_PORT_MAX];
};
/* backend: hdmi_mode */
struct ps3av_pkt_av_hdmi_mode {
struct ps3av_send_hdr send_hdr;
u8 mode; /* in: hdmi_mode */
u8 reserved0;
u8 reserved1;
u8 reserved2;
};
/* backend: tv_mute */
struct ps3av_pkt_av_tv_mute {
struct ps3av_send_hdr send_hdr;
u16 avport; /* in: avport HDMI only */
u16 mute; /* in: mute */
};
/* video: initialize */
struct ps3av_pkt_video_init {
struct ps3av_send_hdr send_hdr;
/* recv */
u32 reserved;
};
/* video: mode setting */
struct ps3av_pkt_video_mode {
struct ps3av_send_hdr send_hdr;
u32 video_head; /* in: head */
u32 reserved;
u32 video_vid; /* in: video resolution */
u16 reserved1;
u16 width; /* in: width in pixel */
u16 reserved2;
u16 height; /* in: height in pixel */
u32 pitch; /* in: line size in byte */
u32 video_out_format; /* in: out format */
u32 video_format; /* in: input frame buffer format */
u8 reserved3;
u8 video_cl_cnv; /* in: color conversion */
u16 video_order; /* in: input RGB order */
u32 reserved4;
};
/* video: format */
struct ps3av_pkt_video_format {
struct ps3av_send_hdr send_hdr;
u32 video_head; /* in: head */
u32 video_format; /* in: frame buffer format */
u8 reserved;
u8 video_cl_cnv; /* in: color conversion */
u16 video_order; /* in: input RGB order */
};
/* video: pitch */
struct ps3av_pkt_video_pitch {
u16 version;
u16 size; /* size of command packet */
u32 cid; /* command id */
u32 video_head; /* in: head */
u32 pitch; /* in: line size in byte */
};
/* audio: initialize */
struct ps3av_pkt_audio_init {
struct ps3av_send_hdr send_hdr;
/* recv */
u32 reserved;
};
/* audio: mode setting */
struct ps3av_pkt_audio_mode {
struct ps3av_send_hdr send_hdr;
u8 avport; /* in: avport */
u8 reserved0[3];
u32 mask; /* in: mask */
u32 audio_num_of_ch; /* in: number of ch */
u32 audio_fs; /* in: sampling freq */
u32 audio_word_bits; /* in: sample bit size */
u32 audio_format; /* in: audio output format */
u32 audio_source; /* in: audio source */
u8 audio_enable[4]; /* in: audio enable */
u8 audio_swap[4]; /* in: audio swap */
u8 audio_map[4]; /* in: audio map */
u32 audio_layout; /* in: speaker layout */
u32 audio_downmix; /* in: audio downmix permission */
u32 audio_downmix_level;
u8 audio_cs_info[8]; /* in: IEC channel status */
};
/* audio: mute */
struct ps3av_audio_mute {
u8 avport; /* in: opt_port optical */
u8 reserved[3];
u32 mute; /* in: mute */
};
struct ps3av_pkt_audio_mute {
struct ps3av_send_hdr send_hdr;
struct ps3av_audio_mute mute[PS3AV_OPT_PORT_MAX];
};
/* audio: active/inactive */
struct ps3av_pkt_audio_active {
struct ps3av_send_hdr send_hdr;
u32 audio_port; /* in: audio active/inactive port */
};
/* audio: SPDIF user bit */
struct ps3av_pkt_audio_spdif_bit {
u16 version;
u16 size; /* size of command packet */
u32 cid; /* command id */
u8 avport; /* in: avport SPDIF only */
u8 reserved[3];
u32 audio_port; /* in: SPDIF only */
u32 spdif_bit_data[12]; /* in: user bit data */
};
/* audio: audio control */
struct ps3av_pkt_audio_ctrl {
u16 version;
u16 size; /* size of command packet */
u32 cid; /* command id */
u32 audio_ctrl_id; /* in: control id */
u32 audio_ctrl_data[4]; /* in: control data */
};
/* avb:param */
#define PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE \
(PS3AV_AVB_NUM_VIDEO*sizeof(struct ps3av_pkt_video_mode) + \
PS3AV_AVB_NUM_AUDIO*sizeof(struct ps3av_pkt_audio_mode) + \
PS3AV_AVB_NUM_AV_VIDEO*sizeof(struct ps3av_pkt_av_video_cs) + \
PS3AV_AVB_NUM_AV_AUDIO*sizeof(struct ps3av_pkt_av_audio_param))
struct ps3av_pkt_avb_param {
struct ps3av_send_hdr send_hdr;
u16 num_of_video_pkt;
u16 num_of_audio_pkt;
u16 num_of_av_video_pkt;
u16 num_of_av_audio_pkt;
/*
* The actual buffer layout depends on the fields above:
*
* struct ps3av_pkt_video_mode video[num_of_video_pkt];
* struct ps3av_pkt_audio_mode audio[num_of_audio_pkt];
* struct ps3av_pkt_av_video_cs av_video[num_of_av_video_pkt];
* struct ps3av_pkt_av_audio_param av_audio[num_of_av_audio_pkt];
*/
u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
};
/* channel status */
extern u8 ps3av_mode_cs_info[];
/** command status **/
#define PS3AV_STATUS_SUCCESS 0x0000 /* success */
#define PS3AV_STATUS_RECEIVE_VUART_ERROR 0x0001 /* receive vuart error */
#define PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL 0x0002 /* syscon communication error */
#define PS3AV_STATUS_INVALID_COMMAND 0x0003 /* obsolete invalid CID */
#define PS3AV_STATUS_INVALID_PORT 0x0004 /* invalid port number */
#define PS3AV_STATUS_INVALID_VID 0x0005 /* invalid video format */
#define PS3AV_STATUS_INVALID_COLOR_SPACE 0x0006 /* invalid video color space */
#define PS3AV_STATUS_INVALID_FS 0x0007 /* invalid audio sampling freq */
#define PS3AV_STATUS_INVALID_AUDIO_CH 0x0008 /* invalid audio channel number */
#define PS3AV_STATUS_UNSUPPORTED_VERSION 0x0009 /* version mismatch */
#define PS3AV_STATUS_INVALID_SAMPLE_SIZE 0x000a /* invalid audio sample bit size */
#define PS3AV_STATUS_FAILURE 0x000b /* other failures */
#define PS3AV_STATUS_UNSUPPORTED_COMMAND 0x000c /* unsupported cid */
#define PS3AV_STATUS_BUFFER_OVERFLOW 0x000d /* write buffer overflow */
#define PS3AV_STATUS_INVALID_VIDEO_PARAM 0x000e /* invalid video param */
#define PS3AV_STATUS_NO_SEL 0x000f /* not exist selector */
#define PS3AV_STATUS_INVALID_AV_PARAM 0x0010 /* invalid backend param */
#define PS3AV_STATUS_INVALID_AUDIO_PARAM 0x0011 /* invalid audio param */
#define PS3AV_STATUS_UNSUPPORTED_HDMI_MODE 0x0012 /* unsupported hdmi mode */
#define PS3AV_STATUS_NO_SYNC_HEAD 0x0013 /* sync head failed */
extern void ps3av_set_hdr(u32, u16, struct ps3av_send_hdr *);
extern int ps3av_do_pkt(u32, u16, size_t, struct ps3av_send_hdr *);
extern int ps3av_cmd_init(void);
extern int ps3av_cmd_fin(void);
extern int ps3av_cmd_av_video_mute(int, u32 *, u32);
extern int ps3av_cmd_av_video_disable_sig(u32);
extern int ps3av_cmd_av_tv_mute(u32, u32);
extern int ps3av_cmd_enable_event(void);
extern int ps3av_cmd_av_hdmi_mode(u8);
extern u32 ps3av_cmd_set_av_video_cs(void *, u32, int, int, int, u32);
extern u32 ps3av_cmd_set_video_mode(void *, u32, int, int, u32);
extern int ps3av_cmd_video_format_black(u32, u32, u32);
extern int ps3av_cmd_av_audio_mute(int, u32 *, u32);
extern u32 ps3av_cmd_set_av_audio_param(void *, u32,
const struct ps3av_pkt_audio_mode *,
u32);
extern void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *, u32, u32,
u32, u32, u32, u32);
extern int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *);
extern int ps3av_cmd_audio_mute(int, u32 *, u32);
extern int ps3av_cmd_audio_active(int, u32);
extern int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *, u32);
extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *);
extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
u32);
extern int ps3av_set_video_mode(int);
extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
extern int ps3av_get_auto_mode(void);
extern int ps3av_get_mode(void);
extern int ps3av_video_mode2res(u32, u32 *, u32 *);
extern int ps3av_video_mute(int);
extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
extern int ps3av_dev_open(void);
extern int ps3av_dev_close(void);
#endif /* _ASM_POWERPC_PS3AV_H_ */
| {
"pile_set_name": "Github"
} |
########################
# logstash Configuration Files - Bro IDS Logs
# Created by 505Forensics (http://www.505forensics.com)
# MIT License, so do what you want with it!
#
# For use with logstash, elasticsearch, and kibana to analyze logs
#
# Usage: Reference this config file for your instance of logstash to parse Bro dns logs
#
# Limitations: Standard bro log delimiter is tab.
#
#######################
input {
  file {
    # Tag events so the filter below only parses Bro dns logs
    type => "bro-dns_log"
    # Read existing log content from the top, and ignore the sincedb so the
    # file is re-read in full on every restart
    start_position => "beginning"
    sincedb_path => "/dev/null"
    #Edit the following path to reflect the location of your log files. You can also change the extension if you use something else
    path => "/path/to/your/bro/logs/dns*.log"
  }
}
filter {
  # Bro writes commented header/footer lines beginning with '#'; drop them
  # so they are not parsed as events.
  if [message] =~ /^#/ {
    drop { }
  }
  # Map the tab-separated fields of Bro's dns.log onto named columns
  if [type] == "bro-dns_log" {
    csv {
      columns => ["ts","uid","id.orig_h","id.orig_p","id.resp_h","id.resp_p","proto","trans_id","query","qclass","qclass_name","qtype","qtype_name","rcode","rcode_name","AA","TC","RD","RA","Z","answers","TTLs","rejected"]
      # Bro logs are tab-delimited by default: the separator below is a
      # literal tab character (logstash does not expand "\t" in config
      # strings unless config.support_escapes is enabled). If you use a
      # custom delimiter, replace it here.
      separator => "	"
    }
    # Parse the epoch-seconds 'ts' field so Kibana time features work natively
    date {
      match => [ "ts", "UNIX" ]
    }
    # Cast numeric fields so elasticsearch indexes them as integers
    mutate {
      convert => [ "id.orig_p", "integer" ]
      convert => [ "id.resp_p", "integer" ]
      convert => [ "trans_id", "integer" ]
      convert => [ "qclass", "integer" ]
      convert => [ "qtype", "integer" ]
      convert => [ "rcode", "integer" ]
    }
  }
}
output {
  # Ship parsed events to the elasticsearch instance embedded in logstash
  elasticsearch {
    embedded => true
  }
}
| {
"pile_set_name": "Github"
} |
"""
Unit tests for format checking
"""
from __future__ import print_function
from nose.plugins.skip import SkipTest
import os
import pylearn2
from pylearn2.devtools.tests.docscrape import docstring_errors
from pylearn2.devtools.list_files import list_files
from pylearn2.devtools.tests.pep8.pep8 import StyleGuide
# Files excused from the PEP8 check in test_format_pep8 below. Paths are
# relative to the pylearn2 package root; remove an entry once its file
# passes PEP8 (print_files_information_pep8 reports removable entries).
whitelist_pep8 = [
    "rbm_tools.py",
    "distributions/mnd.py",
    "models/sparse_autoencoder.py",
    "models/tests/test_dbm.py",
    "models/tests/test_s3c_inference.py",
    "models/tests/test_mnd.py",
    "models/tests/test_s3c_misc.py",
    "models/gsn.py",
    "models/dbm/layer.py",
    "models/dbm/__init__.py",
    "models/dbm/ising.py",
    "models/differentiable_sparse_coding.py",
    "models/local_coordinate_coding.py",
    "models/mnd.py",
    "models/s3c.py",
    "tests/test_monitor.py",
    "kmeans.py",
    "packaged_dependencies/theano_linear/conv2d.py",
    "packaged_dependencies/theano_linear/imaging.py",
    "packaged_dependencies/theano_linear/pyramid.py",
    # The following pairs of adjacent string literals are implicitly
    # concatenated single entries (kept split for line length).
    "packaged_dependencies/theano_linear/unshared_conv/"
    "test_gpu_unshared_conv.py",
    "packaged_dependencies/theano_linear/unshared_conv/"
    "test_localdot.py",
    "packaged_dependencies/theano_linear/unshared_conv/localdot.py",
    "packaged_dependencies/theano_linear/unshared_conv/"
    "unshared_conv.py",
    "packaged_dependencies/theano_linear/linear.py",
    "packaged_dependencies/theano_linear/test_spconv.py",
    "packaged_dependencies/theano_linear/test_matrixmul.py",
    "packaged_dependencies/theano_linear/spconv.py",
    "expr/tests/test_coding.py",
    "expr/tests/test_normalize.py",
    "expr/tests/test_stochastic_pool.py",
    "expr/stochastic_pool.py",
    "expr/sampling.py",
    "expr/information_theory.py",
    "expr/basic.py",
    "gui/graph_2D.py",
    "sandbox/cuda_convnet/weight_acts.py",
    "sandbox/cuda_convnet/filter_acts.py",
    "sandbox/cuda_convnet/tests/test_filter_acts_strided.py",
    "sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py",
    "sandbox/cuda_convnet/tests/test_filter_acts.py",
    "sandbox/cuda_convnet/tests/test_weight_acts_strided.py",
    "sandbox/cuda_convnet/tests/test_image_acts_strided.py",
    "sandbox/cuda_convnet/tests/test_img_acts.py",
    "sandbox/cuda_convnet/tests/test_stochastic_pool.py",
    "sandbox/cuda_convnet/specialized_bench.py",
    "sandbox/cuda_convnet/response_norm.py",
    "sandbox/cuda_convnet/__init__.py",
    "sandbox/cuda_convnet/img_acts.py",
    "sandbox/cuda_convnet/convnet_compile.py",
    "sandbox/cuda_convnet/pthreads.py",
    "sandbox/cuda_convnet/pool.py",
    "sandbox/cuda_convnet/bench.py",
    "sandbox/cuda_convnet/stochastic_pool.py",
    "sandbox/cuda_convnet/probabilistic_max_pooling.py",
    "sandbox/tuple_var.py",
    "sandbox/lisa_rl/bandit/average_agent.py",
    "sandbox/lisa_rl/bandit/classifier_bandit.py",
    "sandbox/lisa_rl/bandit/classifier_agent.py",
    "sandbox/lisa_rl/bandit/plot_reward.py",
    "config/old_config.py",
    "utils/utlc.py",
    "utils/tests/test_serial.py",
    "utils/common_strings.py",
    "utils/mem.py",
    "dataset_get/dataset-get.py",
    "dataset_get/helper-scripts/make-archive.py",
    "dataset_get/dataset_resolver.py",
    "optimization/minres.py",
    "linear/conv2d.py",
    "linear/local_c01b.py",
    "linear/linear_transform.py",
    "linear/conv2d_c01b.py",
    "energy_functions/rbm_energy.py",
    "scripts/pkl_inspector.py",
    "scripts/show_binocular_greyscale_examples.py",
    "scripts/jobman/tester.py",
    "scripts/dbm/dbm_metrics.py",
    "scripts/papers/maxout/svhn_preprocessing.py",
    "scripts/papers/jia_huang_wkshp_11/fit_final_model.py",
    "scripts/papers/jia_huang_wkshp_11/evaluate.py",
    "scripts/papers/jia_huang_wkshp_11/extract_features.py",
    "scripts/papers/jia_huang_wkshp_11/assemble.py",
    "scripts/gpu_pkl_to_cpu_pkl.py",
    "scripts/gsn_example.py",
    "scripts/tutorials/deep_trainer/run_deep_trainer.py",
    "scripts/tutorials/grbm_smd/test_grbm_smd.py",
    "scripts/icml_2013_wrepl/multimodal/"
    "extract_layer_2_kmeans_features.py",
    "scripts/icml_2013_wrepl/multimodal/make_submission.py",
    "scripts/icml_2013_wrepl/multimodal/lcn.py",
    "scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py",
    "scripts/icml_2013_wrepl/emotions/emotions_dataset.py",
    "scripts/icml_2013_wrepl/emotions/make_submission.py",
    "scripts/icml_2013_wrepl/black_box/black_box_dataset.py",
    "scripts/icml_2013_wrepl/black_box/make_submission.py",
    "scripts/diff_monitor.py",
    "corruption.py",
    "sandbox/lisa_rl/bandit/gaussian_bandit.py",
    "utils/track_version.py",
    "scripts/get_version.py",
    "training_algorithms/tests/test_bgd.py",
    "training_algorithms/tests/test_default.py",
    "training_algorithms/default.py",
    "training_algorithms/training_algorithm.py",
    "distributions/tests/test_mnd.py",
    "distributions/parzen.py",
    "distributions/uniform_hypersphere.py",
    "models/setup.py",
    "models/independent_multiclass_logistic.py",
    "models/softmax_regression.py",
    "models/tests/test_reflection_clip.py",
    "models/tests/test_maxout.py",
    "models/tests/test_convelemwise_sigm.py",
    "models/dbm/sampling_procedure.py",
    "models/rbm.py",
    "models/pca.py",
    "tests/test_train.py",
    "packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py",
    "packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py",
    "packaged_dependencies/theano_linear/linearmixin.py",
    "packaged_dependencies/theano_linear/util.py",
    "packaged_dependencies/theano_linear/__init__.py",
    "packaged_dependencies/theano_linear/test_linear.py",
    "expr/tests/test_nnet.py",
    "expr/image.py",
    "expr/coding.py",
    "expr/normalize.py",
    "expr/probabilistic_max_pooling.py",
    "testing/tests/test.py",
    "testing/skip.py",
    "testing/prereqs.py",
    "testing/__init__.py",
    "gui/get_weights_report.py",
    "gui/patch_viewer.py",
    "sandbox/cuda_convnet/tests/test_response_norm.py",
    "sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py",
    "sandbox/cuda_convnet/tests/test_rop_pool.py",
    "sandbox/cuda_convnet/tests/test_pool.py",
    "sandbox/cuda_convnet/tests/test_common.py",
    "sandbox/cuda_convnet/shared_code.py",
    "sandbox/cuda_convnet/code_templates.py",
    "sandbox/lisa_rl/bandit/agent.py",
    "sandbox/lisa_rl/bandit/algorithm.py",
    "sandbox/lisa_rl/bandit/environment.py",
    "sandbox/lisa_rl/__init__.py",
    "datasets/avicenna.py",
    "datasets/iris.py",
    "datasets/adult.py",
    "datasets/npy_npz.py",
    "datasets/control.py",
    "datasets/cifar100.py",
    "datasets/transformer_dataset.py",
    "termination_criteria/__init__.py",
    "__init__.py",
    "utils/logger.py",
    "utils/tests/test_mnist_ubyte.py",
    "utils/tests/test_data_specs.py",
    "utils/tests/test_bit_strings.py",
    "utils/tests/test_iteration.py",
    "utils/theano_graph.py",
    "utils/__init__.py",
    "utils/datasets.py",
    "utils/data_specs.py",
    "utils/insert_along_axis.py",
    "utils/environ.py",
    "utils/call_check.py",
    "utils/python26.py",
    "deprecated/classifier.py",
    "train.py",
    "classifier.py",
    "dataset_get/helper-scripts/make-sources.py",
    "pca.py",
    "optimization/test_linesearch.py",
    "optimization/test_minres.py",
    "optimization/test_batch_gradient_descent.py",
    "optimization/linear_cg.py",
    "optimization/test_feature_sign.py",
    "optimization/feature_sign.py",
    "optimization/test_linear_cg.py",
    "optimization/linesearch.py",
    "linear/tests/test_conv2d.py",
    "linear/tests/test_conv2d_c01b.py",
    "linear/matrixmul.py",
    "energy_functions/energy_function.py",
    "scripts/make_weights_image.py",
    "scripts/plot_monitor.py",
    "scripts/print_monitor.py",
    "scripts/num_parameters.py",
    "scripts/benchmark/time_relu.py",
    "scripts/jobman/experiment.py",
    "scripts/jobman/__init__.py",
    "scripts/dbm/show_negative_chains.py",
    "scripts/papers/maxout/compute_test_err.py",
    "scripts/papers/jia_huang_wkshp_11/npy2mat.py",
    "scripts/datasets/step_through_small_norb.py",
    "scripts/datasets/step_through_norb_foveated.py",
    "scripts/datasets/make_downsampled_stl10.py",
    "scripts/datasets/browse_small_norb.py",
    "scripts/datasets/make_mnistplus.py",
    "scripts/mlp/predict_csv.py",
    "scripts/find_gpu_fields.py",
    "scripts/tutorials/deep_trainer/test_deep_trainer.py",
    "scripts/icml_2013_wrepl/multimodal/make_wordlist.py",
    "base.py",
    "devtools/tests/test_via_pyflakes.py",
    "devtools/tests/test_shebangs.py",
    "devtools/tests/pep8/pep8.py",
    "devtools/tests/docscrape.py",
    "devtools/run_pyflakes.py",
    "devtools/record.py",
    "train_extensions/tests/test_window_flip.py",
    "train_extensions/__init__.py",
]
# Files excused from the docstring-format check in test_format_docstrings
# below. Paths are relative to the pylearn2 package root; remove an entry
# once its docstrings pass the checks in docscrape.docstring_errors.
whitelist_docstrings = [
    'scripts/datasets/step_through_norb_foveated.py',
    'blocks.py',
    'datasets/hdf5.py',
    'rbm_tools.py',
    'training_algorithms/tests/test_bgd.py',
    'training_algorithms/tests/test_sgd.py',
    'training_algorithms/tests/test_default.py',
    'training_algorithms/bgd.py',
    'training_algorithms/default.py',
    'training_algorithms/training_algorithm.py',
    'training_algorithms/__init__.py',
    'training_algorithms/sgd.py',
    'distributions/tests/test_mnd.py',
    'distributions/multinomial.py',
    'distributions/parzen.py',
    'distributions/__init__.py',
    'distributions/mnd.py',
    'distributions/uniform_hypersphere.py',
    'models/setup.py',
    'models/independent_multiclass_logistic.py',
    'models/softmax_regression.py',
    'models/sparse_autoencoder.py',
    'models/tests/test_reflection_clip.py',
    'models/tests/test_dbm.py',
    'models/tests/test_gsn.py',
    'models/tests/test_dropout.py',
    'models/tests/test_autoencoder.py',
    'models/tests/test_mlp.py',
    'models/tests/test_s3c_inference.py',
    'models/tests/test_maxout.py',
    'models/tests/test_mnd.py',
    'models/tests/test_vae.py',
    'models/tests/test_rbm.py',
    'models/tests/test_s3c_misc.py',
    'models/gsn.py',
    'models/dbm/sampling_procedure.py',
    'models/dbm/layer.py',
    'models/dbm/__init__.py',
    'models/dbm/dbm.py',
    'models/dbm/ising.py',
    'models/differentiable_sparse_coding.py',
    'models/local_coordinate_coding.py',
    'models/maxout.py',
    'models/s3c.py',
    'models/mnd.py',
    'models/rbm.py',
    'models/autoencoder.py',
    'tests/test_dbm_metrics.py',
    'tests/test_monitor.py',
    'tests/test_train.py',
    'tests/rbm/test_ais.py',
    'kmeans.py',
    'packaged_dependencies/__init__.py',
    'packaged_dependencies/theano_linear/imaging.py',
    'packaged_dependencies/theano_linear/unshared_conv/__init__.py',
    'packaged_dependencies/theano_linear/unshared_conv/unshared_conv.py',
    'packaged_dependencies/theano_linear/linearmixin.py',
    'packaged_dependencies/theano_linear/linear.py',
    'packaged_dependencies/theano_linear/test_spconv.py',
    'expr/activations.py',
    'expr/tests/test_probabilistic_max_pooling.py',
    'expr/tests/test_preprocessing.py',
    'expr/tests/test_nnet.py',
    'expr/tests/test_coding.py',
    'expr/tests/test_normalize.py',
    'expr/tests/test_stochastic_pool.py',
    'expr/preprocessing.py',
    'expr/image.py',
    'expr/coding.py',
    'expr/__init__.py',
    'expr/stochastic_pool.py',
    'expr/sampling.py',
    'expr/normalize.py',
    'expr/probabilistic_max_pooling.py',
    'expr/information_theory.py',
    'expr/basic.py',
    'testing/tests/test.py',
    'testing/skip.py',
    'testing/prereqs.py',
    'testing/__init__.py',
    'testing/datasets.py',
    'gui/get_weights_report.py',
    'gui/__init__.py',
    'gui/patch_viewer.py',
    'scalar.py',
    'sandbox/cuda_convnet/weight_acts.py',
    'sandbox/cuda_convnet/filter_acts.py',
    'sandbox/cuda_convnet/tests/test_filter_acts_strided.py',
    'sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py',
    'sandbox/cuda_convnet/tests/test_filter_acts.py',
    'sandbox/cuda_convnet/tests/test_img_acts.py',
    'sandbox/cuda_convnet/tests/test_response_norm.py',
    'sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py',
    'sandbox/cuda_convnet/tests/test_weight_acts.py',
    'sandbox/cuda_convnet/tests/test_rop_pool.py',
    'sandbox/cuda_convnet/tests/test_pool.py',
    'sandbox/cuda_convnet/tests/test_common.py',
    'sandbox/cuda_convnet/tests/test_stochastic_pool.py',
    'sandbox/cuda_convnet/shared_code.py',
    'sandbox/cuda_convnet/__init__.py',
    'sandbox/cuda_convnet/img_acts.py',
    'sandbox/cuda_convnet/base_acts.py',
    'sandbox/cuda_convnet/pool.py',
    'sandbox/cuda_convnet/stochastic_pool.py',
    'sandbox/cuda_convnet/code_templates.py',
    'sandbox/cuda_convnet/probabilistic_max_pooling.py',
    'sandbox/tuple_var.py',
    'sandbox/__init__.py',
    'sandbox/lisa_rl/bandit/simulator.py',
    'sandbox/lisa_rl/bandit/agent.py',
    'sandbox/lisa_rl/bandit/algorithm.py',
    'sandbox/lisa_rl/bandit/environment.py',
    'sandbox/lisa_rl/bandit/average_agent.py',
    'sandbox/lisa_rl/bandit/classifier_bandit.py',
    'sandbox/lisa_rl/bandit/__init__.py',
    'sandbox/lisa_rl/bandit/classifier_agent.py',
    'sandbox/lisa_rl/bandit/gaussian_bandit.py',
    'sandbox/lisa_rl/__init__.py',
    'config/old_config.py',
    'config/tests/test_yaml_parse.py',
    'config/yaml_parse.py',
    'space/tests/test_space.py',
    'space/__init__.py',
    'datasets/norb.py',
    'datasets/utlc.py',
    'datasets/mnistplus.py',
    'datasets/cos_dataset.py',
    'datasets/cifar10.py',
    'datasets/svhn.py',
    'datasets/tests/test_preprocessing.py',
    'datasets/tests/test_mnist.py',
    'datasets/tests/test_imports.py',
    'datasets/tests/test_cifar10.py',
    'datasets/tests/test_norb.py',
    'datasets/tests/test_dense_design_matrix.py',
    'datasets/tests/test_vector_spaces_dataset.py',
    'datasets/tests/test_four_regions.py',
    'datasets/tests/test_csv_dataset.py',
    'datasets/tests/test_icml07.py',
    'datasets/tests/test_utlc.py',
    'datasets/preprocessing.py',
    'datasets/avicenna.py',
    'datasets/iris.py',
    'datasets/config.py',
    'datasets/dense_design_matrix.py',
    'datasets/adult.py',
    'datasets/tfd.py',
    'datasets/icml07.py',
    'datasets/filetensor.py',
    'datasets/npy_npz.py',
    'datasets/hepatitis.py',
    'datasets/wiskott.py',
    'datasets/control.py',
    'datasets/exc.py',
    'datasets/__init__.py',
    'datasets/mnist.py',
    'datasets/sparse_dataset.py',
    'datasets/csv_dataset.py',
    'datasets/cifar100.py',
    'datasets/tl_challenge.py',
    'datasets/transformer_dataset.py',
    'datasets/norb_small.py',
    'datasets/retina.py',
    'datasets/ocr.py',
    'datasets/stl10.py',
    'datasets/matlab_dataset.py',
    'datasets/vector_spaces_dataset.py',
    'datasets/four_regions.py',
    'datasets/debug.py',
    'datasets/binarizer.py',
    'termination_criteria/__init__.py',
    '__init__.py',
    'utils/utlc.py',
    'utils/setup.py',
    'utils/compile.py',
    'utils/logger.py',
    'utils/general.py',
    'utils/testing.py',
    'utils/tests/test_mnist_ubyte.py',
    'utils/tests/test_data_specs.py',
    'utils/tests/test_video.py',
    'utils/tests/test_bit_strings.py',
    'utils/tests/test_rng.py',
    'utils/tests/test_pooling.py',
    'utils/tests/test_iteration.py',
    'utils/tests/test_insert_along_axis.py',
    'utils/tests/test_utlc.py',
    'utils/tests/test_compile.py',
    'utils/tests/test_key_aware.py',
    'utils/key_aware.py',
    'utils/video.py',
    'utils/bit_strings.py',
    'utils/iteration.py',
    'utils/pooling.py',
    'utils/theano_graph.py',
    'utils/common_strings.py',
    'utils/datasets.py',
    'utils/data_specs.py',
    'utils/shell.py',
    'utils/rng.py',
    'utils/insert_along_axis.py',
    'utils/environ.py',
    'utils/call_check.py',
    'utils/mnist_ubyte.py',
    'utils/track_version.py',
    'utils/mem.py',
    'utils/python26.py',
    'utils/timing.py',
    'deprecated/__init__.py',
    'deprecated/classifier.py',
    'train.py',
    'format/tests/test_target_format.py',
    'format/__init__.py',
    'dataset_get/dataset-get.py',
    'dataset_get/helper-scripts/make-sources.py',
    'dataset_get/helper-scripts/make-archive.py',
    'dataset_get/dataset_resolver.py',
    'pca.py',
    'monitor.py',
    'optimization/batch_gradient_descent.py',
    'optimization/__init__.py',
    'optimization/test_batch_gradient_descent.py',
    'optimization/linear_cg.py',
    'optimization/minres.py',
    'optimization/test_feature_sign.py',
    'optimization/feature_sign.py',
    'optimization/linesearch.py',
    'linear/conv2d.py',
    'linear/tests/test_matrixmul.py',
    'linear/local_c01b.py',
    'linear/matrixmul.py',
    'linear/__init__.py',
    'linear/linear_transform.py',
    'linear/conv2d_c01b.py',
    'energy_functions/tests/__init__.py',
    'energy_functions/rbm_energy.py',
    'energy_functions/__init__.py',
    'energy_functions/energy_function.py',
    'scripts/plot_monitor.py',
    'scripts/print_model.py',
    'scripts/tests/__init__.py',
    'scripts/pkl_inspector.py',
    'scripts/get_version.py',
    'scripts/print_monitor.py',
    'scripts/show_binocular_greyscale_examples.py',
    'scripts/num_parameters.py',
    'scripts/jobman/tester.py',
    'scripts/jobman/experiment.py',
    'scripts/jobman/__init__.py',
    'scripts/dbm/__init__.py',
    'scripts/dbm/dbm_metrics.py',
    'scripts/papers/__init__.py',
    'scripts/papers/jia_huang_wkshp_11/extract_features.py',
    'scripts/print_channel_doc.py',
    'scripts/gpu_pkl_to_cpu_pkl.py',
    'scripts/datasets/step_through_small_norb.py',
    'scripts/datasets/download_mnist.py',
    'scripts/datasets/download_binarized_mnist.py',
    'scripts/datasets/browse_small_norb.py',
    'scripts/datasets/make_mnistplus.py',
    'scripts/__init__.py',
    'scripts/gsn_example.py',
    'scripts/mlp/predict_csv.py',
    'scripts/mlp/__init__.py',
    'scripts/find_gpu_fields.py',
    'scripts/tutorials/dbm_demo/train_dbm.py',
    'scripts/tutorials/dbm_demo/__init__.py',
    'scripts/tutorials/tests/test_dbm.py',
    'scripts/tutorials/tests/test_mlp_nested.py',
    'scripts/tutorials/multilayer_perceptron/tests/test_mlp.py',
    'scripts/tutorials/softmax_regression/tests/test_softmaxreg.py',
    'scripts/tutorials/deep_trainer/__init__.py',
    'scripts/tutorials/deep_trainer/run_deep_trainer.py',
    'scripts/tutorials/grbm_smd/make_dataset.py',
    'scripts/tutorials/grbm_smd/__init__.py',
    'scripts/tutorials/grbm_smd/test_grbm_smd.py',
    'scripts/tutorials/__init__.py',
    'scripts/tutorials/jobman_demo/utils.py',
    'scripts/tutorials/jobman_demo/__init__.py',
    'scripts/tutorials/stacked_autoencoders/tests/test_dae.py',
    'scripts/icml_2013_wrepl/__init__.py',
    'scripts/icml_2013_wrepl/multimodal/extract_layer_2_kmeans_features.py',
    'scripts/icml_2013_wrepl/multimodal/make_submission.py',
    'scripts/icml_2013_wrepl/multimodal/lcn.py',
    'scripts/icml_2013_wrepl/multimodal/__init__.py',
    'scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py',
    'scripts/icml_2013_wrepl/emotions/emotions_dataset.py',
    'scripts/icml_2013_wrepl/emotions/make_submission.py',
    'scripts/icml_2013_wrepl/emotions/__init__.py',
    'scripts/icml_2013_wrepl/black_box/black_box_dataset.py',
    'scripts/icml_2013_wrepl/black_box/make_submission.py',
    'scripts/icml_2013_wrepl/black_box/__init__.py',
    'scripts/diff_monitor.py',
    'base.py',
    'devtools/tests/test_via_pyflakes.py',
    'devtools/tests/test_shebangs.py',
    'devtools/tests/__init__.py',
    'devtools/tests/docscrape.py',
    'devtools/run_pyflakes.py',
    'devtools/__init__.py',
    'devtools/record.py',
    'corruption.py',
    'datasets/tests/test_tl_challenge.py',
    'datasets/tests/test_tfd.py',
    'datasets/tests/test_npy_npz.py',
    'linear/tests/test_conv2d.py',
    'devtools/tests/pep8/pep8.py',
    'devtools/tests/pep8/__init__.py',
    'scripts/lcc_tangents/make_dataset.py',
    'scripts/icml_2013_wrepl/multimodal/make_wordlist.py',
    'scripts/datasets/make_stl10_whitened.py',
    'scripts/datasets/make_stl10_patches_8x8.py',
    'scripts/datasets/make_stl10_patches.py',
    'scripts/datasets/make_cifar10_whitened.py',
    'scripts/datasets/make_cifar10_gcn_whitened.py',
    'scripts/datasets/make_cifar100_patches.py',
    'scripts/datasets/make_cifar100_gcn_whitened.py',
    'scripts/datasets/make_svhn_pytables.py',
    'energy_functions/tests/test_rbm_energy.py',
]
# add files which fail to run to whitelist_docstrings
# (docstring_errors imports the module, so a file that raises on import
# cannot be checked at all and must be excused here).
whitelist_docstrings.extend([
    'sandbox/rnn/models/mlp_hook.py',
    'training_algorithms/tests/test_learning_rule.py',
    'models/pca.py',
    'datasets/tests/test_hdf5.py',
    'linear/tests/test_conv2d_c01b.py',
    'packaged_dependencies/theano_linear/conv2d.py',
    'packaged_dependencies/theano_linear/pyramid.py',
    'packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py',
    # Implicitly concatenated single entry (kept split for line length).
    'packaged_dependencies/theano_linear/unshared_conv/'
    'test_gpu_unshared_conv.py',
    'packaged_dependencies/theano_linear/unshared_conv/test_localdot.py',
    'packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py',
    'packaged_dependencies/theano_linear/unshared_conv/localdot.py',
    'packaged_dependencies/theano_linear/util.py',
    'packaged_dependencies/theano_linear/__init__.py',
    'packaged_dependencies/theano_linear/test_matrixmul.py',
    'packaged_dependencies/theano_linear/test_linear.py',
    'packaged_dependencies/theano_linear/spconv.py',
    'sandbox/cuda_convnet/tests/test_weight_acts_strided.py',
    'sandbox/cuda_convnet/tests/test_image_acts_strided.py',
    'sandbox/cuda_convnet/specialized_bench.py',
    'sandbox/cuda_convnet/response_norm.py',
    'sandbox/cuda_convnet/convnet_compile.py',
    'sandbox/cuda_convnet/pthreads.py',
    'sandbox/cuda_convnet/bench.py',
    'sandbox/lisa_rl/bandit/plot_reward.py',
    'sandbox/lisa_rl/bandit/simulate.py',
    'config/__init__.py',
    'utils/__init__.py',
    'optimization/test_linesearch.py',
    'optimization/test_minres.py',
    'optimization/test_linear_cg.py',
    'scripts/papers/maxout/svhn_preprocessing.py',
    'scripts/papers/maxout/compute_test_err.py',
    'scripts/papers/jia_huang_wkshp_11/fit_final_model.py',
    'scripts/papers/jia_huang_wkshp_11/evaluate.py',
    'scripts/papers/jia_huang_wkshp_11/npy2mat.py',
    'scripts/papers/jia_huang_wkshp_11/assemble.py',
    'scripts/datasets/make_cifar100_patches_8x8.py',
    'scripts/datasets/make_downsampled_stl10.py',
    'scripts/datasets/make_cifar100_whitened.py',
    'scripts/tutorials/deep_trainer/test_deep_trainer.py',
    'scripts/icml_2013_wrepl/black_box/learn_zca.py',
    'train_extensions/tests/test_window_flip.py',
    'train_extensions/window_flip.py',
    'linear/tests/test_local_c01b.py',
    'sandbox/cuda_convnet/debug.py', ])
def test_format_pep8():
    """
    Test if pep8 is respected.

    Checks every ``.py`` file under the pylearn2 package, except those
    listed in `whitelist_pep8`, and fails if any PEP8 infraction is found.
    """
    checker = StyleGuide()
    # Collect every file whose package-relative path is not whitelisted.
    candidates = [
        path for path in list_files(".py")
        if os.path.relpath(path, pylearn2.__path__[0]) not in whitelist_pep8
    ]
    report = checker.check_files(candidates)
    if report.total_errors > 0:
        raise AssertionError("PEP8 Format not respected")
def print_files_information_pep8():
    """
    Print the list of files which can be removed from the whitelist and the
    list of files which do not respect PEP8 formatting that aren't in the
    whitelist
    """
    offenders = []          # non-whitelisted files with PEP8 infractions
    clean_whitelisted = []  # whitelisted files that now pass PEP8
    checker = StyleGuide(quiet=True)
    for path in list_files(".py"):
        infractions = checker.input_file(path)
        whitelisted = (
            os.path.relpath(path, pylearn2.__path__[0]) in whitelist_pep8)
        if infractions > 0 and not whitelisted:
            offenders.append(path)
        elif infractions == 0 and whitelisted:
            clean_whitelisted.append(path)
    print("Files that must be corrected or added to whitelist:")
    for offender in offenders:
        print(offender)
    print("Files that can be removed from whitelist:")
    for clean in clean_whitelisted:
        print(clean)
def test_format_docstrings():
    """
    Test if docstrings are well formatted.

    Delegates to `verify_format_docstrings`; a SkipTest escaping from a
    module import would otherwise silently cancel the whole check, so it
    is converted into a hard failure here.
    """
    try:
        verify_format_docstrings()
    except SkipTest:
        import traceback
        # BUG FIX: traceback.print_exc() takes no exception argument (its
        # first parameter is `limit`); passing the exception object raised
        # a TypeError under Python 3. The current exception is printed
        # implicitly from inside the except block.
        traceback.print_exc()
        raise AssertionError(
            "Some file raised SkipTest on import, and inadvertently"
            " canceled the documentation testing."
        )
def verify_format_docstrings():
    """
    Implementation of `test_format_docstrings`. The implementation is
    factored out so it can be placed inside a guard against SkipTest.
    """
    infractions = []
    for path in list_files(".py"):
        rel_path = os.path.relpath(path, pylearn2.__path__[0])
        if rel_path in whitelist_docstrings:
            continue
        try:
            infractions.extend(docstring_errors(path))
        except Exception as e:
            # A file that cannot be imported cannot be checked; record
            # that fact as an infraction of its own.
            infractions.append(["%s failed to run so format cannot "
                                "be checked. Error message:\n %s" %
                                (rel_path, e)])
    if len(infractions) > 0:
        msg = "\n".join(':'.join(line) for line in infractions)
        raise AssertionError("Docstring format not respected:\n%s" % msg)
if __name__ == "__main__":
    # When run as a script (rather than under a test runner), report which
    # files should be added to or removed from the PEP8 whitelist.
    print_files_information_pep8()
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- State-list background drawable for the user header cover.
     Items are matched top-down, so the pressed state must come before
     the catch-all default drawable. -->
<selector
    xmlns:android="http://schemas.android.com/apk/res/android">
    <item android:state_enabled="true" android:state_pressed="true" android:drawable="@drawable/bg_cover_userheader_outerpress" />
    <item android:drawable="@drawable/bg_cover_userheader" />
</selector>
| {
"pile_set_name": "Github"
} |
/*
sndpipe.hh
----------
*/
#ifndef SNDPIPE_SNDPIPE_HH
#define SNDPIPE_SNDPIPE_HH
// SoundDriver
#include "SoundDriver/SoundDriver.h"
namespace sndpipe
{

#ifdef __MACOS__

	// Use the same type as MacTypes.h, with no inclusion overhead.

	typedef long Phase;
	typedef long RecID;

#else

	// For non-Mac, the exact type doesn't matter as long as the length fits.

	typedef uint32_t Phase;
	typedef uint32_t RecID;

#endif

	// Domain codes identifying the class of a message.
	enum
	{
		basic_domain = 0x0101,
		admin_domain = 0x4A4A,
		sound_domain = 0x4B4B,
	};

	// Message/mode codes.  The ftMode_* values combine flag bits in the
	// high byte with ftMode (presumably defined by SoundDriver.h, included
	// above -- confirm).
	enum
	{
		switch_on = 0x0000,  // basic
		allow_eof = 0x2e2e,  // basic ('..')
		full_stop = 0x5858,  // basic ('XX')

		ftMode_flat_buffer = ftMode | 0x0100,  // sound
		ftMode_flat_update = ftMode | 0x0200,  // admin
	};

	// Two-byte packing so the record layout matches the wire/driver
	// format exactly, with no compiler-inserted padding.
#ifdef __GNUC__
#pragma pack(push, 2)
#else
#pragma options align=packed
#endif

	// Common header of the flat four-tone synthesizer records: a mode
	// code, a record identifier, a duration, and a rate/phase pair for
	// each of the four voices.
	struct FTSynthRec_flat_header
	{
		short mode;
		RecID recID;  // This is the FTSoundRec buffer address
		short duration;
		Fixed sound1Rate;
		Phase sound1Phase;
		Fixed sound2Rate;
		Phase sound2Phase;
		Fixed sound3Rate;
		Phase sound3Phase;
		Fixed sound4Rate;
		Phase sound4Phase;
	};

#ifdef __GNUC__
#pragma pack(pop)
#else
#pragma options align=reset
#endif

	// An update record carries only the header fields.
	typedef FTSynthRec_flat_header FTSynthRec_flat_update;

	// A buffer record additionally carries the four waveforms.
	// NOTE(review): declared after the packing pragmas are reset; this is
	// harmless only if Wave needs no padding after the packed base --
	// confirm against SoundDriver.h.
	struct FTSynthRec_flat_buffer : FTSynthRec_flat_header
	{
		Wave sound1Wave;
		Wave sound2Wave;
		Wave sound3Wave;
		Wave sound4Wave;
	};

}

#endif
| {
"pile_set_name": "Github"
} |
/*
Copyright 2015 Mandar Chandorkar
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.models.sgp
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.numerics.sqrt
import breeze.stats.distributions.Gaussian
import spire.implicits._
import io.github.mandar2812.dynaml.algebra._
import io.github.mandar2812.dynaml.algebra.PartitionedMatrixOps._
import io.github.mandar2812.dynaml.algebra.PartitionedMatrixSolvers._
import io.github.mandar2812.dynaml.kernels.{LocalScalarKernel, SVMKernel}
import io.github.mandar2812.dynaml.models.{ContinuousProcessModel, SecondOrderProcessModel}
import io.github.mandar2812.dynaml.optimization.GloballyOptimizable
import io.github.mandar2812.dynaml.pipes.{DataPipe, DataPipe2}
import io.github.mandar2812.dynaml.probability
import io.github.mandar2812.dynaml.probability.{BlockedMESNRV, RandomVariable}
import io.github.mandar2812.dynaml.probability.distributions.UESN
import org.apache.log4j.Logger
import scala.reflect.ClassTag
/**
  * Implementation of the Extended Skew-Gaussian Process regression model.
  * The finite dimensional marginals are represented by the [[BlockedMESNRV]]
  * distribution of Adcock and Shutes.
  *
  * @author mandar2812 date 28/02/2017.
  *
  * */
abstract class ESGPModel[T, I: ClassTag](
cov: LocalScalarKernel[I], n: LocalScalarKernel[I],
data: T, num: Int, lambda: Double, tau: Double,
meanFunc: DataPipe[I, Double] = DataPipe((_:I) => 0.0))
extends ContinuousProcessModel[T, I, Double, BlockedMESNRV]
with SecondOrderProcessModel[T, I, Double, Double, DenseMatrix[Double], BlockedMESNRV]
with GloballyOptimizable {
private val logger = Logger.getLogger(this.getClass)

/**
  * The training data
  **/
override protected val g: T = data

/**
  * Mean Function: Takes a member of the index set (input)
  * and returns the corresponding mean of the distribution
  * corresponding to input.
  **/
override val mean = meanFunc

/**
  * Underlying covariance function of the
  * Gaussian Processes.
  **/
override val covariance = cov

// Observation noise kernel; combined with `covariance` when the training
// covariance matrix is built.
val noiseModel = n

// Hyper-parameters: the union of both kernels' parameters plus the two
// ESN-specific parameters "skewness" (lambda) and "cutoff" (tau).
override protected var hyper_parameters: List[String] =
  covariance.hyper_parameters ++ noiseModel.hyper_parameters ++ List("skewness", "cutoff")

// Current values of all hyper-parameters, keyed by name.
override protected var current_state: Map[String, Double] =
  covariance.state ++ noiseModel.state ++ Map("skewness" -> lambda, "cutoff" -> tau)
/**
  * Set the model "state", i.e. the values of its hyper-parameters
  * with respect to the covariance and noise kernels, plus the
  * ESN-specific "skewness" and "cutoff" parameters.
  *
  * @param s Map from hyper-parameter name to new value; must contain
  *          entries for "skewness" and "cutoff".
  * @return The model itself, enabling call chaining.
  * */
def setState(s: Map[String, Double]): this.type = {
  //Route each kernel its own subset of the supplied configuration.
  covariance.setHyperParameters(
    s.filterKeys(covariance.hyper_parameters.contains))
  noiseModel.setHyperParameters(
    s.filterKeys(noiseModel.hyper_parameters.contains))
  //Rebuild the cached state from the kernels, then restore the
  //ESN-specific entries.
  current_state = covariance.state ++ noiseModel.state
  current_state += ("skewness" -> s("skewness"), "cutoff" -> s("cutoff"))
  this
}
// Number of training data points.
val npoints = num

// Partition block size used by the blocked linear algebra routines.
protected var blockSize = 1000

// Training inputs, extracted lazily from the data container.
protected lazy val trainingData: Seq[I] = dataAsIndexSeq(g)

// Training targets as a partitioned vector with blocks of `_blockSize`.
protected lazy val trainingDataLabels = PartitionedVector(
  dataAsSeq(g).toStream.map(_._2),
  trainingData.length.toLong, _blockSize
)

// Setter for the partition block size.
def blockSize_(b: Int): Unit = {
  blockSize = b
}

// Getter for the partition block size.
def _blockSize: Int = blockSize

// Whether a precomputed training kernel matrix is available in
// `partitionedKernelMatrixCache`.
protected var caching: Boolean = false

// Cached training covariance matrix; presumably populated by a caching
// routine outside this view -- confirm before relying on it.
protected var partitionedKernelMatrixCache: PartitionedPSDMatrix = _
/** Calculates posterior predictive distribution for
  * a particular set of test data points.
  *
  * @param test A Sequence or Sequence like data structure
  *             storing the values of the input patterns.
  * @return The posterior predictive [[BlockedMESNRV]] over the test inputs.
  **/
override def predictiveDistribution[U <: Seq[I]](test: U) = {
  println("\nExtended Skew Gaussian Process Regression")
  println("Calculating posterior predictive distribution")
  //Calculate the prior distribution parameters
  //Skewness and cutoff
  val (l,t) = (current_state("skewness"), current_state("cutoff"))
  //Mean
  //Test: prior mean over the test inputs, assembled block by block.
  val priorMeanTest = PartitionedVector(
    test.map(mean(_))
      .grouped(_blockSize)
      .zipWithIndex.map(c => (c._2.toLong, DenseVector(c._1.toArray)))
      .toStream,
    test.length.toLong)
  //Training: prior mean over the training inputs.
  val trainingMean = PartitionedVector(
    trainingData.map(mean(_)).toStream,
    trainingData.length.toLong, _blockSize
  )
  //Calculate the skewness as a partitioned vector (the constant value l
  //replicated across every block).
  //Test
  val priorSkewnessTest = priorMeanTest.map(b => (b._1, DenseVector.fill[Double](b._2.length)(l)))
  //Training
  val skewnessTraining = trainingMean.map(b => (b._1, DenseVector.fill[Double](b._2.length)(l)))
  //The effective training kernel adds observation noise to the covariance.
  val effectiveTrainingKernel: LocalScalarKernel[I] = covariance + noiseModel
  effectiveTrainingKernel.setBlockSizes((blockSize, blockSize))
  //Calculate the kernel + noise matrix on the training data
  val smoothingMat = if(!caching) {
    println("---------------------------------------------------------------")
    println("Calculating covariance matrix for training points")
    SVMKernel.buildPartitionedKernelMatrix(trainingData,
      trainingData.length, _blockSize, _blockSize,
      effectiveTrainingKernel.evaluate)
  } else {
    println("** Using cached training matrix **")
    partitionedKernelMatrixCache
  }
  println("---------------------------------------------------------------")
  println("Calculating covariance matrix for test points")
  val kernelTest = SVMKernel.buildPartitionedKernelMatrix(
    test, test.length.toLong,
    _blockSize, _blockSize, covariance.evaluate)
  println("---------------------------------------------------------------")
  println("Calculating covariance matrix between training and test points")
  val crossKernel = SVMKernel.crossPartitonedKernelMatrix(
    trainingData, test,
    _blockSize, _blockSize,
    covariance.evaluate)
  //Solve for parameters of the posterior predictive distribution
  //(delegated to the companion object's solver).
  val (predMean, predCov, predSkewness, predCutoff) = ESGPModel.solve(
    trainingDataLabels, trainingMean, priorMeanTest,
    smoothingMat, kernelTest, crossKernel,
    skewnessTraining, priorSkewnessTest,
    t)
  BlockedMESNRV(predCutoff, predSkewness, predMean, predCov)
}
/**
  * Draw three predictions from the posterior predictive distribution
  * 1) Mean or MAP estimate Y
  * 2) Y- : The lower error bar estimate (mean - sigma*stdDeviation)
  * 3) Y+ : The upper error bar. (mean + sigma*stdDeviation)
  *
  * @param testData The test inputs.
  * @param sigma Number of standard deviations spanned by the error bars.
  * @return For each test input: (input, approximate mode, lower bar,
  *         upper bar).
  **/
override def predictionWithErrorBars[U <: Seq[I]](testData: U, sigma: Int) = {
  val stdG = new Gaussian(0.0, 1.0)
  //Calculate the confidence interval alpha, corresponding to the value of sigma
  val alpha = 1.0 - (stdG.cdf(sigma.toDouble) - stdG.cdf(-1.0*sigma.toDouble))
  //0x03B1 is the Unicode code point of the Greek letter alpha.
  print("Calculated confidence bound: "+0x03B1.toChar+" = ")
  pprint.pprintln(alpha)
  //Parameters of the posterior predictive distribution.
  val BlockedMESNRV(pTau, pLambda, postmean, postcov) = predictiveDistribution(testData)
  //Marginal variances: the diagonal of the posterior covariance.
  val varD: PartitionedVector = bdiag(postcov)
  //Flatten each partitioned quantity into per-block arrays.
  val stdDev = varD._data.map(c => (c._1, sqrt(c._2))).map(_._2.toArray)
  val mean = postmean._data.map(_._2.toArray)
  val lambda = pLambda._data.map(_._2.toArray)
  println("Generating (marginal) error bars using a buffered approach")
  //Zip the per-block mean/std-dev/skewness arrays into one buffer each.
  val zippedBufferedParams = mean.zip(stdDev).zip(lambda).map(c => (c._1._1, c._1._2, c._2))
  val predictions = zippedBufferedParams.flatMap(buffer => {
    val (muBuff, sBuff, lBuff) = buffer
    muBuff.zip(sBuff).zip(lBuff).map(c => {
      val (mu, s, l) = (c._1._1, c._1._2, c._2)
      //Approximate mode and alpha-level order statistics of the
      //univariate extended skew-normal marginal at this point.
      val (_, _, appMode, lower, higher) =
        probability.OrderStats(RandomVariable(UESN(pTau, l, mu, s)), alpha)
      (appMode, lower, higher)
    })
  })
  predictions.zip(testData).map(c => (c._2, c._1._1, c._1._2, c._1._3))
}
/**
* Returns a [[DataPipe2]] which calculates the energy of data: [[T]].
* See: [[energy]] below.
*
* The returned pipe sets the model state to the supplied
* hyper-parameters and evaluates the negative log likelihood of the
* training labels under the implied blocked MESN prior.
* */
def calculateEnergyPipe(h: Map[String, Double], options: Map[String, String]) =
DataPipe2((training: Seq[I], trainingLabels: PartitionedVector) => {
setState(h)
//Extract the skewness and cutoff hyper-parameters from the current state
val (l,t) = (current_state("skewness"), current_state("cutoff"))
//Prior mean over the training inputs, as a partitioned vector
val trainingMean = PartitionedVector(
training.toStream.map(mean(_)),
training.length.toLong, _blockSize
)
//Constant skewness vector: one entry (value l) per training point
val skewnessTraining = trainingMean.map(b => (b._1, DenseVector.fill[Double](b._2.length)(l)))
val effectiveTrainingKernel: LocalScalarKernel[I] = this.covariance + this.noiseModel
effectiveTrainingKernel.setBlockSizes((_blockSize, _blockSize))
//Kernel + noise matrix over the training inputs
val kernelTraining: PartitionedPSDMatrix =
effectiveTrainingKernel.buildBlockedKernelMatrix(training, training.length)
ESGPModel.logLikelihood(
trainingLabels, t, skewnessTraining,
trainingMean, kernelTraining)
})
/**
* Calculates the energy of the configuration,
* in most global optimization algorithms
* we aim to find an approximate value of
* the hyper-parameters such that this function
* is minimized.
*
* @param h The value of the hyper-parameters in the configuration space
* @param options Optional parameters about configuration
* (currently not consumed inside [[calculateEnergyPipe]])
* @return Configuration Energy E(h)
**/
override def energy(h: Map[String, Double], options: Map[String, String]) =
calculateEnergyPipe(h, options)(trainingData, trainingDataLabels)
/**
* Predict the value of the
* target variable given a
* point.
*
* Returns the approximate mode (MAP estimate) of the posterior
* predictive marginal; 2-sigma error bars are computed internally
* but discarded.
**/
override def predict(point: I) = predictionWithErrorBars(Seq(point), 2).head._2
/**
* Cache the training kernel and noise matrices
* for fast access in future predictions.
* */
override def persist(state: Map[String, Double]): Unit = {
//Set the hyperparameters to state
setState(state)
val effectiveTrainingKernel: LocalScalarKernel[I] = covariance + noiseModel
//NOTE(review): block sizes come from `blockSize` here but the matrix below
//is built with `_blockSize` — presumably the two agree; verify.
effectiveTrainingKernel.setBlockSizes((blockSize, blockSize))
//Calculate the kernel matrix over the training data.
partitionedKernelMatrixCache =
SVMKernel.buildPartitionedKernelMatrix(
trainingData, trainingData.length,
_blockSize, _blockSize, effectiveTrainingKernel.evaluate)
//Set the caching flag to true
caching = true
}
/**
* Forget the cached kernel & noise matrices.
* */
def unpersist(): Unit = {
//Drop the reference so the cached matrix may be garbage collected
partitionedKernelMatrixCache = null
caching = false
}
}
object ESGPModel {
/**
* Calculate the negative log likelihood of data for a
* multivariate extended skew normal model.
*
* Returns positive infinity when the underlying linear algebra fails
* to converge or the covariance is not symmetric, so that global
* optimisers treat such configurations as infeasible.
* */
def logLikelihood(
y: PartitionedVector, tau: Double,
skewness: PartitionedVector, center: PartitionedVector,
//NOTE(review): "covarince" is a typo for "covariance"; kept as-is because
//renaming the parameter could break callers that use named arguments.
covarince:PartitionedPSDMatrix): Double = {
try {
val distribution = BlockedMESNRV(tau, skewness, center, covarince)
-1.0*distribution.underlyingDist.logPdf(y)
} catch {
case _: breeze.linalg.NotConvergedException => Double.PositiveInfinity
case _: breeze.linalg.MatrixNotSymmetricException => Double.PositiveInfinity
}
}
/**
* Calculate the parameters of the posterior predictive distribution
* for a multivariate extended skew normal model.
*
* @return A 4-tuple of (posterior mean, posterior covariance,
* posterior skewness, posterior cutoff).
* */
def solve(
trainingLabels: PartitionedVector,
trainingMean: PartitionedVector,
priorMeanTest: PartitionedVector,
smoothingMat: PartitionedPSDMatrix,
kernelTest: PartitionedPSDMatrix,
crossKernel: PartitionedMatrix,
skewnessTraining: PartitionedVector,
priorSkewnessTest: PartitionedVector,
priorCutoff: Double): (PartitionedVector, PartitionedPSDMatrix, PartitionedVector, Double) = {
//Blocked Cholesky factorisation of the (kernel + noise) training matrix
val Lmat: LowerTriPartitionedMatrix = bcholesky(smoothingMat)
//Solve triangular systems instead of forming an explicit inverse
val alpha: PartitionedVector = Lmat.t \\ (Lmat \\ (trainingLabels-trainingMean))
val beta : PartitionedVector = Lmat.t \\ (Lmat \\ skewnessTraining)
val delta: Double = 1.0/sqrt(1.0 + (skewnessTraining dot beta))
val v: PartitionedMatrix = Lmat \\ crossKernel
val varianceReducer: PartitionedMatrix = v.t * v
//Ensure that the variance reduction is symmetric
val adjustedVarReducer: PartitionedMatrix = varianceReducer
/*(varianceReducer.L + varianceReducer.L.t).map(bm =>
if(bm._1._1 == bm._1._2) (bm._1, bm._2*(DenseMatrix.eye[Double](bm._2.rows)*0.5))
else bm)*/
//Only the lower-triangular blocks are kept when building the PSD matrix
val reducedVariance: PartitionedPSDMatrix =
new PartitionedPSDMatrix(
(kernelTest - adjustedVarReducer).filterBlocks(c => c._1 >= c._2),
kernelTest.rows, kernelTest.cols)
(
priorMeanTest + crossKernel.t * alpha,
reducedVariance,
(priorSkewnessTest - crossKernel.t * beta)*delta,
(priorCutoff + (skewnessTraining dot alpha))*delta)
}
/**
* Create an instance of [[ESGPModel]] for a
* particular data type [[T]]
*
* @tparam T The type of the training data
* @tparam I The type of the input patterns in the data set of type [[T]]
* @param cov The covariance function
* @param noise The noise covariance function
* @param meanFunc The trend or mean function
* @param trainingdata The actual data set of type [[T]]
* @param num The number of data points; inferred from the data when 0
* @param lambda Skewness parameter
* @param tau Cut off parameter
* @param transform An implicit conversion from [[T]] to [[Seq]] represented as a [[DataPipe]]
* */
def apply[T, I: ClassTag](
cov: LocalScalarKernel[I], noise: LocalScalarKernel[I],
meanFunc: DataPipe[I, Double], lambda: Double, tau: Double)(
trainingdata: T, num: Int = 0)(
implicit transform: DataPipe[T, Seq[(I, Double)]]) = {
//If num is not given (or non-positive) use the full size of the data set
val num_points = if(num > 0) num else transform(trainingdata).length
new ESGPModel[T, I](cov, noise, trainingdata, num_points, lambda, tau, meanFunc) {
/**
* Convert from the underlying data structure to
* Seq[(I, Y)] where I is the index set of the GP
* and Y is the value/label type.
**/
override def dataAsSeq(data: T) = transform(data)
}
}
}
} | {
"pile_set_name": "Github"
} |
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose: Antlion Grub - cannon fodder
//
// $Workfile: $
// $Date: $
// $NoKeywords: $
//=============================================================================//
#ifndef NPC_ANTLIONGRUB_H
#define NPC_ANTLIONGRUB_H
// NOTE(review): this header currently declares nothing; the guard closes
// immediately, so any future declarations must be added BEFORE the #endif
// or they will fall outside the include guard.
#endif //NPC_ANTLIONGRUB_H
"pile_set_name": "Github"
} |
#!/bin/bash
# Launch model training, forwarding data paths and hyper-parameters to
# train.py.
#
# Usage: <script> TRAINING_PATH VALIDATION_PATH SAVE_MODEL_DIR \
#                 BATCH_SIZE EMBEDDING_SIZE OPTIMIZER LR TASK TRACK

# Run from the directory containing this script (and train.py).
# $(...) replaces the legacy backtick substitution; quoting guards
# against paths that contain spaces.
cd "$(dirname "$0")" || exit 1
echo "$(pwd)"

# Fail fast with a usage message instead of letting train.py crash on
# missing arguments.
if [ "$#" -ne 9 ]; then
    echo "usage: $0 training_path validation_path save_model_dir batch_size embedding_size optimizer lr task track" >&2
    exit 1
fi

training_path=$1
validation_path=$2
echo "training path: " "$training_path"
echo "validation path: " "$validation_path"
save_model_dir=$3
echo "save model on: " "$save_model_dir"
batch_size=$4
embedding_size=$5
echo "batch size: " "$batch_size"
echo "embedding size: " "$embedding_size"
optimizer=$6
lr=$7
task=$8
track=$9
echo "task: " "$task"
echo "track: " "$track"

# -p: succeed (quietly) if the directory already exists.
mkdir -p "$save_model_dir"

python train.py \
    --training_path "$training_path" \
    --validation_path "$validation_path" \
    --save_model_dir "$save_model_dir" \
    --batch_size "$batch_size" \
    --embedding_size "$embedding_size" \
    --lr "$lr" \
    --task "$task" \
    --track "$track" \
    --optimizer "$optimizer"
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright 2011 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.tests;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.Input;
import com.badlogic.gdx.InputAdapter;
import com.badlogic.gdx.InputMultiplexer;
import com.badlogic.gdx.InputProcessor;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.g2d.ParticleEffect;
import com.badlogic.gdx.graphics.g2d.ParticleEffectPool;
import com.badlogic.gdx.graphics.g2d.ParticleEffectPool.PooledEffect;
import com.badlogic.gdx.graphics.g2d.ParticleEmitter;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.scenes.scene2d.Actor;
import com.badlogic.gdx.scenes.scene2d.Stage;
import com.badlogic.gdx.scenes.scene2d.ui.Button;
import com.badlogic.gdx.scenes.scene2d.ui.CheckBox;
import com.badlogic.gdx.scenes.scene2d.ui.CheckBox.CheckBoxStyle;
import com.badlogic.gdx.scenes.scene2d.ui.Label;
import com.badlogic.gdx.scenes.scene2d.ui.Label.LabelStyle;
import com.badlogic.gdx.scenes.scene2d.ui.Skin;
import com.badlogic.gdx.scenes.scene2d.ui.Table;
import com.badlogic.gdx.scenes.scene2d.ui.TextButton;
import com.badlogic.gdx.scenes.scene2d.ui.TextButton.TextButtonStyle;
import com.badlogic.gdx.scenes.scene2d.utils.ChangeListener;
import com.badlogic.gdx.tests.utils.GdxTest;
import com.badlogic.gdx.utils.Array;
import com.badlogic.gdx.utils.viewport.ExtendViewport;
/** Interactive stress test for pooled 2D particle effects.
 * <p>
 * Each touch/click obtains an effect from a {@link ParticleEffectPool} and spawns it at the touch point; dragging moves the
 * most recently spawned effect. A small scene2d UI toggles emitter blend-function clean-up, clears all spawned effects, and
 * scales the existing effects. FPS and render-call statistics are logged every three seconds. */
public class ParticleEmittersTest extends GdxTest {
	private SpriteBatch spriteBatch;
	ParticleEffect effect;
	ParticleEffectPool effectPool;
	// Fix: parameterize the generic type instead of using the raw type.
	Array<PooledEffect> effects = new Array<PooledEffect>();
	PooledEffect latestEffect;
	float fpsCounter; // seconds accumulated since the last stats log
	Stage ui;
	CheckBox skipCleanup;
	Button clearEmitters, scaleEffects;
	Label logLabel;

	@Override
	public void create () {
		spriteBatch = new SpriteBatch();
		// Template effect; the pool clones it for every spawned instance.
		effect = new ParticleEffect();
		effect.load(Gdx.files.internal("data/singleTextureAllAdditive.p"), Gdx.files.internal("data"));
		effect.setPosition(Gdx.graphics.getWidth() / 2, Gdx.graphics.getHeight() / 2);
		effectPool = new ParticleEffectPool(effect, 20, 20);

		setupUI();

		InputProcessor inputProcessor = new InputAdapter() {
			@Override
			public boolean touchDragged (int x, int y, int pointer) {
				// Screen y grows downward; flip into world coordinates.
				if (latestEffect != null) latestEffect.setPosition(x, Gdx.graphics.getHeight() - y);
				return false;
			}

			@Override
			public boolean touchDown (int x, int y, int pointer, int newParam) {
				latestEffect = effectPool.obtain();
				latestEffect.setEmittersCleanUpBlendFunction(!skipCleanup.isChecked());
				latestEffect.setPosition(x, Gdx.graphics.getHeight() - y);
				effects.add(latestEffect);
				return false;
			}
		};

		// UI gets first chance at input; unhandled events spawn/move effects.
		InputMultiplexer multiplexer = new InputMultiplexer();
		multiplexer.addProcessor(ui);
		multiplexer.addProcessor(inputProcessor);
		Gdx.input.setInputProcessor(multiplexer);
	}

	@Override
	public void dispose () {
		spriteBatch.dispose();
		effect.dispose();
	}

	@Override
	public void resize (int width, int height) {
		ui.getViewport().update(width, height);
	}

	@Override
	public void render () {
		ui.act();

		spriteBatch.getProjectionMatrix().setToOrtho2D(0, 0, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
		float delta = Gdx.graphics.getDeltaTime();
		Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
		spriteBatch.begin();
		for (ParticleEffect e : effects)
			e.draw(spriteBatch, delta);
		spriteBatch.end();

		// Log (and show) stats roughly every three seconds.
		fpsCounter += delta;
		if (fpsCounter > 3) {
			fpsCounter = 0;
			String log = effects.size + " particle effects, FPS: " + Gdx.graphics.getFramesPerSecond() + ", Render calls: "
				+ spriteBatch.renderCalls;
			Gdx.app.log("libgdx", log);
			logLabel.setText(log);
		}

		ui.draw();
	}

	public boolean needsGL20 () {
		return false;
	}

	/** Builds the scene2d controls (clean-up toggle, clear/scale buttons, stats label). */
	private void setupUI () {
		ui = new Stage(new ExtendViewport(640, 480));
		Skin skin = new Skin(Gdx.files.internal("data/uiskin.json"));
		skipCleanup = new CheckBox("Skip blend function clean-up", skin);
		skipCleanup.addListener(listener);
		logLabel = new Label("", skin.get(LabelStyle.class));
		clearEmitters = new TextButton("Clear screen", skin);
		clearEmitters.addListener(listener);
		scaleEffects = new TextButton("Scale existing effects", skin);
		scaleEffects.addListener(listener);
		Table table = new Table();
		table.setTransform(false);
		table.setFillParent(true);
		table.defaults().padTop(5).left();
		table.top().left().padLeft(5);
		table.add(skipCleanup).colspan(2).row();
		table.add(clearEmitters).spaceRight(10);
		table.add(scaleEffects).row();
		table.add(logLabel).colspan(2);
		ui.addActor(table);
	}

	/** Applies the current clean-up checkbox state to every live emitter. */
	void updateSkipCleanupState () {
		for (ParticleEffect eff : effects) {
			for (ParticleEmitter e : eff.getEmitters())
				e.setCleansUpBlendFunction(!skipCleanup.isChecked());
		}
	}

	/** Single listener shared by all three controls; dispatches on the event's actor. */
	ChangeListener listener = new ChangeListener() {
		@Override
		public void changed (ChangeEvent event, Actor actor) {
			if (actor == skipCleanup) {
				updateSkipCleanupState();
			} else if (actor == clearEmitters) {
				// Return pooled effects before forgetting them to avoid leaks.
				for (PooledEffect e : effects)
					e.free();
				effects.clear();
			} else if (actor == scaleEffects) {
				for (ParticleEffect eff : effects) {
					eff.scaleEffect(1.5f);
				}
			}
		}
	};
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2006-2008 The FLWOR Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef ZORBA_TYPES_DURATION_H
#define ZORBA_TYPES_DURATION_H
#include <zorba/config.h>
#include "zorbatypes_decl.h"
#include "util/ascii_util.h"
#include "zorbatypes/zstring.h"
namespace zorba
{
class Duration;
namespace serialization
{
class Archiver;
void operator&(Archiver& ar, Duration& obj);
}
/*******************************************************************************
The lexical format of duration is : PnYn MnDTnH nMnS
See http://www.w3.org/TR/xmlschema-2/#duration for details.
Note: class Timezone (in timezone.h) is a subclass of Duration
********************************************************************************/
// Represents an XML Schema duration value; depending on `facet` the same
// class also models the xs:yearMonthDuration and xs:dayTimeDuration
// subtypes. Values are stored sign-magnitude: `is_negative` plus seven
// non-negative components indexed by DATA_TYPE.
class Duration
{
friend void serialization::operator&(serialization::Archiver& ar, Duration& obj);
public:
// Which duration subtype this instance models.
typedef enum
{
DURATION_FACET = 0,
YEARMONTHDURATION_FACET = 1,
DAYTIMEDURATION_FACET = 2
} FACET_TYPE;
static const int FRAC_SECONDS_UPPER_LIMIT; // = 1000000, maximum 6 digits
protected:
// Index of each component within the `data` array below.
typedef enum
{
YEAR_DATA = 0,
MONTH_DATA = 1,
DAY_DATA = 2,
HOUR_DATA = 3,
MINUTE_DATA = 4,
SECONDS_DATA = 5,
FRACSECONDS_DATA = 6
} DATA_TYPE;
protected:
FACET_TYPE facet;
bool is_negative;
// One slot per DATA_TYPE entry (YEAR_DATA .. FRACSECONDS_DATA).
long data[7];
public:
/**
* Returns 0 on success, 1 on nonspecified error and 2 on integer overflow
*
*/
static int parseDuration(
const char* str,
ascii::size_type strlen,
Duration& d);
/**
* Returns 0 on success, 1 on nonspecified error and 2 on integer overflow
*/
static int parseYearMonthDuration(
const char* str,
ascii::size_type strlen,
Duration& d);
/**
* Returns 0 on success, 1 on nonspecified error and 2 on integer overflow
*/
static int parseDayTimeDuration(
const char* str,
ascii::size_type strlen,
Duration& d,
bool dont_check_letter_p = false);
/**
* Returns 0 on success
*/
static int fromTimezone(const TimeZone& t, Duration& d);
public:
Duration();
Duration(FACET_TYPE facet_type);
/**
* The function will use the absolute values of all long parameters. The
* sign of the duration will be set by the sign of the longest time unit
* different from 0. E.g. if years are not equal to 0, their sign will be
* the sign of the duration, then months will be checked, then days, etc.
*/
Duration(
FACET_TYPE facet_type,
long years,
long months,
long days,
long hours,
long minutes,
double seconds);
/**
* The function will use the absolute values of all long parameters.
*/
Duration(
FACET_TYPE facet_type,
bool negative,
long years,
long months,
long days,
long hours,
long minutes,
double seconds);
/**
* The function will use the absolute values of all long parameters.
*/
Duration(
FACET_TYPE facet_type,
bool negative,
long years,
long months,
long days,
long hours,
long minutes,
int seconds,
int frac_seconds);
virtual ~Duration() {}
// Equality and three-way comparison; compare() may optionally ignore sign.
bool operator==(const Duration& d) const;
int compare(const Duration& d, bool ignore_sign = false) const;
// Canonical lexical representation (PnYnMnDTnHnMnS).
zstring toString() const;
// Facet conversions: each returns a newly allocated Duration.
Duration* toDuration() const;
Duration* toNegDuration() const;
Duration* toYearMonthDuration() const;
Duration* toDayTimeDuration() const;
// Arithmetic; each returns a newly allocated result.
Duration* operator+(const Duration& d) const;
Duration* operator-(const Duration& d) const;
Duration* operator*(const Double& value) const;
Duration* operator/(const Double& value) const;
Decimal operator/(const Duration& d) const;
virtual bool isNegative() const;
bool isZero() const;
// Component accessors.
long getYears() const;
long getMonths() const;
long getDays() const;
virtual long getHours() const;
virtual long getMinutes() const;
virtual Decimal getSeconds() const;
virtual long getFractionalSeconds() const;
virtual long getIntSeconds() const;
Double getTotalSeconds() const;
long getTotalMilliseconds() const;
uint32_t hash() const;
FACET_TYPE getFacet() const { return facet; };
protected:
// Canonicalize components (carry overflowed units) and clamp to the facet.
void normalize();
void adjustToFacet();
void setFacet(FACET_TYPE a_facet);
};
} /* namespace zorba */
#endif
/*
* Local variables:
* mode: c++
* End:
*/
/* vim:set et sw=2 ts=2: */
| {
"pile_set_name": "Github"
} |
/*
MAPI Implementation
OpenChange Project
Copyright (C) Julien Kerihuel 2006
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* parser auto-generated by mparse */
/* MAPI status/error codes. Values follow the Windows HRESULT layout:
 * 0x00000000 is success, 0x8004xxxx are MAPI facility errors, and the
 * MAPI_W_* members are warnings. Printed in hex (NDR_PAHEX). */
typedef [public, v1_enum, flag(NDR_PAHEX)] enum {
	MAPI_E_SUCCESS = 0x00000000,
	MAPI_E_NO_SUPPORT = 0x80040102,
	MAPI_E_BAD_CHARWIDTH = 0x80040103,
	MAPI_E_STRING_TOO_LONG = 0x80040105,
	MAPI_E_UNKNOWN_FLAGS = 0x80040106,
	MAPI_E_INVALID_ENTRYID = 0x80040107,
	MAPI_E_INVALID_OBJECT = 0x80040108,
	MAPI_E_OBJECT_CHANGED = 0x80040109,
	MAPI_E_OBJECT_DELETED = 0x8004010A,
	MAPI_E_BUSY = 0x8004010B,
	MAPI_E_NOT_ENOUGH_DISK = 0x8004010D,
	MAPI_E_NOT_ENOUGH_RESOURCES = 0x8004010E,
	MAPI_E_NOT_FOUND = 0x8004010F,
	MAPI_E_VERSION = 0x80040110,
	MAPI_E_LOGON_FAILED = 0x80040111,
	MAPI_E_SESSION_LIMIT = 0x80040112,
	MAPI_E_USER_CANCEL = 0x80040113,
	MAPI_E_UNABLE_TO_ABORT = 0x80040114,
	MAPI_E_NETWORK_ERROR = 0x80040115,
	MAPI_E_DISK_ERROR = 0x80040116,
	MAPI_E_TOO_COMPLEX = 0x80040117,
	MAPI_E_BAD_COLUMN = 0x80040118,
	MAPI_E_EXTENDED_ERROR = 0x80040119,
	MAPI_E_COMPUTED = 0x8004011A,
	MAPI_E_CORRUPT_DATA = 0x8004011B,
	MAPI_E_UNCONFIGURED = 0x8004011C,
	MAPI_E_FAILONEPROVIDER = 0x8004011D,
	MAPI_E_UNKNOWN_CPID = 0x8004011E,
	MAPI_E_UNKNOWN_LCID = 0x8004011F,
	MAPI_E_PASSWORD_CHANGE_REQUIRED = 0x80040120,
	MAPI_E_PASSWORD_EXPIRED = 0x80040121,
	MAPI_E_INVALID_WORKSTATION_ACCOUNT = 0x80040122,
	MAPI_E_INVALID_ACCESS_TIME = 0x80040123,
	MAPI_E_ACCOUNT_DISABLED = 0x80040124,
	MAPI_E_END_OF_SESSION = 0x80040200,
	MAPI_E_UNKNOWN_ENTRYID = 0x80040201,
	MAPI_E_MISSING_REQUIRED_COLUMN = 0x80040202,
	MAPI_W_NO_SERVICE = 0x80040203,
	MAPI_E_BAD_VALUE = 0x80040301,
	MAPI_E_INVALID_TYPE = 0x80040302,
	MAPI_E_TYPE_NO_SUPPORT = 0x80040303,
	MAPI_E_UNEXPECTED_TYPE = 0x80040304,
	MAPI_E_TOO_BIG = 0x80040305,
	MAPI_E_DECLINE_COPY = 0x80040306,
	MAPI_E_UNEXPECTED_ID = 0x80040307,
	MAPI_W_ERRORS_RETURNED = 0x80040380,
	MAPI_E_UNABLE_TO_COMPLETE = 0x80040400,
	MAPI_E_TIMEOUT = 0x80040401,
	MAPI_E_TABLE_EMPTY = 0x80040402,
	MAPI_E_TABLE_TOO_BIG = 0x80040403,
	MAPI_E_INVALID_BOOKMARK = 0x80040405,
	MAPI_W_POSITION_CHANGED = 0x80040481,
	MAPI_W_APPROX_COUNT = 0x80040482,
	MAPI_E_WAIT = 0x80040500,
	MAPI_E_CANCEL = 0x80040501,
	MAPI_E_NOT_ME = 0x80040502,
	MAPI_W_CANCEL_MESSAGE = 0x80040580,
	MAPI_E_CORRUPT_STORE = 0x80040600,
	MAPI_E_NOT_IN_QUEUE = 0x80040601,
	MAPI_E_NO_SUPPRESS = 0x80040602,
	MAPI_E_COLLISION = 0x80040604,
	MAPI_E_NOT_INITIALIZED = 0x80040605,
	MAPI_E_NON_STANDARD = 0x80040606,
	MAPI_E_NO_RECIPIENTS = 0x80040607,
	MAPI_E_SUBMITTED = 0x80040608,
	MAPI_E_HAS_FOLDERS = 0x80040609,
	MAPI_E_HAS_MESSAGES = 0x8004060A,
	MAPI_E_FOLDER_CYCLE = 0x8004060B,
	MAPI_W_PARTIAL_COMPLETION = 0x80040680,
	MAPI_E_AMBIGUOUS_RECIP = 0x80040700,
	/* Sentinel: forces the enum to 32 bits. */
	MAPI_E_RESERVED = 0xFFFFFFFF
} MAPISTATUS;
| {
"pile_set_name": "Github"
} |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.initializer import Constant
from ppdet.core.workspace import register, serializable
from numbers import Integral
from .nonlocal_helper import add_space_nonlocal
from .name_adapter import NameAdapter
from .resnet import ResNet, ResNetC5
__all__ = ['Res2Net', 'Res2NetC5']
@register
@serializable
class Res2Net(ResNet):
    """
    Res2Net, see https://arxiv.org/abs/1904.01169

    Args:
        depth (int): Res2Net depth, should be 50, 101, 152, 200.
        width (int): Res2Net width
        scales (int): Res2Net scale
        freeze_at (int): freeze the backbone at which stage
        norm_type (str): normalization type, 'bn'/'sync_bn'/'affine_channel'
        freeze_norm (bool): freeze normalization layers
        norm_decay (float): weight decay for normalization layer weights
        variant (str): Res2Net variant, supports 'a', 'b', 'c', 'd' currently
        feature_maps (list): index of stages whose feature maps are returned
        dcn_v2_stages (list): index of stages who select deformable conv v2
        nonlocal_stages (list): index of stages who select nonlocal networks
    """
    __shared__ = ['norm_type', 'freeze_norm', 'weight_prefix_name']

    def __init__(
            self,
            depth=50,
            width=26,
            scales=4,
            freeze_at=2,
            norm_type='bn',
            freeze_norm=True,
            norm_decay=0.,
            variant='b',
            feature_maps=[2, 3, 4, 5],
            dcn_v2_stages=[],
            weight_prefix_name='',
            nonlocal_stages=[], ):
        super(Res2Net, self).__init__(
            depth=depth,
            freeze_at=freeze_at,
            norm_type=norm_type,
            freeze_norm=freeze_norm,
            norm_decay=norm_decay,
            variant=variant,
            feature_maps=feature_maps,
            dcn_v2_stages=dcn_v2_stages,
            weight_prefix_name=weight_prefix_name,
            nonlocal_stages=nonlocal_stages)

        # Fix: the original message string lacked the '{}' placeholder, so
        # .format(depth) never inserted the offending value into the error.
        assert depth >= 50, \
            "just support depth>=50 in res2net, but got depth={}".format(depth)

        # res2net config
        self.scales = scales
        self.width = width
        basic_width = self.width * self.scales
        # Per-stage channel counts: num_filters1 feeds the multi-scale 3x3
        # branches, num_filters2 is the output of the closing 1x1 conv.
        self.num_filters1 = [basic_width * t for t in [1, 2, 4, 8]]
        self.num_filters2 = [256 * t for t in [1, 2, 4, 8]]
        self.num_filters = [64, 128, 384, 768]

    def bottleneck(self,
                   input,
                   num_filters1,
                   num_filters2,
                   stride,
                   is_first,
                   name,
                   dcn_v2=False):
        """Res2Net bottleneck: a 1x1 conv, `scales` hierarchical 3x3
        branches (each fed the previous branch's output), a closing 1x1
        conv, and a residual shortcut.

        Args:
            input (Variable): input feature map.
            num_filters1 (int): channels of the opening 1x1 conv (split
                across the scale branches).
            num_filters2 (int): channels of the closing 1x1 conv.
            stride (int): stride of the 3x3 branches (2 downsamples).
            is_first (bool): whether this is the first block of the stage.
            name (str): parameter-name prefix.
            dcn_v2 (bool): use deformable conv v2 in the 3x3 branches.

        Returns:
            Variable: output feature map of the block.
        """
        conv0 = self._conv_norm(
            input=input,
            num_filters=num_filters1,
            filter_size=1,
            stride=1,
            act='relu',
            name=name + '_branch2a')

        xs = fluid.layers.split(conv0, self.scales, 1)
        ys = []
        for s in range(self.scales - 1):
            # Branch s reuses the previous branch's output except for the
            # first branch, or when downsampling (shapes would mismatch).
            if s == 0 or stride == 2:
                ys.append(
                    self._conv_norm(
                        input=xs[s],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1),
                        dcn_v2=dcn_v2))
            else:
                ys.append(
                    self._conv_norm(
                        input=xs[s] + ys[-1],
                        num_filters=num_filters1 // self.scales,
                        stride=stride,
                        filter_size=3,
                        act='relu',
                        name=name + '_branch2b_' + str(s + 1),
                        dcn_v2=dcn_v2))

        # The last split passes through unchanged (or avg-pooled when
        # downsampling so its spatial size matches the other branches).
        if stride == 1:
            ys.append(xs[-1])
        else:
            ys.append(
                fluid.layers.pool2d(
                    input=xs[-1],
                    pool_size=3,
                    pool_stride=stride,
                    pool_padding=1,
                    pool_type='avg'))

        conv1 = fluid.layers.concat(ys, axis=1)
        conv2 = self._conv_norm(
            input=conv1,
            num_filters=num_filters2,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        short = self._shortcut(
            input, num_filters2, stride, is_first, name=name + "_branch1")

        return fluid.layers.elementwise_add(
            x=short, y=conv2, act='relu', name=name + ".add.output.5")

    def layer_warp(self, input, stage_num):
        """
        Args:
            input (Variable): input variable.
            stage_num (int): the stage number, should be 2, 3, 4, 5

        Returns:
            The last variable in endpoint-th stage.
        """
        assert stage_num in [2, 3, 4, 5]

        stages, block_func = self.depth_cfg[self.depth]
        count = stages[stage_num - 2]

        ch_out = self.stage_filters[stage_num - 2]
        is_first = False if stage_num != 2 else True
        dcn_v2 = True if stage_num in self.dcn_v2_stages else False

        num_filters1 = self.num_filters1[stage_num - 2]
        num_filters2 = self.num_filters2[stage_num - 2]

        # Insert a non-local block every `nonlocal_mod` residual blocks;
        # 1000 effectively disables it for stages not in nonlocal_stages.
        nonlocal_mod = 1000
        if stage_num in self.nonlocal_stages:
            nonlocal_mod = self.nonlocal_mod_cfg[
                self.depth] if stage_num == 4 else 2

        # Make the layer name and parameter name consistent
        # with ImageNet pre-trained model
        conv = input
        for i in range(count):
            conv_name = self.na.fix_layer_warp_name(stage_num, count, i)
            if self.depth < 50:
                is_first = True if i == 0 and stage_num == 2 else False

            conv = block_func(
                input=conv,
                num_filters1=num_filters1,
                num_filters2=num_filters2,
                stride=2 if i == 0 and stage_num != 2 else 1,
                is_first=is_first,
                name=conv_name,
                dcn_v2=dcn_v2)

            # add non local model
            dim_in = conv.shape[1]
            nonlocal_name = "nonlocal_conv{}".format(stage_num)
            if i % nonlocal_mod == nonlocal_mod - 1:
                conv = add_space_nonlocal(conv, dim_in, dim_in,
                                          nonlocal_name + '_{}'.format(i),
                                          int(dim_in / 2))
        return conv
@register
@serializable
class Res2NetC5(Res2Net):
    # Reuse the parent's docstring; the constructor arguments are identical.
    __doc__ = Res2Net.__doc__

    def __init__(self,
                 depth=50,
                 width=26,
                 scales=4,
                 freeze_at=2,
                 norm_type='bn',
                 freeze_norm=True,
                 norm_decay=0.,
                 variant='b',
                 feature_maps=[5],
                 weight_prefix_name=''):
        # Fix: weight_prefix_name was accepted but silently dropped; pass it
        # through to the base class so prefixed weights load correctly.
        super(Res2NetC5, self).__init__(
            depth,
            width,
            scales,
            freeze_at,
            norm_type,
            freeze_norm,
            norm_decay,
            variant,
            feature_maps,
            weight_prefix_name=weight_prefix_name)
        # C5 variant: the stage-5 head is detached for RoI feature extraction.
        self.severed_head = True
| {
"pile_set_name": "Github"
} |
/*=========================================================================
Program: Visualization Toolkit
Module: ArraySparseArrayToTable.cxx
-------------------------------------------------------------------------
Copyright 2008 Sandia Corporation.
Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
the U.S. Government retains certain rights in this software.
-------------------------------------------------------------------------
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include <vtkAbstractArray.h>
#include <vtkArrayData.h>
#include <vtkSmartPointer.h>
#include <vtkSparseArray.h>
#include <vtkSparseArrayToTable.h>
#include <vtkTable.h>
#include <iostream>
#include <stdexcept>
// Throws std::runtime_error when a check fails; the catch block in the
// test body below converts that into a non-zero exit code.
#define test_expression(expression) \
{ \
if (!(expression)) \
throw std::runtime_error("Expression failed: " #expression); \
}
// Regression test: converts a 3-D sparse array with three non-zero
// entries into a vtkTable and verifies that the table has one column per
// dimension ("i", "j", "k") plus a "value" column, and one row per
// non-zero entry with the expected coordinates and values.
// Returns 0 on success, 1 on failure.
int ArraySparseArrayToTable(int vtkNotUsed(argc), char* vtkNotUsed(argv)[])
{
try
{
// Build a 10x10x10 sparse array with labeled dimensions and 3 values.
vtkSmartPointer<vtkSparseArray<double>> array = vtkSmartPointer<vtkSparseArray<double>>::New();
array->Resize(10, 10, 10);
array->SetDimensionLabel(0, "i");
array->SetDimensionLabel(1, "j");
array->SetDimensionLabel(2, "k");
array->AddValue(0, 0, 0, 1);
array->AddValue(1, 2, 3, 2);
array->AddValue(4, 5, 6, 3);
vtkSmartPointer<vtkArrayData> array_data = vtkSmartPointer<vtkArrayData>::New();
array_data->AddArray(array);
// Run the filter under test.
vtkSmartPointer<vtkSparseArrayToTable> convert = vtkSmartPointer<vtkSparseArrayToTable>::New();
convert->SetInputData(0, array_data);
convert->SetValueColumn("value");
convert->Update();
vtkTable* const table = convert->GetOutput();
table->Dump(8);
// Column layout: one coordinate column per dimension plus the values.
test_expression(table->GetNumberOfColumns() == 4);
test_expression(table->GetColumn(0)->GetName() == vtkStdString("i"));
test_expression(table->GetColumn(1)->GetName() == vtkStdString("j"));
test_expression(table->GetColumn(2)->GetName() == vtkStdString("k"));
test_expression(table->GetColumn(3)->GetName() == vtkStdString("value"));
// One row per non-zero entry, in insertion order.
test_expression(table->GetNumberOfRows() == 3);
test_expression(table->GetValue(0, 0).ToInt() == 0);
test_expression(table->GetValue(0, 1).ToInt() == 0);
test_expression(table->GetValue(0, 2).ToInt() == 0);
test_expression(table->GetValue(0, 3).ToDouble() == 1);
test_expression(table->GetValue(1, 0).ToInt() == 1);
test_expression(table->GetValue(1, 1).ToInt() == 2);
test_expression(table->GetValue(1, 2).ToInt() == 3);
test_expression(table->GetValue(1, 3).ToDouble() == 2);
test_expression(table->GetValue(2, 0).ToInt() == 4);
test_expression(table->GetValue(2, 1).ToInt() == 5);
test_expression(table->GetValue(2, 2).ToInt() == 6);
test_expression(table->GetValue(2, 3).ToDouble() == 3);
return 0;
}
catch (std::exception& e)
{
cerr << e.what() << endl;
return 1;
}
}
| {
"pile_set_name": "Github"
} |
// 19.1.3.19 Object.setPrototypeOf(O, proto)
var $export = require('./$.export');
// Register Object.setPrototypeOf as a static method ($export.S),
// delegating to the shared set-proto helper's `set` implementation.
$export($export.S, 'Object', {setPrototypeOf: require('./$.set-proto').set});
"pile_set_name": "Github"
} |
package grouper
import (
"sync"
"github.com/tedsuo/ifrit"
)
/*
An EntranceEvent occurs every time an invoked member becomes ready.
*/
type EntranceEvent struct {
	// Member is the group member whose process became ready.
	Member Member
	// Process is the running ifrit process for that member.
	Process ifrit.Process
}

// entranceEventChannel delivers EntranceEvents to a single listener.
type entranceEventChannel chan EntranceEvent

// newEntranceEventChannel builds a listener channel buffered to hold
// up to bufferSize events.
func newEntranceEventChannel(bufferSize int) entranceEventChannel {
	return make(entranceEventChannel, bufferSize)
}
// entranceEventBroadcaster fans EntranceEvents out to any number of
// attached listener channels, keeping a sliding buffer of recent events
// so late subscribers can replay history. All fields are guarded by lock.
type entranceEventBroadcaster struct {
	channels   []entranceEventChannel // nil once Close has been called
	buffer     slidingBuffer
	bufferSize int
	lock       *sync.Mutex
}

// newEntranceEventBroadcaster creates a broadcaster whose replay buffer
// and per-listener channels hold up to bufferSize events.
func newEntranceEventBroadcaster(bufferSize int) *entranceEventBroadcaster {
	return &entranceEventBroadcaster{
		channels: make([]entranceEventChannel, 0),
		buffer:   newSlidingBuffer(bufferSize),

		bufferSize: bufferSize,

		lock: new(sync.Mutex),
	}
}
// Attach registers and returns a new listener channel, first replaying
// the buffered event history into it. If the broadcaster has already
// been closed, the returned channel is closed after the replay.
func (b *entranceEventBroadcaster) Attach() entranceEventChannel {
	b.lock.Lock()
	defer b.lock.Unlock()

	ch := newEntranceEventChannel(b.bufferSize)

	// Replay buffered history so late subscribers see past events.
	replay := func(event interface{}) {
		ch <- event.(EntranceEvent)
	}
	b.buffer.Range(replay)

	if b.channels == nil {
		// Broadcaster is closed: hand back a closed channel.
		close(ch)
		return ch
	}

	b.channels = append(b.channels, ch)
	return ch
}
// Broadcast records the event in the replay buffer and delivers it to
// every attached listener channel.
// NOTE(review): the sends below happen while the lock is held, so a
// listener whose buffered channel is full will stall all broadcasting —
// confirm listeners always drain their channels promptly.
func (b *entranceEventBroadcaster) Broadcast(entrance EntranceEvent) {
	b.lock.Lock()
	defer b.lock.Unlock()

	b.buffer.Append(entrance)

	for _, entranceChan := range b.channels {
		entranceChan <- entrance
	}
}
// Close shuts down every attached listener channel and marks the
// broadcaster closed, so subsequent Attach calls receive an
// already-closed channel.
func (b *entranceEventBroadcaster) Close() {
	b.lock.Lock()
	defer b.lock.Unlock()

	for _, ch := range b.channels {
		close(ch)
	}

	// A nil slice signals "closed" to Attach.
	b.channels = nil
}
| {
"pile_set_name": "Github"
} |
// RUN: %check_clang_tidy -check-suffix=USING-A %s misc-unused-using-decls %t -- -- -DUSING_A
// RUN: %check_clang_tidy -check-suffix=USING-B %s misc-unused-using-decls %t -- -- -DUSING_B
// RUN: %check_clang_tidy -check-suffix=USING-C,USING-D %s misc-unused-using-decls %t -- -- -DUSING_C_D
// RUN: %check_clang_tidy -check-suffixes=USING-C,USING-D %s misc-unused-using-decls %t -- -- -DUSING_C_D
// RUN: %check_clang_tidy %s misc-unused-using-decls %t
// Exercises misc-unused-using-decls under several preprocessor configurations:
// each -D flag selects which using-declaration(s) in namespace b are compiled
// in (and left unused), and the matching CHECK suffix below verifies both the
// diagnostic and the removal fix-it. The third and fourth RUN lines cover the
// singular -check-suffix and plural -check-suffixes spellings.
namespace a {class A {}; class B {}; class C {}; class D {}; class E {};}
namespace b {
#if defined(USING_A)
using a::A;
#elif defined(USING_B)
using a::B;
#elif defined(USING_C_D)
using a::C;
using a::D;
#else
using a::E;
#endif
}
namespace c {}
// CHECK-MESSAGES-USING-A: warning: using decl 'A' {{.*}}
// CHECK-MESSAGES-USING-B: warning: using decl 'B' {{.*}}
// CHECK-MESSAGES-USING-C: warning: using decl 'C' {{.*}}
// CHECK-MESSAGES-USING-D: warning: using decl 'D' {{.*}}
// CHECK-MESSAGES: warning: using decl 'E' {{.*}}
// CHECK-FIXES-USING-A-NOT: using a::A;$
// CHECK-FIXES-USING-B-NOT: using a::B;$
// CHECK-FIXES-USING-C-NOT: using a::C;$
// CHECK-FIXES-USING-C-NOT: using a::D;$
// CHECK-FIXES-USING-D-NOT: using a::C;$
// CHECK-FIXES-USING-D-NOT: using a::D;$
// CHECK-FIXES-NOT: using a::E;$
| {
"pile_set_name": "Github"
} |
GET /abc/foo/baz HTTP/1.1
Host: 127.0.0.1:51240
User-Agent: curl/7.49.0-DEV
Accept: */*
--MARK--
| {
"pile_set_name": "Github"
} |
/****************************************************************************
Copyright (c) 2010 cocos2d-x.org
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#ifndef __CCACCELEROMETER_DELEGATE_H__
#define __CCACCELEROMETER_DELEGATE_H__
#include "CCCommon.h"
namespace cocos2d {
/**
@brief The device accelerometer reports values for each axis in units of g-force
*/
typedef struct
{
    double x;         // acceleration along the device x axis, in g
    double y;         // acceleration along the device y axis, in g
    double z;         // acceleration along the device z axis, in g
    double timestamp; // time the sample was taken (units are platform-defined -- TODO confirm)
} CCAcceleration;
/**
@brief
The CCAccelerometerDelegate defines a single method for
receiving acceleration-related data from the system.
*/
class CC_DLL CCAccelerometerDelegate
{
public:
    // Called with each new accelerometer sample. The default implementation
    // is a no-op, so subclasses only override when they need the data.
    virtual void didAccelerate(CCAcceleration* pAccelerationValue) {CC_UNUSED_PARAM(pAccelerationValue);}
};
} //namespace cocos2d
#endif
| {
"pile_set_name": "Github"
} |
package voldemort.rest.server;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.handler.execution.ExecutionHandler;
import voldemort.annotations.jmx.JmxGetter;
import voldemort.store.stats.StoreStats;
// Netty ExecutionHandler that hands incoming storage requests off to the
// wrapped executor and exposes JMX gauges over the underlying thread pool.
public class StorageExecutionHandler extends ExecutionHandler {
    // Retained only when the wrapped executor is a ThreadPoolExecutor, so the
    // JMX getters below can report queue depth and thread counts; null otherwise.
    private final ThreadPoolExecutor threadPoolExecutor;
    // Per-store statistics, keyed by store name; handed to each worker task.
    private ConcurrentHashMap<String, StoreStats> storeStatsMap;
    // Statistics aggregated across all stores; handed to each worker task.
    private StoreStats aggregatedStoreStats;
    private boolean isJmxEnabled = false;
    // Zone id of the local zone; passed through to each worker task.
    private final int localZoneId;
    /**
     * @param executor executor the requests are scheduled on; pool statistics
     *                 are only available when it is a ThreadPoolExecutor
     * @param storeStatsMap per-store statistics, keyed by store name
     * @param aggregateStoreStats statistics aggregated across all stores
     * @param isJmxEnabled whether workers should record JMX statistics
     * @param localZoneId id of the local zone
     */
    public StorageExecutionHandler(Executor executor,
                                   ConcurrentHashMap<String, StoreStats> storeStatsMap,
                                   StoreStats aggregateStoreStats,
                                   boolean isJmxEnabled,
                                   int localZoneId) {
        super(executor);
        if(executor instanceof ThreadPoolExecutor) {
            threadPoolExecutor = (ThreadPoolExecutor) executor;
        } else {
            threadPoolExecutor = null;
        }
        this.storeStatsMap = storeStatsMap;
        this.aggregatedStoreStats = aggregateStoreStats;
        this.isJmxEnabled = isJmxEnabled;
        this.localZoneId = localZoneId;
    }
    /**
     * Schedules each incoming {@link MessageEvent} on the executor as a
     * StorageWorkerThread task.
     * NOTE(review): events other than MessageEvent are silently dropped here
     * rather than forwarded upstream -- confirm this is intentional.
     */
    @Override
    public void handleUpstream(ChannelHandlerContext context, ChannelEvent channelEvent)
            throws Exception {
        if(channelEvent instanceof MessageEvent) {
            getExecutor().execute(new StorageWorkerThread((MessageEvent) channelEvent,
                                                          storeStatsMap,
                                                          aggregatedStoreStats,
                                                          isJmxEnabled,
                                                          localZoneId));
        }
    }
    @JmxGetter(name = "StorageThreadPoolQueueSize", description = "The number of storage requests queued to be executed")
    public int getQueueSize() {
        // -1 signals that the wrapped executor does not expose pool statistics.
        if(this.threadPoolExecutor != null) {
            return threadPoolExecutor.getQueue().size();
        } else {
            return -1;
        }
    }
    @JmxGetter(name = "ActiveStorageThreads", description = "The number of active Storage worker threads.")
    public int getActiveThreadsInWorkerPool() {
        if(this.threadPoolExecutor != null) {
            return this.threadPoolExecutor.getActiveCount();
        } else {
            return -1;
        }
    }
    @JmxGetter(name = "TotalStorageWorkerThreads", description = "The total number of Storage worker threads, active and idle.")
    public int getAllThreadInWorkerPool() {
        if(this.threadPoolExecutor != null) {
            return this.threadPoolExecutor.getPoolSize();
        } else {
            return -1;
        }
    }
}
| {
"pile_set_name": "Github"
} |
// WARNING
//
// This file has been generated automatically by Xamarin Studio from the outlets and
// actions declared in your storyboard file.
// Manual changes to this file will not be maintained.
//
using Foundation;
using System;
using System.CodeDom.Compiler;
using UIKit;
namespace MALClient.iOS
{
	[Register ("ViewController")]
	partial class ViewController
	{
		// Wired in the storyboard: fires when UIButton17 is tapped.
		[Action ("UIButton17_TouchUpInside:")]
		[GeneratedCode ("iOS Designer", "1.0")]
		partial void UIButton17_TouchUpInside (UIKit.UIButton sender);
		// No designer outlets exist for this controller, so there is nothing
		// to release here; the designer still requires the method.
		void ReleaseDesignerOutlets ()
		{
		}
	}
} | {
"pile_set_name": "Github"
} |
/*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "MessengerService.h"
// Interface identifier for IMessengerService:
// {2E50547C-A8AA-4f60-B57E-1F414711007B}. DECLSPEC_SELECTANY lets the
// definition appear in multiple translation units without link errors.
EXTERN_C const GUID DECLSPEC_SELECTANY IID_IMessengerService
    = { 0x2E50547C, 0xA8AA, 0x4f60, { 0xB5, 0x7E, 0x1F, 0x41, 0x47, 0x11, 0x00, 0x7B } };
// Wraps the given IMessenger, taking a COM reference that is held for the
// lifetime of this object.
MessengerService::MessengerService(IMessenger *messenger)
    : _messenger(messenger)
{
    _messenger->AddRef();
}

// Releases the COM reference taken in the constructor.
MessengerService::~MessengerService()
{
    _messenger->Release();
}
// Forwards to IMessenger::get_MyFriendlyName.
STDMETHODIMP MessengerService::get_MyFriendlyName(BSTR *pbstrName)
{
    return _messenger->get_MyFriendlyName(pbstrName);
}

// Forwards to IMessenger::get_MySigninName.
STDMETHODIMP MessengerService::get_MySigninName(BSTR *pbstrName)
{
    return _messenger->get_MySigninName(pbstrName);
}

// Forwards to IMessenger::get_MyStatus.
STDMETHODIMP MessengerService::get_MyStatus(MISTATUS *pmiStatus)
{
    return _messenger->get_MyStatus(pmiStatus);
}
// Not implemented; STDMETHODIMP_E_NOTIMPL_STUB expands to the method body
// (per its name, one returning E_NOTIMPL -- see its definition in the header).
STDMETHODIMP MessengerService::get_Property(MSERVICEPROPERTY ePropType, VARIANT *pvPropVal)
STDMETHODIMP_E_NOTIMPL_STUB

// Forwards to IMessenger::get_MyServiceId.
STDMETHODIMP MessengerService::get_ServiceID(BSTR *pbstrID)
{
    return _messenger->get_MyServiceId(pbstrID);
}

// Forwards to IMessenger::get_MyServiceName.
STDMETHODIMP MessengerService::get_ServiceName(BSTR *pbstrServiceName)
{
    return _messenger->get_MyServiceName(pbstrServiceName);
}

// Not implemented; STDMETHODIMP_E_NOTIMPL_STUB expands to the method body.
STDMETHODIMP MessengerService::put_Property(MSERVICEPROPERTY ePropType, VARIANT vPropVal)
STDMETHODIMP_E_NOTIMPL_STUB
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
*/
package com.microsoft.azure.management.network.v2020_06_01.implementation;
import com.microsoft.azure.arm.collection.InnerSupportsGet;
import com.microsoft.azure.arm.collection.InnerSupportsDelete;
import com.microsoft.azure.arm.collection.InnerSupportsListing;
import retrofit2.Retrofit;
import com.google.common.reflect.TypeToken;
import com.microsoft.azure.AzureServiceFuture;
import com.microsoft.azure.CloudException;
import com.microsoft.azure.ListOperationCallback;
import com.microsoft.azure.management.network.v2020_06_01.TagsObject;
import com.microsoft.azure.Page;
import com.microsoft.azure.PagedList;
import com.microsoft.rest.ServiceCallback;
import com.microsoft.rest.ServiceFuture;
import com.microsoft.rest.ServiceResponse;
import com.microsoft.rest.Validator;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import okhttp3.ResponseBody;
import retrofit2.http.Body;
import retrofit2.http.GET;
import retrofit2.http.Header;
import retrofit2.http.Headers;
import retrofit2.http.HTTP;
import retrofit2.http.PATCH;
import retrofit2.http.Path;
import retrofit2.http.PUT;
import retrofit2.http.Query;
import retrofit2.http.Url;
import retrofit2.Response;
import rx.functions.Func1;
import rx.Observable;
/**
* An instance of this class provides access to all the operations defined
* in AzureFirewalls.
*/
public class AzureFirewallsInner implements InnerSupportsGet<AzureFirewallInner>, InnerSupportsDelete<Void>, InnerSupportsListing<AzureFirewallInner> {
    /** The Retrofit service to perform REST calls. */
    private AzureFirewallsService service;
    /** The service client containing this operation class. */
    private NetworkManagementClientImpl client;
    /**
     * Initializes an instance of AzureFirewallsInner.
     *
     * @param retrofit the Retrofit instance built from a Retrofit Builder.
     * @param client the instance of the service client containing this operation class.
     */
    public AzureFirewallsInner(Retrofit retrofit, NetworkManagementClientImpl client) {
        // Retrofit generates the REST proxy from the annotated service interface below.
        this.service = retrofit.create(AzureFirewallsService.class);
        this.client = client;
    }
    /**
     * The interface defining all the services for AzureFirewalls to be
     * used by Retrofit to perform actually REST calls.
     */
    interface AzureFirewallsService {
        // Each operation appears twice: the plain variant is polled by the
        // Azure client as a long-running operation, while the "begin" variant
        // issues only the single initial request.
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls delete" })
        @HTTP(path = "subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}", method = "DELETE", hasBody = true)
        Observable<Response<ResponseBody>> delete(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls beginDelete" })
        @HTTP(path = "subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}", method = "DELETE", hasBody = true)
        Observable<Response<ResponseBody>> beginDelete(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls getByResourceGroup" })
        @GET("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}")
        Observable<Response<ResponseBody>> getByResourceGroup(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls createOrUpdate" })
        @PUT("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}")
        Observable<Response<ResponseBody>> createOrUpdate(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Body AzureFirewallInner parameters, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls beginCreateOrUpdate" })
        @PUT("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}")
        Observable<Response<ResponseBody>> beginCreateOrUpdate(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Body AzureFirewallInner parameters, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls updateTags" })
        @PATCH("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}")
        Observable<Response<ResponseBody>> updateTags(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Body TagsObject parameters, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls beginUpdateTags" })
        @PATCH("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}")
        Observable<Response<ResponseBody>> beginUpdateTags(@Path("resourceGroupName") String resourceGroupName, @Path("azureFirewallName") String azureFirewallName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Body TagsObject parameters, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls listByResourceGroup" })
        @GET("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls")
        Observable<Response<ResponseBody>> listByResourceGroup(@Path("resourceGroupName") String resourceGroupName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls list" })
        @GET("subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls")
        Observable<Response<ResponseBody>> list(@Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        // Paging continuation: the service returns an absolute nextLink URL.
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls listByResourceGroupNext" })
        @GET
        Observable<Response<ResponseBody>> listByResourceGroupNext(@Url String nextUrl, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
        @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.management.network.v2020_06_01.AzureFirewalls listNext" })
        @GET
        Observable<Response<ResponseBody>> listNext(@Url String nextUrl, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void delete(String resourceGroupName, String azureFirewallName) {
        // Blocks until the long-running delete reaches a terminal state (last emission).
        deleteWithServiceResponseAsync(resourceGroupName, azureFirewallName).toBlocking().last().body();
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> deleteAsync(String resourceGroupName, String azureFirewallName, final ServiceCallback<Void> serviceCallback) {
        return ServiceFuture.fromResponse(deleteWithServiceResponseAsync(resourceGroupName, azureFirewallName), serviceCallback);
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<Void> deleteAsync(String resourceGroupName, String azureFirewallName) {
        // Unwrap the ServiceResponse envelope, exposing only the (Void) body.
        return deleteWithServiceResponseAsync(resourceGroupName, azureFirewallName).map(new Func1<ServiceResponse<Void>, Void>() {
            @Override
            public Void call(ServiceResponse<Void> response) {
                return response.body();
            }
        });
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<ServiceResponse<Void>> deleteWithServiceResponseAsync(String resourceGroupName, String azureFirewallName) {
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2020-06-01"; // api-version pinned by the code generator
        Observable<Response<ResponseBody>> observable = service.delete(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent());
        // Hand the initial response to the Azure client's long-running-operation poller.
        return client.getAzureClient().getPostOrDeleteResultAsync(observable, new TypeToken<Void>() { }.getType());
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void beginDelete(String resourceGroupName, String azureFirewallName) {
        // Issues only the initial DELETE request (no long-running-operation polling).
        beginDeleteWithServiceResponseAsync(resourceGroupName, azureFirewallName).toBlocking().single().body();
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> beginDeleteAsync(String resourceGroupName, String azureFirewallName, final ServiceCallback<Void> serviceCallback) {
        return ServiceFuture.fromResponse(beginDeleteWithServiceResponseAsync(resourceGroupName, azureFirewallName), serviceCallback);
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceResponse} object if successful.
     */
    public Observable<Void> beginDeleteAsync(String resourceGroupName, String azureFirewallName) {
        return beginDeleteWithServiceResponseAsync(resourceGroupName, azureFirewallName).map(new Func1<ServiceResponse<Void>, Void>() {
            @Override
            public Void call(ServiceResponse<Void> response) {
                return response.body();
            }
        });
    }
    /**
     * Deletes the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceResponse} object if successful.
     */
    public Observable<ServiceResponse<Void>> beginDeleteWithServiceResponseAsync(String resourceGroupName, String azureFirewallName) {
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2020-06-01"; // api-version pinned by the code generator
        return service.beginDelete(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
                @Override
                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
                    try {
                        ServiceResponse<Void> clientResponse = beginDeleteDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Accepts 200/202/204 as success; any other status surfaces as CloudException.
    private ServiceResponse<Void> beginDeleteDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<Void, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<Void>() { }.getType())
                .register(202, new TypeToken<Void>() { }.getType())
                .register(204, new TypeToken<Void>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
    /**
     * Gets the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the AzureFirewallInner object if successful.
     */
    public AzureFirewallInner getByResourceGroup(String resourceGroupName, String azureFirewallName) {
        return getByResourceGroupWithServiceResponseAsync(resourceGroupName, azureFirewallName).toBlocking().single().body();
    }
    /**
     * Gets the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<AzureFirewallInner> getByResourceGroupAsync(String resourceGroupName, String azureFirewallName, final ServiceCallback<AzureFirewallInner> serviceCallback) {
        return ServiceFuture.fromResponse(getByResourceGroupWithServiceResponseAsync(resourceGroupName, azureFirewallName), serviceCallback);
    }
    /**
     * Gets the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the AzureFirewallInner object
     */
    public Observable<AzureFirewallInner> getByResourceGroupAsync(String resourceGroupName, String azureFirewallName) {
        // Unwrap the ServiceResponse envelope, exposing only the body.
        return getByResourceGroupWithServiceResponseAsync(resourceGroupName, azureFirewallName).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
            @Override
            public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
                return response.body();
            }
        });
    }
    /**
     * Gets the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the AzureFirewallInner object
     */
    public Observable<ServiceResponse<AzureFirewallInner>> getByResourceGroupWithServiceResponseAsync(String resourceGroupName, String azureFirewallName) {
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2020-06-01"; // api-version pinned by the code generator
        return service.getByResourceGroup(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<AzureFirewallInner>>>() {
                @Override
                public Observable<ServiceResponse<AzureFirewallInner>> call(Response<ResponseBody> response) {
                    try {
                        ServiceResponse<AzureFirewallInner> clientResponse = getByResourceGroupDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Accepts only 200 as success; any other status surfaces as CloudException.
    private ServiceResponse<AzureFirewallInner> getByResourceGroupDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<AzureFirewallInner, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<AzureFirewallInner>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the AzureFirewallInner object if successful.
     */
    public AzureFirewallInner createOrUpdate(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        // Blocks until the long-running PUT reaches a terminal state (last emission).
        return createOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters).toBlocking().last().body();
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<AzureFirewallInner> createOrUpdateAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters, final ServiceCallback<AzureFirewallInner> serviceCallback) {
        return ServiceFuture.fromResponse(createOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters), serviceCallback);
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<AzureFirewallInner> createOrUpdateAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        return createOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
            @Override
            public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
                return response.body();
            }
        });
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<ServiceResponse<AzureFirewallInner>> createOrUpdateWithServiceResponseAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (parameters == null) {
            throw new IllegalArgumentException("Parameter parameters is required and cannot be null.");
        }
        // Client-side validation of the request payload before any network call.
        Validator.validate(parameters);
        final String apiVersion = "2020-06-01"; // api-version pinned by the code generator
        Observable<Response<ResponseBody>> observable = service.createOrUpdate(resourceGroupName, azureFirewallName, this.client.subscriptionId(), parameters, apiVersion, this.client.acceptLanguage(), this.client.userAgent());
        // Hand the initial response to the Azure client's long-running-operation poller.
        return client.getAzureClient().getPutOrPatchResultAsync(observable, new TypeToken<AzureFirewallInner>() { }.getType());
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the AzureFirewallInner object if successful.
     */
    public AzureFirewallInner beginCreateOrUpdate(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        // Issues only the initial PUT request (no long-running-operation polling).
        return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters).toBlocking().single().body();
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<AzureFirewallInner> beginCreateOrUpdateAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters, final ServiceCallback<AzureFirewallInner> serviceCallback) {
        return ServiceFuture.fromResponse(beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters), serviceCallback);
    }
    /**
     * Creates or updates the specified Azure Firewall.
     *
     * @param resourceGroupName The name of the resource group.
     * @param azureFirewallName The name of the Azure Firewall.
     * @param parameters Parameters supplied to the create or update Azure Firewall operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the AzureFirewallInner object
     */
    public Observable<AzureFirewallInner> beginCreateOrUpdateAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, azureFirewallName, parameters).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
            @Override
            public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
                return response.body();
            }
        });
    }
/**
* Creates or updates the specified Azure Firewall.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param parameters Parameters supplied to the create or update Azure Firewall operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the AzureFirewallInner object
*/
    public Observable<ServiceResponse<AzureFirewallInner>> beginCreateOrUpdateWithServiceResponseAsync(String resourceGroupName, String azureFirewallName, AzureFirewallInner parameters) {
        // Fail fast on missing required arguments before any network call is made.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (parameters == null) {
            throw new IllegalArgumentException("Parameter parameters is required and cannot be null.");
        }
        // Structural validation of the request payload (nested required fields etc.).
        Validator.validate(parameters);
        final String apiVersion = "2020-06-01";
        // "begin" variant: sends the initial PUT only and does NOT poll the
        // long-running operation to completion (compare createOrUpdateWithServiceResponseAsync).
        return service.beginCreateOrUpdate(resourceGroupName, azureFirewallName, this.client.subscriptionId(), parameters, apiVersion, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<AzureFirewallInner>>>() {
                @Override
                public Observable<ServiceResponse<AzureFirewallInner>> call(Response<ResponseBody> response) {
                    try {
                        // Map the raw HTTP response to a typed ServiceResponse; surface
                        // deserialization/service errors through the Observable error channel.
                        ServiceResponse<AzureFirewallInner> clientResponse = beginCreateOrUpdateDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Deserializes the raw createOrUpdate response: 200 and 201 both carry an
    // AzureFirewallInner body; any other status is converted into a CloudException.
    private ServiceResponse<AzureFirewallInner> beginCreateOrUpdateDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<AzureFirewallInner, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<AzureFirewallInner>() { }.getType())
                .register(201, new TypeToken<AzureFirewallInner>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the AzureFirewallInner object if successful.
*/
public AzureFirewallInner updateTags(String resourceGroupName, String azureFirewallName) {
return updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName).toBlocking().last().body();
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<AzureFirewallInner> updateTagsAsync(String resourceGroupName, String azureFirewallName, final ServiceCallback<AzureFirewallInner> serviceCallback) {
return ServiceFuture.fromResponse(updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName), serviceCallback);
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable for the request
*/
public Observable<AzureFirewallInner> updateTagsAsync(String resourceGroupName, String azureFirewallName) {
return updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
@Override
public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
return response.body();
}
});
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable for the request
*/
public Observable<ServiceResponse<AzureFirewallInner>> updateTagsWithServiceResponseAsync(String resourceGroupName, String azureFirewallName) {
if (resourceGroupName == null) {
throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
}
if (azureFirewallName == null) {
throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
}
if (this.client.subscriptionId() == null) {
throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
}
final String apiVersion = "2020-06-01";
final Map<String, String> tags = null;
TagsObject parameters = new TagsObject();
parameters.withTags(null);
Observable<Response<ResponseBody>> observable = service.updateTags(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), parameters, this.client.userAgent());
return client.getAzureClient().getPutOrPatchResultAsync(observable, new TypeToken<AzureFirewallInner>() { }.getType());
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the AzureFirewallInner object if successful.
*/
public AzureFirewallInner updateTags(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
return updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags).toBlocking().last().body();
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<AzureFirewallInner> updateTagsAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags, final ServiceCallback<AzureFirewallInner> serviceCallback) {
return ServiceFuture.fromResponse(updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags), serviceCallback);
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable for the request
*/
public Observable<AzureFirewallInner> updateTagsAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
return updateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
@Override
public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
return response.body();
}
});
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable for the request
*/
    public Observable<ServiceResponse<AzureFirewallInner>> updateTagsWithServiceResponseAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
        // Fail fast on missing required arguments before any network call is made.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        // tags itself is optional (may be null); validate only its contents.
        Validator.validate(tags);
        final String apiVersion = "2020-06-01";
        // Wrap the tags map in the PATCH request body.
        TagsObject parameters = new TagsObject();
        parameters.withTags(tags);
        // Issue the PATCH; this is a long-running operation, so hand the raw response
        // to the Azure client, which polls until completion and deserializes the
        // final body as AzureFirewallInner.
        Observable<Response<ResponseBody>> observable = service.updateTags(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), parameters, this.client.userAgent());
        return client.getAzureClient().getPutOrPatchResultAsync(observable, new TypeToken<AzureFirewallInner>() { }.getType());
    }
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the AzureFirewallInner object if successful.
*/
public AzureFirewallInner beginUpdateTags(String resourceGroupName, String azureFirewallName) {
return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName).toBlocking().single().body();
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<AzureFirewallInner> beginUpdateTagsAsync(String resourceGroupName, String azureFirewallName, final ServiceCallback<AzureFirewallInner> serviceCallback) {
return ServiceFuture.fromResponse(beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName), serviceCallback);
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the AzureFirewallInner object
*/
public Observable<AzureFirewallInner> beginUpdateTagsAsync(String resourceGroupName, String azureFirewallName) {
return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
@Override
public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
return response.body();
}
});
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the AzureFirewallInner object
*/
public Observable<ServiceResponse<AzureFirewallInner>> beginUpdateTagsWithServiceResponseAsync(String resourceGroupName, String azureFirewallName) {
if (resourceGroupName == null) {
throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
}
if (azureFirewallName == null) {
throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
}
if (this.client.subscriptionId() == null) {
throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
}
final String apiVersion = "2020-06-01";
final Map<String, String> tags = null;
TagsObject parameters = new TagsObject();
parameters.withTags(null);
return service.beginUpdateTags(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), parameters, this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<AzureFirewallInner>>>() {
@Override
public Observable<ServiceResponse<AzureFirewallInner>> call(Response<ResponseBody> response) {
try {
ServiceResponse<AzureFirewallInner> clientResponse = beginUpdateTagsDelegate(response);
return Observable.just(clientResponse);
} catch (Throwable t) {
return Observable.error(t);
}
}
});
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the AzureFirewallInner object if successful.
*/
public AzureFirewallInner beginUpdateTags(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags).toBlocking().single().body();
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<AzureFirewallInner> beginUpdateTagsAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags, final ServiceCallback<AzureFirewallInner> serviceCallback) {
return ServiceFuture.fromResponse(beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags), serviceCallback);
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the AzureFirewallInner object
*/
public Observable<AzureFirewallInner> beginUpdateTagsAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, azureFirewallName, tags).map(new Func1<ServiceResponse<AzureFirewallInner>, AzureFirewallInner>() {
@Override
public AzureFirewallInner call(ServiceResponse<AzureFirewallInner> response) {
return response.body();
}
});
}
/**
* Updates tags of an Azure Firewall resource.
*
* @param resourceGroupName The name of the resource group.
* @param azureFirewallName The name of the Azure Firewall.
* @param tags Resource tags.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the AzureFirewallInner object
*/
    public Observable<ServiceResponse<AzureFirewallInner>> beginUpdateTagsWithServiceResponseAsync(String resourceGroupName, String azureFirewallName, Map<String, String> tags) {
        // Fail fast on missing required arguments before any network call is made.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (azureFirewallName == null) {
            throw new IllegalArgumentException("Parameter azureFirewallName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        // tags itself is optional (may be null); validate only its contents.
        Validator.validate(tags);
        final String apiVersion = "2020-06-01";
        // Wrap the tags map in the PATCH request body.
        TagsObject parameters = new TagsObject();
        parameters.withTags(tags);
        // "begin" variant: sends the initial PATCH only and does NOT poll the
        // long-running operation to completion.
        return service.beginUpdateTags(resourceGroupName, azureFirewallName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), parameters, this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<AzureFirewallInner>>>() {
                @Override
                public Observable<ServiceResponse<AzureFirewallInner>> call(Response<ResponseBody> response) {
                    try {
                        // Map the raw HTTP response to a typed ServiceResponse; surface
                        // deserialization/service errors through the Observable error channel.
                        ServiceResponse<AzureFirewallInner> clientResponse = beginUpdateTagsDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Deserializes the raw updateTags response: 200 carries an AzureFirewallInner
    // body, 202 (accepted, still in progress) has no body; any other status is
    // converted into a CloudException.
    private ServiceResponse<AzureFirewallInner> beginUpdateTagsDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<AzureFirewallInner, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<AzureFirewallInner>() { }.getType())
                .register(202, new TypeToken<Void>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
/**
* Lists all Azure Firewalls in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the PagedList<AzureFirewallInner> object if successful.
*/
public PagedList<AzureFirewallInner> listByResourceGroup(final String resourceGroupName) {
ServiceResponse<Page<AzureFirewallInner>> response = listByResourceGroupSinglePageAsync(resourceGroupName).toBlocking().single();
return new PagedList<AzureFirewallInner>(response.body()) {
@Override
public Page<AzureFirewallInner> nextPage(String nextPageLink) {
return listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<List<AzureFirewallInner>> listByResourceGroupAsync(final String resourceGroupName, final ListOperationCallback<AzureFirewallInner> serviceCallback) {
return AzureServiceFuture.fromPageResponse(
listByResourceGroupSinglePageAsync(resourceGroupName),
new Func1<String, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(String nextPageLink) {
return listByResourceGroupNextSinglePageAsync(nextPageLink);
}
},
serviceCallback);
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
public Observable<Page<AzureFirewallInner>> listByResourceGroupAsync(final String resourceGroupName) {
return listByResourceGroupWithServiceResponseAsync(resourceGroupName)
.map(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Page<AzureFirewallInner>>() {
@Override
public Page<AzureFirewallInner> call(ServiceResponse<Page<AzureFirewallInner>> response) {
return response.body();
}
});
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
public Observable<ServiceResponse<Page<AzureFirewallInner>>> listByResourceGroupWithServiceResponseAsync(final String resourceGroupName) {
return listByResourceGroupSinglePageAsync(resourceGroupName)
.concatMap(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(ServiceResponse<Page<AzureFirewallInner>> page) {
String nextPageLink = page.body().nextPageLink();
if (nextPageLink == null) {
return Observable.just(page);
}
return Observable.just(page).concatWith(listByResourceGroupNextWithServiceResponseAsync(nextPageLink));
}
});
}
/**
* Lists all Azure Firewalls in a resource group.
*
     * @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the PagedList<AzureFirewallInner> object wrapped in {@link ServiceResponse} if successful.
*/
    public Observable<ServiceResponse<Page<AzureFirewallInner>>> listByResourceGroupSinglePageAsync(final String resourceGroupName) {
        // Fail fast on missing required arguments before any network call is made.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2020-06-01";
        // Fetch exactly one page of results; callers chain the nextLink themselves.
        return service.listByResourceGroup(resourceGroupName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize one PageImpl and re-wrap it as a Page ServiceResponse;
                        // errors flow through the Observable error channel.
                        ServiceResponse<PageImpl<AzureFirewallInner>> result = listByResourceGroupDelegate(response);
                        return Observable.just(new ServiceResponse<Page<AzureFirewallInner>>(result.body(), result.response()));
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Deserializes one page of list-by-resource-group results: 200 carries a
    // PageImpl body; any other status is converted into a CloudException.
    private ServiceResponse<PageImpl<AzureFirewallInner>> listByResourceGroupDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<PageImpl<AzureFirewallInner>, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<PageImpl<AzureFirewallInner>>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
/**
* Gets all the Azure Firewalls in a subscription.
*
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the PagedList<AzureFirewallInner> object if successful.
*/
public PagedList<AzureFirewallInner> list() {
ServiceResponse<Page<AzureFirewallInner>> response = listSinglePageAsync().toBlocking().single();
return new PagedList<AzureFirewallInner>(response.body()) {
@Override
public Page<AzureFirewallInner> nextPage(String nextPageLink) {
return listNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
}
/**
* Gets all the Azure Firewalls in a subscription.
*
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<List<AzureFirewallInner>> listAsync(final ListOperationCallback<AzureFirewallInner> serviceCallback) {
return AzureServiceFuture.fromPageResponse(
listSinglePageAsync(),
new Func1<String, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(String nextPageLink) {
return listNextSinglePageAsync(nextPageLink);
}
},
serviceCallback);
}
/**
* Gets all the Azure Firewalls in a subscription.
*
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
public Observable<Page<AzureFirewallInner>> listAsync() {
return listWithServiceResponseAsync()
.map(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Page<AzureFirewallInner>>() {
@Override
public Page<AzureFirewallInner> call(ServiceResponse<Page<AzureFirewallInner>> response) {
return response.body();
}
});
}
/**
* Gets all the Azure Firewalls in a subscription.
*
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
public Observable<ServiceResponse<Page<AzureFirewallInner>>> listWithServiceResponseAsync() {
return listSinglePageAsync()
.concatMap(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(ServiceResponse<Page<AzureFirewallInner>> page) {
String nextPageLink = page.body().nextPageLink();
if (nextPageLink == null) {
return Observable.just(page);
}
return Observable.just(page).concatWith(listNextWithServiceResponseAsync(nextPageLink));
}
});
}
/**
* Gets all the Azure Firewalls in a subscription.
*
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the PagedList<AzureFirewallInner> object wrapped in {@link ServiceResponse} if successful.
*/
    public Observable<ServiceResponse<Page<AzureFirewallInner>>> listSinglePageAsync() {
        // Fail fast if the client is missing a subscription id.
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2020-06-01";
        // Fetch exactly one page of results; callers chain the nextLink themselves.
        return service.list(this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize one PageImpl and re-wrap it as a Page ServiceResponse;
                        // errors flow through the Observable error channel.
                        ServiceResponse<PageImpl<AzureFirewallInner>> result = listDelegate(response);
                        return Observable.just(new ServiceResponse<Page<AzureFirewallInner>>(result.body(), result.response()));
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
    // Deserializes one page of subscription-wide list results: 200 carries a
    // PageImpl body; any other status is converted into a CloudException.
    private ServiceResponse<PageImpl<AzureFirewallInner>> listDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<PageImpl<AzureFirewallInner>, CloudException>newInstance(this.client.serializerAdapter())
                .register(200, new TypeToken<PageImpl<AzureFirewallInner>>() { }.getType())
                .registerError(CloudException.class)
                .build(response);
    }
/**
* Lists all Azure Firewalls in a resource group.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the PagedList<AzureFirewallInner> object if successful.
*/
public PagedList<AzureFirewallInner> listByResourceGroupNext(final String nextPageLink) {
ServiceResponse<Page<AzureFirewallInner>> response = listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single();
return new PagedList<AzureFirewallInner>(response.body()) {
@Override
public Page<AzureFirewallInner> nextPage(String nextPageLink) {
return listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @param serviceFuture the ServiceFuture object tracking the Retrofit calls
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
public ServiceFuture<List<AzureFirewallInner>> listByResourceGroupNextAsync(final String nextPageLink, final ServiceFuture<List<AzureFirewallInner>> serviceFuture, final ListOperationCallback<AzureFirewallInner> serviceCallback) {
return AzureServiceFuture.fromPageResponse(
listByResourceGroupNextSinglePageAsync(nextPageLink),
new Func1<String, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(String nextPageLink) {
return listByResourceGroupNextSinglePageAsync(nextPageLink);
}
},
serviceCallback);
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
public Observable<Page<AzureFirewallInner>> listByResourceGroupNextAsync(final String nextPageLink) {
return listByResourceGroupNextWithServiceResponseAsync(nextPageLink)
.map(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Page<AzureFirewallInner>>() {
@Override
public Page<AzureFirewallInner> call(ServiceResponse<Page<AzureFirewallInner>> response) {
return response.body();
}
});
}
/**
* Lists all Azure Firewalls in a resource group.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
    public Observable<ServiceResponse<Page<AzureFirewallInner>>> listByResourceGroupNextWithServiceResponseAsync(final String nextPageLink) {
        // Fetch the requested page, then lazily chain all following pages:
        // concatMap re-emits the current page and, while the service keeps
        // returning a nextPageLink, recursively appends the remaining pages.
        return listByResourceGroupNextSinglePageAsync(nextPageLink)
            .concatMap(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(ServiceResponse<Page<AzureFirewallInner>> page) {
                    String nextPageLink = page.body().nextPageLink();
                    if (nextPageLink == null) {
                        // Last page: stop the recursion.
                        return Observable.just(page);
                    }
                    return Observable.just(page).concatWith(listByResourceGroupNextWithServiceResponseAsync(nextPageLink));
                }
            });
    }
/**
* Lists all Azure Firewalls in a resource group.
*
     * @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the PagedList<AzureFirewallInner> object wrapped in {@link ServiceResponse} if successful.
*/
public Observable<ServiceResponse<Page<AzureFirewallInner>>> listByResourceGroupNextSinglePageAsync(final String nextPageLink) {
if (nextPageLink == null) {
throw new IllegalArgumentException("Parameter nextPageLink is required and cannot be null.");
}
String nextUrl = String.format("%s", nextPageLink);
return service.listByResourceGroupNext(nextUrl, this.client.acceptLanguage(), this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(Response<ResponseBody> response) {
try {
ServiceResponse<PageImpl<AzureFirewallInner>> result = listByResourceGroupNextDelegate(response);
return Observable.just(new ServiceResponse<Page<AzureFirewallInner>>(result.body(), result.response()));
} catch (Throwable t) {
return Observable.error(t);
}
}
});
}
    /**
     * Deserializes a raw retrofit response into one page of AzureFirewallInner
     * results, accepting only HTTP 200 and mapping error payloads to CloudException.
     */
    private ServiceResponse<PageImpl<AzureFirewallInner>> listByResourceGroupNextDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<PageImpl<AzureFirewallInner>, CloudException>newInstance(this.client.serializerAdapter())
            .register(200, new TypeToken<PageImpl<AzureFirewallInner>>() { }.getType())
            .registerError(CloudException.class)
            .build(response);
    }
/**
* Gets all the Azure Firewalls in a subscription.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @throws CloudException thrown if the request is rejected by server
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @return the PagedList<AzureFirewallInner> object if successful.
*/
public PagedList<AzureFirewallInner> listNext(final String nextPageLink) {
ServiceResponse<Page<AzureFirewallInner>> response = listNextSinglePageAsync(nextPageLink).toBlocking().single();
return new PagedList<AzureFirewallInner>(response.body()) {
@Override
public Page<AzureFirewallInner> nextPage(String nextPageLink) {
return listNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
}
/**
* Gets all the Azure Firewalls in a subscription.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @param serviceFuture the ServiceFuture object tracking the Retrofit calls
* @param serviceCallback the async ServiceCallback to handle successful and failed responses.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the {@link ServiceFuture} object
*/
    public ServiceFuture<List<AzureFirewallInner>> listNextAsync(final String nextPageLink, final ServiceFuture<List<AzureFirewallInner>> serviceFuture, final ListOperationCallback<AzureFirewallInner> serviceCallback) {
        // Bridges the paged Observable onto the ServiceFuture/callback API; the
        // Func1 is invoked each time the caller advances to another page.
        return AzureServiceFuture.fromPageResponse(
            listNextSinglePageAsync(nextPageLink),
            new Func1<String, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(String nextPageLink) {
                    return listNextSinglePageAsync(nextPageLink);
                }
            },
            serviceCallback);
    }
/**
* Gets all the Azure Firewalls in a subscription.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
    public Observable<Page<AzureFirewallInner>> listNextAsync(final String nextPageLink) {
        // Unwrap each ServiceResponse so callers see only the page payload.
        return listNextWithServiceResponseAsync(nextPageLink)
            .map(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Page<AzureFirewallInner>>() {
                @Override
                public Page<AzureFirewallInner> call(ServiceResponse<Page<AzureFirewallInner>> response) {
                    return response.body();
                }
            });
    }
/**
* Gets all the Azure Firewalls in a subscription.
*
* @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the observable to the PagedList<AzureFirewallInner> object
*/
    public Observable<ServiceResponse<Page<AzureFirewallInner>>> listNextWithServiceResponseAsync(final String nextPageLink) {
        // Fetch the requested page, then lazily chain all following pages:
        // concatMap re-emits the current page and, while the service keeps
        // returning a nextPageLink, recursively appends the remaining pages.
        return listNextSinglePageAsync(nextPageLink)
            .concatMap(new Func1<ServiceResponse<Page<AzureFirewallInner>>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(ServiceResponse<Page<AzureFirewallInner>> page) {
                    String nextPageLink = page.body().nextPageLink();
                    if (nextPageLink == null) {
                        // Last page: stop the recursion.
                        return Observable.just(page);
                    }
                    return Observable.just(page).concatWith(listNextWithServiceResponseAsync(nextPageLink));
                }
            });
    }
/**
* Gets all the Azure Firewalls in a subscription.
*
     * @param nextPageLink The NextLink from the previous successful call to List operation.
* @throws IllegalArgumentException thrown if parameters fail the validation
* @return the PagedList<AzureFirewallInner> object wrapped in {@link ServiceResponse} if successful.
*/
public Observable<ServiceResponse<Page<AzureFirewallInner>>> listNextSinglePageAsync(final String nextPageLink) {
if (nextPageLink == null) {
throw new IllegalArgumentException("Parameter nextPageLink is required and cannot be null.");
}
String nextUrl = String.format("%s", nextPageLink);
return service.listNext(nextUrl, this.client.acceptLanguage(), this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<AzureFirewallInner>>>>() {
@Override
public Observable<ServiceResponse<Page<AzureFirewallInner>>> call(Response<ResponseBody> response) {
try {
ServiceResponse<PageImpl<AzureFirewallInner>> result = listNextDelegate(response);
return Observable.just(new ServiceResponse<Page<AzureFirewallInner>>(result.body(), result.response()));
} catch (Throwable t) {
return Observable.error(t);
}
}
});
}
    /**
     * Deserializes a raw retrofit response into one page of AzureFirewallInner
     * results, accepting only HTTP 200 and mapping error payloads to CloudException.
     */
    private ServiceResponse<PageImpl<AzureFirewallInner>> listNextDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
        return this.client.restClient().responseBuilderFactory().<PageImpl<AzureFirewallInner>, CloudException>newInstance(this.client.serializerAdapter())
            .register(200, new TypeToken<PageImpl<AzureFirewallInner>>() { }.getType())
            .registerError(CloudException.class)
            .build(response);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2015 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dropbox.djinni.ideaplugin;
import com.dropbox.djinni.ideaplugin.psi.DjinniConstReference;
import com.dropbox.djinni.ideaplugin.psi.DjinniFile;
import com.dropbox.djinni.ideaplugin.psi.DjinniImportStatement;
import com.dropbox.djinni.ideaplugin.psi.DjinniTypeReference;
import com.dropbox.djinni.ideaplugin.psi.impl.DjinniPsiImplUtil;
import com.intellij.lang.annotation.Annotation;
import com.intellij.lang.annotation.AnnotationHolder;
import com.intellij.lang.annotation.Annotator;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiElement;
import org.jetbrains.annotations.NotNull;
/**
* Created by jaetzold on 7/27/15.
*/
public class DjinniAnnotator implements Annotator {
@Override
public void annotate(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
if(element instanceof DjinniTypeReference) {
DjinniTypeReference typeReference = (DjinniTypeReference)element;
boolean validReference = typeReference.getPredefinedType() != null
|| !DjinniUtil.findTypeDefinitionsForName(element.getProject(), typeReference.getText(), element).isEmpty()
|| !DjinniUtil.findExternalTypeForName(element.getProject(), typeReference.getText(), element).isEmpty();
if(!validReference) {
Annotation annotation = holder.createErrorAnnotation(element, "Unresolved type");
annotation.setNeedsUpdateOnTyping(true);
annotation.registerFix(new DjinniCreateTypeDefinitionQuickFix(typeReference.getText(), DjinniPsiImplUtil.DjinniType.RECORD));
annotation.registerFix(new DjinniCreateTypeDefinitionQuickFix(typeReference.getText(), DjinniPsiImplUtil.DjinniType.INTERFACE));
annotation.registerFix(new DjinniCreateTypeDefinitionQuickFix(typeReference.getText(), DjinniPsiImplUtil.DjinniType.ENUM));
}
} else if(element instanceof DjinniConstReference) {
DjinniConstReference constReference = (DjinniConstReference)element;
String typeName = DjinniUtil.getTypeNameOfConstReference(constReference);
boolean validReference = !DjinniUtil.findReferencableValuesWithNameAndTypename(element.getProject(), constReference.getName(), typeName, element).isEmpty();
if(!validReference) {
Annotation annotation = holder.createErrorAnnotation(element, "Unresolved value");
annotation.setNeedsUpdateOnTyping(true);
}
} else if(element instanceof DjinniImportStatement) {
DjinniImportStatement importStatement = (DjinniImportStatement)element;
DjinniFile importedFile =
DjinniUtil.djinniFileRelativeResolve(element.getProject(), element.getContainingFile(), importStatement.getPath());
if(importedFile == null) {
TextRange textRange = importStatement.getTextRange();
TextRange rangeOfPathInImport = importStatement.getRangeOfPath();
TextRange range = new TextRange(textRange.getStartOffset() + rangeOfPathInImport.getStartOffset(),
textRange.getStartOffset() + rangeOfPathInImport.getEndOffset());
holder.createErrorAnnotation(range, "File does not exist");
}
}
}
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @jest-environment jsdom
*/
// Point the CWD at the fixture website so path resolution happens relative to
// the test fixtures. NOTE(review): assumes the Doc module reads process.cwd()
// — keep this override above the require() calls below.
process.cwd = () => `${__dirname}/__fixtures__/website`;
const React = require('react');
const {configure, mount} = require('enzyme');
const Adapter = require('enzyme-adapter-react-16');
const fs = require('fs');
const _ = require('lodash');
const Doc = require('../Doc.js');
// Enzyme requires a React-16 adapter to be configured before any mount() call.
configure({adapter: new Adapter()});
// Renders a Doc page from the split-tab_doc1.md fixture and verifies the
// code-tab labels and each tab's rendered code content.
describe('when code tabs are used correctly', () => {
  // clear unique id counter
  // (tab/content element ids are derived from lodash's uniqueId counter, so
  // the counter is reset to keep the "-content-N" suffixes predictable)
  _.uniqueId = _.runInContext().uniqueId;
  const props = {
    content: fs.readFileSync(
      `${__dirname}/__fixtures__/split-tab_doc1.md`,
      'utf-8',
    ),
    metadata: {},
    config: {},
  };
  let mountedDoc;
  // Mount the page lazily and share the single mounted instance across tests.
  const docPage = () => {
    if (!mountedDoc) {
      mountedDoc = mount(<Doc {...props} />);
    }
    return mountedDoc;
  };
  const page = docPage();
  it('renders tabs correctly', () => {
    const node = page.getDOMNode();
    // Selectors match on the stable "-content-N" id suffix only.
    const firstTab = node.querySelector('[data-tab$="-content-2"]').textContent;
    expect(firstTab).toEqual('JavaScript');
    const secondTab = node.querySelector('[data-tab$="-content-3"]')
      .textContent;
    expect(secondTab).toEqual('Python');
    const thirdTab = node.querySelector('[data-tab$="-content-4"]').textContent;
    expect(thirdTab).toEqual('C');
    const fourthTab = node.querySelector('[data-tab$="-content-5"]')
      .textContent;
    expect(fourthTab).toEqual('Pascal');
  });
  it('renders content correctly', () => {
    const node = page.getDOMNode();
    const firstContent = node.querySelector('[id$="-content-2"] code')
      .textContent;
    expect(firstContent).toEqual("console.log('Hello, world!');");
    const secondContent = node.querySelector('[id$="-content-3"] code')
      .textContent;
    expect(secondContent).toEqual("print('Hello, world!')");
    const thirdContent = node.querySelector('[id$="-content-4"] code')
      .textContent;
    expect(thirdContent).toEqual(
      '#include <stdio.h>int main() { printf("Hello World!"); return 0;}',
    );
    const fourthContent = node.querySelector('[id$="-content-5"] code')
      .textContent;
    expect(fourthContent).toEqual(
      "program HelloWorld;begin WriteLn('Hello, world!');end.",
    );
  });
});
// Same assertions as above, but against the split-tab_doc2.md fixture where
// the code tabs appear inside a list.
describe('when code tab is used in a list', () => {
  // clear unique id counter
  // (tab/content element ids are derived from lodash's uniqueId counter, so
  // the counter is reset to keep the "-content-N" suffixes predictable)
  _.uniqueId = _.runInContext().uniqueId;
  const props = {
    content: fs.readFileSync(
      `${__dirname}/__fixtures__/split-tab_doc2.md`,
      'utf-8',
    ),
    metadata: {},
    config: {},
  };
  let mountedDoc;
  // Mount the page lazily and share the single mounted instance across tests.
  const docPage = () => {
    if (!mountedDoc) {
      mountedDoc = mount(<Doc {...props} />);
    }
    return mountedDoc;
  };
  const page = docPage();
  it('renders tabs correctly', () => {
    const node = page.getDOMNode();
    // Selectors match on the stable "-content-N" id suffix only.
    const firstTab = node.querySelector('[data-tab$="-content-2"]').textContent;
    expect(firstTab).toEqual('JavaScript');
    const secondTab = node.querySelector('[data-tab$="-content-3"]')
      .textContent;
    expect(secondTab).toEqual('Python');
    const thirdTab = node.querySelector('[data-tab$="-content-4"]').textContent;
    expect(thirdTab).toEqual('C');
    const fourthTab = node.querySelector('[data-tab$="-content-5"]')
      .textContent;
    expect(fourthTab).toEqual('Pascal');
  });
  it('renders content correctly', () => {
    const node = page.getDOMNode();
    const firstContent = node.querySelector('[id$="-content-2"] code')
      .textContent;
    expect(firstContent).toEqual("console.log('Hello, world!');");
    const secondContent = node.querySelector('[id$="-content-3"] code')
      .textContent;
    expect(secondContent).toEqual("print('Hello, world!')");
    const thirdContent = node.querySelector('[id$="-content-4"] code')
      .textContent;
    expect(thirdContent).toEqual(
      '#include <stdio.h>int main() { printf("Hello World!"); return 0;}',
    );
    const fourthContent = node.querySelector('[id$="-content-5"] code')
      .textContent;
    expect(fourthContent).toEqual(
      "program HelloWorld;begin WriteLn('Hello, world!');end.",
    );
  });
});
| {
"pile_set_name": "Github"
} |
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <utility>
#include <string>
#include <set>
#include <getopt.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/sysinfo.h>
#include "../allocator.h"
#include "../stats_server.h"
#include "bench.h"
#include "bdb_wrapper.h"
#include "ndb_wrapper.h"
#include "ndb_wrapper_impl.h"
#include "kvdb_wrapper.h"
#include "kvdb_wrapper_impl.h"
#if !NO_MYSQL
#include "mysql_wrapper.h"
#endif
using namespace std;
using namespace util;
// Splits a string on runs of whitespace (spaces, tabs, newlines) and returns
// the resulting tokens in order. Empty input yields an empty vector.
static std::vector<std::string>
split_ws(const std::string &s)
{
  std::vector<std::string> tokens;
  std::istringstream in(s);
  std::string tok;
  // operator>> skips leading whitespace and stops at the next run of it,
  // which matches the istream_iterator-based copy this replaces.
  while (in >> tok)
    tokens.push_back(tok);
  return tokens;
}
// Parses a memory-size specification of the form "<number>[K|M|G]" into a
// byte count. The binary-unit suffix is optional and now accepted in either
// case ("2k" == "2K" == 2048); a missing suffix means plain bytes.
// Returns 0 for an empty string (previously s.back() on an empty string was
// undefined behavior).
static size_t
parse_memory_spec(const std::string &s)
{
  if (s.empty())
    return 0;
  std::string digits(s);
  size_t mult = 1;
  switch (digits.back()) {
  case 'G': case 'g':
    mult = static_cast<size_t>(1) << 30;
    digits.pop_back();
    break;
  case 'M': case 'm':
    mult = static_cast<size_t>(1) << 20;
    digits.pop_back();
    break;
  case 'K': case 'k':
    mult = static_cast<size_t>(1) << 10;
    digits.pop_back();
    break;
  default:
    break;
  }
  return std::strtoul(digits.c_str(), nullptr, 10) * mult;
}
int
main(int argc, char **argv)
{
abstract_db *db = NULL;
void (*test_fn)(abstract_db *, int argc, char **argv) = NULL;
string bench_type = "ycsb";
string db_type = "ndb-proto2";
char *curdir = get_current_dir_name();
string basedir = curdir;
string bench_opts;
size_t numa_memory = 0;
free(curdir);
int saw_run_spec = 0;
int nofsync = 0;
int do_compress = 0;
int fake_writes = 0;
int disable_gc = 0;
int disable_snapshots = 0;
vector<string> logfiles;
vector<vector<unsigned>> assignments;
string stats_server_sockfile;
while (1) {
static struct option long_options[] =
{
{"verbose" , no_argument , &verbose , 1} ,
{"parallel-loading" , no_argument , &enable_parallel_loading , 1} ,
{"pin-cpus" , no_argument , &pin_cpus , 1} ,
{"slow-exit" , no_argument , &slow_exit , 1} ,
{"retry-aborted-transactions" , no_argument , &retry_aborted_transaction , 1} ,
{"backoff-aborted-transactions" , no_argument , &backoff_aborted_transaction , 1} ,
{"bench" , required_argument , 0 , 'b'} ,
{"scale-factor" , required_argument , 0 , 's'} ,
{"num-threads" , required_argument , 0 , 't'} ,
{"db-type" , required_argument , 0 , 'd'} ,
{"basedir" , required_argument , 0 , 'B'} ,
{"txn-flags" , required_argument , 0 , 'f'} ,
{"runtime" , required_argument , 0 , 'r'} ,
{"ops-per-worker" , required_argument , 0 , 'n'} ,
{"bench-opts" , required_argument , 0 , 'o'} ,
{"numa-memory" , required_argument , 0 , 'm'} , // implies --pin-cpus
{"logfile" , required_argument , 0 , 'l'} ,
{"assignment" , required_argument , 0 , 'a'} ,
{"log-nofsync" , no_argument , &nofsync , 1} ,
{"log-compress" , no_argument , &do_compress , 1} ,
{"log-fake-writes" , no_argument , &fake_writes , 1} ,
{"disable-gc" , no_argument , &disable_gc , 1} ,
{"disable-snapshots" , no_argument , &disable_snapshots , 1} ,
{"stats-server-sockfile" , required_argument , 0 , 'x'} ,
{"no-reset-counters" , no_argument , &no_reset_counters , 1} ,
{0, 0, 0, 0}
};
int option_index = 0;
int c = getopt_long(argc, argv, "b:s:t:d:B:f:r:n:o:m:l:a:x:", long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 0:
if (long_options[option_index].flag != 0)
break;
abort();
break;
case 'b':
bench_type = optarg;
break;
case 's':
scale_factor = strtod(optarg, NULL);
ALWAYS_ASSERT(scale_factor > 0.0);
break;
case 't':
nthreads = strtoul(optarg, NULL, 10);
ALWAYS_ASSERT(nthreads > 0);
break;
case 'd':
db_type = optarg;
break;
case 'B':
basedir = optarg;
break;
case 'f':
txn_flags = strtoul(optarg, NULL, 10);
break;
case 'r':
ALWAYS_ASSERT(!saw_run_spec);
saw_run_spec = 1;
runtime = strtoul(optarg, NULL, 10);
ALWAYS_ASSERT(runtime > 0);
run_mode = RUNMODE_TIME;
break;
case 'n':
ALWAYS_ASSERT(!saw_run_spec);
saw_run_spec = 1;
ops_per_worker = strtoul(optarg, NULL, 10);
ALWAYS_ASSERT(ops_per_worker > 0);
run_mode = RUNMODE_OPS;
case 'o':
bench_opts = optarg;
break;
case 'm':
{
pin_cpus = 1;
const size_t m = parse_memory_spec(optarg);
ALWAYS_ASSERT(m > 0);
numa_memory = m;
}
break;
case 'l':
logfiles.emplace_back(optarg);
break;
case 'a':
assignments.emplace_back(
ParseCSVString<unsigned, RangeAwareParser<unsigned>>(optarg));
break;
case 'x':
stats_server_sockfile = optarg;
break;
case '?':
/* getopt_long already printed an error message. */
exit(1);
default:
abort();
}
}
if (bench_type == "ycsb")
test_fn = ycsb_do_test;
else if (bench_type == "tpcc")
test_fn = tpcc_do_test;
else if (bench_type == "queue")
test_fn = queue_do_test;
else if (bench_type == "encstress")
test_fn = encstress_do_test;
else if (bench_type == "bid")
test_fn = bid_do_test;
else
ALWAYS_ASSERT(false);
if (do_compress && logfiles.empty()) {
cerr << "[ERROR] --log-compress specified without logging enabled" << endl;
return 1;
}
if (fake_writes && logfiles.empty()) {
cerr << "[ERROR] --log-fake-writes specified without logging enabled" << endl;
return 1;
}
if (nofsync && logfiles.empty()) {
cerr << "[ERROR] --log-nofsync specified without logging enabled" << endl;
return 1;
}
if (fake_writes && nofsync) {
cerr << "[WARNING] --log-nofsync has no effect with --log-fake-writes enabled" << endl;
}
#ifndef ENABLE_EVENT_COUNTERS
if (!stats_server_sockfile.empty()) {
cerr << "[WARNING] --stats-server-sockfile with no event counters enabled is useless" << endl;
}
#endif
// initialize the numa allocator
if (numa_memory > 0) {
const size_t maxpercpu = util::iceil(
numa_memory / nthreads, ::allocator::GetHugepageSize());
numa_memory = maxpercpu * nthreads;
::allocator::Initialize(nthreads, maxpercpu);
}
const set<string> can_persist({"ndb-proto2"});
if (!logfiles.empty() && !can_persist.count(db_type)) {
cerr << "[ERROR] benchmark " << db_type
<< " does not have persistence implemented" << endl;
return 1;
}
#ifdef PROTO2_CAN_DISABLE_GC
const set<string> has_gc({"ndb-proto1", "ndb-proto2"});
if (disable_gc && !has_gc.count(db_type)) {
cerr << "[ERROR] benchmark " << db_type
<< " does not have gc to disable" << endl;
return 1;
}
#else
if (disable_gc) {
cerr << "[ERROR] macro PROTO2_CAN_DISABLE_GC was not set, cannot disable gc" << endl;
return 1;
}
#endif
#ifdef PROTO2_CAN_DISABLE_SNAPSHOTS
const set<string> has_snapshots({"ndb-proto2"});
if (disable_snapshots && !has_snapshots.count(db_type)) {
cerr << "[ERROR] benchmark " << db_type
<< " does not have snapshots to disable" << endl;
return 1;
}
#else
if (disable_snapshots) {
cerr << "[ERROR] macro PROTO2_CAN_DISABLE_SNAPSHOTS was not set, cannot disable snapshots" << endl;
return 1;
}
#endif
if (db_type == "bdb") {
const string cmd = "rm -rf " + basedir + "/db/*";
// XXX(stephentu): laziness
int ret UNUSED = system(cmd.c_str());
db = new bdb_wrapper("db", bench_type + ".db");
} else if (db_type == "ndb-proto1") {
// XXX: hacky simulation of proto1
db = new ndb_wrapper<transaction_proto2>(
logfiles, assignments, !nofsync, do_compress, fake_writes);
transaction_proto2_static::set_hack_status(true);
ALWAYS_ASSERT(transaction_proto2_static::get_hack_status());
#ifdef PROTO2_CAN_DISABLE_GC
if (!disable_gc)
transaction_proto2_static::InitGC();
#endif
} else if (db_type == "ndb-proto2") {
db = new ndb_wrapper<transaction_proto2>(
logfiles, assignments, !nofsync, do_compress, fake_writes);
ALWAYS_ASSERT(!transaction_proto2_static::get_hack_status());
#ifdef PROTO2_CAN_DISABLE_GC
if (!disable_gc)
transaction_proto2_static::InitGC();
#endif
#ifdef PROTO2_CAN_DISABLE_SNAPSHOTS
if (disable_snapshots)
transaction_proto2_static::DisableSnapshots();
#endif
} else if (db_type == "kvdb") {
db = new kvdb_wrapper<true>;
} else if (db_type == "kvdb-st") {
db = new kvdb_wrapper<false>;
#if !NO_MYSQL
} else if (db_type == "mysql") {
string dbdir = basedir + "/mysql-db";
db = new mysql_wrapper(dbdir, bench_type);
#endif
} else
ALWAYS_ASSERT(false);
#ifdef DEBUG
cerr << "WARNING: benchmark built in DEBUG mode!!!" << endl;
#endif
#ifdef CHECK_INVARIANTS
cerr << "WARNING: invariant checking is enabled - should disable for benchmark" << endl;
#ifdef PARANOID_CHECKING
cerr << " *** Paranoid checking is enabled ***" << endl;
#endif
#endif
if (verbose) {
const unsigned long ncpus = coreid::num_cpus_online();
cerr << "Database Benchmark:" << endl;
cerr << " pid: " << getpid() << endl;
cerr << "settings:" << endl;
cerr << " par-loading : " << enable_parallel_loading << endl;
cerr << " pin-cpus : " << pin_cpus << endl;
cerr << " slow-exit : " << slow_exit << endl;
cerr << " retry-txns : " << retry_aborted_transaction << endl;
cerr << " backoff-txns: " << backoff_aborted_transaction << endl;
cerr << " bench : " << bench_type << endl;
cerr << " scale : " << scale_factor << endl;
cerr << " num-cpus : " << ncpus << endl;
cerr << " num-threads : " << nthreads << endl;
cerr << " db-type : " << db_type << endl;
cerr << " basedir : " << basedir << endl;
cerr << " txn-flags : " << hexify(txn_flags) << endl;
if (run_mode == RUNMODE_TIME)
cerr << " runtime : " << runtime << endl;
else
cerr << " ops/worker : " << ops_per_worker << endl;
#ifdef USE_VARINT_ENCODING
cerr << " var-encode : yes" << endl;
#else
cerr << " var-encode : no" << endl;
#endif
#ifdef USE_JEMALLOC
cerr << " allocator : jemalloc" << endl;
#elif defined USE_TCMALLOC
cerr << " allocator : tcmalloc" << endl;
#elif defined USE_FLOW
cerr << " allocator : flow" << endl;
#else
cerr << " allocator : libc" << endl;
#endif
if (numa_memory > 0) {
cerr << " numa-memory : " << numa_memory << endl;
} else {
cerr << " numa-memory : disabled" << endl;
}
cerr << " logfiles : " << logfiles << endl;
cerr << " assignments : " << assignments << endl;
cerr << " disable-gc : " << disable_gc << endl;
cerr << " disable-snapshots : " << disable_snapshots << endl;
cerr << " stats-server-sockfile: " << stats_server_sockfile << endl;
cerr << "system properties:" << endl;
cerr << " btree_internal_node_size: " << concurrent_btree::InternalNodeSize() << endl;
cerr << " btree_leaf_node_size : " << concurrent_btree::LeafNodeSize() << endl;
#ifdef TUPLE_PREFETCH
cerr << " tuple_prefetch : yes" << endl;
#else
cerr << " tuple_prefetch : no" << endl;
#endif
#ifdef BTREE_NODE_PREFETCH
cerr << " btree_node_prefetch : yes" << endl;
#else
cerr << " btree_node_prefetch : no" << endl;
#endif
}
if (!stats_server_sockfile.empty()) {
stats_server *srvr = new stats_server(stats_server_sockfile);
thread(&stats_server::serve_forever, srvr).detach();
}
vector<string> bench_toks = split_ws(bench_opts);
int argc = 1 + bench_toks.size();
char *argv[argc];
argv[0] = (char *) bench_type.c_str();
for (size_t i = 1; i <= bench_toks.size(); i++)
argv[i] = (char *) bench_toks[i - 1].c_str();
test_fn(db, argc, argv);
delete db;
return 0;
}
| {
"pile_set_name": "Github"
} |
#ifndef __ASM_GENERIC_POLL_H
#define __ASM_GENERIC_POLL_H
/* These are specified by iBCS2 */
#define POLLIN 0x0001 /* data available to read */
#define POLLPRI 0x0002 /* urgent/priority data available */
#define POLLOUT 0x0004 /* writing will not block */
#define POLLERR 0x0008 /* error condition (revents only) */
#define POLLHUP 0x0010 /* hang up (revents only) */
#define POLLNVAL 0x0020 /* invalid fd (revents only) */
/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM 0x0040 /* normal data readable */
#define POLLRDBAND 0x0080 /* out-of-band data readable */
/* The following may already be defined by an architecture header,
 * hence the #ifndef guards around each one. */
#ifndef POLLWRNORM
#define POLLWRNORM 0x0100
#endif
#ifndef POLLWRBAND
#define POLLWRBAND 0x0200
#endif
#ifndef POLLMSG
#define POLLMSG 0x0400
#endif
#ifndef POLLREMOVE
#define POLLREMOVE 0x1000
#endif
#ifndef POLLRDHUP
#define POLLRDHUP 0x2000
#endif
#define POLLFREE 0x4000 /* currently only for epoll */
#define POLL_BUSY_LOOP 0x8000
/* Per-descriptor entry passed to poll(2): caller fills fd and the
 * requested events mask; the kernel fills revents. */
struct pollfd {
	int fd;
	short events;
	short revents;
};
#endif /* __ASM_GENERIC_POLL_H */
| {
"pile_set_name": "Github"
} |
<?php
namespace Drupal\Tests\node\Functional\Update;
use Drupal\Core\Entity\Entity\EntityFormDisplay;
use Drupal\FunctionalTests\Update\UpdatePathTestBase;
/**
* Tests that node settings are properly updated during database updates.
*
* @group node
* @group legacy
*/
class NodeUpdateTest extends UpdatePathTestBase {

  /**
   * {@inheritdoc}
   */
  protected function setDatabaseDumpFiles() {
    $this->databaseDumpFiles = [
      __DIR__ . '/../../../../../system/tests/fixtures/update/drupal-8-rc1.bare.standard.php.gz',
    ];
  }

  /**
   * Tests that the node entity type has a 'published' entity key.
   *
   * @see node_update_8301()
   */
  public function testPublishedEntityKey() {
    // Check that the 'published' entity key does not exist prior to the update.
    $entity_type = \Drupal::entityDefinitionUpdateManager()->getEntityType('node');
    $this->assertFalse($entity_type->getKey('published'));

    // Run updates.
    $this->runUpdates();

    // Check that the entity key exists and it has the correct value. Use
    // assertEquals() consistently across this class instead of the legacy
    // assertEqual() alias.
    $entity_type = \Drupal::entityDefinitionUpdateManager()->getEntityType('node');
    $this->assertEquals('status', $entity_type->getKey('published'));
  }

  /**
   * Tests that the node entity form has the status checkbox.
   *
   * @see node_post_update_configure_status_field_widget()
   */
  public function testStatusCheckbox() {
    // Run updates.
    $this->runUpdates();

    $query = \Drupal::entityQuery('entity_form_display')
      ->condition('targetEntityType', 'node');
    $ids = $query->execute();
    $form_displays = EntityFormDisplay::loadMultiple($ids);
    /** @var \Drupal\Core\Entity\Display\EntityFormDisplayInterface $form_display */
    foreach ($form_displays as $id => $form_display) {
      // Every node form display should use the boolean checkbox widget with a
      // visible label for the 'status' field.
      $component = $form_display->getComponent('status');
      $this->assertEquals('boolean_checkbox', $component['type']);
      $this->assertEquals(['display_label' => TRUE], $component['settings']);
    }
  }

  /**
   * Tests that the node entity type has an 'owner' entity key.
   *
   * @see node_update_8700()
   */
  public function testOwnerEntityKey() {
    // Check that the 'owner' entity key does not exist prior to the update.
    $entity_type = \Drupal::entityDefinitionUpdateManager()->getEntityType('node');
    $this->assertFalse($entity_type->getKey('owner'));

    // Run updates.
    $this->runUpdates();

    // Check that the entity key exists and it has the correct value.
    $entity_type = \Drupal::entityDefinitionUpdateManager()->getEntityType('node');
    $this->assertEquals('uid', $entity_type->getKey('owner'));
  }

}
| {
"pile_set_name": "Github"
} |
{
"action": {
"hacking": {
"variety": [
"Unknown"
],
"vector": [
"Web application"
]
},
"malware": {
"variety": [
"Capture app data"
],
"vector": [
"Direct install"
]
}
},
"actor": {
"external": {
"country": [
"Unknown"
],
"motive": [
"Financial"
],
"region": [
"000000"
],
"variety": [
"Unknown"
]
}
},
"asset": {
"assets": [
{
"variety": "S - Web application"
}
],
"cloud": [
"Unknown"
],
"notes": "Following enumerations present before veris 1.3.3 removed: asset.governance.Unknown."
},
"attribute": {
"confidentiality": {
"data": [
{
"variety": "Payment"
}
],
"data_disclosure": "Potentially",
"data_victim": [
"Customer"
],
"state": [
"Unknown"
]
},
"integrity": {
"variety": [
"Software installation"
]
}
},
"discovery_method": {
"external": {
"variety": [
"Unknown"
]
}
},
"discovery_notes": "Ext - Unrelated third party. Discovered by security researcher who made the notifications.",
"impact": {
"overall_rating": "Unknown"
},
"incident_id": "BBB21C34-3C55-46E6-AAE8-FAC1166D2FBD",
"plus": {
"analysis_status": "Finalized",
"analyst": "swidup",
"attribute": {
"confidentiality": {
"credit_monitoring": "Unknown",
"data_misuse": "Y"
}
},
"created": "2016-10-25T01:05:00Z",
"dbir_year": 2017,
"github": "8631",
"master_id": "BBB21C34-3C55-46E6-AAE8-FAC1166D2FBD",
"modified": "2016-10-25T01:10:00Z",
"sub_source": "gwillem",
"timeline": {
"notification": {
"year": 2016
}
}
},
"reference": "http://www.pcworld.com/article/3131040/security/thousands-of-online-shops-compromised-for-credit-card-theft.html; https://gwillem.github.io/2016/10/11/5900-online-stores-found-skimming/; https://gitlab.com/gwillem/public-snippets/snippets/28813",
"schema_version": "1.3.4",
"security_incident": "Confirmed",
"source_id": "vcdb",
"summary": "Online skimmers are installed on vulnerable storefronts to compromise credit card data. This was found by researcher G. Willem and published in a blog post as well as numerous news articles. This one GitHub issue is expanded to one incident per storefront listed in his dataset.",
"timeline": {
"incident": {
"year": 2016
}
},
"victim": {
"country": [
"Unknown"
],
"employee_count": "Unknown",
"industry": "44",
"region": [
"000000"
],
"victim_id": "dischiespartiti.com"
}
} | {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002-2012, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
#ifndef INCLUDED_IMATHQUAT_H
#define INCLUDED_IMATHQUAT_H
//----------------------------------------------------------------------
//
// template class Quat<T>
//
// "Quaternions came from Hamilton ... and have been an unmixed
// evil to those who have touched them in any way. Vector is a
// useless survival ... and has never been of the slightest use
// to any creature."
//
// - Lord Kelvin
//
// This class implements the quaternion numerical type -- you
// will probably want to use this class to represent orientations
// in R3 and to convert between various euler angle reps. You
// should probably use Imath::Euler<> for that.
//
//----------------------------------------------------------------------
#include "ImathExc.h"
#include "ImathMatrix.h"
#include "ImathNamespace.h"

#include <algorithm>
#include <cmath>
#include <iostream>
IMATH_INTERNAL_NAMESPACE_HEADER_ENTER
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
// Disable MS VC++ warnings about conversion from double to float
#pragma warning(disable:4244)
#endif
template <class T>
class Quat
{
public:
T r; // real part
Vec3<T> v; // imaginary vector
//-----------------------------------------------------
// Constructors - default constructor is identity quat
//-----------------------------------------------------
Quat ();
template <class S>
Quat (const Quat<S> &q);
Quat (T s, T i, T j, T k);
Quat (T s, Vec3<T> d);
static Quat<T> identity ();
//-------------------------------------------------
// Basic Algebra - Operators and Methods
// The operator return values are *NOT* normalized
//
// operator^ and euclideanInnnerProduct() both
// implement the 4D dot product
//
// operator/ uses the inverse() quaternion
//
// operator~ is conjugate -- if (S+V) is quat then
// the conjugate (S+V)* == (S-V)
//
// some operators (*,/,*=,/=) treat the quat as
// a 4D vector when one of the operands is scalar
//-------------------------------------------------
const Quat<T> & operator = (const Quat<T> &q);
const Quat<T> & operator *= (const Quat<T> &q);
const Quat<T> & operator *= (T t);
const Quat<T> & operator /= (const Quat<T> &q);
const Quat<T> & operator /= (T t);
const Quat<T> & operator += (const Quat<T> &q);
const Quat<T> & operator -= (const Quat<T> &q);
T & operator [] (int index); // as 4D vector
T operator [] (int index) const;
template <class S> bool operator == (const Quat<S> &q) const;
template <class S> bool operator != (const Quat<S> &q) const;
Quat<T> & invert (); // this -> 1 / this
Quat<T> inverse () const;
Quat<T> & normalize (); // returns this
Quat<T> normalized () const;
T length () const; // in R4
Vec3<T> rotateVector(const Vec3<T> &original) const;
T euclideanInnerProduct(const Quat<T> &q) const;
//-----------------------
// Rotation conversion
//-----------------------
Quat<T> & setAxisAngle (const Vec3<T> &axis, T radians);
Quat<T> & setRotation (const Vec3<T> &fromDirection,
const Vec3<T> &toDirection);
T angle () const;
Vec3<T> axis () const;
Matrix33<T> toMatrix33 () const;
Matrix44<T> toMatrix44 () const;
Quat<T> log () const;
Quat<T> exp () const;
private:
void setRotationInternal (const Vec3<T> &f0,
const Vec3<T> &t0,
Quat<T> &q);
};
//------------------------------------------------------------------
// Interpolation and free-function operators (definitions below)
//------------------------------------------------------------------

// Spherical linear interpolation between unit quaternions q1 and q2.
template<class T>
Quat<T> slerp (const Quat<T> &q1, const Quat<T> &q2, T t);

// Like slerp(), but always interpolates along the shorter of the two
// possible arcs (q2 and -q2 encode the same rotation).
template<class T>
Quat<T> slerpShortestArc
        (const Quat<T> &q1, const Quat<T> &q2, T t);

// Spherical quadrangle (cubic) interpolation.
template<class T>
Quat<T> squad (const Quat<T> &q1, const Quat<T> &q2,
               const Quat<T> &qa, const Quat<T> &qb, T t);

// Computes the inner quadrangle points qa and qb used by squad() to
// guarantee tangent continuity across adjacent segments.
template<class T>
void intermediate (const Quat<T> &q0, const Quat<T> &q1,
                   const Quat<T> &q2, const Quat<T> &q3,
                   Quat<T> &qa, Quat<T> &qb);

// Matrix/quaternion products (the quaternion is converted to a
// 3x3 rotation matrix first).
template<class T>
Matrix33<T> operator * (const Matrix33<T> &M, const Quat<T> &q);

template<class T>
Matrix33<T> operator * (const Quat<T> &q, const Matrix33<T> &M);

// Stream output in the form "(r x y z)".
template<class T>
std::ostream & operator << (std::ostream &o, const Quat<T> &q);

// Grassman (quaternion) product and 4D-vector-style arithmetic.
template<class T>
Quat<T> operator * (const Quat<T> &q1, const Quat<T> &q2);

template<class T>
Quat<T> operator / (const Quat<T> &q1, const Quat<T> &q2);

template<class T>
Quat<T> operator / (const Quat<T> &q, T t);

template<class T>
Quat<T> operator * (const Quat<T> &q, T t);

template<class T>
Quat<T> operator * (T t, const Quat<T> &q);

template<class T>
Quat<T> operator + (const Quat<T> &q1, const Quat<T> &q2);

template<class T>
Quat<T> operator - (const Quat<T> &q1, const Quat<T> &q2);

// Conjugate: (S+V)* == (S-V).
template<class T>
Quat<T> operator ~ (const Quat<T> &q);

// 4D negation (note: -q represents the same rotation as q).
template<class T>
Quat<T> operator - (const Quat<T> &q);

// Rotate vector v by the (assumed unit) quaternion q.
template<class T>
Vec3<T> operator * (const Vec3<T> &v, const Quat<T> &q);

//--------------------
// Convenient typedefs
//--------------------

typedef Quat<float>  Quatf;
typedef Quat<double> Quatd;

//---------------
// Implementation
//---------------
// Default constructor: the identity quaternion (1, (0,0,0)).
template<class T>
inline
Quat<T>::Quat (): r (1), v (0, 0, 0)
{
    // empty
}

// Conversion from a quaternion with a different value type.
template<class T>
template <class S>
inline
Quat<T>::Quat (const Quat<S> &q): r (q.r), v (q.v)
{
    // empty
}

// Construct from the four components: scalar s, imaginary i, j, k.
template<class T>
inline
Quat<T>::Quat (T s, T i, T j, T k): r (s), v (i, j, k)
{
    // empty
}

// Construct from a scalar part and a vector part.
template<class T>
inline
Quat<T>::Quat (T s, Vec3<T> d): r (s), v (d)
{
    // empty
}

// The multiplicative identity, equal to a default-constructed Quat.
template<class T>
inline Quat<T>
Quat<T>::identity ()
{
    return Quat<T>();
}

template<class T>
inline const Quat<T> &
Quat<T>::operator = (const Quat<T> &q)
{
    r = q.r;
    v = q.v;
    return *this;
}
// Grassman (quaternion) product: this = this * q.
// The result is *not* normalized.
template<class T>
inline const Quat<T> &
Quat<T>::operator *= (const Quat<T> &q)
{
    T rtmp = r * q.r - (v ^ q.v);       // new real part (uses the old v)
    v = r * q.v + v * q.r + v % q.v;    // '%' is the 3D cross product
    r = rtmp;

    return *this;
}

// Scale all four components (4D-vector semantics).
template<class T>
inline const Quat<T> &
Quat<T>::operator *= (T t)
{
    r *= t;
    v *= t;

    return *this;
}

// Quaternion division: this = this * q^-1.
template<class T>
inline const Quat<T> &
Quat<T>::operator /= (const Quat<T> &q)
{
    *this = *this * q.inverse();
    return *this;
}

// Divide all four components (4D-vector semantics).
template<class T>
inline const Quat<T> &
Quat<T>::operator /= (T t)
{
    r /= t;
    v /= t;

    return *this;
}

// Component-wise addition.
template<class T>
inline const Quat<T> &
Quat<T>::operator += (const Quat<T> &q)
{
    r += q.r;
    v += q.v;

    return *this;
}

// Component-wise subtraction.
template<class T>
inline const Quat<T> &
Quat<T>::operator -= (const Quat<T> &q)
{
    r -= q.r;
    v -= q.v;

    return *this;
}
// 4D-vector component access: index 0 is the real part r,
// indices 1..3 map to the vector components v[0..2].
template<class T>
inline T &
Quat<T>::operator [] (int index)
{
    if (index == 0)
        return r;

    return v[index - 1];
}

template<class T>
inline T
Quat<T>::operator [] (int index) const
{
    if (index == 0)
        return r;

    return v[index - 1];
}
// Exact component-wise equality (the operands' value types may differ).
template <class T>
template <class S>
inline bool
Quat<T>::operator == (const Quat<S> &q) const
{
    return r == q.r && v == q.v;
}

template <class T>
template <class S>
inline bool
Quat<T>::operator != (const Quat<S> &q) const
{
    return r != q.r || v != q.v;
}

// 4D dot product of two quaternions.
template<class T>
inline T
operator ^ (const Quat<T>& q1 ,const Quat<T>& q2)
{
    return q1.r * q2.r + (q1.v ^ q2.v);
}
// 4D Euclidean norm: sqrt (r^2 + |v|^2).
template <class T>
inline T
Quat<T>::length () const
{
    const T lenSquared = r * r + (v ^ v);
    return Math<T>::sqrt (lenSquared);
}

// Scale this quaternion to unit length in place; a zero-length
// quaternion is reset to the identity.  Returns *this.
template <class T>
inline Quat<T> &
Quat<T>::normalize ()
{
    const T len = length();

    if (len)
    {
        r /= len;
        v /= len;
    }
    else
    {
        r = 1;
        v = Vec3<T> (0);
    }

    return *this;
}

// Unit-length copy of this quaternion; identity if this has zero length.
template <class T>
inline Quat<T>
Quat<T>::normalized () const
{
    const T len = length();

    if (len)
        return Quat (r / len, v / len);

    return Quat();
}
// Multiplicative inverse:
//
//     1/Q = Q* / (Q . Q)
//
// where Q* is the conjugate (operator~) and (Q . Q) is the 4D dot
// product of Q with itself (operator^).
template<class T>
inline Quat<T>
Quat<T>::inverse () const
{
    const T dot = *this ^ *this;
    return Quat (r / dot, -v / dot);
}

// In-place version of inverse().  Returns *this.
template<class T>
inline Quat<T> &
Quat<T>::invert ()
{
    const T dot = (*this) ^ (*this);

    r /= dot;
    v = -v / dot;

    return *this;
}
template<class T>
inline Vec3<T>
Quat<T>::rotateVector(const Vec3<T>& original) const
{
    //
    // Given a vector p and a quaternion q (aka this),
    // calculate p' = qpq*
    //
    // Assumes unit quaternions (because non-unit
    // quaternions cannot be used to rotate vectors
    // anyway).
    //

    Quat<T> vec (0, original);   // temporarily promote grade of original
    Quat<T> inv (*this);
    inv.v *= -1;                 // unit multiplicative inverse (conjugate)
    Quat<T> result = *this * vec * inv;
    return result.v;
}

// 4D (Euclidean) dot product, written out component by component.
template<class T>
inline T
Quat<T>::euclideanInnerProduct (const Quat<T> &q) const
{
    return r * q.r + v.x * q.v.x + v.y * q.v.y + v.z * q.v.z;
}
template<class T>
T
angle4D (const Quat<T> &q1, const Quat<T> &q2)
{
//
// Compute the angle between two quaternions,
// interpreting the quaternions as 4D vectors.
//
Quat<T> d = q1 - q2;
T lengthD = Math<T>::sqrt (d ^ d);
Quat<T> s = q1 + q2;
T lengthS = Math<T>::sqrt (s ^ s);
return 2 * Math<T>::atan2 (lengthD, lengthS);
}
template<class T>
Quat<T>
slerp (const Quat<T> &q1, const Quat<T> &q2, T t)
{
    //
    // Spherical linear interpolation.
    // Assumes q1 and q2 are normalized and that q1 != -q2.
    // t == 0 yields q1, t == 1 yields q2.
    //
    // This method does *not* interpolate along the shortest
    // arc between q1 and q2.  If you desire interpolation
    // along the shortest arc, and q1^q2 is negative, then
    // consider calling slerpShortestArc(), below, or flipping
    // the second quaternion explicitly.
    //
    // The implementation of squad() depends on a slerp()
    // that interpolates as is, without the automatic
    // flipping.
    //
    // Don Hatch explains the method we use here on his
    // web page, The Right Way to Calculate Stuff, at
    // http://www.plunk.org/~hatch/rightway.php
    //

    T a = angle4D (q1, q2);
    T s = 1 - t;

    // sinx_over_x(x) computes sin(x)/x, which stays well behaved
    // as x approaches 0 (helper from ImathMath).
    Quat<T> q = sinx_over_x (s * a) / sinx_over_x (a) * s * q1 +
                sinx_over_x (t * a) / sinx_over_x (a) * t * q2;

    return q.normalized();
}
// Spherical linear interpolation along the shortest arc from q1 to
// either q2 or -q2, whichever is closer (both encode the same
// rotation).  Assumes q1 and q2 are unit quaternions.
template<class T>
Quat<T>
slerpShortestArc (const Quat<T> &q1, const Quat<T> &q2, T t)
{
    const bool sameHemisphere = (q1 ^ q2) >= 0;
    return slerp (q1, sameHemisphere ? q2 : -q2, t);
}
template<class T>
Quat<T>
spline (const Quat<T> &q0, const Quat<T> &q1,
	const Quat<T> &q2, const Quat<T> &q3,
	T t)
{
    //
    // Spherical Cubic Spline Interpolation -
    // from Advanced Animation and Rendering
    // Techniques by Watt and Watt, Page 366:
    // A spherical curve is constructed using three
    // spherical linear interpolations of a quadrangle
    // of unit quaternions: q1, qa, qb, q2.
    // Given a set of quaternion keys: q0, q1, q2, q3,
    // this routine does the interpolation between
    // q1 and q2 by constructing two intermediate
    // quaternions: qa and qb. The qa and qb are
    // computed by the intermediate function to
    // guarantee the continuity of tangents across
    // adjacent cubic segments. The qa represents in-tangent
    // for q1 and the qb represents the out-tangent for q2.
    //
    // The q1 q2 is the cubic segment being interpolated.
    // The q0 is from the previous adjacent segment and q3 is
    // from the next adjacent segment. The q0 and q3 are used
    // in computing qa and qb.
    //
    // NOTE(review): the forward declarations near the top of this
    // file list squad() and intermediate() with different parameter
    // orders/arity than the 3-argument intermediate() and the
    // (q1, qa, qb, q2, t) squad() actually used here -- confirm
    // which signatures are canonical.
    //

    Quat<T> qa = intermediate (q0, q1, q2);
    Quat<T> qb = intermediate (q1, q2, q3);
    Quat<T> result = squad (q1, qa, qb, q2, t);

    return result;
}
// Spherical Quadrangle Interpolation (Watt & Watt, Advanced
// Animation and Rendering Techniques, p. 366): a spherical cubic
// built from three spherical linear interpolations over the
// quadrangle of unit quaternions q1, qa, qb, q2.
template<class T>
Quat<T>
squad (const Quat<T> &q1, const Quat<T> &qa,
       const Quat<T> &qb, const Quat<T> &q2,
       T t)
{
    Quat<T> edge  = slerp (q1, q2, t);   // along the key-to-key arc
    Quat<T> inner = slerp (qa, qb, t);   // along the tangent arc

    return slerp (edge, inner, 2 * t * (1 - t));
}
// Computes the inner quadrangle point for the segment around q1,
// from three consecutive keys q0, q1, q2 (Watt & Watt, p. 366),
// guaranteeing tangent continuity across adjacent cubic segments.
// The returned quaternion is normalized.
template<class T>
Quat<T>
intermediate (const Quat<T> &q0, const Quat<T> &q1, const Quat<T> &q2)
{
    //
    // From advanced Animation and Rendering
    // Techniques by Watt and Watt, Page 366:
    // computing the inner quadrangle
    // points (qa and qb) to guarantee tangent
    // continuity.
    //

    Quat<T> q1inv = q1.inverse();
    Quat<T> c1 = q1inv * q2;
    Quat<T> c2 = q1inv * q0;
    Quat<T> c3 = (T) (-0.25) * (c2.log() + c1.log());
    Quat<T> qa = q1 * c3.exp();
    qa.normalize();

    return qa;
}
// Logarithm of a unit quaternion (Watt & Watt, p. 366): returns a
// pure quaternion (zero real part) whose vector part is the rotation
// axis scaled by half the rotation angle.
template <class T>
inline Quat<T>
Quat<T>::log () const
{
    T theta = Math<T>::acos (std::min (r, (T) 1.0));

    if (theta == 0)
        return Quat<T> (0, v);

    T sintheta = Math<T>::sin (theta);

    // Fall back to k = 1 when theta / sintheta would overflow.
    // Use std::abs so the floating-point overload is selected: a
    // bare abs() can resolve to the C int abs(int), which truncates
    // the operands to 0 and defeats this guard entirely.
    T k;
    if (std::abs (sintheta) < 1 &&
        std::abs (theta) >= limits<T>::max() * std::abs (sintheta))
        k = 1;
    else
        k = theta / sintheta;

    return Quat<T> ((T) 0, v.x * k, v.y * k, v.z * k);
}
// Exponential of a pure quaternion (zero scalar part), Watt & Watt,
// p. 366: the inverse of log() for unit quaternions.
template <class T>
inline Quat<T>
Quat<T>::exp () const
{
    T theta = v.length();
    T sintheta = Math<T>::sin (theta);

    // Fall back to k = 1 when sintheta / theta would overflow
    // (this also covers theta == 0, where sin(x)/x -> 1).
    // Use std::abs so the floating-point overload is selected: a
    // bare abs() can resolve to the C int abs(int), which truncates
    // the operands to 0 and defeats this guard entirely.
    T k;
    if (std::abs (theta) < 1 &&
        std::abs (sintheta) >= limits<T>::max() * std::abs (theta))
        k = 1;
    else
        k = sintheta / theta;

    T costheta = Math<T>::cos (theta);

    return Quat<T> (costheta, v.x * k, v.y * k, v.z * k);
}
// Rotation angle in radians; the atan2 form remains well behaved
// even when the quaternion is not exactly unit length.
template <class T>
inline T
Quat<T>::angle () const
{
    return 2 * Math<T>::atan2 (v.length(), r);
}

// Normalized rotation axis (the direction of the vector part).
template <class T>
inline Vec3<T>
Quat<T>::axis () const
{
    return v.normalized();
}

// Set this quaternion to a rotation of `radians` around `axis`.
// The axis does not need to be normalized beforehand.
template <class T>
inline Quat<T> &
Quat<T>::setAxisAngle (const Vec3<T> &axis, T radians)
{
    r = Math<T>::cos (radians / 2);
    v = axis.normalized() * Math<T>::sin (radians / 2);

    return *this;
}
template <class T>
Quat<T> &
Quat<T>::setRotation (const Vec3<T> &from, const Vec3<T> &to)
{
    //
    // Create a quaternion that rotates vector from into vector to,
    // such that the rotation is around an axis that is the cross
    // product of from and to.
    //
    // This function calls function setRotationInternal(), which is
    // numerically accurate only for rotation angles that are not much
    // greater than pi/2.  In order to achieve good accuracy for angles
    // greater than pi/2, we split large angles in half, and rotate in
    // two steps.
    //

    //
    // Normalize from and to, yielding f0 and t0.
    //

    Vec3<T> f0 = from.normalized();
    Vec3<T> t0 = to.normalized();

    if ((f0 ^ t0) >= 0)
    {
	//
	// The rotation angle is less than or equal to pi/2.
	//

	setRotationInternal (f0, t0, *this);
    }
    else
    {
	//
	// The angle is greater than pi/2.  After computing h0,
	// which is halfway between f0 and t0, we rotate first
	// from f0 to h0, then from h0 to t0.
	//

	Vec3<T> h0 = (f0 + t0).normalized();

	if ((h0 ^ h0) != 0)
	{
	    setRotationInternal (f0, h0, *this);

	    Quat<T> q;
	    setRotationInternal (h0, t0, q);

	    // Accumulate the two half rotations.
	    *this *= q;
	}
	else
	{
	    //
	    // f0 and t0 point in exactly opposite directions.
	    // Pick an arbitrary axis that is orthogonal to f0,
	    // and rotate by pi.
	    //

	    r = T (0);

	    // Cross f0 with the coordinate axis along which f0's
	    // component is smallest; this yields the most stable
	    // orthogonal axis.
	    Vec3<T> f02 = f0 * f0;

	    if (f02.x <= f02.y && f02.x <= f02.z)
		v = (f0 % Vec3<T> (1, 0, 0)).normalized();
	    else if (f02.y <= f02.z)
		v = (f0 % Vec3<T> (0, 1, 0)).normalized();
	    else
		v = (f0 % Vec3<T> (0, 0, 1)).normalized();
	}
    }

    return *this;
}
template <class T>
void
Quat<T>::setRotationInternal (const Vec3<T> &f0, const Vec3<T> &t0, Quat<T> &q)
{
    //
    // The following is equivalent to setAxisAngle(n,2*phi),
    // where the rotation axis, n, is orthogonal to the f0 and
    // t0 vectors, and 2*phi is the angle between f0 and t0.
    //
    // This function is called by setRotation(), above; it assumes
    // that f0 and t0 are normalized and that the angle between
    // them is not much greater than pi/2.  This function becomes
    // numerically inaccurate if f0 and t0 point into nearly
    // opposite directions.
    //

    //
    // Find a normalized vector, h0, that is halfway between f0 and t0.
    // The angle between f0 and h0 is phi.
    //

    Vec3<T> h0 = (f0 + t0).normalized();

    //
    // Store the rotation axis and rotation angle.
    //

    q.r = f0 ^ h0;	//  f0 ^ h0 == cos (phi)
    q.v = f0 % h0;	// (f0 % h0).length() == sin (phi)
}
// Equivalent 3x3 rotation matrix, using the standard unit-quaternion
// to rotation-matrix conversion (the result is a pure rotation only
// if this quaternion is normalized).
template<class T>
Matrix33<T>
Quat<T>::toMatrix33() const
{
    return Matrix33<T> (1 - 2 * (v.y * v.y + v.z * v.z),
			    2 * (v.x * v.y + v.z * r),
			    2 * (v.z * v.x - v.y * r),

			    2 * (v.x * v.y - v.z * r),
			1 - 2 * (v.z * v.z + v.x * v.x),
			    2 * (v.y * v.z + v.x * r),

			    2 * (v.z * v.x + v.y * r),
			    2 * (v.y * v.z - v.x * r),
			1 - 2 * (v.y * v.y + v.x * v.x));
}

// Same rotation embedded in the upper-left 3x3 of a 4x4 matrix,
// with no translation component.
template<class T>
Matrix44<T>
Quat<T>::toMatrix44() const
{
    return Matrix44<T> (1 - 2 * (v.y * v.y + v.z * v.z),
			    2 * (v.x * v.y + v.z * r),
			    2 * (v.z * v.x - v.y * r),
			    0,
			    2 * (v.x * v.y - v.z * r),
			1 - 2 * (v.z * v.z + v.x * v.x),
			    2 * (v.y * v.z + v.x * r),
			    0,
			    2 * (v.z * v.x + v.y * r),
			    2 * (v.y * v.z - v.x * r),
			1 - 2 * (v.y * v.y + v.x * v.x),
			    0,
			    0,
			    0,
			    0,
			    1);
}
// Matrix * quaternion: convert the quaternion to a rotation matrix first.
template<class T>
inline Matrix33<T>
operator * (const Matrix33<T> &M, const Quat<T> &q)
{
    return M * q.toMatrix33();
}

template<class T>
inline Matrix33<T>
operator * (const Quat<T> &q, const Matrix33<T> &M)
{
    return q.toMatrix33() * M;
}

// Stream output in the form "(r x y z)".
template<class T>
std::ostream &
operator << (std::ostream &o, const Quat<T> &q)
{
    return o << "(" << q.r
	     << " " << q.v.x
	     << " " << q.v.y
	     << " " << q.v.z
	     << ")";
}
// Grassman (quaternion) product; the result is not normalized.
template<class T>
inline Quat<T>
operator * (const Quat<T> &q1, const Quat<T> &q2)
{
    return Quat<T> (q1.r * q2.r - (q1.v ^ q2.v),
		    q1.r * q2.v + q1.v * q2.r + q1.v % q2.v);
}

// Quaternion division: q1 * q2^-1.
template<class T>
inline Quat<T>
operator / (const Quat<T> &q1, const Quat<T> &q2)
{
    return q1 * q2.inverse();
}

// Scalar division/multiplication treat the quaternion as a 4D vector.
template<class T>
inline Quat<T>
operator / (const Quat<T> &q, T t)
{
    return Quat<T> (q.r / t, q.v / t);
}

template<class T>
inline Quat<T>
operator * (const Quat<T> &q, T t)
{
    return Quat<T> (q.r * t, q.v * t);
}

template<class T>
inline Quat<T>
operator * (T t, const Quat<T> &q)
{
    return Quat<T> (q.r * t, q.v * t);
}

// Component-wise addition and subtraction (4D-vector semantics).
template<class T>
inline Quat<T>
operator + (const Quat<T> &q1, const Quat<T> &q2)
{
    return Quat<T> (q1.r + q2.r, q1.v + q2.v);
}

template<class T>
inline Quat<T>
operator - (const Quat<T> &q1, const Quat<T> &q2)
{
    return Quat<T> (q1.r - q2.r, q1.v - q2.v);
}

// Conjugate: negate the vector part only.
template<class T>
inline Quat<T>
operator ~ (const Quat<T> &q)
{
    return Quat<T> (q.r, -q.v);
}

// 4D negation (note: -q represents the same rotation as q).
template<class T>
inline Quat<T>
operator - (const Quat<T> &q)
{
    return Quat<T> (-q.r, -q.v);
}

// Rotate vector v by the (assumed unit) quaternion q, using the
// expanded form of q * (0, v) * q^-1 ('%' is the cross product).
template<class T>
inline Vec3<T>
operator * (const Vec3<T> &v, const Quat<T> &q)
{
    Vec3<T> a = q.v % v;
    Vec3<T> b = q.v % a;
    return v + T (2) * (q.r * a + b);
}
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
#pragma warning(default:4244)
#endif
IMATH_INTERNAL_NAMESPACE_HEADER_EXIT
#endif // INCLUDED_IMATHQUAT_H
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!1 &128450
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
serializedVersion: 4
m_Component:
- 4: {fileID: 417074}
- 114: {fileID: 11479102}
m_Layer: 0
m_Name: SteamVR_ExternalCamera
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!1 &129796
GameObject:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
serializedVersion: 4
m_Component:
- 4: {fileID: 444732}
- 114: {fileID: 11487396}
- 114: {fileID: 11472986}
m_Layer: 0
m_Name: Controller (third)
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!4 &417074
Transform:
m_ObjectHideFlags: 1
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
m_GameObject: {fileID: 128450}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children:
- {fileID: 444732}
m_Father: {fileID: 0}
m_RootOrder: 0
--- !u!4 &444732
Transform:
m_ObjectHideFlags: 1
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
m_GameObject: {fileID: 129796}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 417074}
m_RootOrder: 0
--- !u!114 &11472986
MonoBehaviour:
m_ObjectHideFlags: 1
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
m_GameObject: {fileID: 129796}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: d37c2cf88f7c59f4c8cf5d3812568143, type: 3}
m_Name:
m_EditorClassIdentifier:
index: -1
origin: {fileID: 0}
isValid: 0
--- !u!114 &11479102
MonoBehaviour:
m_ObjectHideFlags: 1
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
m_GameObject: {fileID: 128450}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: e3b47c2980b93bc48844a54641dab5b8, type: 3}
m_Name:
m_EditorClassIdentifier:
left: {fileID: 0}
right: {fileID: 0}
objects:
- {fileID: 129796}
--- !u!114 &11487396
MonoBehaviour:
m_ObjectHideFlags: 1
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 100100000}
m_GameObject: {fileID: 129796}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: c9da270df5147d24597cc106058c1fa7, type: 3}
m_Name:
m_EditorClassIdentifier:
offset: {fileID: 0}
frontCam: {fileID: 0}
backCam: {fileID: 0}
--- !u!1001 &100100000
Prefab:
m_ObjectHideFlags: 1
serializedVersion: 2
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications: []
m_RemovedComponents: []
m_ParentPrefab: {fileID: 0}
m_RootGameObject: {fileID: 128450}
m_IsPrefabParent: 1
| {
"pile_set_name": "Github"
} |
/*
* Tencent is pleased to support the open source community by making TKEStack
* available.
*
* Copyright (C) 2012-2019 Tencent. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at
*
* https://opensource.org/licenses/Apache-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, WITHOUT
* WARRANTIES OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package validation
import (
"context"
"errors"
"fmt"
"strings"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"tkestack.io/tke/api/platform"
"tkestack.io/tke/pkg/platform/provider/util/mark"
"tkestack.io/tke/pkg/platform/types"
"tkestack.io/tke/pkg/spec"
"tkestack.io/tke/pkg/util/apiclient"
utilvalidation "tkestack.io/tke/pkg/util/validation"
)
// ValidateCluster validates a given Cluster: its access addresses always,
// and -- when a credential is referenced -- the credential itself plus,
// for clusters still being imported, the import mark and version checks.
func ValidateCluster(ctx context.Context, cluster *types.Cluster) field.ErrorList {
	allErrs := ValidatClusterAddresses(cluster.Status.Addresses, field.NewPath("status", "addresses"))
	if cluster.Spec.ClusterCredentialRef != nil {
		allErrs = append(allErrs, ValidatClusterCredentialRef(ctx, cluster, field.NewPath("spec", "clusterCredentialRef"))...)
		client, err := cluster.Clientset()
		if err != nil {
			// NOTE(review): client may be nil here but is still passed to
			// the initializing-phase checks below -- confirm that
			// ValidateClusterMark/ValidateClusterVersion tolerate that.
			allErrs = append(allErrs, field.Invalid(field.NewPath("name"), cluster.Name, fmt.Sprintf("get clientset error: %v", err)))
		}
		if cluster.Status.Phase == platform.ClusterInitializing {
			// Import-time checks: the cluster must not already carry the
			// "imported" mark, and its version must satisfy our constraint.
			allErrs = append(allErrs, ValidateClusterMark(ctx, cluster.Name, field.NewPath("name"), client)...)
			allErrs = append(allErrs, ValidateClusterVersion(field.NewPath("name"), client)...)
		}
	}
	return allErrs
}
// ValidatClusterAddresses validates a given list of cluster access
// addresses. It requires at least one address, and for each address checks
// the type enum, host, port, path format, and finally that the composed
// https URL is reachable (5 second budget per address).
func ValidatClusterAddresses(addresses []platform.ClusterAddress, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if len(addresses) == 0 {
		allErrs = append(allErrs, field.Required(field.NewPath("status", "addresses"), "must specify at least one obj access address"))
		return allErrs
	}
	for i, address := range addresses {
		fldPath := fldPath.Index(i)
		// Bug fix: the enum-validation result was previously *assigned* to
		// allErrs, silently discarding every error accumulated for earlier
		// addresses in the list; append instead.
		allErrs = append(allErrs, utilvalidation.ValidateEnum(address.Type, fldPath.Child("type"), []interface{}{
			platform.AddressAdvertise,
			platform.AddressReal,
		})...)
		if address.Host == "" {
			allErrs = append(allErrs, field.Required(fldPath.Child("host"), "must specify host"))
		}
		for _, msg := range validation.IsValidPortNum(int(address.Port)) {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), address.Port, msg))
		}
		if address.Path != "" && !strings.HasPrefix(address.Path, "/") {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), address.Path, "must start by `/`"))
		}
		url := fmt.Sprintf("https://%s:%d", address.Host, address.Port)
		if address.Path != "" {
			url = fmt.Sprintf("%s%s", url, address.Path)
		}
		// Reachability probe for the composed endpoint.
		if err := utilvalidation.IsValiadURL(url, 5*time.Second); err != nil {
			allErrs = append(allErrs, field.Invalid(fldPath, address, err.Error()))
		}
	}
	return allErrs
}
// ValidatClusterCredentialRef validates cluster.Spec.ClusterCredentialRef:
// the referenced credential must carry at least one usable authentication
// method (token or client cert/key pair), and each supplied method must be
// able to authenticate against the cluster's apiserver.
func ValidatClusterCredentialRef(ctx context.Context, cluster *types.Cluster, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	credential := cluster.ClusterCredential
	// At least one authentication method must be present.
	if credential.Token == nil && credential.ClientKey == nil && credential.ClientCert == nil {
		allErrs = append(allErrs, field.Required(field.NewPath(""),
			"must specify at least one of token or client certificate authentication"))
		return allErrs
	}
	// Client certificate and key are only usable as a pair.
	if credential.ClientCert == nil && credential.ClientKey != nil ||
		credential.ClientCert != nil && credential.ClientKey == nil {
		allErrs = append(allErrs, field.Required(fldPath.Child("clientCert"),
			"`clientCert` and `clientKey` must provide togther"))
	}
	host, err := cluster.Host()
	if err != nil {
		allErrs = append(allErrs, field.Invalid(field.NewPath("clusterName"), credential.ClusterName, err.Error()))
	} else {
		restConfig := &rest.Config{
			Host:    host,
			Timeout: 5 * time.Second,
		}
		if credential.CACert != nil {
			// Probe with the CA bundle; an API status error means the
			// server was reached, so only non-status errors indicate a
			// bad CA certificate.
			restConfig.CAData = credential.CACert
			if err = utilvalidation.ValidateRESTConfig(ctx, restConfig); err != nil {
				if status := apierrors.APIStatus(nil); !errors.As(err, &status) {
					allErrs = append(allErrs, field.Invalid(field.NewPath("caCert"), "", err.Error()))
				}
			}
		} else {
			restConfig.Insecure = true
		}
		if credential.Token != nil {
			config := rest.CopyConfig(restConfig)
			config.BearerToken = *credential.Token
			if err = utilvalidation.ValidateRESTConfig(ctx, config); err != nil {
				if apierrors.IsUnauthorized(err) {
					allErrs = append(allErrs, field.Invalid(field.NewPath("token"), *credential.Token, err.Error()))
				} else {
					allErrs = append(allErrs, field.InternalError(field.NewPath("token"), err))
				}
			}
		}
		// Bug fix: the second operand used to be `credential != nil`, which
		// is vacuous here (credential was already dereferenced above) and
		// allowed a nil ClientKey to reach the TLS config. Require the
		// cert/key *pair* before attempting client-certificate auth,
		// matching the pairing rule enforced earlier.
		if credential.ClientCert != nil && credential.ClientKey != nil {
			config := rest.CopyConfig(restConfig)
			config.TLSClientConfig.CertData = credential.ClientCert
			config.TLSClientConfig.KeyData = credential.ClientKey
			if err = utilvalidation.ValidateRESTConfig(ctx, config); err != nil {
				if apierrors.IsUnauthorized(err) {
					allErrs = append(allErrs, field.Invalid(field.NewPath("clientCert"), "", err.Error()))
				} else {
					allErrs = append(allErrs, field.InternalError(field.NewPath("clientCert"), err))
				}
			}
		}
	}
	return allErrs
}
// ValidateClusterMark verifies that the given cluster has not been imported
// already: the presence of the mark configmap means a previous import
// happened.
func ValidateClusterMark(ctx context.Context, clusterName string, fldPath *field.Path, client kubernetes.Interface) field.ErrorList {
	allErrs := field.ErrorList{}

	_, err := mark.Get(ctx, client)
	if err != nil {
		// A missing mark is the expected ("not yet imported") state;
		// any other lookup error is reported as internal.
		if !apierrors.IsNotFound(err) {
			allErrs = append(allErrs, field.InternalError(fldPath, err))
		}
		return allErrs
	}

	// The mark exists, so this cluster was imported before.
	allErrs = append(allErrs,
		field.Invalid(fldPath, clusterName,
			fmt.Sprintf("can't imported same cluster, you can use `kubectl -n%s delete configmap %s`", mark.Namespace, mark.Name)))
	return allErrs
}
// ValidateClusterVersion validates that the cluster's Kubernetes version
// satisfies the supported version constraint (spec.K8sVersionConstraint).
func ValidateClusterVersion(fldPath *field.Path, client kubernetes.Interface) field.ErrorList {
	allErrs := field.ErrorList{}
	v, err := apiclient.GetClusterVersion(client)
	if err != nil {
		allErrs = append(allErrs, field.InternalError(fldPath, err))
		return allErrs
	}
	result, err := apiclient.CheckVersion(v, spec.K8sVersionConstraint)
	if err != nil {
		allErrs = append(allErrs, field.InternalError(fldPath, err))
		return allErrs
	}
	if !result {
		allErrs = append(allErrs, field.Invalid(fldPath, v, fmt.Sprintf("cluster version must %s", spec.K8sVersionConstraint)))
	}
	return allErrs
}
| {
"pile_set_name": "Github"
} |
android.enableJetifier=true
android.useAndroidX=true
org.gradle.jvmargs=-Xmx4096M
kapt.incremental.apt=true | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2008 Thorsten Kukuk <[email protected]>
* Copyright (c) 2013 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __OPASSWD_H__
#define __OPASSWD_H__

/* Sentinel return value: presumably tells pam_pwhistory that the
   operation must be retried via the setuid helper binary (it reuses
   an existing PAM error code as an internal marker) -- confirm
   against the callers. */
#define PAM_PWHISTORY_RUN_HELPER PAM_CRED_INSUFFICIENT

/* SELINUX_ENABLED is non-zero only when built with SELinux support
   *and* SELinux is active at runtime. */
#ifdef WITH_SELINUX
#include <selinux/selinux.h>
#define SELINUX_ENABLED (is_selinux_enabled()>0)
#else
#define SELINUX_ENABLED 0
#endif

/* When compiled into the standalone helper there is no PAM handle;
   PAMH_ARG_DECL transparently adds or removes the leading
   pam_handle_t parameter from the shared function signatures. */
#ifdef HELPER_COMPILE
#define PAMH_ARG_DECL(fname, ...) fname(__VA_ARGS__)
#else
#define PAMH_ARG_DECL(fname, ...) fname(pam_handle_t *pamh, __VA_ARGS__)
#endif

#ifdef HELPER_COMPILE
/* Error logging for the helper binary (no PAM handle available). */
void
helper_log_err(int err, const char *format, ...);
#endif

/* Check newpass against the user's stored password history;
   body defined elsewhere. */
PAMH_ARG_DECL(int
check_old_pass, const char *user, const char *newpass, int debug);

/* Record the user's current password in the history, presumably
   keeping at most `howmany` entries; body defined elsewhere. */
PAMH_ARG_DECL(int
save_old_pass, const char *user, int howmany, int debug);

#endif /* __OPASSWD_H__ */
| {
"pile_set_name": "Github"
} |
{
"_args": [
[
{
"name": "tmp",
"raw": "[email protected]",
"rawSpec": "0.0.24",
"scope": null,
"spec": "0.0.24",
"type": "version"
},
"/Users/carsenklock/explorer/node_modules/selenium-webdriver"
]
],
"_from": "[email protected]",
"_id": "[email protected]",
"_inCache": true,
"_installable": true,
"_location": "/tmp",
"_npmUser": {
"email": "[email protected]",
"name": "raszi"
},
"_npmVersion": "1.4.16",
"_phantomChildren": {},
"_requested": {
"name": "tmp",
"raw": "[email protected]",
"rawSpec": "0.0.24",
"scope": null,
"spec": "0.0.24",
"type": "version"
},
"_requiredBy": [
"/selenium-webdriver"
],
"_resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.24.tgz",
"_shasum": "d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12",
"_shrinkwrap": null,
"_spec": "[email protected]",
"_where": "/Users/carsenklock/explorer/node_modules/selenium-webdriver",
"author": {
"email": "[email protected]",
"name": "KARASZI István",
"url": "http://raszi.hu/"
},
"bugs": {
"url": "http://github.com/raszi/node-tmp/issues"
},
"dependencies": {},
"description": "Temporary file and directory creator",
"devDependencies": {
"vows": "~0.7.0"
},
"directories": {},
"dist": {
"shasum": "d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12",
"tarball": "https://registry.npmjs.org/tmp/-/tmp-0.0.24.tgz"
},
"engines": {
"node": ">=0.4.0"
},
"gitHead": "6864655f13a11c6043c119a0cb60385f072bcecf",
"homepage": "http://github.com/raszi/node-tmp",
"keywords": [
"temporary",
"tmp",
"temp",
"tempdir",
"tempfile",
"tmpdir",
"tmpfile"
],
"licenses": [
{
"type": "MIT",
"url": "http://opensource.org/licenses/MIT"
}
],
"main": "lib/tmp.js",
"maintainers": [
{
"email": "[email protected]",
"name": "raszi"
}
],
"name": "tmp",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "git://github.com/raszi/node-tmp.git"
},
"scripts": {
"test": "vows test/*-test.js"
},
"version": "0.0.24"
}
| {
"pile_set_name": "Github"
} |
{
"images": [
{
"filename": "ic_fluent_currency_dollar_rupee_20_regular.pdf",
"idiom": "universal"
}
],
"info": {
"author": "xcode",
"version": 1
},
"properties": {
"preserves-vector-representation": true,
"template-rendering-intent": "template"
}
} | {
"pile_set_name": "Github"
} |
/*
Atom One Dark With support for ReasonML by Gidi Morris, based off work by Daniel Gamage
Original One Dark Syntax theme from https://github.com/atom/one-dark-syntax
*/
/* Base code block: dark One Dark background with light grey default text. */
.hljs {
display: block;
overflow-x: auto;
padding: 0.5em;
line-height: 1.3em;
color: #abb2bf;
background: #282c34;
border-radius: 5px;
}
/* ReasonML-specific scopes below use brighter accent colors than stock
   One Dark (pink keywords, green params, orange type annotations). */
.hljs-keyword, .hljs-operator {
color: #F92672;
}
.hljs-pattern-match {
color: #F92672;
}
.hljs-pattern-match .hljs-constructor {
color: #61aeee;
}
.hljs-function {
color: #61aeee;
}
.hljs-function .hljs-params {
color: #A6E22E;
}
.hljs-function .hljs-params .hljs-typing {
color: #FD971F;
}
.hljs-module-access .hljs-module {
color: #7e57c2;
}
.hljs-constructor {
color: #e2b93d;
}
.hljs-constructor .hljs-string {
color: #9CCC65;
}
/* Standard One Dark scopes (shared across all languages). */
.hljs-comment, .hljs-quote {
color: #b18eb1;
font-style: italic;
}
.hljs-doctag, .hljs-formula {
color: #c678dd;
}
.hljs-section, .hljs-name, .hljs-selector-tag, .hljs-deletion, .hljs-subst {
color: #e06c75;
}
.hljs-literal {
color: #56b6c2;
}
.hljs-string, .hljs-regexp, .hljs-addition, .hljs-attribute, .hljs-meta-string {
color: #98c379;
}
.hljs-built_in, .hljs-class .hljs-title {
color: #e6c07b;
}
.hljs-attr, .hljs-variable, .hljs-template-variable, .hljs-type, .hljs-selector-class, .hljs-selector-attr, .hljs-selector-pseudo, .hljs-number {
color: #d19a66;
}
.hljs-symbol, .hljs-bullet, .hljs-link, .hljs-meta, .hljs-selector-id, .hljs-title {
color: #61aeee;
}
/* Text emphasis styles. */
.hljs-emphasis {
font-style: italic;
}
.hljs-strong {
font-weight: bold;
}
.hljs-link {
text-decoration: underline;
}
| {
"pile_set_name": "Github"
} |
// Copyright © 2017 Dmitry Sikorsky. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using ExtCore.Events;
using Platformus.Barebone;
using Platformus.Configurations.Data.Entities;
namespace Platformus.Configurations.Events
{
  /// <summary>
  /// Marker contract for handlers invoked after a <see cref="Configuration"/>
  /// entity has been deleted. Handlers receive the current request handler
  /// and the deleted configuration via the inherited
  /// <see cref="IEventHandler{T1,T2}"/> interface; no extra members are added.
  /// </summary>
  public interface IConfigurationDeletedEventHandler : IEventHandler<IRequestHandler, Configuration>
  {
  }
} | {
"pile_set_name": "Github"
} |
/*
*
* Confidential Information of Telekinesys Research Limited (t/a Havok). Not for disclosure or distribution without Havok's
* prior written consent. This software contains code, techniques and know-how which is confidential and proprietary to Havok.
* Level 2 and Level 3 source code contains trade secrets of Havok. Havok Software (C) Copyright 1999-2010 Telekinesys Research Limited t/a Havok. All Rights Reserved. Use of this software is subject to the terms of an end user license agreement.
*
*/
#ifndef HKNP_1AXIS_SWEEP_H
#define HKNP_1AXIS_SWEEP_H
#include <Common/Base/Types/Geometry/Aabb16/hkAabb16.h>
/// Single-axis sweep-and-prune over 16-bit AABBs.
/// T is the per-AABB key type stored into the reported overlap pairs.
template <typename T>
class hk1AxisSweep16
{
public:
/// One reported overlap: the keys of the two overlapping AABBs.
struct KeyPair
{
T m_keyA;
T m_keyB;
};
/// Collide two sorted AABB lists against each other; overlapping pairs are
/// appended to pairsOut (at most maxNumPairs), the overflow count goes to
/// numPairsSkipped. Returns the number of pairs written.
static int HK_CALL collide( const hkAabb16* pa, int numA, const hkAabb16* pb, int numB,
KeyPair* HK_RESTRICT pairsOut, int maxNumPairs, hkPadSpu<int>& numPairsSkipped );
/// Collide one sorted AABB list against itself (self-overlaps).
static int HK_CALL collide( const hkAabb16* pa, int numA,
KeyPair* HK_RESTRICT pairsOut, int maxNumPairs, hkPadSpu<int>& numPairsSkippedOut );
/// Sort AABBs along the sweep axis, as required by collide().
static void HK_CALL sortAabbs(hkAabb16* aabbs, int size);
};
#endif
/*
* Havok SDK - NO SOURCE PC DOWNLOAD, BUILD(#20101115)
*
* Confidential Information of Havok. (C) Copyright 1999-2010
* Telekinesys Research Limited t/a Havok. All Rights Reserved. The Havok
* Logo, and the Havok buzzsaw logo are trademarks of Havok. Title, ownership
* rights, and intellectual property rights in the Havok software remain in
* Havok and/or its suppliers.
*
* Use of this software for evaluation purposes is subject to and indicates
* acceptance of the End User licence Agreement for this product. A copy of
* the license is included with this software and is also available at www.havok.com/tryhavok.
*
*/
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!74 &7400000
AnimationClip:
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_Name: Idle
serializedVersion: 4
m_AnimationType: 2
m_Compressed: 0
m_UseHighQualityCurve: 1
m_RotationCurves: []
m_CompressedRotationCurves: []
m_PositionCurves: []
m_ScaleCurves: []
m_FloatCurves: []
m_PPtrCurves:
- curve:
- time: 0
value: {fileID: 21300000, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .0416666679
value: {fileID: 21300002, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .0833333358
value: {fileID: 21300004, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .125
value: {fileID: 21300006, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .166666672
value: {fileID: 21300008, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .208333328
value: {fileID: 21300010, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .25
value: {fileID: 21300012, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .291666657
value: {fileID: 21300014, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .333333343
value: {fileID: 21300016, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .375
value: {fileID: 21300018, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .416666657
value: {fileID: 21300020, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .458333343
value: {fileID: 21300022, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .5
value: {fileID: 21300024, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .541666687
value: {fileID: 21300026, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .583333313
value: {fileID: 21300028, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .625
value: {fileID: 21300030, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .666666687
value: {fileID: 21300032, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .708333313
value: {fileID: 21300034, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .75
value: {fileID: 21300036, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .791666687
value: {fileID: 21300038, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .833333313
value: {fileID: 21300040, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .875
value: {fileID: 21300042, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .916666687
value: {fileID: 21300044, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: .958333313
value: {fileID: 21300046, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1
value: {fileID: 21300048, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1.04166663
value: {fileID: 21300050, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1.08333337
value: {fileID: 21300052, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1.125
value: {fileID: 21300054, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1.16666663
value: {fileID: 21300056, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
- time: 1.20833337
value: {fileID: 21300058, guid: 8fb98a6035269e64a998f9b56828fc4f, type: 3}
attribute: m_Sprite
path:
classID: 212
script: {fileID: 0}
m_SampleRate: 24
m_WrapMode: 0
m_Bounds:
m_Center: {x: 0, y: 0, z: 0}
m_Extent: {x: 0, y: 0, z: 0}
m_AnimationClipSettings:
serializedVersion: 2
m_StartTime: 0
m_StopTime: 1.25
m_OrientationOffsetY: 0
m_Level: 0
m_CycleOffset: 0
m_LoopTime: 1
m_LoopBlend: 0
m_LoopBlendOrientation: 0
m_LoopBlendPositionY: 0
m_LoopBlendPositionXZ: 0
m_KeepOriginalOrientation: 0
m_KeepOriginalPositionY: 1
m_KeepOriginalPositionXZ: 0
m_HeightFromFeet: 0
m_Mirror: 0
m_EditorCurves: []
m_EulerEditorCurves: []
m_Events: []
| {
"pile_set_name": "Github"
} |
<!--
Copyright (c) 2005, 2018 Oracle and/or its affiliates. All rights reserved.
Portions Copyright © [2017-2020] Payara Foundation and/or affiliates.
This program and the accompanying materials are made available under the
terms of the Eclipse Public License v. 2.0, which is available at
http://www.eclipse.org/legal/epl-2.0.
This Source Code may also be made available under the following Secondary
Licenses when the conditions for such availability set forth in the
Eclipse Public License v. 2.0 are satisfied: GNU General Public License,
version 2 with the GNU Classpath Exception, which is available at
https://www.gnu.org/software/classpath/license.html.
SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
-->
<!-- Portions Copyright [2019] [Payara Foundation and/or its affiliates] -->
<p><a id="ref-appedit" name="ref-appedit"></a><a id="GHCOM00069" name="GHCOM00069"></a></p>
<h4><a id="sthref158" name="sthref158"></a>Edit Application</h4>
<a name="BEGIN" id="BEGIN"></a>
<p>Use the Edit Application page to modify an existing application.</p>
<p>The Edit Application page contains some or all of the following options, depending on the application type.</p>
<dl>
<dt>Name</dt>
<dd>
<p>Read-only field that displays the name of the application that you are editing.</p>
<p>The name can include an optional version identifier, which follows the name and is separated from the name by a colon (<code>:</code>).</p>
</dd>
<dt>Status</dt>
<dd>
<p>If the Enabled checkbox is selected, the application is enabled. This option is selected by default.</p>
</dd>
<dt>Context Root</dt>
<dd>
<p>For a web application, specifies the path to the application. In the URL of the web application, the context root immediately follows the port number (<code>http://</code><i>host</i><code>:</code><i>port</i><code>/</code><i>context-root</i><code>/...</code>). The context root must start with a forward slash, for example, <code>/hello</code>.</p>
</dd>
<dt>Availability</dt>
<dd>
<p>If the Enabled checkbox is selected, high-availability is enabled for web sessions and for stateful session bean (SFSB) checkpointing and potentially passivation. If set to false (default) all web session saving and SFSB checkpointing is disabled for the specified application, web application, or EJB module. If set to true, the specified application or module is enabled for high-availability. Set this option to true only if high availability is configured and enabled at higher levels, such as the server and container levels.</p>
<p>This option appears if clusters or standalone server instances aside from the default server instance (<code>server</code>) exist.</p>
</dd>
<dt>Virtual Servers</dt>
<dd>
<p>The virtual servers associated with this application.</p>
<p>The Virtual Servers option appears if only the default server instance, <code>server</code>, exists. If clusters or other standalone server instances exist, you can select virtual servers after deployment. Go to the Edit Application page, select the Target tab, and select Manage Virtual Servers for the desired target.</p>
</dd>
<dt>Java Web Start</dt>
<dd>
<p>(For some application types) If the Enabled checkbox is selected, the application uses Java Web Start software. Java Web Start provides a browser-independent way to deploy Java applications to run in a dedicated Java Virtual Machine.</p>
</dd>
<dt>Description</dt>
<dd>
<p>A short description of the application.</p>
</dd>
<dt>Location</dt>
<dd>
<p>The location of the deployed application. This is a read-only field.</p>
</dd>
<dt>Deployment Order</dt>
<dd>
<p>The deployment order of the application.</p>
<p>Applications with a lower number are loaded first at server startup. An application with a deployment order of 102 is loaded before an application with a deployment order of 110. If a deployment order is not specified at the time an application is deployed, the default deployment order of 100 is assigned. If two applications have the same deployment order, the application that was deployed first is loaded first. Specifying a deployment order is useful if the application has dependencies and must be loaded in a certain order.</p>
</dd>
<dt>Libraries</dt>
<dd>
<p>The library JAR files required by the application. This is a read-only field.</p>
</dd>
<dt>Modules and Components</dt>
<dd>
<p>Table that displays the names of the application's modules and their engines. For a web service endpoint, you can select View Endpoint to display the Web Service Endpoint Information page. For an application client, you can select Launch to display the Application Client Launch page or Download Client Stubs to download the client stubs.</p>
</dd>
</dl>
<a id="GHCOM368" name="GHCOM368"></a>
<h5>Related Tasks</h5>
<ul>
<li>
<p><a href="task-appedit.html">To Edit a Deployed Application</a></p>
</li>
<li>
<p><a href="task-applistdeployed.html">To View a List of Deployed Applications</a></p>
</li>
<li>
<p><a href="task-appdeployweb.html">To Deploy a Web Application</a></p>
</li>
<li>
<p><a href="task-deployentapp.html">To Deploy an Enterprise Application</a></p>
</li>
<li>
<p><a href="task-appdeployclient.html">To Deploy an Application Client</a></p>
</li>
<li>
<p><a href="task-appdeployconn.html">To Deploy a Connector Module</a></p>
</li>
<li>
<p><a href="task-appdeployejb.html">To Deploy an EJB Jar</a></p>
</li>
<li>
<p><a href="task-appviewdesc.html">To View Descriptors of an Application</a></p>
</li>
<li>
<p><a href="task-appviewtargets.html">To View Application Targets</a></p>
</li>
<li>
<p><a href="task-endpoint.html">To View Endpoint Information for a Web Service</a></p>
</li>
<li>
<p><a href="task-applaunch.html">To Launch an Application</a></p>
</li>
<li>
<p><a href="task-appclientlaunch.html">To Launch an Application Client</a></p>
</li>
<li>
<p><a href="task-appclientstubs.html">To Download Application Client Stubs</a></p>
</li>
<li>
<p><a href="task-appredeploy.html">To Redeploy a Deployed Application</a></p>
</li>
<li>
<p><a href="task-apprestart.html">To Reload a Deployed Application</a></p>
</li>
<li>
<p><a href="task-appscopedresourcesview.html">To View Application Scoped Resources</a></p>
</li>
<li>
<p><a href="task-appscopedresourcesedit.html">To Modify Application Scoped Resources</a></p>
</li>
</ul>
<small>Copyright © 2005, 2017, Oracle and/or its affiliates. All rights reserved. <a href="docinfo.html">Legal Notices</a></small>
<small>Portions Copyright © [2017-2020] Payara Foundation and/or affiliates.</small>
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2003-2020 The Music Player Daemon Project
* http://www.musicpd.org
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MPD_EVENT_POLLRESULT_GENERIC_HXX
#define MPD_EVENT_POLLRESULT_GENERIC_HXX
#include <cstddef>
#include <vector>
/**
 * Accumulates the (event mask, user object) pairs produced by one
 * poll/epoll iteration so the event loop can dispatch them afterwards.
 *
 * Indices passed to GetEvents()/GetObject() must be < GetSize();
 * no bounds checking is performed.
 */
class PollResultGeneric
{
	/** One ready object together with the events reported for it. */
	struct Item
	{
		unsigned events;
		void *obj;

		Item() = default;

		constexpr Item(unsigned _events, void *_obj) noexcept
			:events(_events), obj(_obj) {}
	};

	std::vector<Item> items;

public:
	/** Number of collected results. */
	size_t GetSize() const noexcept {
		return items.size();
	}

	/** Event mask of entry i (0 if it was suppressed via Clear()). */
	unsigned GetEvents(size_t i) const noexcept {
		return items[i].events;
	}

	/** Opaque user object of entry i. */
	void *GetObject(size_t i) const noexcept {
		return items[i].obj;
	}

	/** Discard all collected results (keeps the vector's capacity). */
	void Reset() noexcept {
		items.clear();
	}

	/**
	 * Zero the event mask of every entry referring to obj so it is
	 * skipped during dispatch (used when an object goes away while its
	 * events are still queued).  Entries are not removed, only zeroed,
	 * so indices of other entries remain valid.
	 */
	void Clear(void *obj) noexcept {
		/* idiomatic range-for instead of the explicit iterator loop;
		   behavior is identical */
		for (auto &i : items)
			if (i.obj == obj)
				i.events = 0;
	}

	/** Record that obj became ready with the given event mask. */
	void Add(unsigned events, void *obj) noexcept {
		items.emplace_back(events, obj);
	}
};
#endif
| {
"pile_set_name": "Github"
} |
/* Card-style container for a signature's key/value metadata table. */
.signatureDetail .signatureTable {
padding: 3px;
border-radius: 5px;
border: 1px solid black;
box-shadow: 0px 0px 7px;
margin-bottom: 10px;
}
/* Each row is a flex line; inputs stretch to the full row width. */
.signatureDetail .signatureTable .signatureTableWidth {
display: flex;
}
.signatureDetail .signatureTable .signatureTableWidth input {
width: 100%;
}
/* border-box keeps the textarea from overflowing the row width. */
.signatureDetail .signatureTable .signatureTableWidth .signatureMetaTextArea {
width: 100%;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
}
/* Zebra striping of rows for readability. */
.signatureDetail .signatureTable div:nth-child(odd) {
background-color: #EEE;
}
.signatureDetail .signatureTable div:nth-child(even) {
background-color: #FFF;
}
/* First cell of each row is the bold field label. */
.signatureDetail .signatureTable span:first-child {
font-weight: bold;
padding-right: 10px;
display: inline-block;
}
/*# sourceMappingURL=signature_table.css.map */
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env sh
# This script converts the mnist data into leveldb format.
# Expects the raw MNIST idx files under ./data/mnist and the compiled
# convert tool under ./build/examples/siamese; run from the repo root.
set -e
EXAMPLES=./build/examples/siamese
DATA=./data/mnist
echo "Creating leveldb..."
# Remove stale output so the converter starts from a clean database.
rm -rf ./examples/siamese/mnist_siamese_train_leveldb
rm -rf ./examples/siamese/mnist_siamese_test_leveldb
# Training set: 60k images + labels -> leveldb of paired examples.
$EXAMPLES/convert_mnist_siamese_data.bin \
$DATA/train-images-idx3-ubyte \
$DATA/train-labels-idx1-ubyte \
./examples/siamese/mnist_siamese_train_leveldb
# Test set: 10k images + labels.
$EXAMPLES/convert_mnist_siamese_data.bin \
$DATA/t10k-images-idx3-ubyte \
$DATA/t10k-labels-idx1-ubyte \
./examples/siamese/mnist_siamese_test_leveldb
echo "Done."
| {
"pile_set_name": "Github"
} |
.. _`Overloaded name`:
Overloaded name
Overloaded name is a term used in this reference documentation to designate
a metafunction providing more than one public interface. In reality,
class template overloading is nonexistent and the referenced functionality
is implemented by other, unspecified, means.
.. _`Concept-identical`:
Concept-identical
A sequence ``s1`` is said to be concept-identical to a sequence ``s2`` if
``s1`` and ``s2`` model the exact same set of concepts.
.. _`Bind expression`:
Bind expression
A bind expression is simply that |--| an instantiation of one of the |bind|
class templates. For instance, these are all bind expressions::
bind< quote3<if_>, _1,int,long >
bind< _1, bind< plus<>, int_<5>, _2> >
bind< times<>, int_<2>, int_<2> >
and these are not::
if_< _1, bind< plus<>, int_<5>, _2>, _2 >
protect< bind< quote3<if_>, _1,int,long > >
_2
.. |overloaded name| replace:: `overloaded name`_
.. |concept-identical| replace:: `concept-identical`_
.. |bind expression| replace:: `bind expression`_
.. copyright:: Copyright © 2001-2009 Aleksey Gurtovoy and David Abrahams
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
| {
"pile_set_name": "Github"
} |
(benchmark tst_bvudiv4
:logic QF_BV
:extrafuns ((a BitVec[80]))
:extrafuns ((b BitVec[80]))
:formula
(and (= a (bvudiv bv999[80] bv1000[80]))
(= b (bvurem bv999[80] bv1000[80])))
)
| {
"pile_set_name": "Github"
} |
/*
* jmemname.c
*
* Copyright (C) 1992-1997, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file provides a generic implementation of the system-dependent
* portion of the JPEG memory manager. This implementation assumes that
* you must explicitly construct a name for each temp file.
* Also, the problem of determining the amount of memory available
* is shoved onto the user.
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jmemsys.h" /* import the system-dependent declarations */
#ifndef HAVE_STDLIB_H /* <stdlib.h> should declare malloc(),free() */
extern void * malloc JPP((size_t size));
extern void free JPP((void *ptr));
#endif
#ifndef SEEK_SET /* pre-ANSI systems may not define this; */
#define SEEK_SET 0 /* if not, assume 0 is correct */
#endif
#ifdef DONT_USE_B_MODE /* define mode parameters for fopen() */
#define READ_BINARY "r"
#define RW_BINARY "w+"
#else
#ifdef VMS /* VMS is very nonstandard */
#define READ_BINARY "rb", "ctx=stm"
#define RW_BINARY "w+b", "ctx=stm"
#else /* standard ANSI-compliant case */
#define READ_BINARY "rb"
#define RW_BINARY "w+b"
#endif
#endif
/*
* Selection of a file name for a temporary file.
* This is system-dependent!
*
* The code as given is suitable for most Unix systems, and it is easily
* modified for most non-Unix systems. Some notes:
* 1. The temp file is created in the directory named by TEMP_DIRECTORY.
* The default value is /usr/tmp, which is the conventional place for
* creating large temp files on Unix. On other systems you'll probably
* want to change the file location. You can do this by editing the
* #define, or (preferred) by defining TEMP_DIRECTORY in jconfig.h.
*
* 2. If you need to change the file name as well as its location,
* you can override the TEMP_FILE_NAME macro. (Note that this is
* actually a printf format string; it must contain %s and %d.)
* Few people should need to do this.
*
* 3. mktemp() is used to ensure that multiple processes running
* simultaneously won't select the same file names. If your system
* doesn't have mktemp(), define NO_MKTEMP to do it the hard way.
* (If you don't have <errno.h>, also define NO_ERRNO_H.)
*
* 4. You probably want to define NEED_SIGNAL_CATCHER so that cjpeg.c/djpeg.c
* will cause the temp files to be removed if you stop the program early.
*/
#ifndef TEMP_DIRECTORY /* can override from jconfig.h or Makefile */
#define TEMP_DIRECTORY "/usr/tmp/" /* recommended setting for Unix */
#endif
static int next_file_num; /* to distinguish among several temp files */
#ifdef NO_MKTEMP
#ifndef TEMP_FILE_NAME /* can override from jconfig.h or Makefile */
#define TEMP_FILE_NAME "%sJPG%03d.TMP"
#endif
#ifndef NO_ERRNO_H
#include <errno.h> /* to define ENOENT */
#endif
/* ANSI C specifies that errno is a macro, but on older systems it's more
* likely to be a plain int variable. And not all versions of errno.h
* bother to declare it, so we have to in order to be most portable. Thus:
*/
#ifndef errno
extern int errno;
#endif
/* Generate a unique temp-file name into fname (variant for systems
 * without mktemp()).  Probes candidate names with fopen() until one does
 * not already exist.  Note the probe-then-create gap is inherently racy
 * between concurrent processes. */
LOCAL(void)
select_file_name (char * fname)
{
FILE * tfile;
/* Keep generating file names till we find one that's not in use */
for (;;) {
next_file_num++; /* advance counter */
sprintf(fname, TEMP_FILE_NAME, TEMP_DIRECTORY, next_file_num);
if ((tfile = fopen(fname, READ_BINARY)) == NULL) {
/* fopen could have failed for a reason other than the file not
* being there; for example, file there but unreadable.
* If <errno.h> isn't available, then we cannot test the cause.
*/
#ifdef ENOENT
if (errno != ENOENT)
continue;
#endif
break;
}
fclose(tfile); /* oops, it's there; close tfile & try again */
}
}
#else /* ! NO_MKTEMP */
/* Note that mktemp() requires the initial filename to end in six X's */
#ifndef TEMP_FILE_NAME /* can override from jconfig.h or Makefile */
#define TEMP_FILE_NAME "%sJPG%dXXXXXX"
#endif
/* Generate a unique temp-file name into fname using mktemp().
 * TEMP_FILE_NAME must end in six X's for mktemp() to substitute. */
LOCAL(void)
select_file_name (char * fname)
{
next_file_num++; /* advance counter */
sprintf(fname, TEMP_FILE_NAME, TEMP_DIRECTORY, next_file_num);
mktemp(fname); /* make sure file name is unique */
/* mktemp replaces the trailing XXXXXX with a unique string of characters */
}
#endif /* NO_MKTEMP */
/*
* Memory allocation and freeing are controlled by the regular library
* routines malloc() and free().
*/
/* Allocate a "small" object from the C heap; may return NULL. */
GLOBAL(void *)
jpeg_get_small (j_common_ptr cinfo, size_t sizeofobject)
{
return (void *) malloc(sizeofobject);
}
/* Release a "small" object; sizeofobject is unused with malloc/free. */
GLOBAL(void)
jpeg_free_small (j_common_ptr cinfo, void * object, size_t sizeofobject)
{
free(object);
}
/*
* "Large" objects are treated the same as "small" ones.
* NB: although we include FAR keywords in the routine declarations,
* this file won't actually work in 80x86 small/medium model; at least,
* you probably won't be able to process useful-size images in only 64KB.
*/
/* Allocate a "large" (FAR-qualified) object from the C heap. */
GLOBAL(void FAR *)
jpeg_get_large (j_common_ptr cinfo, size_t sizeofobject)
{
return (void FAR *) malloc(sizeofobject);
}
/* Release a "large" object; sizeofobject is unused with malloc/free. */
GLOBAL(void)
jpeg_free_large (j_common_ptr cinfo, void FAR * object, size_t sizeofobject)
{
free(object);
}
/*
* This routine computes the total memory space available for allocation.
* It's impossible to do this in a portable way; our current solution is
* to make the user tell us (with a default value set at compile time).
* If you can actually get the available space, it's a good idea to subtract
* a slop factor of 5% or so.
*/
#ifndef DEFAULT_MAX_MEM /* so can override from makefile */
#define DEFAULT_MAX_MEM 1000000L /* default: one megabyte */
#endif
/* Report how many more bytes the library may allocate.  There is no
 * portable way to query the OS, so this simply subtracts what has
 * already been allocated from the user-configured ceiling
 * (max_memory_to_use); min/max_bytes_needed are unused here. */
GLOBAL(long)
jpeg_mem_available (j_common_ptr cinfo, long min_bytes_needed,
long max_bytes_needed, long already_allocated)
{
return cinfo->mem->max_memory_to_use - already_allocated;
}
/*
* Backing store (temporary file) management.
* Backing store objects are only used when the value returned by
* jpeg_mem_available is less than the total space needed. You can dispense
* with these routines if you have plenty of virtual memory; see jmemnobs.c.
*/
/* Read byte_count bytes from the temp file at file_offset into
 * buffer_address.  A seek failure or short read is a fatal error
 * (ERREXIT does not return). */
METHODDEF(void)
read_backing_store (j_common_ptr cinfo, backing_store_ptr info,
void FAR * buffer_address,
long file_offset, long byte_count)
{
if (fseek(info->temp_file, file_offset, SEEK_SET))
ERREXIT(cinfo, JERR_TFILE_SEEK);
if (JFREAD(info->temp_file, buffer_address, byte_count)
!= (size_t) byte_count)
ERREXIT(cinfo, JERR_TFILE_READ);
}
/* Write byte_count bytes from buffer_address to the temp file at
 * file_offset.  A seek failure or short write is a fatal error. */
METHODDEF(void)
write_backing_store (j_common_ptr cinfo, backing_store_ptr info,
void FAR * buffer_address,
long file_offset, long byte_count)
{
if (fseek(info->temp_file, file_offset, SEEK_SET))
ERREXIT(cinfo, JERR_TFILE_SEEK);
if (JFWRITE(info->temp_file, buffer_address, byte_count)
!= (size_t) byte_count)
ERREXIT(cinfo, JERR_TFILE_WRITE);
}
/* Close the backing store's temp file and delete it from disk. */
METHODDEF(void)
close_backing_store (j_common_ptr cinfo, backing_store_ptr info)
{
fclose(info->temp_file); /* close the file */
unlink(info->temp_name); /* delete the file */
/* If your system doesn't have unlink(), use remove() instead.
* remove() is the ANSI-standard name for this function, but if
* your system was ANSI you'd be using jmemansi.c, right?
*/
TRACEMSS(cinfo, 1, JTRC_TFILE_CLOSE, info->temp_name);
}
/*
* Initial opening of a backing-store object.
*/
/* Create the temp file for a backing-store object and install the
 * read/write/close method pointers.  total_bytes_needed is unused in
 * this implementation (the file grows on demand); failure to create
 * the file is a fatal error. */
GLOBAL(void)
jpeg_open_backing_store (j_common_ptr cinfo, backing_store_ptr info,
long total_bytes_needed)
{
select_file_name(info->temp_name);
if ((info->temp_file = fopen(info->temp_name, RW_BINARY)) == NULL)
ERREXITS(cinfo, JERR_TFILE_CREATE, info->temp_name);
info->read_backing_store = read_backing_store;
info->write_backing_store = write_backing_store;
info->close_backing_store = close_backing_store;
TRACEMSS(cinfo, 1, JTRC_TFILE_OPEN, info->temp_name);
}
/*
* These routines take care of any system-dependent initialization and
* cleanup required.
*/
/* System-dependent initialization: reset the temp-file name counter and
 * return the default value for max_memory_to_use. */
GLOBAL(long)
jpeg_mem_init (j_common_ptr cinfo)
{
next_file_num = 0; /* initialize temp file name generator */
return DEFAULT_MAX_MEM; /* default for max_memory_to_use */
}
/* System-dependent cleanup: nothing to do in this implementation. */
GLOBAL(void)
jpeg_mem_term (j_common_ptr cinfo)
{
/* no work */
}
| {
"pile_set_name": "Github"
} |
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package annotations
import (
"github.com/goodrain/rainbond/gateway/annotations/cookie"
"github.com/goodrain/rainbond/gateway/annotations/header"
"github.com/goodrain/rainbond/gateway/annotations/l4"
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/proxy"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
"github.com/goodrain/rainbond/gateway/annotations/rewrite"
"github.com/goodrain/rainbond/gateway/annotations/upstreamhashby"
weight "github.com/goodrain/rainbond/gateway/annotations/wight"
"github.com/goodrain/rainbond/util/ingress-nginx/ingress/errors"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DeniedKeyName name of the key that contains the reason to deny a location
const DeniedKeyName = "Denied"
// Ingress defines the valid annotations present in one NGINX Ingress rule
type Ingress struct {
metav1.ObjectMeta
Header header.Config // header manipulation configuration
Cookie cookie.Config // cookie-based routing configuration
Weight weight.Config // traffic-weight (canary) configuration
Rewrite rewrite.Config // URL rewrite configuration
L4 l4.Config // layer-4 proxying configuration
UpstreamHashBy string // upstream hash-by expression for affinity
Proxy proxy.Config // proxy behavior configuration
}
// Extractor defines the annotation parsers to be used in the extraction of annotations
type Extractor struct {
// annotations maps a result field name (e.g. "Header") to the parser
// producing its value; keys match Ingress field names so Extract can
// merge parsed values into an Ingress via mergo.
annotations map[string]parser.IngressAnnotation
}
// NewAnnotationExtractor builds an Extractor wired with every annotation
// parser supported by the gateway, each configured from cfg.
func NewAnnotationExtractor(cfg resolver.Resolver) Extractor {
	parsers := map[string]parser.IngressAnnotation{
		"Header":         header.NewParser(cfg),
		"Cookie":         cookie.NewParser(cfg),
		"Weight":         weight.NewParser(cfg),
		"Rewrite":        rewrite.NewParser(cfg),
		"L4":             l4.NewParser(cfg),
		"UpstreamHashBy": upstreamhashby.NewParser(cfg),
		"Proxy":          proxy.NewParser(cfg),
	}
	return Extractor{annotations: parsers}
}
// Extract runs every registered annotation parser against ing and merges
// the parsed values into a single Ingress struct. Parser failures are
// tolerated: missing-annotation errors are skipped silently,
// location-denied errors are recorded once under DeniedKeyName, and all
// other errors leave the corresponding field at its zero value.
func (e Extractor) Extract(ing *extensions.Ingress) *Ingress {
pia := &Ingress{
ObjectMeta: ing.ObjectMeta,
}
// Parser results keyed by the Ingress field name; merged into pia below.
data := make(map[string]interface{})
for name, annotationParser := range e.annotations {
val, err := annotationParser.Parse(ing)
if err != nil {
if errors.IsMissingAnnotations(err) {
continue
}
// Only location-denied errors are worth recording; any other
// parse error is simply skipped for this annotation.
if !errors.IsLocationDenied(err) {
continue
}
// Record only the first denial reason; later denials are logged
// at a lower level and fall through to the val check.
_, alreadyDenied := data[DeniedKeyName]
if !alreadyDenied {
data[DeniedKeyName] = err
logrus.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err)
continue
}
logrus.Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err)
}
if val != nil {
data[name] = val
}
}
// Merge all parsed values onto the result struct in one pass.
err := mergo.MapWithOverwrite(pia, data)
if err != nil {
logrus.Errorf("unexpected error merging extracted annotations: %v", err)
}
return pia
}
| {
"pile_set_name": "Github"
} |
import Logger from '../utils/Logger';
const log = Logger.module('FcStack');
/**
 * FcStack is a no-op "fake connection" stack: every operation only logs,
 * and signaling messages are passed straight through. Incoming messages go
 * to the callback registered with setSignalingCallback; outgoing messages
 * are forwarded to spec.callback (e.g. spec.callback({ type, sdp })).
 */
const FcStack = (spec) => {
  const that = {
    pcConfig: {},
    peerConnection: {},
    desc: {},
    signalCallback: undefined,

    close() {
      log.debug('message: Close FcStack');
    },

    createOffer() {
      log.debug('message: CreateOffer');
    },

    addStream(stream) {
      log.debug(`message: addStream, ${stream.toLog()}`);
    },

    // Deliver an incoming signaling message to the registered callback.
    processSignalingMessage(msg) {
      log.debug(`message: processSignaling, message: ${msg}`);
      if (that.signalCallback !== undefined) { that.signalCallback(msg); }
    },

    // Forward an outgoing signaling message to the owner via spec.callback.
    sendSignalingMessage(msg) {
      log.debug(`message: Sending signaling Message, message: ${msg}`);
      spec.callback(msg);
    },

    setSignalingCallback(callback = () => {}) {
      log.debug('message: Setting signalling callback');
      that.signalCallback = callback;
    },
  };

  return that;
};
export default FcStack;
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.querydsl.jpa.codegen;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertFalse;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Properties;
import java.util.Set;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ErrorCollector;
import org.junit.rules.TemporaryFolder;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
/**
 * Exports Querydsl query types from the JPA metamodel of the "h2"
 * persistence unit into a temporary folder and verifies that every
 * generated source file is identical to the reference output produced by
 * the querydsl-jpa build.
 */
public class JPADomainExporterTest {

    @Rule
    public TemporaryFolder folder = new TemporaryFolder();

    @Rule
    public ErrorCollector errors = new ErrorCollector();

    @Test
    public void test() throws IOException {
        EntityManagerFactory factory = Persistence.createEntityManagerFactory("h2", new Properties());
        Path targetDir = folder.getRoot().toPath();
        JPADomainExporter domainExporter = new JPADomainExporter(targetDir.toFile(), factory.getMetamodel());
        domainExporter.execute();

        Set<File> generated = domainExporter.getGeneratedFiles();
        assertFalse(generated.isEmpty());

        // Reference sources generated by the querydsl-jpa module's build.
        File referenceRoot = new File("../querydsl-jpa/target/generated-test-sources/java");
        for (File generatedFile : generated) {
            Path relative = targetDir.relativize(generatedFile.toPath());
            Path referenceFile = referenceRoot.toPath().resolve(relative);
            String reference = Files.toString(referenceFile.toFile(), Charsets.UTF_8);
            String content = Files.toString(generatedFile, Charsets.UTF_8);
            errors.checkThat("Mismatch for " + generatedFile.getPath(), content, is(equalTo(reference)));
        }
    }
}
| {
"pile_set_name": "Github"
} |
package lang
import "jvmgo/ch09/native"
import "jvmgo/ch09/rtda"
import "jvmgo/ch09/rtda/heap"
const jlString = "java/lang/String"
// init registers the Go implementation of java.lang.String.intern with the
// native-method registry under its JVM method descriptor.
func init() {
native.Register(jlString, "intern", "()Ljava/lang/String;", intern)
}
// public native String intern();
// ()Ljava/lang/String;
//
// intern reads `this` (a java.lang.String reference) from the frame's local
// variables, interns it in the VM-wide string pool, and pushes the canonical
// reference onto the operand stack as the return value.
func intern(frame *rtda.Frame) {
this := frame.LocalVars().GetThis()
interned := heap.InternString(this)
frame.OperandStack().PushRef(interned)
}
| {
"pile_set_name": "Github"
} |
<?php
/**
 * Unit tests for the Raftalks\Ravel ACL component.
 *
 * The Acl instance under test is built in setUp() from a mocked Auth
 * service and an in-test roles configuration array; the module, usergroup
 * and role models are replaced with Mockery stubs.
 *
 * Note: all Mockery calls are now uniformly fully qualified
 * (`\Mockery::mock`) — the original mixed `Mockery::mock` and
 * `\Mockery::mock`.
 */
class AclTest extends RavelTestCase
{
    /**
     * Builds the Acl under test with mocked collaborators.
     */
    public function setUp()
    {
        $rolesConfig = $this->_getRolesConfig(); //Config::get('Ravel::Roles');
        $auth = $this->_getAuthMock();
        $this->acl = new Raftalks\Ravel\Acl\Acl($auth, $rolesConfig);

        $Module = $this->_getModuleMock();
        $this->acl->setModuleModel($Module);
        $this->acl->setUsergroupModel($this->_getUsergroupMock());
        $this->acl->setRoleModel($this->_getRoleMock());
    }

    /** Sanity check that the test harness itself runs. */
    public function testSomething()
    {
        $this->assertTrue(true);
    }

    /** An authenticated user must not be reported as a guest. */
    public function testAclCheckUserIsGuest()
    {
        $this->assertFalse($this->acl->is_guest());
    }

    /** check() must reflect the mocked Auth::check() === true. */
    public function testCheckUserAuthenticated()
    {
        $this->assertTrue($this->acl->check());
    }

    /** Moderator groups must come straight from the roles configuration. */
    public function testGetModeratorGroups()
    {
        $rolesConfig = $this->_getRolesConfig();
        $this->assertEquals($this->acl->getModeratorGroups(), $rolesConfig['moderator_usergroups']);
    }

    /** The mocked user has activated === true. */
    public function testIsUserActivated()
    {
        $this->assertTrue($this->acl->isUserActivated());
    }

    /**
     * Auth service stub: authenticated (guest=false, check=true) with an
     * activated user.
     */
    protected function _getAuthMock()
    {
        $user = $this->_getUserMock();
        $mock = \Mockery::mock('stdClass');
        $mock->shouldReceive(array(
            'guest' => false,
            'check' => true,
            'user'  => $user
        ));
        return $mock;
    }

    /** Plain stub for the module model. */
    protected function _getModuleMock()
    {
        $mock = \Mockery::mock('stdClass');
        return $mock;
    }

    /** User stub whose account is activated. */
    protected function _getUserMock()
    {
        $mock = \Mockery::mock('stdClass');
        $mock->activated = true;
        return $mock;
    }

    /** Plain stub for the usergroup model. */
    protected function _getUsergroupMock()
    {
        $mock = \Mockery::mock('stdClass');
        return $mock;
    }

    /** Plain stub for the role model. */
    protected function _getRoleMock()
    {
        $mock = \Mockery::mock('stdClass');
        return $mock;
    }

    /**
     * In-test replacement for the Ravel::Roles configuration.
     */
    protected function _getRolesConfig()
    {
        return array(
            'usergroups' => array('superadmin','admin','manager','editor','author','user'),
            'moderator_usergroups' => array('superadmin','admin','manager','editor'),

            //add modules here and it will get registered to the db automatically
            'modules' => array(
                'global',
                'contents',
                'categories',
                'settings'
            ),

            'role_actions' => array(
                'create',
                'read',
                'update',
                'delete',
            ),

            //default action value applied to all usergroup role actions except to those groups
            //mentioned under usergroups_default_to_true
            'default_action_set' => false,

            //given usergroups when registering the roles, actions will default to true
            'usergroups_default_to_true' => array('superadmin')
        );
    }
}
"pile_set_name": "Github"
} |
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.components.paintpreview.player;
import android.content.Context;
import android.view.View;
import android.view.ViewGroup.LayoutParams;
import org.chromium.third_party.android.swiperefresh.SwipeRefreshLayout;
import javax.annotation.Nonnull;
/**
 * A class for handling overscroll to refresh behavior for the Paint Preview player. This is based
 * on the modified version of the Android compat library's SwipeRefreshLayout due to the Player's
 * FrameLayout not behaving like a normal scrolling view.
 */
public class PlayerSwipeRefreshHandler implements OverscrollHandler {
    /** The duration of the refresh animation after a refresh signal. */
    private static final int STOP_REFRESH_ANIMATION_DELAY_MS = 500;

    /** The modified AppCompat version of the refresh effect. Assigned once; never reassigned. */
    private final SwipeRefreshLayout mSwipeRefreshLayout;

    /** A handler to delegate refresh events to. */
    private final Runnable mRefreshCallback;

    /**
     * Constructs a new instance of the handler.
     *
     * @param context The Context to create the handler for.
     * @param refreshCallback The handler that refresh events are delegated to.
     */
    public PlayerSwipeRefreshHandler(Context context, @Nonnull Runnable refreshCallback) {
        mRefreshCallback = refreshCallback;
        mSwipeRefreshLayout = new SwipeRefreshLayout(context);
        mSwipeRefreshLayout.setLayoutParams(
                new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT));
        // Use the same colors as {@link org.chromium.chrome.browser.SwipeRefreshHandler}.
        mSwipeRefreshLayout.setProgressBackgroundColorSchemeResource(
                org.chromium.ui.R.color.default_bg_color_elev_2);
        mSwipeRefreshLayout.setColorSchemeResources(
                org.chromium.ui.R.color.default_control_color_active);
        mSwipeRefreshLayout.setEnabled(true);
        mSwipeRefreshLayout.setOnRefreshListener(() -> {
            // Keep the spinner visible briefly so the refresh feels acknowledged.
            mSwipeRefreshLayout.postDelayed(() -> {
                mSwipeRefreshLayout.setRefreshing(false);
            }, STOP_REFRESH_ANIMATION_DELAY_MS);
            mRefreshCallback.run();
        });
    }

    /**
     * Gets the view that contains the swipe to refresh animations.
     */
    public View getView() {
        return mSwipeRefreshLayout;
    }

    @Override
    public boolean start() {
        return mSwipeRefreshLayout.start();
    }

    @Override
    public void pull(float yDelta) {
        mSwipeRefreshLayout.pull(yDelta);
    }

    @Override
    public void release() {
        mSwipeRefreshLayout.release(true);
    }

    @Override
    public void reset() {
        mSwipeRefreshLayout.reset();
    }
}
| {
"pile_set_name": "Github"
} |
Why we don't support IE 8
-------------------------
We've been living in 2007 for a while now, pretending that new browser features don't
exist because they aren't in IE8. You might not even know about some of these features,
or think they are only enabled by jQuery or underscore, simply because it hasn't
been an option to rely upon them.
Here is the list of features you don't have if you choose to support IE 8:
- HTML5 audio and video
- SVG
- Canvas
- TrueType fonts
- Media Queries
- CSS Transforms
- Multiple Backgrounds
- CSS3 Units (vh, vw, rem)
- Custom DOM events
- Hardware accelerated graphics
- The DOMContentLoaded event
- addEventListener
- Object.create, .seal, .freeze, .defineProperty
- Array.isArray, .indexOf, .every, .some, .forEach, .map, .filter, .reduce
- A modern JavaScript engine
- Real developer tools
- A consistent box model
- jQuery 2
- Google Apps
- Tether
It's true that IE 8 still holds a big chunk of the browsing population, but the reasons
why they can't update are dwindling. There are two big reasons for continuing IE 8 support.
#### Enterprises
Microsoft is dropping support for XP in April, so organizations that want security updates will have to upgrade.
#### China uses XP
Chrome, Firefox and Opera all support XP. Nothing prevents users from upgrading, except the inertia of
organizations that still support IE 8.
#### The Future
We are skating towards where the puck will be, and we hope that as you decide to drop IE 8 support,
you choose to add Tether to the list of awesome things you can do.
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.qualitychecker.row;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
/**
 * Stores the results of a RowLevelPolicy: for each (policy, result) pair,
 * the number of records that produced that result.
 *
 * @author stakiar
 */
public class RowLevelPolicyCheckResults {
  Map<RowLevelPolicyResultPair, Long> results;

  public RowLevelPolicyCheckResults() {
    this.results = Maps.newConcurrentMap();
  }

  /**
   * Increments the record count for the given policy/result combination.
   *
   * <p>Fix: the original built the key pair twice and did
   * containsKey + get + put (three map lookups); this builds one key and
   * does a single get + put.
   */
  public void put(RowLevelPolicy policy, RowLevelPolicy.Result result) {
    RowLevelPolicyResultPair key = new RowLevelPolicyResultPair(policy, result);
    Long current = this.results.get(key);
    this.results.put(key, (current == null) ? 1L : current + 1L);
  }

  /**
   * Returns a newline-separated human-readable summary, one line per
   * (policy, result) pair.
   */
  public String getResults() {
    List<String> list = new ArrayList<>();
    Joiner joiner = Joiner.on("\n").skipNulls();
    for (Map.Entry<RowLevelPolicyResultPair, Long> entry : this.results.entrySet()) {
      list.add("RowLevelPolicy " + entry.getKey().getPolicy().toString() + " processed " + entry.getValue()
          + " record(s) with result " + entry.getKey().getResult());
    }
    return joiner.join(list);
  }

  /**
   * Map key pairing a policy with one of its possible results. Equality and
   * hashing are based on the policy's string form plus the result, matching
   * the original semantics.
   */
  public static class RowLevelPolicyResultPair {
    private RowLevelPolicy policy;
    private RowLevelPolicy.Result result;

    public RowLevelPolicyResultPair(RowLevelPolicy policy, RowLevelPolicy.Result result) {
      this.policy = policy;
      this.result = result;
    }

    public RowLevelPolicy getPolicy() {
      return this.policy;
    }

    public RowLevelPolicy.Result getResult() {
      return this.result;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof RowLevelPolicyResultPair)) {
        return false;
      }
      RowLevelPolicyResultPair p = (RowLevelPolicyResultPair) o;
      return p.getPolicy().toString().equals(this.policy.toString()) && p.getResult().equals(this.result);
    }

    @Override
    public int hashCode() {
      return (this.policy.toString() + this.result).hashCode();
    }
  }
}
| {
"pile_set_name": "Github"
} |
//$$ newmat3.cpp Matrix get and restore rows and columns
// Copyright (C) 1991,2,3,4: R B Davies
//#define WANT_STREAM
#include <ossim/matrix/include.h>
#include <ossim/matrix/newmat.h>
#include <ossim/matrix/newmatrc.h>
#ifdef use_namespace
namespace NEWMAT {
#endif
#ifdef DO_REPORT
#define REPORT { static ExeCounter ExeCount(__LINE__,3); ++ExeCount; }
#else
#define REPORT {}
#endif
//#define MONITOR(what,storage,store)
// { cout << what << " " << storage << " at " << (long)store << "\n"; }
#define MONITOR(what,store,storage) {}
// Control bits codes for GetRow, GetCol, RestoreRow, RestoreCol
//
// LoadOnEntry:
// Load data into MatrixRow or Col dummy array under GetRow or GetCol
// StoreOnExit:
// Restore data to original matrix under RestoreRow or RestoreCol
// DirectPart:
// Load or restore only part directly stored; must be set with StoreOnExit
// Still have decide how to handle this with symmetric
// StoreHere:
// used in columns only - store data at supplied storage address;
// used for GetCol, NextCol & RestoreCol. No need to fill out zeros
// HaveStore:
// dummy array has been assigned (internal use only).
// For symmetric matrices, treat columns as rows unless StoreHere is set;
// then stick to columns as this will give better performance for doing
// inverses
// How components are used:
// Use rows wherever possible in preference to columns
// Columns without StoreHere are used in in-exact transpose, sum column,
// multiply a column vector, and maybe in future access to column,
// additional multiply functions, add transpose
// Columns with StoreHere are used in exact transpose (not symmetric matrices
// or vectors, load only)
// Columns with MatrixColX (Store to full column) are used in inverse and solve
// Functions required for each matrix class
// GetRow(MatrixRowCol& mrc)
// GetCol(MatrixRowCol& mrc)
// GetCol(MatrixColX& mrc)
// RestoreRow(MatrixRowCol& mrc)
// RestoreCol(MatrixRowCol& mrc)
// RestoreCol(MatrixColX& mrc)
// NextRow(MatrixRowCol& mrc)
// NextCol(MatrixRowCol& mrc)
// NextCol(MatrixColX& mrc)
// The Restore routines assume StoreOnExit has already been checked
// Defaults for the Next routines are given below
// Assume cannot have both !DirectPart && StoreHere for MatrixRowCol routines
// Default NextRow and NextCol:
// will work as a default but need to override NextRow for efficiency
// Default NextRow/NextCol: write the current row/column back to the matrix
// (when StoreOnExit is set), advance the cursor, and load the next one.
// Derived classes override these where a plain pointer increment suffices.
// Throughout this file the MatrixRowCol fields appear to mean:
//   skip    = leading elements of the row/col that are not physically stored
//   storage = number of elements actually stored
//   length  = full logical length of the row/col
//   data    = pointer to the first stored element
// (inferred from usage; confirm against newmatrc.h)
void GeneralMatrix::NextRow(MatrixRowCol& mrc)
{
REPORT
if (+(mrc.cw*StoreOnExit)) { REPORT this->RestoreRow(mrc); }
mrc.rowcol++;
if (mrc.rowcol<nrows) { REPORT this->GetRow(mrc); }
else { REPORT mrc.cw -= StoreOnExit; }   // past the last row: stop storing
}
void GeneralMatrix::NextCol(MatrixRowCol& mrc)
{
REPORT                                         // 423
if (+(mrc.cw*StoreOnExit)) { REPORT this->RestoreCol(mrc); }
mrc.rowcol++;
if (mrc.rowcol<ncols) { REPORT this->GetCol(mrc); }
else { REPORT mrc.cw -= StoreOnExit; }
}
void GeneralMatrix::NextCol(MatrixColX& mrc)
{
REPORT                                         // 423
if (+(mrc.cw*StoreOnExit)) { REPORT this->RestoreCol(mrc); }
mrc.rowcol++;
if (mrc.rowcol<ncols) { REPORT this->GetCol(mrc); }
else { REPORT mrc.cw -= StoreOnExit; }
}
// routines for matrix
// Rectangular Matrix: elements are stored row-major, so a row is contiguous
// (GetRow hands out a direct pointer) while a column has stride ncols and
// generally needs a scratch copy.
void Matrix::GetRow(MatrixRowCol& mrc)
{
REPORT
mrc.skip=0; mrc.storage=mrc.length=ncols; mrc.data=store+mrc.rowcol*ncols;
}
// Column access: allocate (or reuse) a scratch buffer unless the matrix is
// a single column, then gather the strided elements into it.
void Matrix::GetCol(MatrixRowCol& mrc)
{
REPORT
mrc.skip=0; mrc.storage=mrc.length=nrows;
if ( ncols==1 && !(mrc.cw*StoreHere) )      // ColumnVector
{ REPORT mrc.data=store; }
else
{
Real* ColCopy;
if ( !(mrc.cw*(HaveStore+StoreHere)) )
{
REPORT
ColCopy = new Real [nrows]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (MatGetCol)",nrows,ColCopy)
mrc.data = ColCopy; mrc.cw += HaveStore;
}
else { REPORT ColCopy = mrc.data; }
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+mrc.rowcol; int i=nrows;
//while (i--) { *ColCopy++ = *Mstore; Mstore+=ncols; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore+=ncols; }
}
}
}
// Column access into caller-supplied storage (MatrixColX).
void Matrix::GetCol(MatrixColX& mrc)
{
REPORT
mrc.skip=0; mrc.storage=nrows; mrc.length=nrows;
if (+(mrc.cw*LoadOnEntry))
{
REPORT Real* ColCopy = mrc.data;
Real* Mstore = store+mrc.rowcol; int i=nrows;
//while (i--) { *ColCopy++ = *Mstore; Mstore+=ncols; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore+=ncols; }
}
}
// Scatter the scratch copy back into the strided column.
void Matrix::RestoreCol(MatrixRowCol& mrc)
{
// always check StoreOnExit before calling RestoreCol
REPORT                                   // 429
if (+(mrc.cw*HaveStore))
{
REPORT                                   // 426
Real* Mstore = store+mrc.rowcol; int i=nrows;
Real* Cstore = mrc.data;
// while (i--) { *Mstore = *Cstore++; Mstore+=ncols; }
if (i) for (;;)
{ *Mstore = *Cstore++; if (!(--i)) break; Mstore+=ncols; }
}
}
void Matrix::RestoreCol(MatrixColX& mrc)
{
REPORT
Real* Mstore = store+mrc.rowcol; int i=nrows; Real* Cstore = mrc.data;
// while (i--) { *Mstore = *Cstore++; Mstore+=ncols; }
if (i) for (;;)
{ *Mstore = *Cstore++; if (!(--i)) break; Mstore+=ncols; }
}
// Rows are contiguous, so advancing is a cheap pointer bump.
void Matrix::NextRow(MatrixRowCol& mrc) { REPORT mrc.IncrMat(); }  // 1808
// Advancing a column: flush if requested, then re-gather the next column
// into the existing scratch buffer.
void Matrix::NextCol(MatrixRowCol& mrc)
{
REPORT                                        // 632
if (+(mrc.cw*StoreOnExit)) { REPORT RestoreCol(mrc); }
mrc.rowcol++;
if (mrc.rowcol<ncols)
{
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* ColCopy = mrc.data;
Real* Mstore = store+mrc.rowcol; int i=nrows;
//while (i--) { *ColCopy++ = *Mstore; Mstore+=ncols; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore+=ncols; }
}
}
else { REPORT mrc.cw -= StoreOnExit; }
}
void Matrix::NextCol(MatrixColX& mrc)
{
REPORT
if (+(mrc.cw*StoreOnExit)) { REPORT RestoreCol(mrc); }
mrc.rowcol++;
if (mrc.rowcol<ncols)
{
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* ColCopy = mrc.data;
Real* Mstore = store+mrc.rowcol; int i=nrows;
// while (i--) { *ColCopy++ = *Mstore; Mstore+=ncols; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore+=ncols; }
}
}
else { REPORT mrc.cw -= StoreOnExit; }
}
// routines for diagonal matrix
// DiagonalMatrix: only the diagonal is stored, so every row/column has a
// single stored element at offset rowcol, preceded by `rowcol` skipped zeros.
void DiagonalMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT
mrc.skip=mrc.rowcol; mrc.storage=1;
mrc.data=store+mrc.skip; mrc.length=ncols;
}
void DiagonalMatrix::GetCol(MatrixRowCol& mrc)
{
REPORT
mrc.skip=mrc.rowcol; mrc.storage=1; mrc.length=nrows;
if (+(mrc.cw*StoreHere))              // should not happen
Throw(InternalException("DiagonalMatrix::GetCol(MatrixRowCol&)"));
else { REPORT mrc.data=store+mrc.skip; }
// not accessed
}
// Column into caller storage: copy the single diagonal element across.
void DiagonalMatrix::GetCol(MatrixColX& mrc)
{
REPORT
mrc.skip=mrc.rowcol; mrc.storage=1; mrc.length=nrows;
mrc.data = mrc.store+mrc.skip;
*(mrc.data)=*(store+mrc.skip);
}
void DiagonalMatrix::NextRow(MatrixRowCol& mrc) { REPORT mrc.IncrDiag(); }
// 800
void DiagonalMatrix::NextCol(MatrixRowCol& mrc) { REPORT mrc.IncrDiag(); }
// not accessed
void DiagonalMatrix::NextCol(MatrixColX& mrc)
{
REPORT
if (+(mrc.cw*StoreOnExit))
{ REPORT *(store+mrc.rowcol)=*(mrc.data); }   // flush old diagonal element
mrc.IncrDiag();
int t1 = +(mrc.cw*LoadOnEntry);
if (t1 && mrc.rowcol < ncols)
{ REPORT *(mrc.data)=*(store+mrc.rowcol); }   // load new diagonal element
}
// routines for upper triangular matrix
// UpperTriangularMatrix: rows are stored packed with decreasing length;
// row r starts at store+(r*(2*ncols-r+1))/2 and holds ncols-r elements.
// A column therefore has a varying stride (--j per step) and needs a copy.
void UpperTriangularMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT
int row = mrc.rowcol; mrc.skip=row; mrc.length=ncols;
mrc.storage=ncols-row; mrc.data=store+(row*(2*ncols-row+1))/2;
}
void UpperTriangularMatrix::GetCol(MatrixRowCol& mrc)
{
REPORT
mrc.skip=0; int i=mrc.rowcol+1; mrc.storage=i;
mrc.length=nrows; Real* ColCopy;
if ( !(mrc.cw*(StoreHere+HaveStore)) )
{
REPORT                                  // not accessed
ColCopy = new Real [nrows]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (UT GetCol)",nrows,ColCopy)
mrc.data = ColCopy; mrc.cw += HaveStore;
}
else { REPORT ColCopy = mrc.data; }
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+mrc.rowcol; int j = ncols;
// while (i--) { *ColCopy++ = *Mstore; Mstore += --j; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += --j; }
}
}
void UpperTriangularMatrix::GetCol(MatrixColX& mrc)
{
REPORT
mrc.skip=0; int i=mrc.rowcol+1; mrc.storage=i;
mrc.length=nrows;
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* ColCopy = mrc.data;
Real* Mstore = store+mrc.rowcol; int j = ncols;
// while (i--) { *ColCopy++ = *Mstore; Mstore += --j; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += --j; }
}
}
// Scatter the copied column back through the shrinking row stride.
void UpperTriangularMatrix::RestoreCol(MatrixRowCol& mrc)
{
REPORT
Real* Mstore = store+mrc.rowcol; int i=mrc.rowcol+1; int j = ncols;
Real* Cstore = mrc.data;
// while (i--) { *Mstore = *Cstore++; Mstore += --j; }
if (i) for (;;)
{ *Mstore = *Cstore++; if (!(--i)) break; Mstore += --j; }
}
void UpperTriangularMatrix::NextRow(MatrixRowCol& mrc) { REPORT mrc.IncrUT(); }
// 722
// routines for lower triangular matrix
// LowerTriangularMatrix: rows stored packed with increasing length; row r
// starts at store+(r*(r+1))/2 and holds r+1 elements. Columns step through
// rows of growing length, hence the ++col stride in the copy loops.
void LowerTriangularMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT
int row=mrc.rowcol; mrc.skip=0; mrc.storage=row+1; mrc.length=ncols;
mrc.data=store+(row*(row+1))/2;
}
void LowerTriangularMatrix::GetCol(MatrixRowCol& mrc)
{
REPORT
int col=mrc.rowcol; mrc.skip=col; mrc.length=nrows;
int i=nrows-col; mrc.storage=i; Real* ColCopy;
if ( +(mrc.cw*(StoreHere+HaveStore)) )
{ REPORT ColCopy = mrc.data; }
else
{
REPORT                                  // not accessed
ColCopy = new Real [nrows]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (LT GetCol)",nrows,ColCopy)
mrc.cw += HaveStore; mrc.data = ColCopy;
}
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+(col*(col+3))/2;   // diagonal element of column `col`
// while (i--) { *ColCopy++ = *Mstore; Mstore += ++col; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += ++col; }
}
}
void LowerTriangularMatrix::GetCol(MatrixColX& mrc)
{
REPORT
int col=mrc.rowcol; mrc.skip=col; mrc.length=nrows;
int i=nrows-col; mrc.storage=i; mrc.data = mrc.store + col;
if (+(mrc.cw*LoadOnEntry))
{
REPORT Real* ColCopy = mrc.data;
Real* Mstore = store+(col*(col+3))/2;
// while (i--) { *ColCopy++ = *Mstore; Mstore += ++col; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += ++col; }
}
}
void LowerTriangularMatrix::RestoreCol(MatrixRowCol& mrc)
{
REPORT
int col=mrc.rowcol; Real* Cstore = mrc.data;
Real* Mstore = store+(col*(col+3))/2; int i=nrows-col;
//while (i--) { *Mstore = *Cstore++; Mstore += ++col; }
if (i) for (;;)
{ *Mstore = *Cstore++; if (!(--i)) break; Mstore += ++col; }
}
void LowerTriangularMatrix::NextRow(MatrixRowCol& mrc) { REPORT mrc.IncrLT(); }
//712
// routines for symmetric matrix
// SymmetricMatrix: only the lower triangle is stored (same packed layout as
// LowerTriangularMatrix). DirectPart exposes just the stored triangle;
// otherwise the full row/column is assembled from the triangle plus its
// mirror. StoreOnExit combined with !DirectPart is rejected throughout.
void SymmetricMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT                                         //571
mrc.skip=0; int row=mrc.rowcol; mrc.length=ncols;
if (+(mrc.cw*DirectPart))
{ REPORT mrc.storage=row+1; mrc.data=store+(row*(row+1))/2; }
else
{
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricMatrix::GetRow(MatrixRowCol&)"));
mrc.storage=ncols; Real* RowCopy;
if (!(mrc.cw*HaveStore))
{
REPORT
RowCopy = new Real [ncols]; MatrixErrorNoSpace(RowCopy);
MONITOR_REAL_NEW("Make (SymGetRow)",ncols,RowCopy)
mrc.cw += HaveStore; mrc.data = RowCopy;
}
else { REPORT RowCopy = mrc.data; }
if (+(mrc.cw*LoadOnEntry))
{
REPORT                                   // 544
// first the contiguous part of the stored row, then the mirrored
// tail walks down the triangle with growing stride
Real* Mstore = store+(row*(row+1))/2; int i = row;
while (i--) *RowCopy++ = *Mstore++;
i = ncols-row;
// while (i--) { *RowCopy++ = *Mstore; Mstore += ++row; }
if (i) for (;;)
{ *RowCopy++ = *Mstore; if (!(--i)) break; Mstore += ++row; }
}
}
}
void SymmetricMatrix::GetCol(MatrixRowCol& mrc)
{
// do not allow StoreHere
if (+(mrc.cw*StoreHere))
Throw(InternalException("SymmetricMatrix::GetCol(MatrixRowCol&)"));
int col=mrc.rowcol; mrc.length=nrows;
REPORT
mrc.skip=0;
if (+(mrc.cw*DirectPart))    // actually get row ??
{ REPORT mrc.storage=col+1; mrc.data=store+(col*(col+1))/2; }
else
{
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricMatrix::GetCol(MatrixRowCol&)"));
mrc.storage=ncols; Real* ColCopy;
if ( +(mrc.cw*HaveStore)) { REPORT ColCopy = mrc.data; }
else
{
REPORT                                  // not accessed
ColCopy = new Real [ncols]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (SymGetCol)",ncols,ColCopy)
mrc.cw += HaveStore; mrc.data = ColCopy;
}
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+(col*(col+1))/2; int i = col;
while (i--) *ColCopy++ = *Mstore++;
i = ncols-col;
// while (i--) { *ColCopy++ = *Mstore; Mstore += ++col; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += ++col; }
}
}
}
void SymmetricMatrix::GetCol(MatrixColX& mrc)
{
int col=mrc.rowcol; mrc.length=nrows;
if (+(mrc.cw*DirectPart))
{
REPORT
mrc.skip=col; int i=nrows-col; mrc.storage=i;
mrc.data = mrc.store+col;
if (+(mrc.cw*LoadOnEntry))
{
REPORT                                  // not accessed
Real* ColCopy = mrc.data;
Real* Mstore = store+(col*(col+3))/2;
// while (i--) { *ColCopy++ = *Mstore; Mstore += ++col; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += ++col; }
}
}
else
{
REPORT
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricMatrix::GetCol(MatrixColX&)"));
mrc.skip=0; mrc.storage=ncols;
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* ColCopy = mrc.data;
Real* Mstore = store+(col*(col+1))/2; int i = col;
while (i--) *ColCopy++ = *Mstore++;
i = ncols-col;
// while (i--) { *ColCopy++ = *Mstore; Mstore += ++col; }
if (i) for (;;)
{ *ColCopy++ = *Mstore; if (!(--i)) break; Mstore += ++col; }
}
}
}
// Do not need RestoreRow because we do not allow !DirectPart && StoreOnExit
void SymmetricMatrix::RestoreCol(MatrixColX& mrc)
{
REPORT
// Really do restore column
int col=mrc.rowcol; Real* Cstore = mrc.data;
Real* Mstore = store+(col*(col+3))/2; int i = nrows-col;
// while (i--) { *Mstore = *Cstore++; Mstore+= ++col; }
if (i) for (;;)
{ *Mstore = *Cstore++; if (!(--i)) break; Mstore+= ++col; }
}
// routines for row vector
// RowVector: a 1 x ncols matrix, so every column is a single element at
// store+rowcol and NextCol is a plain pointer increment.
void RowVector::GetCol(MatrixRowCol& mrc)
{
REPORT
// do not allow StoreHere
if (+(mrc.cw*StoreHere))
Throw(InternalException("RowVector::GetCol(MatrixRowCol&)"));
mrc.skip=0; mrc.storage=1; mrc.length=nrows; mrc.data = store+mrc.rowcol;
}
void RowVector::GetCol(MatrixColX& mrc)
{
REPORT
mrc.skip=0; mrc.storage=1; mrc.length=nrows;
if (mrc.cw >= LoadOnEntry)
{ REPORT *(mrc.data) = *(store+mrc.rowcol); }
}
void RowVector::NextCol(MatrixRowCol& mrc)
{ REPORT mrc.rowcol++; mrc.data++; }
void RowVector::NextCol(MatrixColX& mrc)
{
if (+(mrc.cw*StoreOnExit)) { REPORT *(store+mrc.rowcol)=*(mrc.data); }
mrc.rowcol++;
if (mrc.rowcol < ncols)
{
if (+(mrc.cw*LoadOnEntry)) { REPORT *(mrc.data)=*(store+mrc.rowcol); }
}
else { REPORT mrc.cw -= StoreOnExit; }
}
void RowVector::RestoreCol(MatrixColX& mrc)
{ REPORT *(store+mrc.rowcol)=*(mrc.data); }      // not accessed
// routines for band matrices
// BandMatrix: each row stores lower+1+upper elements; row r begins at
// store+r*(lower+1+upper). The first/last few rows are clipped at the
// matrix edges, which is what the s<0 / s>0 adjustments handle.
void BandMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT
int r = mrc.rowcol; int w = lower+1+upper; mrc.length=ncols;
int s = r-lower;
if (s<0) { mrc.data = store+(r*w-s); w += s; s = 0; }  // clipped at top
else mrc.data = store+r*w;
mrc.skip = s; s += w-ncols; if (s>0) w -= s; mrc.storage = w;  // clip at right
}
// should make special versions of this for upper and lower band matrices
void BandMatrix::NextRow(MatrixRowCol& mrc)
{
REPORT
int r = ++mrc.rowcol;
if (r<=lower) { mrc.storage++; mrc.data += lower+upper; }
else { mrc.skip++; mrc.data += lower+upper+1; }
if (r>=ncols-upper) mrc.storage--;
}
// Columns run through the band storage with stride n = lower+upper.
void BandMatrix::GetCol(MatrixRowCol& mrc)
{
REPORT
int c = mrc.rowcol; int n = lower+upper; int w = n+1;
mrc.length=nrows; Real* ColCopy;
int b; int s = c-upper;
if (s<=0) { w += s; s = 0; b = c+lower; } else b = s*w+n;
mrc.skip = s; s += w-nrows; if (s>0) w -= s; mrc.storage = w;
if ( +(mrc.cw*(StoreHere+HaveStore)) )
{ REPORT ColCopy = mrc.data; }
else
{
REPORT
ColCopy = new Real [n+1]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (BMGetCol)",n+1,ColCopy)
mrc.cw += HaveStore; mrc.data = ColCopy;
}
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+b;
// while (w--) { *ColCopy++ = *Mstore; Mstore+=n; }
if (w) for (;;)
{ *ColCopy++ = *Mstore; if (!(--w)) break; Mstore+=n; }
}
}
void BandMatrix::GetCol(MatrixColX& mrc)
{
REPORT
int c = mrc.rowcol; int n = lower+upper; int w = n+1;
mrc.length=nrows; int b; int s = c-upper;
if (s<=0) { w += s; s = 0; b = c+lower; } else b = s*w+n;
mrc.skip = s; s += w-nrows; if (s>0) w -= s; mrc.storage = w;
mrc.data = mrc.store+mrc.skip;
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* ColCopy = mrc.data; Real* Mstore = store+b;
// while (w--) { *ColCopy++ = *Mstore; Mstore+=n; }
if (w) for (;;)
{ *ColCopy++ = *Mstore; if (!(--w)) break; Mstore+=n; }
}
}
void BandMatrix::RestoreCol(MatrixRowCol& mrc)
{
REPORT
int c = mrc.rowcol; int n = lower+upper; int s = c-upper;
Real* Mstore = store + ((s<=0) ? c+lower : s*n+s+n);
Real* Cstore = mrc.data;
int w = mrc.storage;
// while (w--) { *Mstore = *Cstore++; Mstore += n; }
if (w) for (;;)
{ *Mstore = *Cstore++; if (!(--w)) break; Mstore += n; }
}
// routines for symmetric band matrix
void SymmetricBandMatrix::GetRow(MatrixRowCol& mrc)
{
REPORT
int r=mrc.rowcol; int s = r-lower; int w1 = lower+1; int o = r*w1;
mrc.length = ncols;
if (s<0) { w1 += s; o -= s; s = 0; }
mrc.skip = s;
if (+(mrc.cw*DirectPart))
{ REPORT mrc.data = store+o; mrc.storage = w1; }
else
{
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricBandMatrix::GetRow(MatrixRowCol&)"));
int w = w1+lower; s += w-ncols; Real* RowCopy;
if (s>0) w -= s; mrc.storage = w; int w2 = w-w1;
if (!(mrc.cw*HaveStore))
{
REPORT
RowCopy = new Real [2*lower+1]; MatrixErrorNoSpace(RowCopy);
MONITOR_REAL_NEW("Make (SmBGetRow)",2*lower+1,RowCopy)
mrc.cw += HaveStore; mrc.data = RowCopy;
}
else { REPORT RowCopy = mrc.data; }
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+o;
while (w1--) *RowCopy++ = *Mstore++;
Mstore--;
while (w2--) { Mstore += lower; *RowCopy++ = *Mstore; }
}
}
}
void SymmetricBandMatrix::GetCol(MatrixRowCol& mrc)
{
// do not allow StoreHere
if (+(mrc.cw*StoreHere))
Throw(InternalException("SymmetricBandMatrix::GetCol(MatrixRowCol&)"));
int c=mrc.rowcol; int w1 = lower+1; mrc.length=nrows;
REPORT
int s = c-lower; int o = c*w1;
if (s<0) { w1 += s; o -= s; s = 0; }
mrc.skip = s;
if (+(mrc.cw*DirectPart))
{ REPORT mrc.data = store+o; mrc.storage = w1; }
else
{
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricBandMatrix::GetCol(MatrixRowCol&)"));
int w = w1+lower; s += w-ncols; Real* ColCopy;
if (s>0) w -= s; mrc.storage = w; int w2 = w-w1;
if ( +(mrc.cw*HaveStore) ) { REPORT ColCopy = mrc.data; }
else
{
REPORT ColCopy = new Real [2*lower+1]; MatrixErrorNoSpace(ColCopy);
MONITOR_REAL_NEW("Make (SmBGetCol)",2*lower+1,ColCopy)
mrc.cw += HaveStore; mrc.data = ColCopy;
}
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+o;
while (w1--) *ColCopy++ = *Mstore++;
Mstore--;
while (w2--) { Mstore += lower; *ColCopy++ = *Mstore; }
}
}
}
void SymmetricBandMatrix::GetCol(MatrixColX& mrc)
{
int c=mrc.rowcol; int w1 = lower+1; mrc.length=nrows;
if (+(mrc.cw*DirectPart))
{
REPORT
int b = c*w1+lower;
mrc.skip = c; c += w1-nrows; w1 -= c; mrc.storage = w1;
Real* ColCopy = mrc.data = mrc.store+mrc.skip;
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+b;
// while (w1--) { *ColCopy++ = *Mstore; Mstore += lower; }
if (w1) for (;;)
{ *ColCopy++ = *Mstore; if (!(--w1)) break; Mstore += lower; }
}
}
else
{
REPORT
// do not allow StoreOnExit and !DirectPart
if (+(mrc.cw*StoreOnExit))
Throw(InternalException("SymmetricBandMatrix::GetCol(MatrixColX&)"));
int s = c-lower; int o = c*w1;
if (s<0) { w1 += s; o -= s; s = 0; }
mrc.skip = s;
int w = w1+lower; s += w-ncols;
if (s>0) w -= s; mrc.storage = w; int w2 = w-w1;
Real* ColCopy = mrc.data = mrc.store+mrc.skip;
if (+(mrc.cw*LoadOnEntry))
{
REPORT
Real* Mstore = store+o;
while (w1--) *ColCopy++ = *Mstore++;
Mstore--;
while (w2--) { Mstore += lower; *ColCopy++ = *Mstore; }
}
}
}
// Write a previously extracted column copy back into the band storage,
// starting at the diagonal element and stepping down with stride "lower".
void SymmetricBandMatrix::RestoreCol(MatrixColX& mrc)
{
   REPORT
   int c = mrc.rowcol;
   // address of the diagonal element of column c: store + c*(lower+1) + lower
   Real* Mstore = store + c*lower+c+lower;
   Real* Cstore = mrc.data; int w = mrc.storage;
   // while (w--) { *Mstore = *Cstore++; Mstore += lower; }
   // rewritten so Mstore is not stepped past the final element
   if (w) for (;;)
      { *Mstore = *Cstore++; if (!(--w)) break; Mstore += lower; }
}
// routines for identity matrix
// Row of an identity matrix: a single stored element at position rowcol.
void IdentityMatrix::GetRow(MatrixRowCol& mrc)
{
   REPORT
   mrc.skip=mrc.rowcol; mrc.storage=1; mrc.data=store; mrc.length=ncols;
}
// Column of an identity matrix: a single stored element at position rowcol.
// StoreHere is rejected because the single shared "store" element cannot be
// handed out as caller-owned storage.
void IdentityMatrix::GetCol(MatrixRowCol& mrc)
{
   REPORT
   mrc.skip=mrc.rowcol; mrc.storage=1; mrc.length=nrows;
   if (+(mrc.cw*StoreHere))              // should not happen
      Throw(InternalException("IdentityMatrix::GetCol(MatrixRowCol&)"));
   else  { REPORT mrc.data=store; }
}
// Column of an identity matrix, copied into the caller-supplied buffer.
void IdentityMatrix::GetCol(MatrixColX& mrc)
{
   REPORT
   mrc.skip=mrc.rowcol; mrc.storage=1; mrc.length=nrows;
   // copy the single stored value into the buffer at the diagonal position
   mrc.data = mrc.store+mrc.skip; *(mrc.data)=*store;
}
// Advance to the next row/column: for an identity matrix this just moves the
// single stored element one place along the diagonal.
void IdentityMatrix::NextRow(MatrixRowCol& mrc) { REPORT mrc.IncrId(); }
void IdentityMatrix::NextCol(MatrixRowCol& mrc) { REPORT mrc.IncrId(); }
// Advance to the next column for the buffered (MatrixColX) accessor: write the
// current value back if requested, step the diagonal, then reload the buffer.
void IdentityMatrix::NextCol(MatrixColX& mrc)
{
   REPORT
   // flush the current value into the matrix before advancing
   if (+(mrc.cw*StoreOnExit)) { REPORT *store=*(mrc.data); }
   mrc.IncrDiag();            // must increase mrc.data so need IncrDiag
   int t1 = +(mrc.cw*LoadOnEntry);
   // reload the buffer unless we have stepped past the last column
   if (t1 && mrc.rowcol < ncols) { REPORT *(mrc.data)=*store; }
}
// *************************** destructors *******************************
// Release the workspace buffer if this accessor allocated one (HaveStore set).
MatrixRowCol::~MatrixRowCol()
{
   if (+(cw*HaveStore))
   {
      MONITOR_REAL_DELETE("Free (RowCol)",-1,data) // do not know length
      delete [] data;
   }
}
// On destruction, write any cached row/column back into the owning matrix
// when the StoreOnExit flag is set.
MatrixRow::~MatrixRow() { if (+(cw*StoreOnExit)) gm->RestoreRow(*this); }
MatrixCol::~MatrixCol() { if (+(cw*StoreOnExit)) gm->RestoreCol(*this); }
MatrixColX::~MatrixColX() { if (+(cw*StoreOnExit)) gm->RestoreCol(*this); }
#ifdef use_namespace
}
#endif
| {
"pile_set_name": "Github"
} |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.engine;
import org.apache.lucene.index.FilterDirectoryReader;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.InternalEngine;
import java.io.IOException;
import java.util.function.Function;
/**
 * Test variant of {@link InternalEngine} that delegates lifecycle decisions
 * (whether to flush on close) and searcher wrapping to {@link MockEngineSupport}.
 */
final class MockInternalEngine extends InternalEngine {

    // Lazily created helper; see support().
    private MockEngineSupport support;

    // Reader wrapper class handed to MockEngineSupport.
    private Class<? extends FilterDirectoryReader> wrapperClass;

    MockInternalEngine(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) throws EngineException {
        super(config);
        wrapperClass = wrapper;
    }

    private synchronized MockEngineSupport support() {
        // lazy initialized since we need it already on super() ctor execution :(
        if (support == null) {
            support = new MockEngineSupport(config(), wrapperClass);
        }
        return support;
    }

    /**
     * Closes the engine. MockEngineSupport decides whether this close should
     * first flush (FLUSH_AND_CLOSE) or close directly (CLOSE).
     */
    @Override
    public void close() throws IOException {
        switch (support().flushOrClose(MockEngineSupport.CloseAction.CLOSE)) {
            case FLUSH_AND_CLOSE:
                flushAndCloseInternal();
                break;
            case CLOSE:
                super.close();
                break;
        }
    }

    /**
     * Flushes and closes the engine. MockEngineSupport may downgrade the
     * requested flush-and-close to a plain close (CLOSE).
     */
    @Override
    public void flushAndClose() throws IOException {
        switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) {
            case FLUSH_AND_CLOSE:
                flushAndCloseInternal();
                break;
            case CLOSE:
                super.close();
                break;
        }
    }

    // Performs the actual flush-and-close, unless flushing on close has been
    // disabled via MockEngineSupport, in which case we close without flushing.
    private void flushAndCloseInternal() throws IOException {
        if (support().isFlushOnCloseDisabled() == false) {
            super.flushAndClose();
        } else {
            super.close();
        }
    }

    /** Acquires a searcher and lets MockEngineSupport wrap it. */
    @Override
    public Engine.Searcher acquireSearcher(String source, SearcherScope scope) {
        final Engine.Searcher engineSearcher = super.acquireSearcher(source, scope);
        return support().wrapSearcher(engineSearcher);
    }

    /** Chains the MockEngineSupport searcher wrapper after the caller-supplied one. */
    @Override
    public SearcherSupplier acquireSearcherSupplier(Function<Searcher, Searcher> wrapper, SearcherScope scope) throws EngineException {
        return super.acquireSearcherSupplier(wrapper.andThen(s -> support().wrapSearcher(s)), scope);
    }
}
| {
"pile_set_name": "Github"
} |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.emulator.memory;
import ghidra.program.model.address.Address;
/**
 * A {@link MemoryLoadImage} backed by a {@link ProgramMappedMemory}: every
 * load, write-back and disposal request is forwarded to the wrapped memory.
 */
public class ProgramMappedLoadImage implements MemoryLoadImage {

	private final ProgramMappedMemory memory;

	/**
	 * Construct a load image over the given program-mapped memory.
	 * @param memory the backing memory this image delegates to
	 */
	public ProgramMappedLoadImage(ProgramMappedMemory memory) {
		this.memory = memory;
	}

	@Override
	public byte[] loadFill(byte[] bytes, int size, Address addr, int offset,
			boolean generateInitializedMask) {
		// The backing memory performs the read and, when requested, produces
		// the initialized-mask as part of the same call.
		return memory.read(bytes, size, addr, offset, generateInitializedMask);
	}

	@Override
	public void writeBack(byte[] bytes, int size, Address addr, int offset) {
		memory.write(bytes, size, addr, offset);
	}

	@Override
	public void dispose() {
		memory.dispose();
	}
}
| {
"pile_set_name": "Github"
} |
<?php
/***************************************************************************
Copyright (C) 2005-2008 GetBoo project
http://sourceforge.net/projects/getboo/
http://www.getboo.com/
This file is part of GetBoo.
GetBoo is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
GetBoo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GetBoo; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
***************************************************************************/
/*
* GetBoo Configuration File
*
* Required configuration: Database
*
* dbhost: SQL database hostname.
* dbport: SQL database port.
* dbuname: SQL username.
* dbpass: SQL password.
* dbname: SQL database name.
* dbtype: SQL database type.
*/
// Database connection settings (described in the header comment above).
$dbhost = "localhost";  // SQL database hostname
$dbport = "3306";       // SQL database port
$dbuname = "getboo";    // SQL username
$dbpass = "getboo";     // SQL password
$dbname = "getboo";     // SQL database name
$dbtype = "mysqli";     // SQL driver / database type

// Prefix prepended to every table name (empty string = no prefix).
define("TABLE_PREFIX", "");

// Absolute filesystem path of this installation, with trailing separator.
define('ABSPATH', dirname(__FILE__). DIRECTORY_SEPARATOR);

// Load the main runtime configuration unless we were included from a
// connection script. NOTE(review): $from_conn is assumed to be set by the
// includer; when this file is loaded directly it is undefined -- confirm.
if(!$from_conn) include('includes/config.php');
?> | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
<maintainer type="person">
<email>[email protected]</email>
<name>James Le Cuirot</name>
</maintainer>
</pkgmetadata>
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2006 Andras Kovacs
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
// Hungarian resources for the video-compressor chooser (msvfw32).
LANGUAGE LANG_HUNGARIAN, SUBLANG_DEFAULT

STRINGTABLE
{
    IDS_FULLFRAMES "Teljes képkockák (tömörítetlen)"
}

// "Video compression" dialog: compressor selection, quality slider,
// key-frame interval and data-rate options.
ICM_CHOOSE_COMPRESSOR DIALOG 36, 24, 210, 95
STYLE DS_MODALFRAME | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU
CAPTION "Video tömörítés"
FONT 8, "MS Shell Dlg"
{
    DEFPUSHBUTTON "OK",IDOK,154,2,52,14
    PUSHBUTTON "Mégse",IDCANCEL,154,18,52,14
    LTEXT "&Tömörítõ:",-1,9,6,135,8
    COMBOBOX IDC_COMP_LIST,9,16,135,67,CBS_DROPDOWNLIST|WS_VSCROLL|WS_TABSTOP
    PUSHBUTTON "Beállí&tás...",IDC_CONFIGURE,154,36,52,14
    PUSHBUTTON "&About",IDC_ABOUT,154,52,52,14
    SCROLLBAR IDC_QUALITY_SCROLL,9,44,135,10,WS_TABSTOP
    LTEXT "Tömörítési minõsé&g:",IDC_QUALITY_TXT,9,34,135,8
    CONTROL "&Kulcs képkocka minden",IDC_KEYFRAME_CHECKBOX,"Button",BS_AUTOCHECKBOX|WS_TABSTOP,9,60,86,12
    EDITTEXT IDC_KEYFRAME,98,60,22,12
    LTEXT "képkockánként",IDC_KEYFRAME_FRAMES,123,62,26,10
    CONTROL "A&dat arány",IDC_DATARATE_CHECKBOX,"Button",BS_AUTOCHECKBOX|WS_TABSTOP,9,76,86,12
    EDITTEXT IDC_DATARATE,98,76,22,12
    LTEXT "KB/sec",IDC_DATARATE_KB,123,78,26,10
}
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2001-2008 Joel de Guzman
Copyright (c) 2001-2008 Hartmut Kaiser
http://spirit.sourceforge.net/
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
// Compatibility shim: forwards to the Spirit "classic" chset operators header.
#ifndef BOOST_SPIRIT_INCLUDE_CLASSIC_CHSET_OPERATORS
#define BOOST_SPIRIT_INCLUDE_CLASSIC_CHSET_OPERATORS

#include <boost/spirit/home/classic/utility/chset_operators.hpp>

#endif
| {
"pile_set_name": "Github"
} |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
[-
our $int16;
our $prefix = 'h';
our $shareI = 64;
our $shareF = 64;
our $stepI = 32;
our $stepF = 32;
our $remapF = 1;
our $remapI = 1;
our $convert = $int16 ? 'I2F.F32.S16' : 'F2F.F32.F16';
sub convert {return $convert;}
-]
<INCLUDE file="xconv_xprop_common.sass"/>
<CONSTANT_MAPPING>
addr_zero : 4x<64*8*2 + 64*8*2 + 0>
szShareF : (64*8)
szShareI : (64*8)
addr_zero : 4x<64*8*2 + 64*8*2 + 0>
addr_mpqk : 4x<64*8*2 + 64*8*2 + 4>
addr_m : 4x<64*8*2 + 64*8*2 + 4>
addr_p : 4x<64*8*2 + 64*8*2 + 5>
addr_q : 4x<64*8*2 + 64*8*2 + 6>
addr_k : 4x<64*8*2 + 64*8*2 + 7>
addr_szLut : 4x<64*8*2 + 64*8*2 + 8>
addr_lut : 4x<64*8*2 + 64*8*2 + 10>
[+ params() +]
</CONSTANT_MAPPING>
<REGISTER_MAPPING>
3, 2,11,10,19,18,27,26 : cx<0-7>y0
7, 6,15,14,23,22,31,30 : cx<0-7>y1
1, 0, 9, 8,17,16,25,24 : cx<0-7>y2
5, 4,13,12,21,20,29,28 : cx<0-7>y3
35,34,43,42,51,50,59,58 : cx<0-7>y4
39,38,47,46,55,54,63,62 : cx<0-7>y5
33,32,41,40,49,48,57,56 : cx<0-7>y6
37,36,45,44,53,52,61,60 : cx<0-7>y7
0-63 : czero<00-63>
64-67 : mpqk<0-3>
64-67 : m, p, q
64-71 : idx_M, idx_P, idx_Q, idx_K, idx_N, tid, tidY, negOne
72-111 ~ tid1, tidX, idx_MPQk, idx_PQk, idx_Qk, idx_k, magic_PQk, magic_Qk, neg_PQk, neg_Qk, neg_k, div1, div2, div3, idx_P2, idx_Q2, q1, q2
72-111 ~ mask_shr, rst, lutStore, lutStore2, warp_count, mt, pr, qs, dep_thd_mask, dep_thd_bits, dep_thd_cnt, t, r, s, rs, x, y, z, ballot, warp_slices, partial, endCRST, str_d, str_h, str_w, rst_prime, x_prime, y_prime, z_prime
64-79 : j0Ix<0-7>, j0Fy<0-7>
80-95 : j1Ix<0-7>, j1Fy<0-7>
96-99 : trackI<0-1>, trackF<0-1>
100-103 : loadI<0-3>
100-103 : storeI<0-3>
104-107 : storeI<4-7>
108-111 : loadF<0-3>
108-111 : storeF<0-3>
104-107 : storeF<4-7>
104-107 ~ offsetF
112-113 : sliceI, sliceF
112-113 : sliceIF<0-1>
114-125 ~ writeS, offsetIn, offsetFk, posCRST, posCRSTf, channel, lutSize, lutSizeRcp, lutOffset, offsetI, offsetIc, offsetFc
126-127 ~ readFs, readIs
72-91 : cs<0-7>, c<0-3>, b<0-7>
72-83 ~ x<0-7>
92-99 : out<0-7>
100-101 : Out<0-1>
102-103 : Sum<0-1>
104-125 ~ writeCs, readCs, alpha, k, n, sum<0-3>, offset, out_offset, bsum_offset, tidOX, tidOY, preds, one
</REGISTER_MAPPING>
--:-:1:-:1 S2R tid, SR_TID.X;
--:-:2:-:1 S2R idx_MPQk, SR_CTAID.X;
--:-:3:-:1 S2R idx_K, SR_CTAID.Y;
--:-:4:-:1 S2R idx_N, SR_CTAID.Z;
<SCHEDULE_BLOCK>
01:-:-:-:1 ISETP.GE.AND P0, PT, tid, 32, PT;
[+ load_zeros() +]
[+ get_mpqk() +]
// tidX = (tid & 7) << 3
// tidY = tid >> 3
--:-:-:-:1 LOP.AND tidX, tid, 7;
--:-:-:-:1 SHL tidX, tidX, 3;
--:-:-:-:1 SHR.U32 tidY, tid, 3;
// trackF += blkF*64 + tidX
--:-:-:-:1 ISCADD offsetFk, idx_K, tidX, 6;
// trackI += blkI*64 + tidX
08:-:-:-:1 ISCADD offsetIn, idx_N, tidX, 6;
// Remap the X dim to avoid bank conflicts when storing to shared
// We can unmap this in the output
--:-:-:-:1 SHR.U32 tidX, tidX, 1;
// writeS = (64*tidY + tidX) * 4
--:-:-:-:1 ISCADD writeS, tidY, tidX, 6;
--:-:-:-:1 SHL writeS, writeS, 2;
// readFs = (((tid & -16) >> 3) | (tid & 1)) << 4;
--:-:-:-:1 LOP.AND tid1, tid, 1;
--:-:-:-:1 LOP.AND readFs, tid, -16;
--:-:-:-:1 SHR.U32 readFs, readFs, 3;
--:-:-:-:1 LOP.OR readFs, readFs, tid1;
--:-:-:-:0 SHL readFs, readFs, 4;
// readIs = ((tid >> 1) & 7) << 4 + 4x<8*64>;
--:-:-:-:1 BFE.U32 readIs, tid, 0x301; // 3 bits at position 1
--:-:-:-:1 ISCADD readIs, readIs, 4x<szShareF>, 4;
</SCHEDULE_BLOCK>
[+ load_lut() +]
--:-:1:-:1 @P1 LDG.E.CI.128 loadF0, [trackF];
--:-:5:-:1 @!P1 LDS.U.128 loadF0, [addr_zero];
--:-:2:-:1 @P1 LDG.E.128 loadI0, [trackI];
--:-:6:-:1 @!P1 LDS.U.128 loadI0, [addr_zero];
11:-:-:-:1 [+ convert() +] storeF7, loadF3.H1;
--:-:-:-:1 [+ convert() +] storeF6, loadF3.H0;
--:-:-:-:1 [+ convert() +] storeF5, loadF2.H1;
--:-:1:-:1 [+ convert() +] storeF4, loadF2.H0;
--:-:-:-:1 [+ convert() +] storeF3, loadF1.H1;
--:-:-:-:1 [+ convert() +] storeF2, loadF1.H0;
--:-:-:-:1 [+ convert() +] storeF1, loadF0.H1;
--:-:5:-:1 [+ convert() +] storeF0, loadF0.H0;
01:1:-:-:1 STS.128 [writeS + 4x<0*64 + 32>], storeF4;
10:-:-:-:1 STS.128 [writeS + 4x<0*64 + 0>], storeF0;
23:-:-:-:1 [+ convert() +] storeI7, loadI3.H1;
--:-:-:-:1 [+ convert() +] storeI6, loadI3.H0;
--:-:-:-:1 [+ convert() +] storeI5, loadI2.H1;
--:-:1:-:1 [+ convert() +] storeI4, loadI2.H0;
--:-:-:-:1 [+ convert() +] storeI3, loadI1.H1;
--:-:-:-:1 [+ convert() +] storeI2, loadI1.H0;
--:-:-:-:1 [+ convert() +] storeI1, loadI0.H1;
--:-:5:-:1 [+ convert() +] storeI0, loadI0.H0;
01:-:-:-:1 STS.128 [writeS + 4x<8*64 + 32>], storeI4;
10:1:-:-:1 STS.128 [writeS + 4x<8*64 + 0>], storeI0;
[+ loop_setup() +]
--:-:2:-:2 @P1 LDG.E.CI.128 loadF0, [trackF + 4x< 0>];
--:-:3:-:1 @P1 LDG.E.128 loadI0, [trackI + 4x< 0>];
[-
our $convert;
our %insert =
(
j0c1 => "--:-:-:-:1 ISETP.GE.AND P1, PT, posCRST, RZ, PT;\n",
j0c3 => "--:-:-:-:1 ISETP.GE.AND P0, PT, posCRST, -8, PT;\n",
j0c13 => "--:-:6:-:1 \@P1 I2F.F32.S32 posCRSTf, posCRST;\n",
j0c39 => "20:-:-:-:1 \@P1 FMUL channel, posCRSTf, lutSizeRcp;\n",
j0c44 => "--:-:-:-:1 \@P1 FFMA channel, channel, 5.9604644775390625e-08, channel;\n",
j0c46 => "--:-:6:-:1 \@P1 F2I.S32.F32.TRUNC channel, channel;\n",
j1c8 => "20:-:-:-:1 \@P1 VMAD.U16.U16 lutOffset, -channel, lutSize, posCRST;\n",
j1c13 => "--:-:-:-:1 \@P1 SHL lutOffset, lutOffset, 3;\n",
j1c17 => "--:-:6:-:1 \@P1 LDS.U.64 sliceIF, [lutOffset + addr_lut];\n",
j1c20 => "--:-:-:-:1 \@P1 XMAD offsetFc, channel, param_KRST, RZ;\n",
j1c25 => "--:-:-:-:1 \@P1 XMAD offsetIc, channel, param_DHWN, RZ;\n",
j1c31 => "--:-:-:-:1 \@P1 XMAD.PSL offsetIc, channel, param_DHWN.H1, offsetIc;\n",
j1c32 => "--:-:-:-:1 IADD posCRST, posCRST, -8;\n",
j1c18 => "02:-:-:-:1 \@P0 $convert storeF7, loadF3.H1;\n",
j1c22 => "--:-:-:-:1 \@P0 $convert storeF6, loadF3.H0;\n",
j1c26 => "--:-:-:-:1 \@P0 $convert storeF5, loadF2.H1;\n",
j1c30 => "--:-:5:-:1 \@P0 $convert storeF4, loadF2.H0;\n",
j1c33 => "--:-:-:-:1 \@P0 $convert storeF3, loadF1.H1;\n",
j1c37 => "--:-:-:-:1 \@P0 $convert storeF2, loadF1.H0;\n",
j1c41 => "--:-:-:-:1 \@P0 $convert storeF1, loadF0.H1;\n",
j1c45 => "--:-:2:-:1 \@P0 $convert storeF0, loadF0.H0;\n",
j1c47 => "10:5:-:-:1 \@P0 STS.128 [writeS + 4x<0*64 + 32>], storeF4;\n",
j1c62 => "02:2:-:-:1 \@P0 STS.128 [writeS + 4x<0*64 + 0>], storeF0;\n",
j2c19 => "30:-:-:-:1 \@P1 IADD3 offsetF, offsetFk, offsetFc, sliceF;\n",
j2c24 => "--:-:-:-:1 \@P1 LEA trackF0.CC, offsetF, param_F[0], 1;\n",
j2c26 => "--:-:-:-:1 \@P1 IADD3 offsetI, offsetIn, offsetIc, sliceI;\n",
j2c28 => "--:-:-:-:1 \@P1 LEA.HI.X trackF1, offsetF, param_F[1], RZ, 1;\n",
j2c30 => "02:-:2:-:1 \@P1 LDG.E.CI.128 loadF0, [trackF];\n",
j5c29 => "04:-:-:-:1 \@P0 $convert storeI7, loadI3.H1;\n",
j5c33 => "--:-:-:-:1 \@P0 $convert storeI6, loadI3.H0;\n",
j5c37 => "--:-:-:-:1 \@P0 $convert storeI5, loadI2.H1;\n",
j5c41 => "--:-:5:-:1 \@P0 $convert storeI4, loadI2.H0;\n",
j5c45 => "--:-:-:-:1 \@P0 $convert storeI3, loadI1.H1;\n",
j5c49 => "--:-:-:-:1 \@P0 $convert storeI2, loadI1.H0;\n",
j5c53 => "--:-:-:-:1 \@P0 $convert storeI1, loadI0.H1;\n",
j5c57 => "--:-:3:-:1 \@P0 $convert storeI0, loadI0.H0;\n",
j5c59 => "10:-:-:-:1 \@P0 STS.128 [writeS + 4x<8*64 + 32>], storeI4;\n",
j6c8 => "04:3:-:-:1 \@P0 STS.128 [writeS + 4x<8*64 + 0>], storeI0;\n",
j6c50 => "--:-:-:-:1 \@P1 LEA trackI0.CC, offsetI, param_I[0], 1;\n",
j6c55 => "--:-:-:-:1 \@P1 LEA.HI.X trackI1, offsetI, param_I[1], RZ, 1;\n",
j6c61 => "04:-:3:-:1 \@P1 LDG.E.128 loadI0, [trackI];\n",
j6c62 => "--:-:-:-:5 \@P0 BAR.SYNC 0;\n" .
"--:-:-:-:1 \@P0 LOP.XOR readIs, readIs, 4x<64*8*2>;\n" .
"--:-:-:-:1 \@P0 LOP.XOR readFs, readFs, 4x<64*8*2>;\n" .
"--:-:-:-:1 \@P0 LOP.XOR writeS, writeS, 4x<64*8*2>;\n",
j7c63 => "--:-:-:Y:5 \@P0 BRA.U LOOP;\n",
);
-]
LOOP:
[+ main_loop() +]
--:-:1:-:1 LDS.U.128 mpqk, [addr_mpqk];
--:-:2:-:1 S2R tid, SR_TID.X;
--:-:3:-:1 S2R idx_N, SR_CTAID.Z;
<SCHEDULE_BLOCK>
// tidOX = (tid & 7) << 3
// tidOY = tid >> 3
02:-:-:-:1 LOP.AND tidOX, tid, 7;
--:-:-:-:1 SHL tidOX, tidOX, 3;
--:-:-:-:1 SHR.U32 tidOY, tid, 3;
--:-:-:-:1 LOP.AND readIs, readIs, 0x7ff;
--:-:-:-:1 LOP.AND readFs, readFs, 0x7ff;
// Expand back out to undo our bank conflict avoiding stride
--:-:-:-:1 SHL readIs, readIs, 1;
// Div by 4 here collapses k stride
// writeCs = (readFs / 4) * 64 + readIs;
--:-:-:-:1 ISCADD writeCs, readFs, readIs, 4;
// readCs = 4 * (tidOX + (tidOY * 64))
--:-:-:-:1 ISCADD readCs, tidOY, tidOX, 6;
--:-:-:-:1 SHL readCs, readCs, 2;
// n = blkI*64 + tidOX;
04:-:-:-:1 ISCADD n, idx_N, tidOX, 6;
// Mul by 4 here expands k stride back out
// Mul by 2 again to undo the bank conflict avoiding stride
// k = blkF*64 + tidOY * 8
--:-:-:-:1 SHL tidOY, tidOY, 3;
01:-:-:-:1 ISCADD k, idx_K, tidOY, 6;
[+ output_setup(63, 0, 6) +]
</SCHEDULE_BLOCK>
[+ output() +]
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2006 Tobias Schwinger
http://spirit.sourceforge.net/
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
// Forward declarations and Boost.Typeof registrations for the parser templates
// defined in the Spirit "classic" dynamic headers (if/for/while/lazy/switch/...).
#if !defined(BOOST_SPIRIT_DYNAMIC_TYPEOF_HPP)
#define BOOST_SPIRIT_DYNAMIC_TYPEOF_HPP

#include <boost/typeof/typeof.hpp>
#include <boost/spirit/home/classic/namespace.hpp>
#include <boost/spirit/home/classic/core/typeof.hpp>

#include <boost/spirit/home/classic/dynamic/stored_rule_fwd.hpp>

namespace boost { namespace spirit {

BOOST_SPIRIT_CLASSIC_NAMESPACE_BEGIN

// if.hpp
template <class ParsableT, typename CondT> struct if_parser;
template <class ParsableTrueT, class ParsableFalseT, typename CondT>
struct if_else_parser;

// for.hpp
namespace impl {
template<typename InitF, typename CondT, typename StepF, class ParsableT>
struct for_parser;
}

// while.hpp
template<typename ParsableT, typename CondT, bool is_do_parser>
struct while_parser;

// lazy.hpp
template<typename ActorT> struct lazy_parser;

// rule_alias.hpp
template <typename ParserT> class rule_alias;

// switch.hpp
template <typename CaseT, typename CondT> struct switch_parser;
template <int N, class ParserT, bool IsDefault> struct case_parser;

// select.hpp
template <typename TupleT, typename BehaviourT, typename T>
struct select_parser;

BOOST_SPIRIT_CLASSIC_NAMESPACE_END
}} // namespace BOOST_SPIRIT_CLASSIC_NS

// Register each template with Boost.Typeof: the trailing argument is the
// template arity, or the explicit parameter-kind list for non-type parameters.
#include BOOST_TYPEOF_INCREMENT_REGISTRATION_GROUP()

// if.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::if_parser,2)
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::if_else_parser,3)

// for.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::impl::for_parser,4)

// while.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::while_parser,(class)(class)(bool))

// lazy.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::lazy_parser,1)

// stored_rule.hpp (has forward header)
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::stored_rule,(typename)(typename)(typename)(bool))
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::stored_rule,3)
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::stored_rule,2)
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::stored_rule,1)
BOOST_TYPEOF_REGISTER_TYPE(BOOST_SPIRIT_CLASSIC_NS::stored_rule<>)

// rule_alias.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::rule_alias,1)

// switch.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::switch_parser,2)
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::case_parser,(int)(class)(bool))

// select.hpp
BOOST_TYPEOF_REGISTER_TEMPLATE(BOOST_SPIRIT_CLASSIC_NS::select_parser,3)

#endif
| {
"pile_set_name": "Github"
} |
spring:
application:
name: eureka-server
server:
port: 7000
eureka:
client:
service-url:
defaultZone: http://localhost:7000/eureka/
fetch-registry: false
register-with-eureka: false | {
"pile_set_name": "Github"
} |
// Use the standard ember-resolver as this application's resolver.
import Resolver from 'ember-resolver';

export default Resolver;
| {
"pile_set_name": "Github"
} |
Subject: today
hey ,
last week , jo and me were talking about my prbolem and he said that he tried this
new thing from this site and it worked great for him .
cowry avoid http : / / ns 2 . herbalonline . biz / en / 10 / ? aff _ id = 00450 footmen give me
your thoughts on it and shoot me back an email and tell me what you found out if any .
any input would be appreciated it .
later ,
small
flutter atreus absenteeism oriental stratagem hunt amigo attitudinal twice curio rental billion inclusive ruanda screwball birch walpole canary seward floe lisa monadic harriman capitol colloquy laborious expressway b ' s salaried beware delusive congratulatory ante . | {
"pile_set_name": "Github"
} |
# DRAFT Meeting notes from the workshop "Advanced/Hybrid Fixed Layouts in EPUB" #
[TokyoWorshopForAdvancedHybridFixedLayouts](TokyoWorshopForAdvancedHybridFixedLayouts.md)
Attendees:GYLLING, Markus (IDPF); MURATA, Makoto (IDPF); KOBAYASHI, Tatsuo (IDPF); McCOY, Bill (IDPF); NAGAI, Seiji (ACCESS Co., Ltd.); YAMAMOTO, Taro (Mr.) (Adobe Systems Incorporated); KOPP, Matthieu (Aquafadas); LESTER, Jim (Barnes & Noble, Inc.); KROUPA, Brady (Barnes & Noble, Inc.); FUJISAWA, Jun (Canon, Inc.); YOSHIMURA, Wataru (DNP Digitalcom Co., Ltd.); TAKASE, Hiroshi (EAST Co.,Ltd.); HEKI, Tatsuo (FUJIFILM Corpoation); MURATA, Masao (FUJIFILM Corpoation); NONAKA, Shunichirou (FUJIFILM Corporation); OHMURA, Yoshinori (Mr.) (Impress Holdings, Inc.); ISHII, Koji (Mr.) (W3C CSS WG); TAKAMI, Shinya (Mr.) (Rakuten, Inc.); KANAI, Takeshi (Mr.) (Sony Corporation); SHIMADA, Hiroshi (Shogakukan Inc.); OKAMOTO, Masashi (SHUEISHA Inc.); AKIMOTO, Ryoji (Toppan Printing Co., Ltd.); KOIKE, Toshiaki (Voyager Japan Inc.); KITAHARA, Masakazu (Voyager Japan Inc.); KAWAMURA, Hiroshi (DAISY Consortium); HAMADA, Mayu (Ms) (Assistive Technology Development Organization); OGATA, Katsuhiro (USAPARA corp.)
## 1. Logistics and Introduction (10:00 - 10:15) ##
Heki-san of Fujifilm welcomed attendees.
Bill McCoy of IDPF gave an introduction to the IDPF IP policy.
## 2. Morning session (10:15 - 12:10) ##
The agenda and presentation slides are available at [TokyoWorshopForAdvancedHybridFixedLayouts](TokyoWorshopForAdvancedHybridFixedLayouts.md).
### 1) Murata ###
After the presentation, there was some discussions about the proposed
classification: "Paper first, Digital second", "Digital first, paper
second", and "Digital first, paper never". The first two groups are
common in the US and Japan, while the last group is common in Korea.
Markus pointed out that standardization for the last group might be too
early.
### 2) Aquafadas ###
The PPT file (linked from this workshop announcement page) contains
interactive animation. Additional video is also available.
After the presentation, there was a question about authoring costs. The
presenter replied that they have started an automatic cell recognition.
### 3) Shuueisha ###
Open Manga Format of Shuueisha is derived from EPUB. There are three
groups of files: those for the advanced view mode, those for the
standard view mode, and the program Library for the advanced view mode.
Images with DRM and those without DRM are stored in separate
directories.
An OMF publication consists of two package documents, namely
standard.opf and advanced.opf. The standard.opf contains image files
(without HTML wrappers), and is intended for ePaper devices. The
advanced.opf contains a single XHTML file (advancedpages.xhml),
CSS(style.css), and Javascript (content.js, device.js, script.js, and
porting.js). On top of the standard mode and the advanced mode,
Shuueisha has developed the cutting mode, which provides cinematic
animation and cell transitions. But this mode is not included in OMF.
advanced.xhtml does NOT reference to image files. It references to
Javascript files and style.css, and also specifies a canvas. The
content.js file contains instructions for specifying which image file
consists of this OMF publication. Meanwhile, script.js, device.js, and
porting.js provide an OMF library that is NOT tailored to each OMF
publication.
There was a question about conformance to EPUB3. Since the fallback
attribute is not specified for each image file in the standard.opf, OMF
is not strictly conformant. The extension by B&N is also non-conformant
for the same reason. There was a discussion for avoiding or even
lifting the restriction that the fallback attribute is mandatory.
There was also a question about content authoring. An authoring
environment for the standard mode will be freely available from Digital
Comic Association of Japan. It will be very easy to create OMF
publications from existing image files
There is another editing environment, Manga Director Tool. This
environment allows the author to specify cell region information, etc.
for the support of the cutting mode. But the authoring cost for this
cutting mode is quite heavy.
Later, there was a question about inconsistencies between future
versions of webkit and the OMF javascript library. If such
inconsistencies arise, it will become necessary to republish the
OMF publications.
## 3. Afternoon session ##
### 1) B&N ###
An [example](http://code.google.com/p/epub-revision/downloads/detail?name=COSMO-20101001_drp.epub) by B&N is available on the IDPF Google Code page.
Rendition mapping is used for both inter-rendition navigation and
intra-rendition navigation (hotspots).
After the presentation, there was a question about the direct use of images.
### 2) Kanai (Sony) ###
After the presentation, there were some discussions about
single-image-only templates that allow centering the image.
There was also discussions about rendition selection beyond media
queries (e.g, natural-language-based selection). Similarities between
content negotiation and rendition selection were pointed out. The
"rendition type" was suggested as a hint for rendition selection.
### 3) Heki and Murata (Fujifilm) ###
After the presentation, there was a question about automatic
determination of cell ordering. The answer was that it is not
completely automatic but requires manual intervention.
There was a suggestion to represent a spread as a single content
document rather than synthesizing a spread from two content
documents.
### 4) Summary ###
Markus and Murata presented an overview summary of the functionality topics that had been demonstrated/discussed:
1. Rendition Mapping
* inter-rendition (1-1 or n-n)
* intra-rendition mapping
* region/area information
* hotspots
* sequence of areas
* hierarchies of regions (scenes & shots)
1. Rendition Selection
* W3C Media Queries scope
* additional queries (multi-lingual, writing modes, EPUB version, etc)
* rendition metadata ("discovery") in container.xml
1. Page & Sub-page navigation
* sub-page navigation made possible by intra-rendition mapping
* Transition effects
* on page navigation
* on sub-page navigation
* Gesture and navigation inc overriding primary nav
* Real text areas (balloons)
1. Device Adaptation
* "reading/interaction modes" (OMF)
* allowing fidelity
1. User/Context Adaptation
* multilinguality
* print disabilities, situational disabilities, day+night
> Noted differences in approach are shown below:
* Declarative vs Scripted (note that EPUB has stated design preference for declarative solutions)
* Use Web Technologies vs Preserve Battery
* Urgency
### 5) Next steps ###
Formation of ad-hoc working group to collect use cases and functional requirements; these will be basis for charter proposal to IDPF membership.
Markus and Murata will be convenors of ad-hoc group, eventually 2 co-chairs of formal WG will be needed
Primary topics: rendition mapping (inter- and intra), rendition selection (inc container.xml metadata), and possibly page+sub-page transition effects/events.
Group agrees to target finalized specification by years end; for this reason, avoiding scope creep and focusing on most urgent aspects is of utmost importance. | {
"pile_set_name": "Github"
} |
-- Connect to a kRPC server and print the current RPC traffic rates.
local krpc = require 'krpc'
local conn = krpc.connect()
-- get_status returns server statistics; bytes_read_rate / bytes_written_rate
-- are presumably bytes per second (the code below converts them to KB/s).
local status = conn.krpc:get_status()
print(string.format('Data in = %.2f KB/s', status.bytes_read_rate/1024))
print(string.format('Data out = %.2f KB/s', status.bytes_written_rate/1024))
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/env.h"
#include "port/port.h"
#include "util/testharness.h"
namespace leveldb {
static const int kDelayMicros = 100000;
// Test fixture used by the TEST macros below; supplies the default Env.
// The previous private members (a mutex and an event string) were never used
// by any test in this file and have been removed as dead code.
class EnvPosixTest {
 public:
  Env* env_;
  EnvPosixTest() : env_(Env::Default()) { }
};
// Callback for Env::Schedule: stores a non-NULL marker into the given
// AtomicPointer so the test can observe that the callback ran.
static void SetBool(void* ptr) {
  reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
}
// Work handed to Schedule() should run shortly after being scheduled.
TEST(EnvPosixTest, RunImmediately) {
  port::AtomicPointer called (NULL);
  env_->Schedule(&SetBool, &called);
  // Give the background thread time to run the callback.
  Env::Default()->SleepForMicroseconds(kDelayMicros);
  ASSERT_TRUE(called.NoBarrier_Load() != NULL);
}
// Scheduled callbacks must execute in the order they were scheduled: each
// callback verifies that the previous id has been stored before writing its own.
TEST(EnvPosixTest, RunMany) {
  port::AtomicPointer last_id (NULL);

  struct CB {
    port::AtomicPointer* last_id_ptr;   // Pointer to shared slot
    uintptr_t id;                       // Order# for the execution of this callback

    CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }

    static void Run(void* v) {
      CB* cb = reinterpret_cast<CB*>(v);
      // The predecessor's id must already be in the shared slot.
      void* cur = cb->last_id_ptr->NoBarrier_Load();
      ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
      cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
    }
  };

  // Schedule in different order than start time
  CB cb1(&last_id, 1);
  CB cb2(&last_id, 2);
  CB cb3(&last_id, 3);
  CB cb4(&last_id, 4);
  env_->Schedule(&CB::Run, &cb1);
  env_->Schedule(&CB::Run, &cb2);
  env_->Schedule(&CB::Run, &cb3);
  env_->Schedule(&CB::Run, &cb4);

  // Wait for all four callbacks, then check the last id written.
  Env::Default()->SleepForMicroseconds(kDelayMicros);
  void* cur = last_id.Acquire_Load();
  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
}
// Shared state for the StartThread test; mu guards both counters.
struct State {
  port::Mutex mu;
  int val;           // incremented once per worker thread
  int num_running;   // decremented as each worker finishes
};
// Worker body: bump the shared counter and mark this thread as finished.
// Both updates happen under the mutex so the main thread's polling is safe.
static void ThreadBody(void* arg) {
  State* s = reinterpret_cast<State*>(arg);
  s->mu.Lock();
  s->val += 1;
  s->num_running -= 1;
  s->mu.Unlock();
}
// Three threads each increment state.val exactly once; the main thread
// polls num_running (under the mutex) until every worker has finished.
TEST(EnvPosixTest, StartThread) {
  State state;
  state.val = 0;
  state.num_running = 3;
  for (int i = 0; i < 3; i++) {
    env_->StartThread(&ThreadBody, &state);
  }
  // Poll until all workers report completion; sleep between polls to
  // avoid busy-waiting.
  while (true) {
    state.mu.Lock();
    int num = state.num_running;
    state.mu.Unlock();
    if (num == 0) {
      break;
    }
    Env::Default()->SleepForMicroseconds(kDelayMicros);
  }
  ASSERT_EQ(state.val, 3);
}
} // namespace leveldb
// Run every TEST registered above through leveldb's test harness.
int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}
| {
"pile_set_name": "Github"
} |
#
# best response leduc strategy random_br0.strat
# made on: Fri Mar 29 20:26:34 2013
# opponent: random.strat
# value: 2.14414
#
J:/: 0.000000000 1.000000000 0.000000000
Q:/: 0.000000000 1.000000000 0.000000000
K:/: 0.000000000 1.000000000 0.000000000
J:/rr: 0.000000000 1.000000000 0.000000000
Q:/rr: 0.000000000 1.000000000 0.000000000
K:/rr: 0.000000000 1.000000000 0.000000000
JJ:/rrc/: 1.000000000 0.000000000 0.000000000
JQ:/rrc/: 1.000000000 0.000000000 0.000000000
JK:/rrc/: 0.000000000 1.000000000 0.000000000
QJ:/rrc/: 1.000000000 0.000000000 0.000000000
QQ:/rrc/: 0.000000000 1.000000000 0.000000000
QK:/rrc/: 0.000000000 1.000000000 0.000000000
KJ:/rrc/: 1.000000000 0.000000000 0.000000000
KQ:/rrc/: 0.000000000 1.000000000 0.000000000
KK:/rrc/: 1.000000000 0.000000000 0.000000000
JJ:/rrc/rr: 0.000000000 1.000000000 0.000000000
JQ:/rrc/rr: 0.000000000 1.000000000 0.000000000
JK:/rrc/rr: 0.000000000 0.000000000 1.000000000
QJ:/rrc/rr: 0.000000000 0.000000000 1.000000000
QQ:/rrc/rr: 0.000000000 1.000000000 0.000000000
QK:/rrc/rr: 0.000000000 1.000000000 0.000000000
KJ:/rrc/rr: 0.000000000 1.000000000 0.000000000
KQ:/rrc/rr: 0.000000000 1.000000000 0.000000000
KK:/rrc/rr: 0.000000000 1.000000000 0.000000000
JJ:/rrc/cr: 1.000000000 0.000000000 0.000000000
JQ:/rrc/cr: 0.000000000 0.000000000 1.000000000
JK:/rrc/cr: 1.000000000 0.000000000 0.000000000
QJ:/rrc/cr: 0.000000000 0.000000000 1.000000000
QQ:/rrc/cr: 1.000000000 0.000000000 0.000000000
QK:/rrc/cr: 1.000000000 0.000000000 0.000000000
KJ:/rrc/cr: 1.000000000 0.000000000 0.000000000
KQ:/rrc/cr: 1.000000000 0.000000000 0.000000000
KK:/rrc/cr: 1.000000000 0.000000000 0.000000000
JJ:/rc/: 1.000000000 0.000000000 0.000000000
JQ:/rc/: 0.000000000 1.000000000 0.000000000
JK:/rc/: 0.000000000 1.000000000 0.000000000
QJ:/rc/: 0.000000000 1.000000000 0.000000000
QQ:/rc/: 1.000000000 0.000000000 0.000000000
QK:/rc/: 0.000000000 1.000000000 0.000000000
KJ:/rc/: 0.000000000 1.000000000 0.000000000
KQ:/rc/: 0.000000000 1.000000000 0.000000000
KK:/rc/: 1.000000000 0.000000000 0.000000000
JJ:/rc/rr: 0.000000000 1.000000000 0.000000000
JQ:/rc/rr: 0.000000000 0.000000000 1.000000000
JK:/rc/rr: 0.000000000 0.000000000 1.000000000
QJ:/rc/rr: 0.000000000 0.000000000 1.000000000
QQ:/rc/rr: 0.000000000 1.000000000 0.000000000
QK:/rc/rr: 0.000000000 1.000000000 0.000000000
KJ:/rc/rr: 0.000000000 1.000000000 0.000000000
KQ:/rc/rr: 0.000000000 1.000000000 0.000000000
KK:/rc/rr: 0.000000000 1.000000000 0.000000000
JJ:/rc/cr: 1.000000000 0.000000000 0.000000000
JQ:/rc/cr: 1.000000000 0.000000000 0.000000000
JK:/rc/cr: 1.000000000 0.000000000 0.000000000
QJ:/rc/cr: 1.000000000 0.000000000 0.000000000
QQ:/rc/cr: 1.000000000 0.000000000 0.000000000
QK:/rc/cr: 1.000000000 0.000000000 0.000000000
KJ:/rc/cr: 1.000000000 0.000000000 0.000000000
KQ:/rc/cr: 1.000000000 0.000000000 0.000000000
KK:/rc/cr: 1.000000000 0.000000000 0.000000000
J:/cr: 1.000000000 0.000000000 0.000000000
Q:/cr: 1.000000000 0.000000000 0.000000000
K:/cr: 0.000000000 1.000000000 0.000000000
JJ:/crrc/: 1.000000000 0.000000000 0.000000000
JQ:/crrc/: 0.000000000 1.000000000 0.000000000
JK:/crrc/: 1.000000000 0.000000000 0.000000000
QJ:/crrc/: 0.000000000 1.000000000 0.000000000
QQ:/crrc/: 1.000000000 0.000000000 0.000000000
QK:/crrc/: 0.000000000 1.000000000 0.000000000
KJ:/crrc/: 0.000000000 1.000000000 0.000000000
KQ:/crrc/: 1.000000000 0.000000000 0.000000000
KK:/crrc/: 0.000000000 1.000000000 0.000000000
JJ:/crrc/rr: 0.000000000 1.000000000 0.000000000
JQ:/crrc/rr: 0.000000000 0.000000000 1.000000000
JK:/crrc/rr: 0.000000000 0.000000000 1.000000000
QJ:/crrc/rr: 0.000000000 1.000000000 0.000000000
QQ:/crrc/rr: 0.000000000 1.000000000 0.000000000
QK:/crrc/rr: 0.000000000 1.000000000 0.000000000
KJ:/crrc/rr: 0.000000000 1.000000000 0.000000000
KQ:/crrc/rr: 0.000000000 1.000000000 0.000000000
KK:/crrc/rr: 0.000000000 1.000000000 0.000000000
JJ:/crrc/cr: 1.000000000 0.000000000 0.000000000
JQ:/crrc/cr: 1.000000000 0.000000000 0.000000000
JK:/crrc/cr: 0.000000000 0.000000000 1.000000000
QJ:/crrc/cr: 1.000000000 0.000000000 0.000000000
QQ:/crrc/cr: 1.000000000 0.000000000 0.000000000
QK:/crrc/cr: 1.000000000 0.000000000 0.000000000
KJ:/crrc/cr: 1.000000000 0.000000000 0.000000000
KQ:/crrc/cr: 1.000000000 0.000000000 0.000000000
KK:/crrc/cr: 1.000000000 0.000000000 0.000000000
JJ:/crc/: 1.000000000 0.000000000 0.000000000
JQ:/crc/: 0.000000000 1.000000000 0.000000000
JK:/crc/: 0.000000000 1.000000000 0.000000000
QJ:/crc/: 0.000000000 1.000000000 0.000000000
QQ:/crc/: 1.000000000 0.000000000 0.000000000
QK:/crc/: 1.000000000 0.000000000 0.000000000
KJ:/crc/: 0.000000000 1.000000000 0.000000000
KQ:/crc/: 0.000000000 1.000000000 0.000000000
KK:/crc/: 1.000000000 0.000000000 0.000000000
JJ:/crc/rr: 0.000000000 1.000000000 0.000000000
JQ:/crc/rr: 0.000000000 0.000000000 1.000000000
JK:/crc/rr: 0.000000000 0.000000000 1.000000000
QJ:/crc/rr: 0.000000000 0.000000000 1.000000000
QQ:/crc/rr: 0.000000000 1.000000000 0.000000000
QK:/crc/rr: 0.000000000 1.000000000 0.000000000
KJ:/crc/rr: 0.000000000 1.000000000 0.000000000
KQ:/crc/rr: 0.000000000 1.000000000 0.000000000
KK:/crc/rr: 0.000000000 1.000000000 0.000000000
JJ:/crc/cr: 1.000000000 0.000000000 0.000000000
JQ:/crc/cr: 1.000000000 0.000000000 0.000000000
JK:/crc/cr: 1.000000000 0.000000000 0.000000000
QJ:/crc/cr: 1.000000000 0.000000000 0.000000000
QQ:/crc/cr: 1.000000000 0.000000000 0.000000000
QK:/crc/cr: 1.000000000 0.000000000 0.000000000
KJ:/crc/cr: 1.000000000 0.000000000 0.000000000
KQ:/crc/cr: 1.000000000 0.000000000 0.000000000
KK:/crc/cr: 1.000000000 0.000000000 0.000000000
JJ:/cc/: 0.000000000 1.000000000 0.000000000
JQ:/cc/: 0.000000000 1.000000000 0.000000000
JK:/cc/: 0.000000000 1.000000000 0.000000000
QJ:/cc/: 0.000000000 1.000000000 0.000000000
QQ:/cc/: 1.000000000 0.000000000 0.000000000
QK:/cc/: 0.000000000 1.000000000 0.000000000
KJ:/cc/: 0.000000000 1.000000000 0.000000000
KQ:/cc/: 1.000000000 0.000000000 0.000000000
KK:/cc/: 0.000000000 1.000000000 0.000000000
JJ:/cc/rr: 0.000000000 1.000000000 0.000000000
JQ:/cc/rr: 0.000000000 1.000000000 0.000000000
JK:/cc/rr: 0.000000000 0.000000000 1.000000000
QJ:/cc/rr: 0.000000000 0.000000000 1.000000000
QQ:/cc/rr: 0.000000000 1.000000000 0.000000000
QK:/cc/rr: 0.000000000 1.000000000 0.000000000
KJ:/cc/rr: 0.000000000 1.000000000 0.000000000
KQ:/cc/rr: 0.000000000 1.000000000 0.000000000
KK:/cc/rr: 0.000000000 1.000000000 0.000000000
JJ:/cc/cr: 1.000000000 0.000000000 0.000000000
JQ:/cc/cr: 0.000000000 0.000000000 1.000000000
JK:/cc/cr: 1.000000000 0.000000000 0.000000000
QJ:/cc/cr: 0.000000000 0.000000000 1.000000000
QQ:/cc/cr: 1.000000000 0.000000000 0.000000000
QK:/cc/cr: 1.000000000 0.000000000 0.000000000
KJ:/cc/cr: 1.000000000 0.000000000 0.000000000
KQ:/cc/cr: 1.000000000 0.000000000 0.000000000
KK:/cc/cr: 1.000000000 0.000000000 0.000000000
| {
"pile_set_name": "Github"
} |
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = '\r'
LF = '\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:

    """This class supports both the minimal and optional command sets.
    Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.

    Minimal Command Set:
            USER name               user(name)
            PASS string             pass_(string)
            STAT                    stat()
            LIST [msg]              list(msg = None)
            RETR msg                retr(msg)
            DELE msg                dele(msg)
            NOOP                    noop()
            RSET                    rset()
            QUIT                    quit()

    Optional Commands (some servers support these):
            RPOP name               rpop(name)
            APOP name digest        apop(name, digest)
            TOP msg n               top(msg, n)
            UIDL [msg]              uidl(msg = None)

    Raises one exception: 'error_proto'.

    Instantiate with:
            POP3(hostname, port=110)

    NB:     the POP protocol locks the mailbox from user
            authorization until QUIT, so be sure to get in, suck
            the messages, and quit, each time you access the
            mailbox.

            POP is a line-based protocol, which means large mail
            messages consume lots of python cycles reading them
            line-by-line.

            If it's available on your mail server, use IMAP4
            instead, it doesn't suffer from the two problems
            above.
    """

    def __init__(self, host, port=POP3_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.host = host
        self.port = port
        # create_connection() handles name resolution and the connect timeout.
        self.sock = socket.create_connection((host, port), timeout)
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        # The server greeting line; APOP-capable servers embed their
        # timestamp challenge here (see apop()).
        self.welcome = self._getresp()

    # Internal: send one line to the server, appending CRLF.
    def _putline(self, line):
        if self._debugging > 1: print '*put*', repr(line)
        self.sock.sendall('%s%s' % (line, CRLF))

    # Internal: send one command to the server (through _putline())
    def _putcmd(self, line):
        if self._debugging: print '*cmd*', repr(line)
        self._putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # This is where all the CPU time of this module is consumed.
    # Raise error_proto('-ERR EOF') if the connection is closed.
    def _getline(self):
        # Read at most _MAXLINE+1 bytes so oversized lines are detected
        # without buffering unbounded data.
        line = self.file.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise error_proto('line too long')
        if self._debugging > 1: print '*get*', repr(line)
        if not line: raise error_proto('-ERR EOF')
        octets = len(line)
        # server can send any combination of CR & LF
        # however, 'readline()' returns lines ending in LF
        # so only possibilities are ...LF, ...CRLF, CR...LF
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[0] == CR:
            return line[1:-1], octets
        return line[:-1], octets

    # Internal: get a response from the server.
    # Raise 'error_proto' if the response doesn't start with '+'.
    def _getresp(self):
        resp, o = self._getline()
        if self._debugging > 1: print '*resp*', repr(resp)
        c = resp[:1]
        if c != '+':
            raise error_proto(resp)
        return resp

    # Internal: get a response plus following text from the server.
    # Multi-line responses end with a line containing only '.'; a leading
    # '..' on a data line is byte-stuffing for a line starting with '.'.
    def _getlongresp(self):
        resp = self._getresp()
        list = []; octets = 0
        line, o = self._getline()
        while line != '.':
            if line[:2] == '..':
                o = o-1
                line = line[1:]
            octets = octets + o
            list.append(line)
            line, o = self._getline()
        return resp, list, octets

    # Internal: send a command and get the response
    def _shortcmd(self, line):
        self._putcmd(line)
        return self._getresp()

    # Internal: send a command and get the response plus following text
    def _longcmd(self, line):
        self._putcmd(line)
        return self._getlongresp()

    # These can be useful:

    def getwelcome(self):
        # The untouched greeting received at connect time.
        return self.welcome

    def set_debuglevel(self, level):
        # 0 = quiet, 1 = trace commands, >1 = also trace raw lines.
        self._debugging = level

    # Here are all the POP commands:

    def user(self, user):
        """Send user name, return response

        (should indicate password required).
        """
        return self._shortcmd('USER %s' % user)

    def pass_(self, pswd):
        """Send password, return response

        (response includes message count, mailbox size).

        NB: mailbox is locked by server from here to 'quit()'
        """
        return self._shortcmd('PASS %s' % pswd)

    def stat(self):
        """Get mailbox status.

        Result is tuple of 2 ints (message count, mailbox size)
        """
        retval = self._shortcmd('STAT')
        rets = retval.split()
        if self._debugging: print '*stat*', repr(rets)
        numMessages = int(rets[1])
        sizeMessages = int(rets[2])
        return (numMessages, sizeMessages)

    def list(self, which=None):
        """Request listing, return result.

        Result without a message number argument is in form
        ['response', ['mesg_num octets', ...], octets].

        Result when a message number argument is given is a
        single response: the "scan listing" for that message.
        """
        if which is not None:
            return self._shortcmd('LIST %s' % which)
        return self._longcmd('LIST')

    def retr(self, which):
        """Retrieve whole message number 'which'.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('RETR %s' % which)

    def dele(self, which):
        """Delete message number 'which'.

        Result is 'response'.
        """
        return self._shortcmd('DELE %s' % which)

    def noop(self):
        """Does nothing.

        One supposes the response indicates the server is alive.
        """
        return self._shortcmd('NOOP')

    def rset(self):
        """Unmark all messages marked for deletion."""
        return self._shortcmd('RSET')

    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        try:
            resp = self._shortcmd('QUIT')
        except error_proto, val:
            # The server's error response is returned rather than raised so
            # the connection is still torn down below.
            resp = val
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp

    #__del__ = quit

    # optional commands:

    def rpop(self, user):
        """Not sure what this does."""
        return self._shortcmd('RPOP %s' % user)

    # Matches the '<...>' timestamp an APOP-capable server puts in its greeting.
    timestamp = re.compile(r'\+OK.*(<[^>]+>)')

    def apop(self, user, secret):
        """Authorisation

        - only possible if server has supplied a timestamp in initial greeting.

        Args:
                user    - mailbox user;
                secret  - secret shared between client and server.

        NB: mailbox is locked by server from here to 'quit()'
        """
        m = self.timestamp.match(self.welcome)
        if not m:
            raise error_proto('-ERR APOP not supported by server')
        import hashlib
        # Digest is the MD5 of greeting-timestamp + shared secret, sent as
        # lowercase hex.
        digest = hashlib.md5(m.group(1)+secret).digest()
        digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
        return self._shortcmd('APOP %s %s' % (user, digest))

    def top(self, which, howmuch):
        """Retrieve message header of message number 'which'
        and first 'howmuch' lines of message body.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('TOP %s %s' % (which, howmuch))

    def uidl(self, which=None):
        """Return message digest (unique id) list.

        If 'which', result contains unique id for that message
        in the form 'response mesgnum uid', otherwise result is
        the list ['response', ['mesgnum uid', ...], octets]
        """
        if which is not None:
            return self._shortcmd('UIDL %s' % which)
        return self._longcmd('UIDL')
try:
    import ssl
except ImportError:
    # SSL support is optional; POP3_SSL is only defined when the ssl
    # module is available.
    pass
else:
    class POP3_SSL(POP3):
        """POP3 client class over SSL connection

        Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)

               hostname - the hostname of the pop3 over ssl server
               port - port number
               keyfile - PEM formatted file that contains your private key
               certfile - PEM formatted certificate chain file

        See the methods of the parent class POP3 for more documentation.
        """

        def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None):
            self.host = host
            self.port = port
            self.keyfile = keyfile
            self.certfile = certfile
            # Decrypted bytes read from the SSL object but not yet consumed
            # by _getline().
            self.buffer = ""
            msg = "getaddrinfo returns an empty list"
            self.sock = None
            # Try each resolved address for the host until one connects.
            for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.connect(sa)
                except socket.error, msg:
                    if self.sock:
                        self.sock.close()
                    self.sock = None
                    continue
                break
            if not self.sock:
                raise socket.error, msg
            self.file = self.sock.makefile('rb')
            # Wrap the connected socket; all traffic goes through sslobj.
            self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile)
            self._debugging = 0
            self.welcome = self._getresp()

        # Internal: pull more decrypted bytes into self.buffer; raise on EOF.
        def _fillBuffer(self):
            localbuf = self.sslobj.read()
            if len(localbuf) == 0:
                raise error_proto('-ERR EOF')
            self.buffer += localbuf

        def _getline(self):
            # Unlike the plain-socket version, lines are split out of a
            # manually managed buffer fed by the SSL object.
            line = ""
            renewline = re.compile(r'.*?\n')
            match = renewline.match(self.buffer)
            while not match:
                self._fillBuffer()
                if len(self.buffer) > _MAXLINE:
                    raise error_proto('line too long')
                match = renewline.match(self.buffer)
            line = match.group(0)
            # Remove the consumed line (first match only) from the buffer.
            self.buffer = renewline.sub('' ,self.buffer, 1)
            if self._debugging > 1: print '*get*', repr(line)
            octets = len(line)
            # Same CR/LF normalization as POP3._getline().
            if line[-2:] == CRLF:
                return line[:-2], octets
            if line[0] == CR:
                return line[1:-1], octets
            return line[:-1], octets

        def _putline(self, line):
            if self._debugging > 1: print '*put*', repr(line)
            line += CRLF
            bytes = len(line)
            # ssl write() may send only part of the data; loop until done.
            while bytes > 0:
                sent = self.sslobj.write(line)
                if sent == bytes:
                    break    # avoid copy
                line = line[sent:]
                bytes = bytes - sent

        def quit(self):
            """Signoff: commit changes on server, unlock mailbox, close connection."""
            try:
                resp = self._shortcmd('QUIT')
            except error_proto, val:
                resp = val
            self.sock.close()
            del self.sslobj, self.sock
            return resp

    __all__.append("POP3_SSL")
if __name__ == "__main__":
    # Minimal smoke test: dump every message in the given mailbox.
    # Usage: python poplib.py <server> <user> <password>
    import sys
    a = POP3(sys.argv[1])
    print a.getwelcome()
    a.user(sys.argv[2])
    a.pass_(sys.argv[3])
    a.list()
    (numMsgs, totalSize) = a.stat()
    for i in range(1, numMsgs + 1):
        (header, msg, octets) = a.retr(i)
        print "Message %d:" % i
        for line in msg:
            print ' ' + line
        print '-----------------------'
    a.quit()
| {
"pile_set_name": "Github"
} |
{
"title":"Navigation Timing API",
"description":"API for accessing timing information related to navigation and elements.",
"spec":"https://www.w3.org/TR/navigation-timing/",
"status":"rec",
"links":[
{
"url":"https://developer.mozilla.org/en/API/navigationTiming",
"title":"MDN Web Docs - Navigation Timing"
},
{
"url":"https://www.html5rocks.com/en/tutorials/webperformance/basics/",
"title":"HTML5 Rocks tutorial"
},
{
"url":"https://www.webplatform.org/docs/apis/navigation_timing",
"title":"WebPlatform Docs"
}
],
"bugs":[
],
"categories":[
"DOM",
"JS API"
],
"stats":{
"ie":{
"5.5":"n",
"6":"n",
"7":"n",
"8":"n",
"9":"y",
"10":"y",
"11":"y"
},
"edge":{
"12":"y",
"13":"y",
"14":"y",
"15":"y",
"16":"y",
"17":"y",
"18":"y"
},
"firefox":{
"2":"n",
"3":"n",
"3.5":"n",
"3.6":"n",
"4":"n",
"5":"n",
"6":"n",
"7":"y",
"8":"y",
"9":"y",
"10":"y",
"11":"y",
"12":"y",
"13":"y",
"14":"y",
"15":"y",
"16":"y",
"17":"y",
"18":"y",
"19":"y",
"20":"y",
"21":"y",
"22":"y",
"23":"y",
"24":"y",
"25":"y",
"26":"y",
"27":"y",
"28":"y",
"29":"y",
"30":"y",
"31":"y",
"32":"y",
"33":"y",
"34":"y",
"35":"y",
"36":"y",
"37":"y",
"38":"y",
"39":"y",
"40":"y",
"41":"y",
"42":"y",
"43":"y",
"44":"y",
"45":"y",
"46":"y",
"47":"y",
"48":"y",
"49":"y",
"50":"y",
"51":"y",
"52":"y",
"53":"y",
"54":"y",
"55":"y",
"56":"y",
"57":"y",
"58":"y",
"59":"y",
"60":"y",
"61":"y",
"62":"y"
},
"chrome":{
"4":"n",
"5":"n",
"6":"y x",
"7":"y x",
"8":"y x",
"9":"y x",
"10":"y x",
"11":"y x",
"12":"y x",
"13":"y",
"14":"y",
"15":"y",
"16":"y",
"17":"y",
"18":"y",
"19":"y",
"20":"y",
"21":"y",
"22":"y",
"23":"y",
"24":"y",
"25":"y",
"26":"y",
"27":"y",
"28":"y",
"29":"y",
"30":"y",
"31":"y",
"32":"y",
"33":"y",
"34":"y",
"35":"y",
"36":"y",
"37":"y",
"38":"y",
"39":"y",
"40":"y",
"41":"y",
"42":"y",
"43":"y",
"44":"y",
"45":"y",
"46":"y",
"47":"y",
"48":"y",
"49":"y",
"50":"y",
"51":"y",
"52":"y",
"53":"y",
"54":"y",
"55":"y",
"56":"y",
"57":"y",
"58":"y",
"59":"y",
"60":"y",
"61":"y",
"62":"y",
"63":"y",
"64":"y",
"65":"y",
"66":"y",
"67":"y",
"68":"y",
"69":"y"
},
"safari":{
"3.1":"n",
"3.2":"n",
"4":"n",
"5":"n",
"5.1":"n",
"6":"n",
"6.1":"n",
"7":"n",
"7.1":"n",
"8":"y",
"9":"y",
"9.1":"y",
"10":"y",
"10.1":"y",
"11":"y",
"11.1":"y",
"12":"y",
"TP":"y"
},
"opera":{
"9":"n",
"9.5-9.6":"n",
"10.0-10.1":"n",
"10.5":"n",
"10.6":"n",
"11":"n",
"11.1":"n",
"11.5":"n",
"11.6":"n",
"12":"n",
"12.1":"n",
"15":"y",
"16":"y",
"17":"y",
"18":"y",
"19":"y",
"20":"y",
"21":"y",
"22":"y",
"23":"y",
"24":"y",
"25":"y",
"26":"y",
"27":"y",
"28":"y",
"29":"y",
"30":"y",
"31":"y",
"32":"y",
"33":"y",
"34":"y",
"35":"y",
"36":"y",
"37":"y",
"38":"y",
"39":"y",
"40":"y",
"41":"y",
"42":"y",
"43":"y",
"44":"y",
"45":"y",
"46":"y",
"47":"y",
"48":"y",
"49":"y",
"50":"y",
"51":"y",
"52":"y",
"53":"y"
},
"ios_saf":{
"3.2":"n",
"4.0-4.1":"n",
"4.2-4.3":"n",
"5.0-5.1":"n",
"6.0-6.1":"n",
"7.0-7.1":"n",
"8":"y",
"8.1-8.4":"n",
"9.0-9.2":"y",
"9.3":"y",
"10.0-10.2":"y",
"10.3":"y",
"11.0-11.2":"y",
"11.3":"y"
},
"op_mini":{
"all":"n"
},
"android":{
"2.1":"n",
"2.2":"n",
"2.3":"n",
"3":"n",
"4":"y",
"4.1":"y",
"4.2-4.3":"y",
"4.4":"y",
"4.4.3-4.4.4":"y",
"66":"y"
},
"bb":{
"7":"n",
"10":"y"
},
"op_mob":{
"10":"n",
"11":"n",
"11.1":"n",
"11.5":"n",
"12":"n",
"12.1":"n",
"46":"y"
},
"and_chr":{
"66":"y"
},
"and_ff":{
"60":"y"
},
"ie_mob":{
"10":"y",
"11":"y"
},
"and_uc":{
"11.8":"y"
},
"samsung":{
"4":"y",
"5":"y",
"6.2":"y"
},
"and_qq":{
"1.2":"y"
},
"baidu":{
"7.12":"y"
}
},
"notes":"Removed in iOS 8.1 due to poor performance.",
"notes_by_num":{
},
"usage_perc_y":94.41,
"usage_perc_a":0,
"ucprefix":false,
"parent":"",
"keywords":"performance,performance.timing,performancenavigation",
"ie_id":"navigationtimingapi",
"chrome_id":"5584144679567360",
"firefox_id":"",
"webkit_id":"specification-navigation-timing-level-1",
"shown":true
}
| {
"pile_set_name": "Github"
} |
##
## SPDX-License-Identifier: BSD-3-Clause
## Copyright Contributors to the OpenEXR Project.
##
## Process this file with automake to produce Makefile.in
AM_CPPFLAGS = -I$(top_srcdir)/config

# The Half library: 16-bit floating-point ("half") support for OpenEXR.
lib_LTLIBRARIES = libHalf.la
libHalf_la_SOURCES = half.cpp half.h halfFunction.h halfLimits.h
libHalf_la_LDFLAGS = -version-info @LIBTOOL_VERSION@ -no-undefined
if LIB_SUFFIX_EXISTS
libHalf_la_LDFLAGS += -release @LIB_SUFFIX@
endif

# Public headers are installed under <prefix>/include/OpenEXR.
libHalfincludedir = $(includedir)/OpenEXR
libHalfinclude_HEADERS = half.h halfFunction.h halfLimits.h halfExport.h

# these are used to build eLut.h and toFloat.h dynamically
EXTRA_DIST = eLut.cpp toFloat.cpp CMakeLists.txt
CLEANFILES = eLut eLut.h toFloat toFloat.h

# Build-time helper programs whose stdout becomes the lookup-table headers.
eLut_SOURCES = eLut.cpp
toFloat_SOURCES = toFloat.cpp

eLut.h: eLut
	./eLut > eLut.h

toFloat.h: toFloat
	./toFloat > toFloat.h

# Generated headers must exist before the library sources compile.
BUILT_SOURCES = eLut.h toFloat.h
noinst_PROGRAMS = eLut toFloat
| {
"pile_set_name": "Github"
} |
// Copyright (c) BruTile developers team. All rights reserved. See License.txt in the project root for license information.
using System.Collections.Generic;
namespace BruTile.Predefined
{
    /// <summary>
    /// Tile schema for the global spherical Mercator projection (EPSG:3857),
    /// the scheme used by OSM-style tile servers.
    /// </summary>
    public class GlobalSphericalMercator : TileSchema
    {
        // ScaleFactor * TileSize = 20037508.34... meters, i.e. half the
        // projected world width; level-0 resolution is 2 * ScaleFactor.
        private const double ScaleFactor = 78271.51696401953125;
        private const string DefaultFormat = "png";
        private const int DefaultMinZoomLevel = 0;
        private const int DefaultMaxZoomLevel = 19;
        private const int TileSize = 256;

        // The default for YAxis is YAxis.OSM for all constructors
        public GlobalSphericalMercator(string format = DefaultFormat, YAxis yAxis = YAxis.OSM, int minZoomLevel = DefaultMinZoomLevel, int maxZoomLevel = DefaultMaxZoomLevel, string name = null) :
            this(ToResolutions(minZoomLevel, maxZoomLevel), format, yAxis, name)
        {
        }

        public GlobalSphericalMercator(YAxis yAxis = YAxis.OSM, int minZoomLevel = DefaultMinZoomLevel, int maxZoomLevel = DefaultMaxZoomLevel, string name = null) :
            this(ToResolutions(minZoomLevel, maxZoomLevel), DefaultFormat, yAxis, name)
        {
        }

        public GlobalSphericalMercator(int minZoomLevel = DefaultMinZoomLevel, int maxZoomLevel = DefaultMaxZoomLevel, string name = null) :
            this(ToResolutions(minZoomLevel, maxZoomLevel), DefaultFormat, YAxis.OSM, name)
        {
        }

        public GlobalSphericalMercator() :
            this(ToResolutions(DefaultMinZoomLevel, DefaultMaxZoomLevel))
        {
        }

        public GlobalSphericalMercator(string format = DefaultFormat, YAxis yAxis = YAxis.OSM, IEnumerable<int> zoomLevels = null, string name = null, Extent extent = default(Extent)) :
            this(ToResolutions(zoomLevels), format, yAxis, name, extent)
        {
        }

        // All public constructors funnel into this one.
        private GlobalSphericalMercator(IEnumerable<KeyValuePair<string, Resolution>> resolutions, string format = DefaultFormat,
            YAxis yAxis = YAxis.OSM, string name = null, Extent extent = default(Extent))
        {
            Name = name ?? "GlobalSphericalMercator";
            Format = format;
            YAxis = yAxis;
            Srs = "EPSG:3857";
            foreach (var resolution in resolutions)
            {
                Resolutions[resolution.Value.Level] = resolution.Value;
            }
            // The world extent is symmetric around (0, 0): +/- 20037508.34 m.
            OriginX = -ScaleFactor * TileSize;
            OriginY = -ScaleFactor * TileSize;
            // NOTE(review): Extent is computed from the pre-inversion OriginY;
            // only the tile-origin Y is flipped below for OSM-style schemas —
            // confirm this asymmetry is intended.
            Extent = extent == default(Extent) ? new Extent(OriginX, OriginY, -OriginX, -OriginY) : extent;
            if (yAxis == YAxis.OSM) OriginY = -OriginY; // OSM has an inverted Y-axis
        }

        // Expand an inclusive [min, max] zoom range into individual levels.
        private static IEnumerable<KeyValuePair<string, Resolution>> ToResolutions(int min, int max)
        {
            var list = new List<int>();
            for (var i = min; i <= max; i++) list.Add(i);
            return ToResolutions(list);
        }

        // Map each zoom level to its resolution: 2 * ScaleFactor / 2^level.
        // A null level list falls back to the default zoom range.
        private static IEnumerable<KeyValuePair<string, Resolution>> ToResolutions(IEnumerable<int> levels)
        {
            if (levels == null) return ToResolutions(DefaultMinZoomLevel, DefaultMaxZoomLevel);

            var dictionary = new Dictionary<string, Resolution>();
            foreach (var level in levels)
            {
                dictionary[level.ToString()] = new Resolution
                    (
                        level,
                        2 * ScaleFactor / (1 << level)
                    );
            }
            return dictionary;
        }
    }
}
| {
"pile_set_name": "Github"
} |
/**
 * Prints the name and SQL type of every column returned by a sample query.
 *
 * @param con an open JDBC connection; it is not closed by this method
 */
public static void getResultSetMetaData(Connection con) {
    String SQL = "SELECT TOP 10 * FROM Person.Contact";
    // Fix: the ResultSet was previously never closed. Declaring both the
    // Statement and the ResultSet in the try-with-resources list closes
    // them automatically in reverse order.
    try (Statement stmt = con.createStatement();
         ResultSet rs = stmt.executeQuery(SQL)) {
        ResultSetMetaData rsmd = rs.getMetaData();
        // Display the column name and type. JDBC column indexes are 1-based.
        int cols = rsmd.getColumnCount();
        for (int i = 1; i <= cols; i++) {
            System.out.println("NAME: " + rsmd.getColumnName(i) + " " + "TYPE: " + rsmd.getColumnTypeName(i));
        }
    }
    // Handle any errors that may have occurred.
    catch (SQLException e) {
        e.printStackTrace();
    }
}
| {
"pile_set_name": "Github"
} |
// #docplaster
// #docregion schematics-imports, schema-imports, workspace
import {
Rule, Tree, SchematicsException,
apply, url, applyTemplates, move,
chain, mergeWith
} from '@angular-devkit/schematics';
import { strings, normalize, experimental } from '@angular-devkit/core';
// #enddocregion schematics-imports
import { Schema as MyServiceSchema } from './schema';
// #enddocregion schema-imports
/**
 * Schematic factory: generates service files for `options.name` into the
 * target project of the current Angular workspace.
 */
export function myService(options: MyServiceSchema): Rule {
  return (tree: Tree) => {
    // The workspace configuration is read from the schematic tree, not disk.
    const workspaceConfig = tree.read('/angular.json');
    if (!workspaceConfig) {
      throw new SchematicsException('Could not find Angular workspace configuration');
    }

    // convert workspace to string
    const workspaceContent = workspaceConfig.toString();

    // parse workspace string into JSON object
    const workspace: experimental.workspace.WorkspaceSchema = JSON.parse(workspaceContent);
    // #enddocregion workspace

    // #docregion project-fallback
    // Fall back to the workspace's default project when none was given.
    if (!options.project) {
      options.project = workspace.defaultProject;
    }
    // #enddocregion project-fallback

    // #docregion project-info
    const projectName = options.project as string;
    const project = workspace.projects[projectName];

    // 'app' and 'lib' are the conventional source sub-folder names.
    const projectType = project.projectType === 'application' ? 'app' : 'lib';
    // #enddocregion project-info

    // #docregion path
    // Default destination: <sourceRoot>/<app|lib> of the target project.
    if (options.path === undefined) {
      options.path = `${project.sourceRoot}/${projectType}`;
    }
    // #enddocregion path

    // #docregion template
    // Render the ./files templates with the strings helpers and the service
    // name, then move the rendered tree into the computed target path.
    const templateSource = apply(url('./files'), [
      applyTemplates({
        classify: strings.classify,
        dasherize: strings.dasherize,
        name: options.name
      }),
      move(normalize(options.path as string))
    ]);
    // #enddocregion template

    // #docregion chain
    return chain([
      mergeWith(templateSource)
    ]);
    // #enddocregion chain
    // #docregion workspace
  };
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2018 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <functional>
#include <string>
#include "source/reduce/change_operand_reduction_opportunity.h"
#include "source/reduce/operand_to_const_reduction_opportunity_finder.h"
#include "source/reduce/reduction_opportunity.h"
#include "source/reduce/reduction_pass.h"
#include "source/reduce/remove_instruction_reduction_opportunity.h"
#include "source/reduce/remove_unused_instruction_reduction_opportunity_finder.h"
| {
"pile_set_name": "Github"
} |
require_relative '../helper'
require 'yajl'
require 'flexmock/test_unit'
require 'fluent/command/binlog_reader'
require 'fluent/event'
class TestFluentBinlogReader < ::Test::Unit::TestCase
  # Stub subcommand: lets tests verify CLI dispatch without running a real
  # binlog-reader command.
  module ::BinlogReaderCommand
    class Dummy < Base
      def call; end
    end
  end

  # Redirect $stdout to a StringIO for the duration of the block so command
  # output (usage text, etc.) does not pollute the test run.
  def suppress_stdout
    out = StringIO.new
    $stdout = out
    yield
  ensure
    $stdout = STDOUT
  end

  sub_test_case 'call' do
    data(
      empty: [],
      invalid: %w(invalid packed.log),
    )
    test 'should fail when invalid command' do |argv|
      fu = FluentBinlogReader.new(argv)
      # An unknown (or missing) subcommand makes the CLI exit non-zero.
      assert_raise(SystemExit) do
        suppress_stdout { fu.call }
      end
    end

    data(
      cat: %w(cat packed.log),
      head: %w(head packed.log),
      formats: %w(formats packed.log)
    )
    test 'should succeed when valid command' do |argv|
      fu = FluentBinlogReader.new(argv)
      # Swap in the Dummy command class; assert the command is looked up
      # (const_get) exactly once and runs without raising.
      flexstub(::BinlogReaderCommand) do |command|
        command.should_receive(:const_get).once.and_return(::BinlogReaderCommand::Dummy)
        assert_nothing_raised do
          fu.call
        end
      end
    end
  end
end
class TestBaseCommand < ::Test::Unit::TestCase
  # Per-process scratch directory (TEST_ENV_NUMBER supports parallel runs).
  TMP_DIR = File.expand_path(File.dirname(__FILE__) + "/../tmp/command/binlog_reader#{ENV['TEST_ENV_NUMBER']}")

  # Write `records` at `times` to TMP_DIR/path as a msgpack event stream,
  # then block (up to 5s) until the file is fully flushed to disk.
  def create_message_packed_file(path, times = [event_time], records = [{ 'message' => 'dummy' }])
    es = Fluent::MultiEventStream.new(times, records)
    v = es.to_msgpack_stream
    out_path = "#{TMP_DIR}/#{path}"
    File.open(out_path, 'wb') do |f|
      f.print(v)
    end
    waiting(5) do
      sleep 0.5 until File.size(out_path) == v.bytesize
    end
  end

  # Start every test with an empty scratch directory.
  def setup
    FileUtils.rm_rf(TMP_DIR)
    FileUtils.mkdir_p(TMP_DIR)
  end

  # Run the block with TZ temporarily set to `timezone`, restoring the
  # previous value afterwards even if the block raises.
  def timezone(timezone = 'UTC')
    old = ENV['TZ']
    ENV['TZ'] = timezone
    yield
  ensure
    ENV['TZ'] = old
  end
end
# Tests for the `head` subcommand, which prints the first N entries of a binlog.
class TestHead < TestBaseCommand
  sub_test_case 'initialize' do
    data(
      'file is not passed' => %w(),
      'file is not found' => %w(invalid_path.log)
    )
    test 'should fail if file is invalid' do |argv|
      # Invalid/missing path aborts with a usage error (SystemExit).
      assert_raise(SystemExit) do
        capture_stdout { BinlogReaderCommand::Head.new(argv) }
      end
    end

    test 'should succeed if a file is valid' do
      file_name = 'packed.log'
      argv = ["#{TMP_DIR}/#{file_name}"]
      create_message_packed_file(file_name)
      assert_nothing_raised do
        BinlogReaderCommand::Head.new(argv)
      end
    end

    test 'should fail when config_params format is invalid' do
      file_name = 'packed.log'
      # '-e only_key' is not a key=value pair, so option parsing should abort.
      argv = ["#{TMP_DIR}/#{file_name}", '--format=csv', '-e', 'only_key']
      create_message_packed_file(file_name)
      assert_raise(SystemExit) do
        capture_stdout { BinlogReaderCommand::Head.new(argv) }
      end
    end

    test 'should succeed if config_params format is valid' do
      file_name = 'packed.log'
      argv = ["#{TMP_DIR}/#{file_name}", '--format=csv', '-e', 'fields=message']
      create_message_packed_file(file_name)
      assert_nothing_raised do
        capture_stdout { BinlogReaderCommand::Head.new(argv) }
      end
    end
  end

  sub_test_case 'call' do
    setup do
      @file_name = 'packed.log'
      @t = '2011-01-02 13:14:15 UTC'
      @record = { 'message' => 'dummy' }
    end

    test 'should output the beginning of the file with default format (out_file)' do
      argv = ["#{TMP_DIR}/#{@file_name}"]
      timezone do
        # Six events are written but only five lines are expected:
        # head's default line count is 5.
        create_message_packed_file(@file_name, [event_time(@t).to_i] * 6, [@record] * 6)
        head = BinlogReaderCommand::Head.new(argv)
        out = capture_stdout { head.call }
        assert_equal "2011-01-02T13:14:15+00:00\t#{TMP_DIR}/#{@file_name}\t#{Yajl.dump(@record)}\n" * 5, out
      end
    end

    test 'should set the number of lines to display' do
      # '-n 1' limits output to a single entry.
      argv = ["#{TMP_DIR}/#{@file_name}", '-n', '1']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i] * 6, [@record] * 6)
        head = BinlogReaderCommand::Head.new(argv)
        out = capture_stdout { head.call }
        assert_equal "2011-01-02T13:14:15+00:00\t#{TMP_DIR}/#{@file_name}\t#{Yajl.dump(@record)}\n", out
      end
    end

    test 'should fail when the number of lines is invalid' do
      # Zero is rejected at option-parsing time.
      argv = ["#{TMP_DIR}/#{@file_name}", '-n', '0']
      create_message_packed_file(@file_name)
      assert_raise(SystemExit) do
        capture_stdout { BinlogReaderCommand::Head.new(argv) }
      end
    end

    test 'should output content of a file with json format' do
      argv = ["#{TMP_DIR}/#{@file_name}", '--format=json']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i], [@record])
        head = BinlogReaderCommand::Head.new(argv)
        out = capture_stdout { head.call }
        assert_equal "#{Yajl.dump(@record)}\n", out
      end
    end

    test 'should fail with an invalid format' do
      # Unknown formatter name is only detected when call() configures it.
      argv = ["#{TMP_DIR}/#{@file_name}", '--format=invalid']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i], [@record])
        head = BinlogReaderCommand::Head.new(argv)
        assert_raise(SystemExit) do
          capture_stdout { head.call }
        end
      end
    end

    test 'should succeed if multiple config_params format' do
      file_name = 'packed.log'
      # Multiple '-e key=value' options are merged into the formatter config.
      argv = ["#{TMP_DIR}/#{file_name}", '--format=csv', '-e', 'fields=message,fo', '-e', 'delimiter=|']
      create_message_packed_file(file_name, [event_time], [{ 'message' => 'dummy', 'fo' => 'dummy2' }])
      head = BinlogReaderCommand::Head.new(argv)
      assert_equal "\"dummy\"|\"dummy2\"\n", capture_stdout { head.call }
    end
  end
end
# Tests for the `cat` subcommand, which prints every entry of a binlog file.
class TestCat < TestBaseCommand
  sub_test_case 'initialize' do
    data(
      'file is not passed' => [],
      'file is not found' => %w(invalid_path.log)
    )
    test 'should fail if a file is invalid' do |argv|
      # Fixed: this suite tests Cat, but the original constructed Head here
      # (copy-paste from TestHead), so Cat's argument validation was untested.
      assert_raise(SystemExit) do
        capture_stdout { BinlogReaderCommand::Cat.new(argv) }
      end
    end

    test 'should succeed if a file is valid' do
      file_name = 'packed.log'
      argv = ["#{TMP_DIR}/#{file_name}"]
      create_message_packed_file(file_name)
      assert_nothing_raised do
        BinlogReaderCommand::Cat.new(argv)
      end
    end

    test 'should fail when config_params format is invalid' do
      file_name = 'packed.log'
      # '-e only_key' is not a key=value pair, so option parsing should abort.
      argv = ["#{TMP_DIR}/#{file_name}", '--format=json', '-e', 'only_key']
      create_message_packed_file(file_name)
      assert_raise(SystemExit) do
        capture_stdout { BinlogReaderCommand::Cat.new(argv) }
      end
    end

    test 'should succeed when config_params format is valid' do
      file_name = 'packed.log'
      argv = ["#{TMP_DIR}/#{file_name}", '--format=csv', '-e', 'fields=message']
      create_message_packed_file(file_name)
      assert_nothing_raised do
        capture_stdout { BinlogReaderCommand::Cat.new(argv) }
      end
    end
  end

  sub_test_case 'call' do
    setup do
      @file_name = 'packed.log'
      @t = '2011-01-02 13:14:15 UTC'
      @record = { 'message' => 'dummy' }
    end

    test 'should output the file with default format(out_file)' do
      argv = ["#{TMP_DIR}/#{@file_name}"]
      timezone do
        # Unlike head, cat prints all six entries.
        create_message_packed_file(@file_name, [event_time(@t).to_i] * 6, [@record] * 6)
        cat = BinlogReaderCommand::Cat.new(argv)
        out = capture_stdout { cat.call }
        assert_equal "2011-01-02T13:14:15+00:00\t#{TMP_DIR}/#{@file_name}\t#{Yajl.dump(@record)}\n" * 6, out
      end
    end

    test 'should set the number of lines to display' do
      # '-n 1' limits output to a single entry even for cat.
      argv = ["#{TMP_DIR}/#{@file_name}", '-n', '1']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i] * 6, [@record] * 6)
        cat = BinlogReaderCommand::Cat.new(argv)
        out = capture_stdout { cat.call }
        assert_equal "2011-01-02T13:14:15+00:00\t#{TMP_DIR}/#{@file_name}\t#{Yajl.dump(@record)}\n", out
      end
    end

    test 'should output content of a file with json format' do
      argv = ["#{TMP_DIR}/#{@file_name}", '--format=json']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i], [@record])
        cat = BinlogReaderCommand::Cat.new(argv)
        out = capture_stdout { cat.call }
        assert_equal "#{Yajl.dump(@record)}\n", out
      end
    end

    test 'should fail with an invalid format' do
      # Unknown formatter name is only detected when call() configures it.
      argv = ["#{TMP_DIR}/#{@file_name}", '--format=invalid']
      timezone do
        create_message_packed_file(@file_name, [event_time(@t).to_i], [@record])
        cat = BinlogReaderCommand::Cat.new(argv)
        assert_raise(SystemExit) do
          capture_stdout { cat.call }
        end
      end
    end

    test 'should succeed if multiple config_params format' do
      file_name = 'packed.log'
      # Multiple '-e key=value' options are merged into the formatter config.
      argv = ["#{TMP_DIR}/#{file_name}", '--format=csv', '-e', 'fields=message,fo', '-e', 'delimiter=|']
      create_message_packed_file(file_name, [event_time], [{ 'message' => 'dummy', 'fo' => 'dummy2' }])
      cat = BinlogReaderCommand::Cat.new(argv)
      assert_equal "\"dummy\"|\"dummy2\"\n", capture_stdout { cat.call }
    end
  end
end
# Tests for the `formats` subcommand, which lists available formatter plugins.
class TestFormats < TestBaseCommand
  test 'parse_option!' do
    # A nonexistent --plugin directory aborts with a usage error.
    assert_raise(SystemExit) do
      capture_stdout do
        BinlogReaderCommand::Formats.new(['--plugin=invalid_dir_path'])
      end
    end
  end

  sub_test_case 'call' do
    test 'display available plugins' do
      # Built-in formatters should always be listed.
      f = BinlogReaderCommand::Formats.new
      out = capture_stdout { f.call }
      assert out.include?('json')
      assert out.include?('csv')
    end

    test 'add new plugins using --plugin option' do
      # Plugins loaded from an extra directory appear alongside built-ins.
      dir_path = File.expand_path(File.dirname(__FILE__) + '/../scripts/fluent/plugin/formatter1')
      f = BinlogReaderCommand::Formats.new(["--plugin=#{dir_path}"])
      out = capture_stdout { f.call }
      assert out.include?('json')
      assert out.include?('csv')
      assert out.include?('test1')
    end

    test 'add multiple plugins using --plugin option' do
      # Both '--plugin=DIR' and '-p DIR' forms can be mixed and repeated.
      dir_path1 = File.expand_path(File.dirname(__FILE__) + '/../scripts/fluent/plugin/formatter1')
      dir_path2 = File.expand_path(File.dirname(__FILE__) + '/../scripts/fluent/plugin/formatter2')
      f = BinlogReaderCommand::Formats.new(["--plugin=#{dir_path1}", '-p', dir_path2])
      out = capture_stdout { f.call }
      assert out.include?('json')
      assert out.include?('csv')
      assert out.include?('test1')
      assert out.include?('test2')
    end
  end
end
| {
"pile_set_name": "Github"
} |
/*
* jni.c - stoken Java Native Interface
*
* Copyright 2014 Kevin Cernekee <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <jni.h>
#include "stoken.h"
/* these need to match LibStoken.java */
#define SUCCESS ( 0)
#define INVALID_FORMAT (-1)
#define IO_ERROR (-2)
#define FILE_NOT_FOUND (-3)
struct libctx {
JNIEnv *jenv;
jobject jobj;
struct stoken_ctx *instance;
};
/*
 * Raise a Java exception of class @exc (JNI class descriptor such as
 * "java/lang/OutOfMemoryError"), using "<file>:<line>" as the message so the
 * Java stack trace points back at the native call site.
 */
static void throw_excep(JNIEnv *jenv, const char *exc, int line)
{
	jclass excep;
	char msg[64];
	snprintf(msg, 64, "%s:%d", __FILE__, line);
	/* Clear any pending exception first; FindClass/ThrowNew need a clean state. */
	(*jenv)->ExceptionClear(jenv);
	excep = (*jenv)->FindClass(jenv, exc);
	if (excep)
		(*jenv)->ThrowNew(jenv, excep, msg);
}
#define OOM(jenv) do { throw_excep(jenv, "java/lang/OutOfMemoryError", __LINE__); } while (0)
/*
 * Map a negative errno-style return from libstoken to the Java-visible
 * status constants defined above (must stay in sync with LibStoken.java).
 * -ENOMEM additionally throws OutOfMemoryError before reporting IO_ERROR.
 */
static int translate_errno(JNIEnv *jenv, int err)
{
	switch (err) {
	case 0:
		return SUCCESS;
	case -EINVAL:
		return INVALID_FORMAT;
	case -ENOENT:
		return FILE_NOT_FOUND;
	case -ENOMEM:
		throw_excep(jenv, "java/lang/OutOfMemoryError", __LINE__);
		/* falls through */
	case -EIO:
	default:
		return IO_ERROR;
	}
}
/*
 * Recover the native libctx pointer stashed in the Java object's long
 * "libctx" field (set by init()). Returns NULL (with a pending exception)
 * if the field cannot be resolved.
 */
static struct libctx *getctx(JNIEnv *jenv, jobject jobj)
{
	jclass jcls = (*jenv)->GetObjectClass(jenv, jobj);
	jfieldID jfld = (*jenv)->GetFieldID(jenv, jcls, "libctx", "J");
	if (!jfld)
		return NULL;
	return (void *)(unsigned long)(*jenv)->GetLongField(jenv, jobj, jfld);
}
/*
 * Helpers to write into fields of a Java object by name.  Each returns 0 on
 * success and -1 if the field ID cannot be resolved (leaving an exception
 * pending).  Field signatures: "I" = int, "J" = long, "Z" = boolean.
 */
static int set_int(struct libctx *ctx, jobject jobj, const char *name, int value)
{
	jclass jcls = (*ctx->jenv)->GetObjectClass(ctx->jenv, jobj);
	jfieldID jfld = (*ctx->jenv)->GetFieldID(ctx->jenv, jcls, name, "I");
	if (!jfld)
		return -1;
	(*ctx->jenv)->SetIntField(ctx->jenv, jobj, jfld, value);
	return 0;
}
/* Same as set_int() but for a Java long field. */
static int set_long(struct libctx *ctx, jobject jobj, const char *name, uint64_t value)
{
	jclass jcls = (*ctx->jenv)->GetObjectClass(ctx->jenv, jobj);
	jfieldID jfld = (*ctx->jenv)->GetFieldID(ctx->jenv, jcls, name, "J");
	if (!jfld)
		return -1;
	(*ctx->jenv)->SetLongField(ctx->jenv, jobj, jfld, (jlong)value);
	return 0;
}
/* Same as set_int() but for a Java boolean field. */
static int set_bool(struct libctx *ctx, jobject jobj, const char *name, int value)
{
	jclass jcls = (*ctx->jenv)->GetObjectClass(ctx->jenv, jobj);
	jfieldID jfld = (*ctx->jenv)->GetFieldID(ctx->jenv, jcls, name, "Z");
	if (!jfld)
		return -1;
	(*ctx->jenv)->SetBooleanField(ctx->jenv, jobj, jfld, value);
	return 0;
}
/* Convert a C string to a jstring, mapping NULL input to NULL output. */
static jstring dup_to_jstring(JNIEnv *jenv, const char *in)
{
	/*
	 * Many implementations of NewStringUTF() will return NULL on
	 * NULL input, but that isn't guaranteed:
	 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35979
	 */
	return in ? (*jenv)->NewStringUTF(jenv, in) : NULL;
}
/*
 * Write a C string into a java.lang.String field of @jobj by name.
 * Returns 0 on success, -1 on failure (field not found or, for non-NULL
 * @value, string allocation failure); an exception may be pending on -1.
 */
static int set_string(struct libctx *ctx, jobject jobj, const char *name, const char *value)
{
	jclass jcls = (*ctx->jenv)->GetObjectClass(ctx->jenv, jobj);
	jfieldID jfld = (*ctx->jenv)->GetFieldID(ctx->jenv, jcls, name, "Ljava/lang/String;");
	jstring jarg;
	if (!jfld)
		return -1;
	jarg = dup_to_jstring(ctx->jenv, value);
	/* NULL value legitimately maps to a NULL jstring; only fail on alloc error. */
	if (value && !jarg)
		return -1;
	(*ctx->jenv)->SetObjectField(ctx->jenv, jobj, jfld, jarg);
	return 0;
}
/*
 * Allocate the native context (libctx + a stoken instance + a global ref to
 * the Java object) and return it as a jlong handle for the "libctx" field.
 * On any allocation failure, unwinds in reverse order via the goto chain,
 * throws OutOfMemoryError, and returns 0.
 */
JNIEXPORT jlong JNICALL Java_org_stoken_LibStoken_init(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		goto bad;
	ctx->jenv = jenv;
	/* Hold a global ref so the Java object outlives this native call. */
	ctx->jobj = (*jenv)->NewGlobalRef(jenv, jobj);
	if (!ctx->jobj)
		goto bad_free_ctx;
	ctx->instance = stoken_new();
	if (!ctx->instance)
		goto bad_delete_ref;
	return (jlong)(unsigned long)ctx;
bad_delete_ref:
	(*jenv)->DeleteGlobalRef(jenv, ctx->jobj);
bad_free_ctx:
	free(ctx);
bad:
	OOM(jenv);
	return 0;
}
/* Tear down everything init() created: stoken instance, global ref, libctx. */
JNIEXPORT void JNICALL Java_org_stoken_LibStoken_free(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	if (!ctx)
		return;
	stoken_destroy(ctx->instance);
	(*jenv)->DeleteGlobalRef(jenv, ctx->jobj);
	free(ctx);
}
/*
 * Import a token from an ~/.stokenrc-style file at path @jarg0.
 * Returns one of the SUCCESS/INVALID_FORMAT/IO_ERROR/FILE_NOT_FOUND codes.
 *
 * Fix: the original called ReleaseStringUTFChars() even when
 * GetStringUTFChars() had returned NULL, which is undefined behavior per the
 * JNI spec (and aborts under Android CheckJNI).  Only release on success,
 * matching the pattern already used in decryptSeed()/encryptSeed().
 */
JNIEXPORT jint JNICALL Java_org_stoken_LibStoken_importRCFile(
	JNIEnv *jenv, jobject jobj, jstring jarg0)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0;
	int ret;

	if (!jarg0)
		return translate_errno(jenv, -EINVAL);

	arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
	if (!arg0)
		return translate_errno(jenv, -ENOMEM);

	ret = stoken_import_rcfile(ctx->instance, arg0);
	(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return translate_errno(jenv, ret);
}
/*
 * Import a token from a string (ctf token or stokenrc contents) in @jarg0.
 * Returns one of the SUCCESS/INVALID_FORMAT/IO_ERROR/FILE_NOT_FOUND codes.
 *
 * Fix: never call ReleaseStringUTFChars() with a NULL utf pointer
 * (undefined behavior per the JNI spec); release only on success.
 */
JNIEXPORT jint JNICALL Java_org_stoken_LibStoken_importString(
	JNIEnv *jenv, jobject jobj, jstring jarg0)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0;
	int ret;

	if (!jarg0)
		return translate_errno(jenv, -EINVAL);

	arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
	if (!arg0)
		return translate_errno(jenv, -ENOMEM);

	ret = stoken_import_string(ctx->instance, arg0);
	(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return translate_errno(jenv, ret);
}
/*
 * Build and return a LibStoken.StokenInfo object describing the imported
 * token (serial, expiration, interval, version, PIN usage), or NULL on any
 * failure (class/ctor lookup, allocation, or field population).
 */
JNIEXPORT jobject JNICALL Java_org_stoken_LibStoken_getInfo(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	struct stoken_info *info;
	jmethodID mid;
	jclass jcls;
	jcls = (*ctx->jenv)->FindClass(ctx->jenv,
				       "org/stoken/LibStoken$StokenInfo");
	if (jcls == NULL)
		return NULL;
	mid = (*ctx->jenv)->GetMethodID(ctx->jenv, jcls, "<init>", "()V");
	if (!mid)
		return NULL;
	/* Reuse jobj for the new StokenInfo instance from here on. */
	jobj = (*ctx->jenv)->NewObject(ctx->jenv, jcls, mid);
	if (!jobj)
		return NULL;
	info = stoken_get_info(ctx->instance);
	if (!info)
		return NULL;
	if (set_string(ctx, jobj, "serial", info->serial) ||
	    set_long(ctx, jobj, "unixExpDate", info->exp_date) ||
	    set_int(ctx, jobj, "interval", info->interval) ||
	    set_int(ctx, jobj, "tokenVersion", info->token_version) ||
	    set_bool(ctx, jobj, "usesPin", info->uses_pin))
		jobj = NULL;
	/* info is a single malloc'd block per libstoken; presumably serial is
	 * embedded storage rather than a separate allocation — TODO confirm. */
	free(info);
	return jobj;
}
/* Return the minimum PIN length accepted by the imported token. */
JNIEXPORT jint JNICALL Java_org_stoken_LibStoken_getMinPIN(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	int min_pin, max_pin;
	stoken_pin_range(ctx->instance, &min_pin, &max_pin);
	return min_pin;
}
/* Return the maximum PIN length accepted by the imported token. */
JNIEXPORT jint JNICALL Java_org_stoken_LibStoken_getMaxPIN(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	int min_pin, max_pin;
	stoken_pin_range(ctx->instance, &min_pin, &max_pin);
	return max_pin;
}
/* True if a PIN is needed to generate correct tokencodes. */
JNIEXPORT jboolean JNICALL Java_org_stoken_LibStoken_isPINRequired(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	return !!stoken_pin_required(ctx->instance);
}
/* True if a password is needed to decrypt the seed. */
JNIEXPORT jboolean JNICALL Java_org_stoken_LibStoken_isPassRequired(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	return !!stoken_pass_required(ctx->instance);
}
/* True if a device ID is needed to decrypt the seed. */
JNIEXPORT jboolean JNICALL Java_org_stoken_LibStoken_isDevIDRequired(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	return !!stoken_devid_required(ctx->instance);
}
/*
 * Return JNI_TRUE iff @jarg0 is a syntactically valid PIN for this token.
 *
 * Fix: never call ReleaseStringUTFChars() with a NULL utf pointer
 * (undefined behavior per the JNI spec); the failure path still reports
 * false (translate_errno(-ENOMEM) throws OOM and yields a nonzero code).
 */
JNIEXPORT jboolean JNICALL Java_org_stoken_LibStoken_checkPIN(
	JNIEnv *jenv, jobject jobj, jstring jarg0)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0;
	int ret;

	if (!jarg0)
		return translate_errno(jenv, -EINVAL);

	arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
	if (!arg0)
		return !translate_errno(jenv, -ENOMEM);

	ret = stoken_check_pin(ctx->instance, arg0);
	(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return !translate_errno(jenv, ret);
}
/*
 * Return JNI_TRUE iff @jarg0 matches the device ID bound to this token.
 *
 * Fix: never call ReleaseStringUTFChars() with a NULL utf pointer
 * (undefined behavior per the JNI spec); release only on success.
 */
JNIEXPORT jboolean JNICALL Java_org_stoken_LibStoken_checkDevID(
	JNIEnv *jenv, jobject jobj, jstring jarg0)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0;
	int ret;

	if (!jarg0)
		return translate_errno(jenv, -EINVAL);

	arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
	if (!arg0)
		return !translate_errno(jenv, -ENOMEM);

	ret = stoken_check_devid(ctx->instance, arg0);
	(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return !translate_errno(jenv, ret);
}
/*
 * Return the libstoken device-class GUID table as an array of
 * LibStoken.StokenGUID objects (tag, longName, GUID), or NULL on failure.
 */
JNIEXPORT jobject JNICALL Java_org_stoken_LibStoken_getGUIDList(
	JNIEnv *jenv, jobject jobj)
{
	struct libctx *ctx = getctx(jenv, jobj);
	jmethodID mid;
	jclass jcls;
	const struct stoken_guid *guidlist = stoken_get_guid_list();
	int i, len;
	jobjectArray jarr;
	/* The list is terminated by an entry with a NULL tag. */
	for (len = 0; guidlist[len].tag != NULL; len++)
		;
	jcls = (*ctx->jenv)->FindClass(ctx->jenv,
				       "org/stoken/LibStoken$StokenGUID");
	if (jcls == NULL)
		return NULL;
	mid = (*ctx->jenv)->GetMethodID(ctx->jenv, jcls, "<init>", "()V");
	if (!mid)
		return NULL;
	jarr = (*ctx->jenv)->NewObjectArray(ctx->jenv, len, jcls, NULL);
	if (!jarr)
		return NULL;
	for (i = 0; i < len; i++) {
		const struct stoken_guid *g = &guidlist[i];
		/* jobj is reused as the per-element StokenGUID instance. */
		jobj = (*ctx->jenv)->NewObject(ctx->jenv, jcls, mid);
		if (!jobj)
			return NULL;
		if (set_string(ctx, jobj, "tag", g->tag) ||
		    set_string(ctx, jobj, "longName", g->long_name) ||
		    set_string(ctx, jobj, "GUID", g->guid))
			return NULL;
		(*ctx->jenv)->SetObjectArrayElement(ctx->jenv, jarr, i, jobj);
	}
	return jarr;
}
/*
 * Decrypt the imported seed using password @jarg0 and device ID @jarg1
 * (either may be NULL if not required).  Returns a SUCCESS/... status code.
 */
JNIEXPORT jint JNICALL Java_org_stoken_LibStoken_decryptSeed(
	JNIEnv *jenv, jobject jobj, jstring jarg0, jstring jarg1)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0 = NULL, *arg1 = NULL;
	int ret = -ENOMEM;
	if (jarg0) {
		arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
		if (!arg0)
			goto out;
	}
	if (jarg1) {
		arg1 = (*jenv)->GetStringUTFChars(jenv, jarg1, NULL);
		if (!arg1)
			goto out;
	}
	ret = stoken_decrypt_seed(ctx->instance, arg0, arg1);
out:
	/* Release only the strings that were successfully pinned. */
	if (arg1)
		(*jenv)->ReleaseStringUTFChars(jenv, jarg1, arg1);
	if (arg0)
		(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return translate_errno(jenv, ret);
}
/*
 * Re-encrypt the seed with new password @jarg0 / device ID @jarg1 (either
 * may be NULL) and return the resulting token string, or NULL on failure.
 */
JNIEXPORT jstring JNICALL Java_org_stoken_LibStoken_encryptSeed(
	JNIEnv *jenv, jobject jobj, jstring jarg0, jstring jarg1)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *arg0 = NULL, *arg1 = NULL;
	char *ret;
	jstring jret = NULL;
	if (jarg0) {
		arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
		if (!arg0)
			goto out;
	}
	if (jarg1) {
		arg1 = (*jenv)->GetStringUTFChars(jenv, jarg1, NULL);
		if (!arg1)
			goto out;
	}
	ret = stoken_encrypt_seed(ctx->instance, arg0, arg1);
	/* NewStringUTF must not see a NULL pointer; free(NULL) is harmless. */
	jret = ret ? (*jenv)->NewStringUTF(jenv, ret) : NULL;
	free(ret);
out:
	/* Release only the strings that were successfully pinned. */
	if (arg1)
		(*jenv)->ReleaseStringUTFChars(jenv, jarg1, arg1);
	if (arg0)
		(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return jret;
}
/*
 * Compute the tokencode for UNIX time @jwhen (0 means "now") using optional
 * PIN @jpin.  Returns the code as a string, or NULL on failure.
 */
JNIEXPORT jstring JNICALL Java_org_stoken_LibStoken_computeTokencode(
	JNIEnv *jenv, jobject jobj, jlong jwhen, jstring jpin)
{
	struct libctx *ctx = getctx(jenv, jobj);
	const char *pin = NULL;
	time_t when = jwhen ? jwhen : time(NULL);
	char tokencode[STOKEN_MAX_TOKENCODE + 1];
	jstring ret = NULL;
	if (jpin) {
		pin = (*jenv)->GetStringUTFChars(jenv, jpin, NULL);
		if (!pin) {
			OOM(jenv);
			return NULL;
		}
	}
	if (stoken_compute_tokencode(ctx->instance, when, pin, tokencode) == 0)
		ret = (*jenv)->NewStringUTF(jenv, tokencode);
	if (jpin)
		(*jenv)->ReleaseStringUTFChars(jenv, jpin, pin);
	return ret;
}
/*
 * Pretty-print a tokencode string (stoken_format_tokencode()) and return the
 * formatted form, or NULL on failure.
 *
 * Fix: guard the NewStringUTF() call against a NULL result from
 * stoken_format_tokencode() — passing NULL to NewStringUTF is undefined
 * behavior; this matches the guarded pattern used in encryptSeed().
 */
JNIEXPORT jstring JNICALL Java_org_stoken_LibStoken_formatTokencode(
	JNIEnv *jenv, jobject jobj, jstring jarg0)
{
	const char *arg0;
	char *ret;
	jstring jret = NULL;

	if (!jarg0)
		return NULL;

	arg0 = (*jenv)->GetStringUTFChars(jenv, jarg0, NULL);
	if (!arg0)
		return NULL;

	ret = stoken_format_tokencode(arg0);
	jret = ret ? (*jenv)->NewStringUTF(jenv, ret) : NULL;
	free(ret);
	(*jenv)->ReleaseStringUTFChars(jenv, jarg0, arg0);
	return jret;
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See the LICENSE.txt file in the project root
// for the license information.
using System;
using Xamarin.Flex;
public partial class Test
{
    // Exercises the Margin shorthand on Item: sets the four sides
    // individually first, then verifies that assigning the aggregate
    // Margin property overwrites all four at once.
    void test_set_margin_all()
    {
        Item item = new Item
        {
            MarginTop = 1,
            MarginBottom = 2,
            MarginLeft = 3,
            MarginRight = 4,
        };
        assert(item.MarginTop == 1);
        assert(item.MarginBottom == 2);
        assert(item.MarginLeft == 3);
        assert(item.MarginRight == 4);

        // The aggregate setter propagates to every side.
        item.Margin = 42;
        assert(item.MarginTop == 42);
        assert(item.MarginBottom == 42);
        assert(item.MarginLeft == 42);
        assert(item.MarginRight == 42);

        // ...including back to zero.
        item.Margin = 0;
        assert(item.MarginTop == 0);
        assert(item.MarginBottom == 0);
        assert(item.MarginLeft == 0);
        assert(item.MarginRight == 0);

        item.Dispose();
    }
}
| {
"pile_set_name": "Github"
} |
# Heap ceiling for the Gradle daemon JVM.
org.gradle.jvmargs=-Xmx1536M
# Use the R8 code shrinker instead of ProGuard.
android.enableR8=true
# Build against AndroidX artifacts...
android.useAndroidX=true
# ...and rewrite third-party support-library dependencies to AndroidX.
android.enableJetifier=true
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright (c) 2013, Daniel Murphy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
package org.jbox2d.dynamics.joints;
import org.jbox2d.dynamics.Body;
/**
* A joint edge is used to connect bodies and joints together
* in a joint graph where each body is a node and each joint
* is an edge. A joint edge belongs to a doubly linked list
* maintained in each attached body. Each joint has two joint
* nodes, one for each attached body.
* @author Daniel
*/
/**
 * A joint edge is used to connect bodies and joints together in a joint
 * graph where each body is a node and each joint is an edge. A joint edge
 * belongs to a doubly linked list maintained in each attached body. Each
 * joint has two joint nodes, one for each attached body.
 *
 * All fields are initialized to null and wired up by the joint/body code.
 *
 * @author Daniel
 */
public class JointEdge {
	/**
	 * Provides quick access to the other body attached to this joint.
	 */
	public Body other = null;
	/**
	 * The joint this edge represents.
	 */
	public Joint joint = null;
	/**
	 * The previous joint edge in the body's joint list.
	 */
	public JointEdge prev = null;
	/**
	 * The next joint edge in the body's joint list.
	 */
	public JointEdge next = null;
}
| {
"pile_set_name": "Github"
} |
(set-logic QF_NRA)
(set-info :smt-lib-version 2.0)
; Unknowns: a real x and a boolean selector b.
(declare-fun x () Real)
(declare-fun b () Bool)
; x < (if b then x+1 else x-1): satisfiable exactly when b is true.
(assert (< x (ite b (+ x 1) (- x 1))))
(check-sat)
(exit)
| {
"pile_set_name": "Github"
} |
/*
* LSP computing for ACELP-based codecs
*
* Copyright (c) 2008 Vladimir Voroshilov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_LSP_H
#define AVCODEC_LSP_H
#include <stdint.h>
/**
(I.F) means fixed-point value with F fractional and I integer bits
*/
/**
* @brief ensure a minimum distance between LSFs
* @param[in,out] lsfq LSF to check and adjust
* @param lsfq_min_distance minimum distance between LSFs
* @param lsfq_min minimum allowed LSF value
* @param lsfq_max maximum allowed LSF value
* @param lp_order LP filter order
*/
void ff_acelp_reorder_lsf(int16_t* lsfq, int lsfq_min_distance, int lsfq_min, int lsfq_max, int lp_order);
/**
* Adjust the quantized LSFs so they are increasing and not too close.
*
* This step is not mentioned in the AMR spec but is in the reference C decoder.
* Omitting this step creates audible distortion on the sinusoidal sweep
* test vectors in 3GPP TS 26.074.
*
* @param[in,out] lsf LSFs in Hertz
* @param min_spacing minimum distance between two consecutive lsf values
* @param size size of the lsf vector
*/
void ff_set_min_dist_lsf(float *lsf, double min_spacing, int size);
/**
* @brief Convert LSF to LSP
* @param[out] lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000)
* @param lsf normalized LSF coefficients (0 <= (2.13) < 0x2000 * PI)
* @param lp_order LP filter order
*
* @remark It is safe to pass the same array into the lsf and lsp parameters.
*/
void ff_acelp_lsf2lsp(int16_t *lsp, const int16_t *lsf, int lp_order);
/**
* Floating point version of ff_acelp_lsf2lsp()
*/
void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order);
/**
* @brief LSP to LP conversion (3.2.6 of G.729)
* @param[out] lp decoded LP coefficients (-0x8000 <= (3.12) < 0x8000)
* @param lsp LSP coefficients (-0x8000 <= (0.15) < 0x8000)
* @param lp_half_order LP filter order, divided by 2
*/
void ff_acelp_lsp2lpc(int16_t* lp, const int16_t* lsp, int lp_half_order);
/**
* LSP to LP conversion (5.2.4 of AMR-WB)
*/
void ff_amrwb_lsp2lpc(const double *lsp, float *lp, int lp_order);
/**
* @brief Interpolate LSP for the first subframe and convert LSP -> LP for both subframes (3.2.5 and 3.2.6 of G.729)
* @param[out] lp_1st decoded LP coefficients for first subframe (-0x8000 <= (3.12) < 0x8000)
* @param[out] lp_2nd decoded LP coefficients for second subframe (-0x8000 <= (3.12) < 0x8000)
* @param lsp_2nd LSP coefficients of the second subframe (-0x8000 <= (0.15) < 0x8000)
* @param lsp_prev LSP coefficients from the second subframe of the previous frame (-0x8000 <= (0.15) < 0x8000)
* @param lp_order LP filter order
*/
void ff_acelp_lp_decode(int16_t* lp_1st, int16_t* lp_2nd, const int16_t* lsp_2nd, const int16_t* lsp_prev, int lp_order);
#define MAX_LP_HALF_ORDER 10
#define MAX_LP_ORDER (2*MAX_LP_HALF_ORDER)
/**
* Reconstruct LPC coefficients from the line spectral pair frequencies.
*
* @param lsp line spectral pairs in cosine domain
* @param lpc linear predictive coding coefficients
* @param lp_half_order half the number of the amount of LPCs to be
* reconstructed, need to be smaller or equal to MAX_LP_HALF_ORDER
*
* @note buffers should have a minimum size of 2*lp_half_order elements.
*
* TIA/EIA/IS-733 2.4.3.3.5
*/
void ff_acelp_lspd2lpc(const double *lsp, float *lpc, int lp_half_order);
/**
* Sort values in ascending order.
*
* @note O(n) if data already sorted, O(n^2) - otherwise
*/
void ff_sort_nearly_sorted_floats(float *vals, int len);
/**
* Compute the Pa / (1 + z(-1)) or Qa / (1 - z(-1)) coefficients
* needed for LSP to LPC conversion.
* We only need to calculate the 6 first elements of the polynomial.
*
* @param lsp line spectral pairs in cosine domain
* @param[out] f polynomial input/output as a vector
*
* TIA/EIA/IS-733 2.4.3.3.5-1/2
*/
void ff_lsp2polyf(const double *lsp, double *f, int lp_half_order);
#endif /* AVCODEC_LSP_H */
| {
"pile_set_name": "Github"
} |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for arm64, OpenBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
// Each stub tail-jumps to package syscall's implementation of the same
// function; the frame sizes (e.g. $0-56) describe the caller's argument
// area so the runtime/GC see the right layout.
TEXT ·Syscall(SB),NOSPLIT,$0-56
	JMP	syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
	JMP	syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
	JMP	syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	JMP	syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	JMP	syscall·RawSyscall6(SB)
| {
"pile_set_name": "Github"
} |
/*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds @ SMU
* Allan Taylor, Alan Hindmarsh and Radu Serban @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem for IDA: 2D heat equation, parallel, GMRES.
*
* This example solves a discretized 2D heat equation problem.
* This version uses the Krylov solver SUNLinSol_SPGMR.
*
* The DAE system solved is a spatial discretization of the PDE
* du/dt = d^2u/dx^2 + d^2u/dy^2
* on the unit square. The boundary condition is u = 0 on all edges.
* Initial conditions are given by u = 16 x (1 - x) y (1 - y).
* The PDE is treated with central differences on a uniform MX x MY
* grid. The values of u at the interior points satisfy ODEs, and
* equations u = 0 at the boundaries are appended, to form a DAE
* system of size N = MX * MY. Here MX = MY = 10.
*
* The system is actually implemented on submeshes, processor by
* processor, with an MXSUB by MYSUB mesh on each of NPEX * NPEY
* processors.
*
* The system is solved with IDA using the Krylov linear solver
* SUNLinSol_SPGMR. The preconditioner uses the diagonal elements of the
* Jacobian only. Routines for preconditioning, required by
* SUNLinSol_SPGMR, are supplied here. The constraints u >= 0 are posed
* for all components. Local error testing on the boundary values
* is suppressed. Output is taken at t = 0, .01, .02, .04,
* ..., 10.24.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <idas/idas.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <nvector/nvector_parallel.h>
#include <sundials/sundials_types.h>
#include <mpi.h>
#define ZERO RCONST(0.0)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define NOUT 11 /* Number of output times */
#define NPEX 2 /* No. PEs in x direction of PE array */
#define NPEY 2 /* No. PEs in y direction of PE array */
/* Total no. PEs = NPEX*NPEY */
#define MXSUB 5 /* No. x points per subgrid */
#define MYSUB 5 /* No. y points per subgrid */
#define MX (NPEX*MXSUB) /* MX = number of x mesh points */
#define MY (NPEY*MYSUB) /* MY = number of y mesh points */
/* Spatial mesh is MX by MY */
/* Per-process problem data, passed to user routines via user_data. */
typedef struct {
  int thispe, npex, npey, ixsub, jysub;   /* this PE's rank and its (x,y) position
                                             in the NPEX x NPEY processor grid     */
  sunindextype mx, my, mxsub, mysub;      /* global and per-PE mesh dimensions     */
  realtype dx, dy, coeffx, coeffy, coeffxy; /* mesh spacings and FD coefficients   */
  realtype uext[(MXSUB+2)*(MYSUB+2)];     /* local u with a ghost-cell border      */
  N_Vector pp;    /* vector of diagonal preconditioner elements */
  MPI_Comm comm;  /* communicator shared by all PEs */
} *UserData;
/* User-supplied residual function and supporting routines */
int resHeat(realtype tt, N_Vector uu, N_Vector up,
N_Vector rr, void *user_data);
static int rescomm(N_Vector uu, N_Vector up, void *user_data);
static int reslocal(realtype tt, N_Vector uu, N_Vector up,
N_Vector res, void *user_data);
static int BSend(MPI_Comm comm, int thispe, int ixsub, int jysub,
sunindextype dsizex, sunindextype dsizey, realtype uarray[]);
static int BRecvPost(MPI_Comm comm, MPI_Request request[], int thispe,
int ixsub, int jysub, sunindextype dsizex,
sunindextype dsizey, realtype uext[], realtype buffer[]);
static int BRecvWait(MPI_Request request[], int ixsub, int jysub,
sunindextype dsizex, realtype uext[], realtype buffer[]);
/* User-supplied preconditioner routines */
int PsolveHeat(realtype tt, N_Vector uu, N_Vector up, N_Vector rr,
N_Vector rvec, N_Vector zvec, realtype c_j,
realtype delta, void *user_data);
int PsetupHeat(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr,
realtype c_j, void *user_data);
/* Private function to check function return values */
static int InitUserData(int thispe, MPI_Comm comm, UserData data);
static int SetInitialProfile(N_Vector uu, N_Vector up, N_Vector id,
N_Vector res, UserData data);
static void PrintHeader(sunindextype Neq, realtype rtol, realtype atol);
static void PrintOutput(int id, void *ida_mem, realtype t, N_Vector uu);
static void PrintFinalStats(void *ida_mem);
static int check_retval(void *returnvalue, const char *funcname, int opt, int id);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
int main(int argc, char *argv[])
{
  MPI_Comm comm;
  void *ida_mem;
  SUNLinearSolver LS;
  UserData data;
  int iout, thispe, retval, npes;
  sunindextype Neq, local_N;
  realtype rtol, atol, t0, t1, tout, tret;
  N_Vector uu, up, constraints, id, res;
  /* Null out all handles so cleanup paths are safe before allocation. */
  ida_mem = NULL;
  LS = NULL;
  data = NULL;
  uu = up = constraints = id = res = NULL;
  /* Get processor number and total number of pe's. */
  MPI_Init(&argc, &argv);
  comm = MPI_COMM_WORLD;
  MPI_Comm_size(comm, &npes);
  MPI_Comm_rank(comm, &thispe);
  /* The decomposition is a fixed NPEX x NPEY grid of subdomains, so the
     job must be launched with exactly that many ranks. */
  if (npes != NPEX*NPEY) {
    if (thispe == 0)
      fprintf(stderr,
              "\nMPI_ERROR(0): npes = %d is not equal to NPEX*NPEY = %d\n",
              npes,NPEX*NPEY);
    MPI_Finalize();
    return(1);
  }
  /* Set local length local_N and global length Neq. */
  local_N = MXSUB*MYSUB;
  Neq = MX * MY;
  /* Allocate and initialize the data structure and N-vectors. */
  data = (UserData) malloc(sizeof *data);
  data->pp = NULL;
  if(check_retval((void *)data, "malloc", 2, thispe))
    MPI_Abort(comm, 1);
  uu = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)uu, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  up = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)up, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  res = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)res, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  constraints = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)constraints, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  id = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)id, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  /* An N-vector to hold preconditioner. */
  data->pp = N_VNew_Parallel(comm, local_N, Neq);
  if(check_retval((void *)data->pp, "N_VNew_Parallel", 0, thispe))
    MPI_Abort(comm, 1);
  InitUserData(thispe, comm, data);
  /* Initialize the uu, up, id, and res profiles. */
  SetInitialProfile(uu, up, id, res, data);
  /* Set constraints to all 1's for nonnegative solution values. */
  N_VConst(ONE, constraints);
  t0 = ZERO; t1 = RCONST(0.01);
  /* Scalar relative and absolute tolerance. */
  rtol = ZERO;
  atol = RCONST(1.0e-3);
  /* Call IDACreate and IDAMalloc to initialize solution. */
  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0, thispe)) MPI_Abort(comm, 1);
  retval = IDASetUserData(ida_mem, data);
  if(check_retval(&retval, "IDASetUserData", 1, thispe)) MPI_Abort(comm, 1);
  /* Suppress local error testing on the algebraic (boundary) components. */
  retval = IDASetSuppressAlg(ida_mem, SUNTRUE);
  if(check_retval(&retval, "IDASetSuppressAlg", 1, thispe)) MPI_Abort(comm, 1);
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1, thispe)) MPI_Abort(comm, 1);
  retval = IDASetConstraints(ida_mem, constraints);
  if(check_retval(&retval, "IDASetConstraints", 1, thispe)) MPI_Abort(comm, 1);
  /* constraints is destroyed right after registration — presumably IDA
     keeps its own copy; TODO confirm against the IDA documentation. */
  N_VDestroy(constraints);
  retval = IDAInit(ida_mem, resHeat, t0, uu, up);
  if(check_retval(&retval, "IDAInit", 1, thispe)) MPI_Abort(comm, 1);
  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1, thispe)) MPI_Abort(comm, 1);
  /* Call SUNLinSol_SPGMR and IDASetLinearSolver to specify the linear solver. */
  LS = SUNLinSol_SPGMR(uu, PREC_LEFT, 0);  /* use default maxl */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0, thispe)) MPI_Abort(comm, 1);
  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1, thispe)) MPI_Abort(comm, 1);
  retval = IDASetPreconditioner(ida_mem, PsetupHeat, PsolveHeat);
  if(check_retval(&retval, "IDASetPreconditioner", 1, thispe)) MPI_Abort(comm, 1);
  /* Print output heading (on processor 0 only) and initial solution */
  if (thispe == 0) PrintHeader(Neq, rtol, atol);
  PrintOutput(thispe, ida_mem, t0, uu);
  /* Loop over tout, call IDASolve, print output: output times double each step. */
  for (tout = t1, iout = 1; iout <= NOUT; iout++, tout *= TWO) {
    retval = IDASolve(ida_mem, tout, &tret, uu, up, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1, thispe)) MPI_Abort(comm, 1);
    PrintOutput(thispe, ida_mem, tret, uu);
  }
  /* Print remaining counters. */
  if (thispe == 0) PrintFinalStats(ida_mem);
  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  N_VDestroy(id);
  N_VDestroy(res);
  N_VDestroy(up);
  N_VDestroy(uu);
  N_VDestroy(data->pp);
  free(data);
  MPI_Finalize();
  return(0);
}
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resHeat: heat equation system residual function
* This uses 5-point central differencing on the interior points, and
* includes algebraic equations for the boundary values.
* So for each interior point, the residual component has the form
* res_i = u'_i - (central difference)_i
* while for each boundary point, it is res_i = u_i.
*
* This parallel implementation uses several supporting routines.
* First a call is made to rescomm to do communication of subgrid boundary
* data into array uext. Then reslocal is called to compute the residual
* on individual processors and their corresponding domains. The routines
* BSend, BRecvPost, and BREcvWait handle interprocessor communication
* of uu required to calculate the residual.
*/
/*
 * resHeat: heat equation system residual function F(t, u, u').
 * First exchanges subgrid boundary data (rescomm), then evaluates the
 * residual on this PE's subdomain (reslocal). Returns 0 on success or
 * a nonzero flag from either phase.
 */
int resHeat(realtype tt, N_Vector uu, N_Vector up, N_Vector rr,
            void *user_data)
{
  int retval;
  /* Call rescomm to do inter-processor communication. Previously its
     return value was discarded; propagate it so IDA can react. */
  retval = rescomm(uu, up, user_data);
  if (retval != 0) return(retval);
  /* Call reslocal to calculate res. */
  retval = reslocal(tt, uu, up, rr, user_data);
  return(retval);
}
/*
* PsetupHeat: setup for diagonal preconditioner for heatsk.
*
* The optional user-supplied functions PsetupHeat and
* PsolveHeat together must define the left preconditoner
* matrix P approximating the system Jacobian matrix
* J = dF/du + cj*dF/du'
* (where the DAE system is F(t,u,u') = 0), and solve the linear
* systems P z = r. This is done in this case by keeping only
* the diagonal elements of the J matrix above, storing them as
* inverses in a vector pp, when computed in PsetupHeat, for
* subsequent use in PsolveHeat.
*
* In this instance, only cj and data (user data structure, with
* pp etc.) are used from the PsetupHeat argument list.
*
*/
int PsetupHeat(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr,
               realtype c_j, void *user_data)
{
  realtype *ppv, pelinv;
  sunindextype lx, ly, ixbegin, ixend, jybegin, jyend, locu, mxsub, mysub;
  int ixsub, jysub, npex, npey;
  UserData data;
  data = (UserData) user_data;
  ppv = N_VGetArrayPointer(data->pp);
  ixsub = data->ixsub;
  jysub = data->jysub;
  mxsub = data->mxsub;
  mysub = data->mysub;
  npex = data->npex;
  npey = data->npey;
  /* Initially set all pp elements to one. Boundary (algebraic) rows have
     Jacobian diagonal 1, so they keep this value. */
  N_VConst(ONE, data->pp);
  /* Prepare to loop over subgrid. */
  ixbegin = 0;
  ixend = mxsub-1;
  jybegin = 0;
  jyend = mysub-1;
  /* Shrink the range on sides that touch the physical boundary, so only
     interior (differential) points get the interior diagonal value. */
  if (ixsub == 0) ixbegin++;
  if (ixsub == npex-1) ixend--;
  if (jysub == 0) jybegin++;
  if (jysub == npey-1) jyend--;
  /* Interior diagonal of J = dF/du + cj*dF/du' is (c_j + coeffxy); store
     its inverse once and reuse it for every interior point. */
  pelinv = ONE/(c_j + data->coeffxy);
  /* Load the inverse of the preconditioner diagonal elements
     in loop over all the local subgrid. */
  for (ly = jybegin; ly <=jyend; ly++) {
    for (lx = ixbegin; lx <= ixend; lx++) {
      locu = lx + ly*mxsub;
      ppv[locu] = pelinv;
    }
  }
  return(0);
}
/*
* PsolveHeat: solve preconditioner linear system.
* This routine multiplies the input vector rvec by the vector pp
* containing the inverse diagonal Jacobian elements (previously
* computed in PsetupHeat), returning the result in zvec.
*/
/*
 * PsolveHeat: apply the diagonal preconditioner.
 * Computes zvec = pp .* rvec, where pp holds the inverses of the
 * approximate Jacobian diagonal computed earlier in PsetupHeat.
 * Only rvec, zvec, and user_data are used; the remaining arguments are
 * part of the required IDA preconditioner-solve signature.
 */
int PsolveHeat(realtype tt, N_Vector uu, N_Vector up,
               N_Vector rr, N_Vector rvec, N_Vector zvec,
               realtype c_j, realtype delta, void *user_data)
{
  UserData heatdata = (UserData) user_data;
  N_VProd(heatdata->pp, rvec, zvec);
  return(0);
}
/*
*--------------------------------------------------------------------
* SUPPORTING FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* rescomm routine. This routine performs all inter-processor
* communication of data in u needed to calculate G.
*/
static int rescomm(N_Vector uu, N_Vector up, void *user_data)
{
  UserData data;
  realtype *uarray, *uext, buffer[2*MYSUB];
  MPI_Comm comm;
  int thispe, ixsub, jysub;
  sunindextype mxsub, mysub;
  MPI_Request request[4];
  data = (UserData) user_data;
  uarray = N_VGetArrayPointer(uu);
  /* Get comm, thispe, subgrid indices, data sizes, extended array uext. */
  comm = data->comm;  thispe = data->thispe;
  ixsub = data->ixsub;   jysub = data->jysub;
  mxsub = data->mxsub;   mysub = data->mysub;
  uext = data->uext;
  /* Start receiving boundary data from neighboring PEs. Receives are
     posted before the sends so matching messages can land immediately. */
  BRecvPost(comm, request, thispe, ixsub, jysub, mxsub, mysub, uext, buffer);
  /* Send data from boundary of local grid to neighboring PEs. */
  BSend(comm, thispe, ixsub, jysub, mxsub, mysub, uarray);
  /* Finish receiving boundary data from neighboring PEs; this also
     scatters the column data staged in buffer into uext. */
  BRecvWait(request, ixsub, jysub, mxsub, uext, buffer);
  return(0);
}
/*
* reslocal routine. Compute res = F(t, uu, up). This routine assumes
* that all inter-processor communication of data needed to calculate F
* has already been done, and that this data is in the work array uext.
*/
static int reslocal(realtype tt, N_Vector uu, N_Vector up, N_Vector rr,
                    void *user_data)
{
  realtype *uext, *uuv, *upv, *resv;
  realtype termx, termy, termctr;
  sunindextype lx, ly, offsetu, offsetue, locu, locue;
  int ixsub, jysub, npex, npey;
  sunindextype mxsub, mxsub2, mysub;
  sunindextype ixbegin, ixend, jybegin, jyend;
  UserData data;
  /* Get subgrid indices, array sizes, extended work array uext. */
  data = (UserData) user_data;
  uext = data->uext;
  uuv = N_VGetArrayPointer(uu);
  upv = N_VGetArrayPointer(up);
  resv = N_VGetArrayPointer(rr);
  ixsub = data->ixsub; jysub = data->jysub;
  mxsub = data->mxsub; mxsub2 = data->mxsub + 2;
  mysub = data->mysub; npex = data->npex; npey = data->npey;
  /* Initialize all elements of rr to uu. This sets the boundary
     elements simply without indexing hassles. */
  N_VScale(ONE, uu, rr);
  /* Copy local segment of u vector into the working extended array uext.
     This completes uext prior to the computation of the rr vector.
     uext carries a one-cell halo on all sides, hence the mxsub2 row
     pitch and the (mxsub2 + 1) offset to the first interior cell. */
  offsetu = 0;
  offsetue = mxsub2 + 1;
  for (ly = 0; ly < mysub; ly++) {
    for (lx = 0; lx < mxsub; lx++) uext[offsetue+lx] = uuv[offsetu+lx];
    offsetu = offsetu + mxsub;
    offsetue = offsetue + mxsub2;
  }
  /* Set loop limits for the interior of the local subgrid. Sides that
     touch a physical boundary are excluded; those components keep the
     algebraic residual res = u set above. */
  ixbegin = 0;
  ixend = mxsub-1;
  jybegin = 0;
  jyend = mysub-1;
  if (ixsub == 0) ixbegin++;
  if (ixsub == npex-1) ixend--;
  if (jysub == 0) jybegin++;
  if (jysub == npey-1) jyend--;
  /* Loop over all grid points in local subgrid, applying the 5-point
     central difference: res = u' - (u_xx + u_yy). */
  for (ly = jybegin; ly <=jyend; ly++) {
    for (lx = ixbegin; lx <= ixend; lx++) {
      locu = lx + ly*mxsub;            /* index into the local vectors */
      locue = (lx+1) + (ly+1)*mxsub2;  /* same point inside the halo array */
      termx = data->coeffx *(uext[locue-1] + uext[locue+1]);
      termy = data->coeffy *(uext[locue-mxsub2] + uext[locue+mxsub2]);
      termctr = data->coeffxy*uext[locue];
      resv[locu] = upv[locu] - (termx + termy - termctr);
    }
  }
  return(0);
}
/*
* Routine to send boundary data to neighboring PEs.
*/
/*
 * Routine to send boundary data to neighboring PEs.
 * Rows (x-lines) are contiguous and sent in place; columns (y-lines)
 * are gathered into bufleft/bufright first. dsizex/dsizey are the row
 * and column lengths of the local subgrid.
 */
static int BSend(MPI_Comm comm, int thispe, int ixsub, int jysub,
                 sunindextype dsizex, sunindextype dsizey, realtype uarray[])
{
  sunindextype ly, offsetu;
  realtype bufleft[MYSUB], bufright[MYSUB];
  /* If jysub > 0, send data from bottom x-line of u. */
  if (jysub != 0)
    MPI_Send(&uarray[0], (int) dsizex, MPI_SUNREALTYPE, thispe-NPEX, 0, comm);
  /* If jysub < NPEY-1, send data from top x-line of u. */
  if (jysub != NPEY-1) {
    offsetu = (MYSUB-1)*dsizex;
    MPI_Send(&uarray[offsetu], (int) dsizex, MPI_SUNREALTYPE,
             thispe+NPEX, 0, comm);
  }
  /* If ixsub > 0, send data from left y-line of u (via bufleft). */
  if (ixsub != 0) {
    for (ly = 0; ly < MYSUB; ly++) {
      offsetu = ly*dsizex;
      bufleft[ly] = uarray[offsetu];
    }
    MPI_Send(&bufleft[0], (int) dsizey, MPI_SUNREALTYPE, thispe-1, 0, comm);
  }
  /* If ixsub < NPEX-1, send data from right y-line of u (via bufright).
     Fix: use the row-length parameter dsizex consistently (the original
     mixed in the compile-time MXSUB here, which only works while the
     caller always passes dsizex == MXSUB). */
  if (ixsub != NPEX-1) {
    for (ly = 0; ly < MYSUB; ly++) {
      offsetu = ly*dsizex + (dsizex-1);
      bufright[ly] = uarray[offsetu];
    }
    MPI_Send(&bufright[0], (int) dsizey, MPI_SUNREALTYPE, thispe+1, 0, comm);
  }
  return(0);
}
/*
* Routine to start receiving boundary data from neighboring PEs.
* Notes:
* 1) buffer should be able to hold 2*MYSUB realtype entries, should be
* passed to both the BRecvPost and BRecvWait functions, and should not
* be manipulated between the two calls.
* 2) request should have 4 entries, and should be passed in
* both calls also.
*/
static int BRecvPost(MPI_Comm comm, MPI_Request request[], int thispe,
                     int ixsub, int jysub, sunindextype dsizex,
                     sunindextype dsizey, realtype uext[], realtype buffer[])
{
  sunindextype offsetue;
  /* Have bufleft and bufright use the same buffer. */
  realtype *bufleft = buffer, *bufright = buffer+MYSUB;
  /* If jysub > 0, receive data for bottom x-line of uext. The receive
     lands at uext[1], skipping the unused corner cell of the halo. */
  if (jysub != 0)
    MPI_Irecv(&uext[1], (int) dsizex, MPI_SUNREALTYPE,
              thispe-NPEX, 0, comm, &request[0]);
  /* If jysub < NPEY-1, receive data for top x-line of uext. */
  if (jysub != NPEY-1) {
    offsetue = (1 + (MYSUB+1)*(MXSUB+2));
    MPI_Irecv(&uext[offsetue], (int) dsizex, MPI_SUNREALTYPE,
              thispe+NPEX, 0, comm, &request[1]);
  }
  /* If ixsub > 0, receive data for left y-line of uext (via bufleft).
     Columns are not contiguous in uext, so they are staged in buffer
     and scattered into uext later by BRecvWait. */
  if (ixsub != 0) {
    MPI_Irecv(&bufleft[0], (int) dsizey, MPI_SUNREALTYPE,
              thispe-1, 0, comm, &request[2]);
  }
  /* If ixsub < NPEX-1, receive data for right y-line of uext (via bufright). */
  if (ixsub != NPEX-1) {
    MPI_Irecv(&bufright[0], (int) dsizey, MPI_SUNREALTYPE,
              thispe+1, 0, comm, &request[3]);
  }
  return(0);
}
/*
* Routine to finish receiving boundary data from neighboring PEs.
* Notes:
* 1) buffer should be able to hold 2*MYSUB realtype entries, should be
* passed to both the BRecvPost and BRecvWait functions, and should not
* be manipulated between the two calls.
* 2) request should have four entries, and should be passed in both
* calls also.
*/
static int BRecvWait(MPI_Request request[], int ixsub, int jysub,
                     sunindextype dsizex, realtype uext[], realtype buffer[])
{
  sunindextype ly, dsizex2, offsetue;
  realtype *bufleft = buffer, *bufright = buffer+MYSUB;
  MPI_Status status;
  /* Row pitch of the extended (haloed) array. */
  dsizex2 = dsizex + 2;
  /* If jysub > 0, receive data for bottom x-line of uext. Rows were
     received directly into uext, so only the wait is needed. */
  if (jysub != 0)
    MPI_Wait(&request[0],&status);
  /* If jysub < NPEY-1, receive data for top x-line of uext. */
  if (jysub != NPEY-1)
    MPI_Wait(&request[1],&status);
  /* If ixsub > 0, receive data for left y-line of uext (via bufleft). */
  if (ixsub != 0) {
    MPI_Wait(&request[2],&status);
    /* Copy the buffer to uext: first cell of each interior halo row. */
    for (ly = 0; ly < MYSUB; ly++) {
      offsetue = (ly+1)*dsizex2;
      uext[offsetue] = bufleft[ly];
    }
  }
  /* If ixsub < NPEX-1, receive data for right y-line of uext (via bufright). */
  if (ixsub != NPEX-1) {
    MPI_Wait(&request[3],&status);
    /* Copy the buffer to uext: last cell of each interior halo row. */
    for (ly = 0; ly < MYSUB; ly++) {
      offsetue = (ly+2)*dsizex2 - 1;
      uext[offsetue] = bufright[ly];
    }
  }
  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData initializes the user's data block data.
*/
/*
 * InitUserData initializes the user's data block data.
 * Mesh spacings assume the unit square [0,1] x [0,1]; the linear PE
 * rank is mapped onto a 2D (ixsub, jysub) processor grid.
 */
static int InitUserData(int thispe, MPI_Comm comm, UserData data)
{
  realtype dx, dy;

  dx = ONE/(MX-ONE);  /* x mesh spacing on [0,1] */
  dy = ONE/(MY-ONE);  /* y mesh spacing on [0,1] */

  data->thispe  = thispe;
  data->dx      = dx;
  data->dy      = dy;
  data->coeffx  = ONE/(dx * dx);
  data->coeffy  = ONE/(dy * dy);
  data->coeffxy = TWO/(dx * dx) + TWO/(dy * dy);

  /* 2D processor-grid coordinates of this rank. */
  data->jysub = thispe/NPEX;
  data->ixsub = thispe - data->jysub * NPEX;

  data->npex  = NPEX;
  data->npey  = NPEY;
  data->mx    = MX;
  data->my    = MY;
  data->mxsub = MXSUB;
  data->mysub = MYSUB;
  data->comm  = comm;

  return(0);
}
/*
* SetInitialProfile sets the initial values for the problem.
*/
static int SetInitialProfile(N_Vector uu, N_Vector up, N_Vector id,
                             N_Vector res, UserData data)
{
  int ixsub, jysub;
  sunindextype i, iloc, j, jloc, offset, loc;
  sunindextype ixbegin, ixend, jybegin, jyend;
  realtype xfact, yfact, *udata, *iddata;
  /* Initialize uu. */
  udata = N_VGetArrayPointer(uu);
  iddata = N_VGetArrayPointer(id);
  /* Set mesh spacings and subgrid indices for this PE. */
  ixsub = data->ixsub;
  jysub = data->jysub;
  /* Set beginning and ending locations in the global array corresponding
     to the portion of that array assigned to this processor. */
  ixbegin = MXSUB*ixsub;
  ixend = MXSUB*(ixsub+1) - 1;
  jybegin = MYSUB*jysub;
  jyend = MYSUB*(jysub+1) - 1;
  /* Loop over the local array, computing the initial profile value.
     The global indices are (i,j) and the local indices are (iloc,jloc).
     Also set the id vector to zero for boundary points, one otherwise. */
  N_VConst(ONE,id);
  for (j = jybegin, jloc = 0; j <= jyend; j++, jloc++) {
    yfact = data->dy*j;
    offset= jloc*MXSUB;
    for (i = ixbegin, iloc = 0; i <= ixend; i++, iloc++) {
      xfact = data->dx * i;
      loc = offset + iloc;
      /* u(x,y) = 16 x (1-x) y (1-y): zero on the boundary, peaked inside. */
      udata[loc] = RCONST(16.0) * xfact * (ONE - xfact) * yfact * (ONE - yfact);
      /* id = 0 marks algebraic (boundary) components for IDASetId. */
      if (i == 0 || i == MX-1 || j == 0 || j == MY-1) iddata[loc] = ZERO;
    }
  }
  /* Initialize up. */
  N_VConst(ZERO, up);    /* Initially set up = 0. */
  /* resHeat sets res to negative of ODE RHS values at interior points. */
  resHeat(ZERO, uu, up, res, data);
  /* Copy -res into up to get correct initial up values. */
  N_VScale(-ONE, res, up);
  return(0);
}
/*
* Print first lines of output and table heading
*/
static void PrintHeader(sunindextype Neq, realtype rtol, realtype atol)
{
  printf("\nidasHeat2D_p: Heat equation, parallel example problem for IDA\n");
  printf("            Discretized heat equation on 2D unit square.\n");
  printf("            Zero boundary conditions,");
  printf(" polynomial initial conditions.\n");
  printf("            Mesh dimensions: %d x %d", MX, MY);
  printf("        Total system size: %ld\n\n", (long int) Neq);
  printf("Subgrid dimensions: %d x %d", MXSUB, MYSUB);
  printf("        Processor array: %d x %d\n", NPEX, NPEY);
  /* realtype printing depends on the configured SUNDIALS precision;
     double and single precision share the %g format. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters:  rtol = %Lg   atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#endif
  printf("Constraints set to force all solution components >= 0. \n");
  printf("SUPPRESSALG = SUNTRUE to suppress local error testing on ");
  printf("all boundary components. \n");
  printf("Linear solver: SUNLinSol_SPGMR  ");
  printf("Preconditioner: diagonal elements only.\n");
  /* Print output table heading and initial line of table. */
  printf("\n   Output Summary (umax = max-norm of solution) \n\n");
  printf("  time     umax       k  nst  nni  nli   nre   nreLS    h      npe nps\n");
  printf("----------------------------------------------------------------------\n");
}
/*
* PrintOutput: print max norm of solution and current solver statistics
*/
/*
 * PrintOutput: print max norm of solution and current solver statistics.
 * N_VMaxNorm is a collective reduction, so every PE must call it; only
 * PE 0 queries the integrator and prints the table line.
 */
static void PrintOutput(int id, void *ida_mem, realtype t, N_Vector uu)
{
  realtype hused, umax;
  long int nst, nni, nje, nre, nreLS, nli, npe, nps;
  int kused, retval;
  umax = N_VMaxNorm(uu);
  if (id == 0) {
    retval = IDAGetLastOrder(ida_mem, &kused);
    check_retval(&retval, "IDAGetLastOrder", 1, id);
    retval = IDAGetNumSteps(ida_mem, &nst);
    check_retval(&retval, "IDAGetNumSteps", 1, id);
    retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
    check_retval(&retval, "IDAGetNumNonlinSolvIters", 1, id);
    retval = IDAGetNumResEvals(ida_mem, &nre);
    check_retval(&retval, "IDAGetNumResEvals", 1, id);
    retval = IDAGetLastStep(ida_mem, &hused);
    check_retval(&retval, "IDAGetLastStep", 1, id);
    retval = IDAGetNumJtimesEvals(ida_mem, &nje);
    check_retval(&retval, "IDAGetNumJtimesEvals", 1, id);
    retval = IDAGetNumLinIters(ida_mem, &nli);
    check_retval(&retval, "IDAGetNumLinIters", 1, id);
    retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
    check_retval(&retval, "IDAGetNumLinResEvals", 1, id);
    retval = IDAGetNumPrecEvals(ida_mem, &npe);
    /* Fixed error-message tag: it previously said "IDAGetPrecEvals",
       which is not the name of the function actually called. */
    check_retval(&retval, "IDAGetNumPrecEvals", 1, id);
    retval = IDAGetNumPrecSolves(ida_mem, &nps);
    check_retval(&retval, "IDAGetNumPrecSolves", 1, id);
#if defined(SUNDIALS_EXTENDED_PRECISION)
    printf(" %5.2Lf %13.5Le  %d  %3ld  %3ld  %3ld  %4ld  %4ld  %9.2Le  %3ld %3ld\n",
           t, umax, kused, nst, nni, nje, nre, nreLS, hused, npe, nps);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    printf(" %5.2f %13.5e  %d  %3ld  %3ld  %3ld  %4ld  %4ld  %9.2e  %3ld %3ld\n",
           t, umax, kused, nst, nni, nje, nre, nreLS, hused, npe, nps);
#else
    printf(" %5.2f %13.5e  %d  %3ld  %3ld  %3ld  %4ld  %4ld  %9.2e  %3ld %3ld\n",
           t, umax, kused, nst, nni, nje, nre, nreLS, hused, npe, nps);
#endif
  }
}
/*
* Print some final integrator statistics
*/
/*
 * Print some final integrator statistics: error-test failures and
 * nonlinear/linear convergence failures.
 */
static void PrintFinalStats(void *ida_mem)
{
  /* Initialize to zero so we never print indeterminate values if any
     of the IDAGet* calls below fails and leaves its output untouched. */
  long int netf = 0, ncfn = 0, ncfl = 0;
  IDAGetNumErrTestFails(ida_mem, &netf);
  IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
  IDAGetNumLinConvFails(ida_mem, &ncfl);
  printf("\nError test failures            = %ld\n", netf);
  printf("Nonlinear convergence failures = %ld\n", ncfn);
  printf("Linear convergence failures    = %ld\n", ncfl);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * Check function return value...
 *   opt == 0: SUNDIALS allocator       — error if returnvalue is NULL.
 *   opt == 1: SUNDIALS integer flag    — error if *returnvalue < 0.
 *   opt == 2: plain allocator (malloc) — error if returnvalue is NULL.
 * Prints a message tagged with the PE id on error and returns 1;
 * returns 0 otherwise.
 */
static int check_retval(void *returnvalue, const char *funcname, int opt, int id)
{
  if (opt == 0) {
    /* SUNDIALS constructor: NULL means the allocation failed. */
    if (returnvalue != NULL) return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR(%d): %s() failed - returned NULL pointer\n\n",
            id, funcname);
    return(1);
  }

  if (opt == 1) {
    /* SUNDIALS function returning an int flag: negative means failure. */
    int errval = *((int *) returnvalue);
    if (errval >= 0) return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR(%d): %s() failed with retval = %d\n\n",
            id, funcname, errval);
    return(1);
  }

  if (opt == 2 && returnvalue == NULL) {
    /* Ordinary allocator: NULL means out of memory. */
    fprintf(stderr,
            "\nMEMORY_ERROR(%d): %s() failed - returned NULL pointer\n\n",
            id, funcname);
    return(1);
  }

  return(0);
}
| {
"pile_set_name": "Github"
} |
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// A bit simpler than readable streams.
// Implement an async ._write(chunk, cb), and it'll handle all
// the drain event emission and buffering.
// Export before Writable is declared: function declarations are hoisted,
// so the reference is valid here.
module.exports = Writable;
/*<replacement>*/
var Buffer = require('buffer').Buffer;
/*</replacement>*/
Writable.WritableState = WritableState;
/*<replacement>*/
// core-util-is supplies the isBuffer/isString/isFunction/... checks used below.
var util = require('core-util-is');
util.inherits = require('inherits');
/*</replacement>*/
var Stream = require('stream');
// Writable inherits the EventEmitter machinery from the legacy Stream base.
util.inherits(Writable, Stream);
// WriteReq records one buffered write: the chunk to flush, its encoding,
// and the user callback to invoke once the chunk has been written.
function WriteReq(chunk, encoding, cb) {
  this.chunk = chunk;
  this.encoding = encoding;
  this.callback = cb;
}
// Per-stream bookkeeping for a Writable. Tracks buffering, high-water-mark
// accounting, cork depth, and the lifecycle flags used to emit 'drain',
// 'prefinish', and 'finish'.
function WritableState(options, stream) {
  var Duplex = require('./_stream_duplex');
  options = options || {};
  // the point at which write() starts returning false
  // Note: 0 is a valid value, means that we always return false if
  // the entire buffer is not flushed immediately on write()
  var hwm = options.highWaterMark;
  var defaultHwm = options.objectMode ? 16 : 16 * 1024;
  this.highWaterMark = (hwm || hwm === 0) ? hwm : defaultHwm;
  // object stream flag to indicate whether or not this stream
  // contains buffers or objects.
  this.objectMode = !!options.objectMode;
  // Duplexes may set objectMode for the writable side independently.
  if (stream instanceof Duplex)
    this.objectMode = this.objectMode || !!options.writableObjectMode;
  // cast to ints.
  this.highWaterMark = ~~this.highWaterMark;
  this.needDrain = false;
  // at the start of calling end()
  this.ending = false;
  // when end() has been called, and returned
  this.ended = false;
  // when 'finish' is emitted
  this.finished = false;
  // should we decode strings into buffers before passing to _write?
  // this is here so that some node-core streams can optimize string
  // handling at a lower level.
  var noDecode = options.decodeStrings === false;
  this.decodeStrings = !noDecode;
  // Crypto is kind of old and crusty. Historically, its default string
  // encoding is 'binary' so we have to make this configurable.
  // Everything else in the universe uses 'utf8', though.
  this.defaultEncoding = options.defaultEncoding || 'utf8';
  // not an actual buffer we keep track of, but a measurement
  // of how much we're waiting to get pushed to some underlying
  // socket or file.
  this.length = 0;
  // a flag to see when we're in the middle of a write.
  this.writing = false;
  // when true all writes will be buffered until .uncork() call
  this.corked = 0;
  // a flag to be able to tell if the onwrite cb is called immediately,
  // or on a later tick. We set this to true at first, because any
  // actions that shouldn't happen until "later" should generally also
  // not happen before the first write call.
  this.sync = true;
  // a flag to know if we're processing previously buffered items, which
  // may call the _write() callback in the same tick, so that we don't
  // end up in an overlapped onwrite situation.
  this.bufferProcessing = false;
  // the callback that's passed to _write(chunk,cb)
  this.onwrite = function(er) {
    onwrite(stream, er);
  };
  // the callback that the user supplies to write(chunk,encoding,cb)
  this.writecb = null;
  // the amount that is being written when _write is called.
  this.writelen = 0;
  // FIFO of pending WriteReq objects awaiting _write
  this.buffer = [];
  // number of pending user-supplied write callbacks
  // this must be 0 before 'finish' can be emitted
  this.pendingcb = 0;
  // emit prefinish if the only thing we're waiting for is _write cbs
  // This is relevant for synchronous Transform streams
  this.prefinished = false;
  // True if the error was already emitted and should not be thrown again
  this.errorEmitted = false;
}
// Writable stream constructor. Usable with or without `new`, and safe to
// apply to Duplex instances (which are instanceof Readable, not Writable).
function Writable(options) {
  var Duplex = require('./_stream_duplex');

  // Writable ctor is applied to Duplexes, though they're not
  // instanceof Writable, they're instanceof Readable.
  if (!(this instanceof Writable || this instanceof Duplex))
    return new Writable(options);

  this._writableState = new WritableState(options, this);

  // legacy.
  this.writable = true;

  Stream.call(this);
}
// Otherwise people can pipe Writable streams, which is just wrong.
Writable.prototype.pipe = function() {
  var er = new Error('Cannot pipe. Not readable.');
  this.emit('error', er);
};
// Reject a write() issued after end(): emit 'error' synchronously on the
// stream and invoke the write callback with the same error on the next tick.
function writeAfterEnd(stream, state, cb) {
  var err = new Error('write after end');
  // TODO: defer error events consistently everywhere, not just the cb
  stream.emit('error', err);
  process.nextTick(function deferredCb() {
    cb(err);
  });
}
// If we get something that is not a buffer, string, null, or undefined,
// and we're not in objectMode, then that's an error.
// Otherwise stream chunks are all considered to be of length=1, and the
// watermarks determine how many objects to keep in the buffer, rather than
// how many bytes or characters.
// If we get something that is not a buffer, string, null, or undefined,
// and we're not in objectMode, then that's an error. Otherwise stream
// chunks are all considered to be of length=1, and the watermarks
// determine how many objects to keep in the buffer, rather than how
// many bytes or characters.
function validChunk(stream, state, chunk, cb) {
  if (state.objectMode ||
      util.isBuffer(chunk) ||
      util.isString(chunk) ||
      util.isNullOrUndefined(chunk)) {
    return true;
  }

  var er = new TypeError('Invalid non-string/buffer chunk');
  stream.emit('error', er);
  process.nextTick(function() {
    cb(er);
  });
  return false;
}
// Public write(chunk[, encoding][, cb]). Returns false when the internal
// buffer has reached the high-water mark and the caller should wait for
// the 'drain' event before writing more.
Writable.prototype.write = function(chunk, encoding, cb) {
  var state = this._writableState;
  var ret = false;
  // write(chunk, cb) form: encoding argument was omitted.
  if (util.isFunction(encoding)) {
    cb = encoding;
    encoding = null;
  }
  if (util.isBuffer(chunk))
    encoding = 'buffer';
  else if (!encoding)
    encoding = state.defaultEncoding;
  if (!util.isFunction(cb))
    cb = function() {};
  if (state.ended)
    writeAfterEnd(this, state, cb);
  else if (validChunk(this, state, chunk, cb)) {
    // one more user callback outstanding until this chunk is flushed
    state.pendingcb++;
    ret = writeOrBuffer(this, state, chunk, encoding, cb);
  }
  return ret;
};
// Increase the cork depth: while corked, writes are buffered instead of
// being passed to _write, until a matching uncork() (or end()).
Writable.prototype.cork = function() {
  this._writableState.corked++;
};
// Decrease the cork depth; when it reaches zero and nothing else is in
// flight, flush the buffered writes.
Writable.prototype.uncork = function() {
  var state = this._writableState;

  if (!state.corked)
    return;

  state.corked--;

  var canFlush = !state.writing &&
                 !state.corked &&
                 !state.finished &&
                 !state.bufferProcessing &&
                 state.buffer.length;
  if (canFlush)
    clearBuffer(this, state);
};
// Convert a string chunk into a Buffer ahead of _write, unless the stream
// is in objectMode or decodeStrings was explicitly disabled.
function decodeChunk(state, chunk, encoding) {
  var shouldDecode = !state.objectMode &&
                     state.decodeStrings !== false &&
                     util.isString(chunk);
  return shouldDecode ? new Buffer(chunk, encoding) : chunk;
}
// if we're already writing something, then just put this
// in the queue, and wait our turn. Otherwise, call _write
// If we return false, then we need a drain event, so set that flag.
// if we're already writing something, then just put this
// in the queue, and wait our turn.  Otherwise, call _write
// If we return false, then we need a drain event, so set that flag.
function writeOrBuffer(stream, state, chunk, encoding, cb) {
  chunk = decodeChunk(state, chunk, encoding);
  // decodeChunk may have turned a string into a Buffer.
  if (util.isBuffer(chunk))
    encoding = 'buffer';
  // In objectMode every chunk counts as length 1 against the watermark.
  var len = state.objectMode ? 1 : chunk.length;
  state.length += len;
  var ret = state.length < state.highWaterMark;
  // we must ensure that previous needDrain will not be reset to false.
  if (!ret)
    state.needDrain = true;
  if (state.writing || state.corked)
    state.buffer.push(new WriteReq(chunk, encoding, cb));
  else
    doWrite(stream, state, false, len, chunk, encoding, cb);
  return ret;
}
// Hand one chunk (or, when writev is true, the whole buffered batch) to
// the underlying _write/_writev implementation, recording the in-flight
// length and callback first.
function doWrite(stream, state, writev, len, chunk, encoding, cb) {
  state.writelen = len;
  state.writecb = cb;
  state.writing = true;
  // sync stays true only while _write runs on this tick; onwrite reads it
  // to decide whether its own callback fired synchronously.
  state.sync = true;
  if (writev)
    stream._writev(chunk, state.onwrite);
  else
    stream._write(chunk, encoding, state.onwrite);
  state.sync = false;
}
// A _write reported an error: invoke the user's write callback with it
// (deferred to the next tick if _write failed synchronously, so the
// caller's stack unwinds first) and emit 'error' on the stream.
function onwriteError(stream, state, sync, er, cb) {
  if (sync)
    process.nextTick(function() {
      state.pendingcb--;
      cb(er);
    });
  else {
    state.pendingcb--;
    cb(er);
  }
  // Record that 'error' was emitted so it is not raised a second time.
  stream._writableState.errorEmitted = true;
  stream.emit('error', er);
}
// The in-flight write has completed: subtract its length from the pending
// byte count and clear the per-write bookkeeping fields.
function onwriteStateUpdate(state) {
  state.length -= state.writelen;
  state.writelen = 0;
  state.writing = false;
  state.writecb = null;
}
// Completion callback for every _write/_writev (bound in WritableState).
// Updates accounting, then either reports the error or flushes more
// buffered chunks and runs the user's callback (deferred to the next tick
// when _write completed synchronously).
function onwrite(stream, er) {
  var state = stream._writableState;
  var sync = state.sync;
  // Capture the callback before onwriteStateUpdate nulls it out.
  var cb = state.writecb;
  onwriteStateUpdate(state);
  if (er)
    onwriteError(stream, state, sync, er, cb);
  else {
    // Check if we're actually ready to finish, but don't emit yet
    var finished = needFinish(stream, state);
    if (!finished &&
        !state.corked &&
        !state.bufferProcessing &&
        state.buffer.length) {
      clearBuffer(stream, state);
    }
    if (sync) {
      process.nextTick(function() {
        afterWrite(stream, state, finished, cb);
      });
    } else {
      afterWrite(stream, state, finished, cb);
    }
  }
}
// Runs after a successful write: maybe emit 'drain', invoke the user's
// callback, then check whether the stream can now finish.
function afterWrite(stream, state, finished, cb) {
  // Only emit 'drain' if the stream is not about to finish.
  if (!finished)
    onwriteDrain(stream, state);
  state.pendingcb--;
  cb();
  finishMaybe(stream, state);
}
// Must force callback to be called on nextTick, so that we don't
// emit 'drain' before the write() consumer gets the 'false' return
// value, and has a chance to attach a 'drain' listener.
// Must force callback to be called on nextTick, so that we don't
// emit 'drain' before the write() consumer gets the 'false' return
// value, and has a chance to attach a 'drain' listener.
function onwriteDrain(stream, state) {
  if (state.length !== 0 || !state.needDrain)
    return;
  state.needDrain = false;
  stream.emit('drain');
}
// if there's something in the buffer waiting, then process it
// if there's something in the buffer waiting, then process it
function clearBuffer(stream, state) {
  // Guards against re-entrant calls while we drain the queue.
  state.bufferProcessing = true;
  if (stream._writev && state.buffer.length > 1) {
    // Fast case, write everything using _writev()
    var cbs = [];
    for (var c = 0; c < state.buffer.length; c++)
      cbs.push(state.buffer[c].callback);
    // count the one we are adding, as well.
    // TODO(isaacs) clean this up
    state.pendingcb++;
    doWrite(stream, state, true, state.length, state.buffer, '', function(err) {
      for (var i = 0; i < cbs.length; i++) {
        state.pendingcb--;
        cbs[i](err);
      }
    });
    // Clear buffer
    state.buffer = [];
  } else {
    // Slow case, write chunks one-by-one
    // NOTE: `var c` here is the same hoisted binding as in the fast
    // branch; its value is read again after the loop below.
    for (var c = 0; c < state.buffer.length; c++) {
      var entry = state.buffer[c];
      var chunk = entry.chunk;
      var encoding = entry.encoding;
      var cb = entry.callback;
      var len = state.objectMode ? 1 : chunk.length;
      doWrite(stream, state, false, len, chunk, encoding, cb);
      // if we didn't call the onwrite immediately, then
      // it means that we need to wait until it does.
      // also, that means that the chunk and cb are currently
      // being processed, so move the buffer counter past them.
      if (state.writing) {
        c++;
        break;
      }
    }
    // Keep only the entries not yet handed to doWrite.
    if (c < state.buffer.length)
      state.buffer = state.buffer.slice(c);
    else
      state.buffer.length = 0;
  }
  state.bufferProcessing = false;
}
// Default _write() implementation: subclasses are expected to override
// this; the base class only reports that no implementation exists.
Writable.prototype._write = function(chunk, encoding, cb) {
  var err = new Error('not implemented');
  cb(err);
};

// Optional batched-write hook; null signals "not supported".
Writable.prototype._writev = null;
// Finish the writable side of the stream. Accepts the same optional
// (chunk, encoding) pair as write(), plus an optional callback that is
// invoked once the stream has fully finished.
Writable.prototype.end = function(chunk, encoding, cb) {
  var state = this._writableState;

  // Support the end(cb) and end(chunk, cb) call shapes by shifting
  // arguments into place.
  if (util.isFunction(chunk)) {
    cb = chunk;
    chunk = null;
    encoding = null;
  } else if (util.isFunction(encoding)) {
    cb = encoding;
    encoding = null;
  }

  if (!util.isNullOrUndefined(chunk))
    this.write(chunk, encoding);

  // .end() fully uncorks
  // Collapse any nested cork() calls to a single level so the one
  // uncork() below flushes everything that was buffered.
  if (state.corked) {
    state.corked = 1;
    this.uncork();
  }

  // ignore unnecessary end() calls.
  if (!state.ending && !state.finished)
    endWritable(this, state, cb);
};
// A stream may finish only once end() was called, every byte has been
// flushed, no write is in flight, and 'finish' was not already emitted.
function needFinish(stream, state) {
  var drained = state.length === 0;
  var idle = !state.finished && !state.writing;
  return state.ending && drained && idle;
}
// Emit 'prefinish' exactly once, just before the stream finishes.
function prefinish(stream, state) {
  if (state.prefinished)
    return;
  state.prefinished = true;
  stream.emit('prefinish');
}
// Emit 'finish' when the stream is ready to end; 'prefinish' always
// precedes it. Returns needFinish()'s verdict so callers can react.
function finishMaybe(stream, state) {
  var need = needFinish(stream, state);
  if (need) {
    prefinish(stream, state);
    // Only declare the stream finished once every pending write
    // callback has been invoked.
    if (state.pendingcb === 0) {
      state.finished = true;
      stream.emit('finish');
    }
  }
  return need;
}
// Mark the stream as ending, attempt to finish right away, and wire up
// the user's callback to run once 'finish' has happened.
function endWritable(stream, state, cb) {
  state.ending = true;
  finishMaybe(stream, state);
  if (cb) {
    // Already finished? Still invoke the callback asynchronously.
    state.finished ? process.nextTick(cb) : stream.once('finish', cb);
  }
  state.ended = true;
}
| {
"pile_set_name": "Github"
} |
// |jit-test| debug
setDebug(true);
x = "notset";

// Calls itself exactly once: the outer call records "success" and then
// recurses; the nested call installs a trap on myparent at bytecode
// offset 39 (presumably the recursive call site -- already executed in
// the outer frame -- TODO confirm the offset against the bytecode).
// The trap must not fire for the already-active outer frame, so x must
// remain "success".
function myparent(nested) {
  if (nested) {
    /* myparent call in myparent. */
    trap(myparent, 39, "failure()");
  } else {
    x = "success";
    myparent(true);
  }
}

// Trap handler: would clobber x if the trap (incorrectly) fired.
function failure() { x = "failure"; }

myparent(false);
assertEq(x, "success");
| {
"pile_set_name": "Github"
} |
package amidst.gui.main.viewer.widget;
import amidst.documentation.AmidstThread;
import amidst.documentation.CalledOnlyBy;
import amidst.documentation.NotThreadSafe;
/**
 * Counts calls to {@link #tick()} and derives a frames-per-second value
 * from them, recomputed at most once per configured update interval.
 */
@NotThreadSafe
public class FramerateTimer {
	// Ticks observed since the last FPS recalculation.
	private int tickCounter;
	// Timestamp (ms) of the last FPS recalculation or reset.
	private long lastTime;
	// Minimum interval (ms) between two FPS recalculations.
	private long msPerUpdate;
	private float currentFPS = 0.0f;

	@CalledOnlyBy(AmidstThread.EDT)
	public FramerateTimer(int updatesPerSecond) {
		msPerUpdate = (long) (1000f * (1f / updatesPerSecond));
		reset();
	}

	/** Clears the tick count and restarts the measurement window. */
	@CalledOnlyBy(AmidstThread.EDT)
	public void reset() {
		tickCounter = 0;
		lastTime = System.currentTimeMillis();
	}

	/**
	 * Records one frame; recomputes the FPS once the update interval
	 * has elapsed and then starts a new measurement window.
	 */
	@CalledOnlyBy(AmidstThread.EDT)
	public void tick() {
		tickCounter++;
		long now = System.currentTimeMillis();
		if (now - lastTime > msPerUpdate) {
			currentFPS = calculateCurrentFPS(now);
			tickCounter = 0;
			lastTime = now;
		}
	}

	// FPS = ticks counted / seconds elapsed since the window started.
	@CalledOnlyBy(AmidstThread.EDT)
	private float calculateCurrentFPS(long now) {
		float elapsedSeconds = (now - lastTime) / 1000f;
		return tickCounter / elapsedSeconds;
	}

	/** @return the most recently computed FPS; 0 until the first update. */
	@CalledOnlyBy(AmidstThread.EDT)
	public float getCurrentFPS() {
		return currentFPS;
	}
}
| {
"pile_set_name": "Github"
} |
---
layout: news_post
title: "Ruby 1.8.7-p330 Sürümü Yayınlandı"
author: "Urabe Shyouhei"
lang: tr
---
1.8.7 güncellemelerinin yıllık yayını burada.
### İndirin
* [https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.tar.gz][1]
* [https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.tar.bz2][2]
* [https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.zip][3]
### Sağlamalar
MD5(ruby-1.8.7-p330.tar.gz)= 50a49edb787211598d08e756e733e42e
SHA256(ruby-1.8.7-p330.tar.gz)= 6c261a463b5ffce1dc0920c980218379479dbdf94866d5ed53f1c71f1407c561
BOYUT(ruby-1.8.7-p330.tar.gz)= 4873383
MD5(ruby-1.8.7-p330.tar.bz2)= 2689719fb42c8cf0aa336f8c8933f413
SHA256(ruby-1.8.7-p330.tar.bz2)= 486c73b023b564c07e062e2e61114e81de970913b04fac6798d0fbe8b7723790
BOYUT(ruby-1.8.7-p330.tar.bz2)= 4191156
MD5(ruby-1.8.7-p330.zip)= 537d424438a0fefe40bed91b022592d6
SHA256(ruby-1.8.7-p330.zip)= 18df0d26d10a9be32275ba7b39ffd222a153fcc4669e4b772eab142d7e7bde90
BOYUT(ruby-1.8.7-p330.zip)= 5972777
Ruby projesi bir lisans değişikliği (GPL sürüm 2'den 2 maddeli BSDL'ye)
geçirmiş ve şu an o zamandan beri bir şeylerin yayınlandığı ilk zaman olsa da,
Matz'in dediğine göre bu değişiklik 1.8.7 gibi zaten yayınlanmış sürümlere
ulaşmadı. Yani bu konuda endişelenmenize gerek yok. Eğer zaten 1.8.7
kullanıyorsanız, kullanmaya devam edebilirsiniz.
Teşekkür ederiz, mutlu tatiller.
[1]: https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.tar.gz
[2]: https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.tar.bz2
[3]: https://cache.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p330.zip
| {
"pile_set_name": "Github"
} |
Query for weight samples. The options object is used to set up a query to retrieve relevant samples.
```javascript
let options = {
unit: 'pound', // optional; default 'pound'
startDate: (new Date(2016,4,27)).toISOString(), // required
endDate: (new Date()).toISOString(), // optional; default now
ascending: false, // optional; default false
limit:10, // optional; default no limit
};
```
```javascript
AppleHealthKit.getWeightSamples(options, (err: Object, results: Array<Object>) => {
if (err) {
return;
}
console.log(results)
});
```
```javascript
[
{ value: 160, startDate: '2016-07-09T00:00:00.000-0400', endDate: '2016-07-10T00:00:00.000-0400' },
{ value: 161, startDate: '2016-07-08T00:00:00.000-0400', endDate: '2016-07-09T00:00:00.000-0400' },
{ value: 165, startDate: '2016-07-07T00:00:00.000-0400', endDate: '2016-07-08T00:00:00.000-0400' },
]
``` | {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.