text
stringlengths 2
100k
| meta
dict |
---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
-- Average message length (in characters) over FacebookMessages replies
-- whose `in-response-to` id falls in the half-open range [1, 11).
-- FIX: keyword casing was inconsistent (SELECT vs. select element vs. and);
-- normalized to uppercase keywords throughout.
USE TinySocial;

SELECT ELEMENT strict_avg((
    SELECT ELEMENT LENGTH(message.message)
    FROM FacebookMessages AS message
    WHERE message.`in-response-to` >= 1
      AND message.`in-response-to` < 11
));
| {
"pile_set_name": "Github"
} |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014-2017 [email protected]
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.github.pagehelper.test.basic.dynamic;
import com.github.pagehelper.Page;
import com.github.pagehelper.PageHelper;
import com.github.pagehelper.mapper.UserMapper;
import com.github.pagehelper.model.User;
import com.github.pagehelper.util.MybatisHelper;
import org.apache.ibatis.session.SqlSession;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.assertEquals;
/**
 * Tests targeting the case where the MappedStatement (ms) has been cached.
 */
public class TestDynamicIfOrder {
    /**
     * When calling through the Mapper interface, using PageHelper.startPage
     * works best — no extra paging parameter needs to be added to the
     * Mapper interface methods.
     */
    @Test
    public void testMapperWithStartPage() {
        SqlSession sqlSession = MybatisHelper.getSqlSession();
        UserMapper userMapper = sqlSession.getMapper(UserMapper.class);
        try {
            // Fetch page 1 with 10 rows; the total count is queried by default.
            // Expected ids/totals come from the shared test database fixture.
            PageHelper.startPage(1, 10);
            List<User> list = userMapper.selectIf2ListAndOrder(Arrays.asList(1, 2), Arrays.asList(3, 4), null);
            assertEquals(5, list.get(0).getId());
            assertEquals(10, list.size());
            assertEquals(179, ((Page<?>) list).getTotal());
            // Fetch page 1 with 10 rows; second id list omitted, ordered by "id".
            PageHelper.startPage(1, 10);
            list = userMapper.selectIf2ListAndOrder(Arrays.asList(1, 2), null, "id");
            assertEquals(3, list.get(0).getId());
            assertEquals(10, list.size());
            assertEquals(181, ((Page<?>) list).getTotal());
            // Fetch page 1 with 10 rows; empty first list exercises the dynamic
            // <if> branch that drops the IN clause, ordered by "name".
            PageHelper.startPage(1, 10);
            list = userMapper.selectIf2ListAndOrder(new ArrayList<Integer>(0), null, "name");
            assertEquals(18, list.get(0).getId());
            assertEquals(10, list.size());
            assertEquals(183, ((Page<?>) list).getTotal());
        } finally {
            // Always release the session, even when an assertion fails.
            sqlSession.close();
        }
    }
}
| {
"pile_set_name": "Github"
} |
<?php
namespace Illuminate\Database\Eloquent\Relations;
use Illuminate\Database\Eloquent\Collection;
class MorphMany extends MorphOneOrMany
{
    /**
     * Get the results of the relationship.
     *
     * An unsaved parent has no key yet, so an empty collection is returned
     * instead of running a query constrained on a null key.
     *
     * @return mixed
     */
    public function getResults()
    {
        if (is_null($this->getParentKey())) {
            return $this->related->newCollection();
        }

        return $this->query->get();
    }

    /**
     * Initialize the relation on a set of models.
     *
     * Seeds every parent with an empty related collection so the relation
     * is always defined, even when eager loading finds no rows.
     *
     * @param  array  $models
     * @param  string  $relation
     * @return array
     */
    public function initRelation(array $models, $relation)
    {
        foreach ($models as $parent) {
            $parent->setRelation($relation, $this->related->newCollection());
        }

        return $models;
    }

    /**
     * Match the eagerly loaded results to their parents.
     *
     * A morph-many parent may own several results, so this delegates to the
     * one-to-many matcher provided by the base relation class.
     *
     * @param  array  $models
     * @param  \Illuminate\Database\Eloquent\Collection  $results
     * @param  string  $relation
     * @return array
     */
    public function match(array $models, Collection $results, $relation)
    {
        return $this->matchMany($models, $results, $relation);
    }
}
| {
"pile_set_name": "Github"
} |
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_SIMPLE_BROADPHASE_H
#define BT_SIMPLE_BROADPHASE_H
#include "btOverlappingPairCache.h"
/// Proxy entry used by btSimpleBroadphase; extends the generic proxy with an
/// intrusive free-list link so unused slots in the handle pool can be chained.
struct btSimpleBroadphaseProxy : public btBroadphaseProxy
{
	int m_nextFree; // pool index of the next free slot while this entry is on the free list

	// int m_handleId;

	// Default constructor leaves members uninitialized; pool entries are
	// (re)initialized when a handle is allocated.
	btSimpleBroadphaseProxy() {};

	btSimpleBroadphaseProxy(const btVector3& minpt,const btVector3& maxpt,int shapeType,void* userPtr,short int collisionFilterGroup,short int collisionFilterMask,void* multiSapProxy)
	:btBroadphaseProxy(minpt,maxpt,userPtr,collisionFilterGroup,collisionFilterMask,multiSapProxy)
	{
		(void)shapeType; // shape type is not needed by this brute-force broadphase
	}

	SIMD_FORCE_INLINE void SetNextFree(int next) {m_nextFree = next;}
	SIMD_FORCE_INLINE int GetNextFree() const {return m_nextFree;}
};
///The SimpleBroadphase is just a unit-test for btAxisSweep3, bt32BitAxisSweep3, or btDbvtBroadphase, so use those classes instead.
///It is a brute force aabb culling broadphase based on O(n^2) aabb checks
class btSimpleBroadphase : public btBroadphaseInterface
{
protected:
	int m_numHandles;      // number of active handles
	int m_maxHandles;      // max number of handles
	int m_LastHandleIndex; // highest pool index currently in use; bounds the O(n^2) scan

	btSimpleBroadphaseProxy* m_pHandles; // handles pool (aligned view into m_pHandlesRawPtr)
	void* m_pHandlesRawPtr;              // raw allocation backing the pool
	int m_firstFreeHandle;               // free handles list (head index)

	// Pop a slot off the free list and grow m_LastHandleIndex if needed.
	// Asserts (debug builds only) that the pool is not exhausted.
	int allocHandle()
	{
		btAssert(m_numHandles < m_maxHandles);
		int freeHandle = m_firstFreeHandle;
		m_firstFreeHandle = m_pHandles[freeHandle].GetNextFree();
		m_numHandles++;
		if(freeHandle > m_LastHandleIndex)
		{
			m_LastHandleIndex = freeHandle;
		}
		return freeHandle;
	}

	// Push a proxy's slot back onto the free list and clear its client object.
	// Only shrinks m_LastHandleIndex when the freed slot was the last one;
	// interior frees leave the scan bound unchanged.
	void freeHandle(btSimpleBroadphaseProxy* proxy)
	{
		int handle = int(proxy-m_pHandles); // slot index via pointer arithmetic into the pool
		btAssert(handle >= 0 && handle < m_maxHandles);
		if(handle == m_LastHandleIndex)
		{
			m_LastHandleIndex--;
		}
		proxy->SetNextFree(m_firstFreeHandle);
		m_firstFreeHandle = handle;
		proxy->m_clientObject = 0;
		m_numHandles--;
	}

	btOverlappingPairCache* m_pairCache; // stores overlapping proxy pairs found by this broadphase
	bool m_ownsPairCache;                // true when the cache was allocated by this instance (see ctor)
	int m_invalidPair;

	// Downcasts a generic proxy to the pool entry type; valid because this
	// broadphase only ever creates btSimpleBroadphaseProxy instances.
	inline btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy)
	{
		btSimpleBroadphaseProxy* proxy0 = static_cast<btSimpleBroadphaseProxy*>(proxy);
		return proxy0;
	}

	inline const btSimpleBroadphaseProxy* getSimpleProxyFromProxy(btBroadphaseProxy* proxy) const
	{
		const btSimpleBroadphaseProxy* proxy0 = static_cast<const btSimpleBroadphaseProxy*>(proxy);
		return proxy0;
	}

	///reset broadphase internal structures, to ensure determinism/reproducability
	virtual void resetPool(btDispatcher* dispatcher);

	void validate();

protected:
public:
	// When overlappingPairCache is null the constructor allocates one and
	// m_ownsPairCache is set accordingly (implementation in the .cpp).
	btSimpleBroadphase(int maxProxies=16384,btOverlappingPairCache* overlappingPairCache=0);
	virtual ~btSimpleBroadphase();

	// AABB intersection test between two pool proxies.
	static bool aabbOverlap(btSimpleBroadphaseProxy* proxy0,btSimpleBroadphaseProxy* proxy1);

	// btBroadphaseInterface implementation -------------------------------
	virtual btBroadphaseProxy* createProxy( const btVector3& aabbMin, const btVector3& aabbMax,int shapeType,void* userPtr ,short int collisionFilterGroup,short int collisionFilterMask, btDispatcher* dispatcher,void* multiSapProxy);
	virtual void calculateOverlappingPairs(btDispatcher* dispatcher);
	virtual void destroyProxy(btBroadphaseProxy* proxy,btDispatcher* dispatcher);
	virtual void setAabb(btBroadphaseProxy* proxy,const btVector3& aabbMin,const btVector3& aabbMax, btDispatcher* dispatcher);
	virtual void getAabb(btBroadphaseProxy* proxy,btVector3& aabbMin, btVector3& aabbMax ) const;
	virtual void rayTest(const btVector3& rayFrom,const btVector3& rayTo, btBroadphaseRayCallback& rayCallback, const btVector3& aabbMin=btVector3(0,0,0),const btVector3& aabbMax=btVector3(0,0,0));
	virtual void aabbTest(const btVector3& aabbMin, const btVector3& aabbMax, btBroadphaseAabbCallback& callback);

	btOverlappingPairCache* getOverlappingPairCache()
	{
		return m_pairCache;
	}
	const btOverlappingPairCache* getOverlappingPairCache() const
	{
		return m_pairCache;
	}

	bool testAabbOverlap(btBroadphaseProxy* proxy0,btBroadphaseProxy* proxy1);

	///getAabb returns the axis aligned bounding box in the 'global' coordinate frame
	///will add some transform later
	// This broadphase has no spatial bounds, so report an effectively
	// infinite AABB.
	virtual void getBroadphaseAabb(btVector3& aabbMin,btVector3& aabbMax) const
	{
		aabbMin.setValue(-BT_LARGE_FLOAT,-BT_LARGE_FLOAT,-BT_LARGE_FLOAT);
		aabbMax.setValue(BT_LARGE_FLOAT,BT_LARGE_FLOAT,BT_LARGE_FLOAT);
	}

	virtual void printStats()
	{
//		printf("btSimpleBroadphase.h\n");
//		printf("numHandles = %d, maxHandles = %d\n",m_numHandles,m_maxHandles);
	}
};
#endif //BT_SIMPLE_BROADPHASE_H
| {
"pile_set_name": "Github"
} |
/*
* (C) Copyright 2013
* Gumstix Inc. <www.gumstix.com>
* Maintainer: Ash Charles <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <netdev.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/mmc_host_def.h>
#include <twl6030.h>
#include <asm/emif.h>
#include <asm/arch/clock.h>
#include <asm/arch/gpio.h>
#include <asm/gpio.h>
#include "duovero_mux_data.h"
#define WIFI_EN 43
#if defined(CONFIG_CMD_NET)
#define SMSC_NRESET 45
static void setup_net_chip(void);
#endif
#ifdef CONFIG_USB_EHCI
#include <usb.h>
#include <asm/arch/ehci.h>
#include <asm/ehci-omap.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
const struct omap_sysinfo sysinfo = {
"Board: duovero\n"
};
struct omap4_scrm_regs *const scrm = (struct omap4_scrm_regs *)0x4a30a000;
/**
 * @brief board_init - early board initialization
 *
 * Sets up the GPMC (general purpose memory controller) and records the
 * machine type and kernel boot-parameter address for the OS handoff.
 *
 * @return 0
 */
int board_init(void)
{
	gpmc_init();

	gd->bd->bi_arch_number = MACH_TYPE_OMAP4_DUOVERO;
	/* boot params live just past the start of SDRAM, per OMAP convention */
	gd->bd->bi_boot_params = CONFIG_SYS_SDRAM_BASE + 0x100;

	return 0;
}
/**
 * @brief misc_init_r - Configure board specific configurations
 * such as power configurations, ethernet initialization as phase2 of
 * boot sequence
 *
 * @return 0 (failures are reported on the console but not propagated)
 */
int misc_init_r(void)
{
	int ret = 0;
	u8 val;

	/* wifi setup: first enable 32Khz clock from 6030 pmic */
	/* 0xe1 written to PM register 0xbe; value taken from vendor code —
	 * NOTE(review): confirm against the TWL6030 register manual */
	val = 0xe1;
	ret = i2c_write(TWL6030_CHIP_PM, 0xbe, 1, &val, 1);
	if (ret)
		printf("Failed to enable 32Khz clock to wifi module\n");

	/* then setup WIFI_EN as an output pin and send reset pulse */
	/* drive high-low-high with 1us between edges to reset the module */
	if (!gpio_request(WIFI_EN, "")) {
		gpio_direction_output(WIFI_EN, 0);
		gpio_set_value(WIFI_EN, 1);
		udelay(1);
		gpio_set_value(WIFI_EN, 0);
		udelay(1);
		gpio_set_value(WIFI_EN, 1);
	}

#if defined(CONFIG_CMD_NET)
	setup_net_chip();
#endif
	return 0;
}
/*
 * set_muxconf_regs_essential - program the OMAP4 pad multiplexing
 *
 * Applies the essential and non-essential pin-mux tables (declared in
 * duovero_mux_data.h) to both the core and wakeup pad-config domains.
 */
void set_muxconf_regs_essential(void)
{
	/* essential core-domain pads */
	do_set_mux((*ctrl)->control_padconf_core_base,
		   core_padconf_array_essential,
		   sizeof(core_padconf_array_essential) /
		   sizeof(struct pad_conf_entry));

	/* essential wakeup-domain pads */
	do_set_mux((*ctrl)->control_padconf_wkup_base,
		   wkup_padconf_array_essential,
		   sizeof(wkup_padconf_array_essential) /
		   sizeof(struct pad_conf_entry));

	/* non-essential core-domain pads */
	do_set_mux((*ctrl)->control_padconf_core_base,
		   core_padconf_array_non_essential,
		   sizeof(core_padconf_array_non_essential) /
		   sizeof(struct pad_conf_entry));

	/* non-essential wakeup-domain pads */
	do_set_mux((*ctrl)->control_padconf_wkup_base,
		   wkup_padconf_array_non_essential,
		   sizeof(wkup_padconf_array_non_essential) /
		   sizeof(struct pad_conf_entry));
}
#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_GENERIC_MMC)
/* Register MMC controller 0 with no card-detect/write-protect GPIOs (-1). */
int board_mmc_init(bd_t *bis)
{
	return omap_mmc_init(0, 0, 0, -1, -1);
}
#endif
#if defined(CONFIG_CMD_NET)
#define GPMC_SIZE_16M 0xF
#define GPMC_BASEADDR_MASK 0x3F
#define GPMC_CS_ENABLE 0x1
/*
 * enable_gpmc_net_config - program one GPMC chip-select for the LAN chip
 *
 * @gpmc_config: the six CONFIG1..CONFIG6 register values (bus timings/width)
 * @cs:          GPMC chip-select register block to program
 * @base:        CPU physical base address to map the chip-select at
 * @size:        GPMC chip-select size code (e.g. GPMC_SIZE_16M)
 */
static void enable_gpmc_net_config(const u32 *gpmc_config, struct gpmc_cs *cs,
		u32 base, u32 size)
{
	/* disable the chip-select before reprogramming its timings */
	writel(0, &cs->config7);
	sdelay(1000);
	/* Delay for settling */
	writel(gpmc_config[0], &cs->config1);
	writel(gpmc_config[1], &cs->config2);
	writel(gpmc_config[2], &cs->config3);
	writel(gpmc_config[3], &cs->config4);
	writel(gpmc_config[4], &cs->config5);
	writel(gpmc_config[5], &cs->config6);
	/*
	 * Enable the config. size is the CS size and goes in
	 * bits 11:8. We set bit 6 to enable this CS and the base
	 * address goes into bits 5:0.
	 */
	writel((size << 8) | (GPMC_CS_ENABLE << 6) |
	       ((base >> 24) & GPMC_BASEADDR_MASK),
	       &cs->config7);
	sdelay(2000);
}
/* GPMC CS configuration for an SMSC LAN9221 ethernet controller */
#define NET_LAN9221_GPMC_CONFIG1 0x2a001203
#define NET_LAN9221_GPMC_CONFIG2 0x000a0a02
#define NET_LAN9221_GPMC_CONFIG3 0x00020200
#define NET_LAN9221_GPMC_CONFIG4 0x0a030a03
#define NET_LAN9221_GPMC_CONFIG5 0x000a0a0a
#define NET_LAN9221_GPMC_CONFIG6 0x8a070707
#define NET_LAN9221_GPMC_CONFIG7 0x00000f6c
/* GPMC definitions for LAN9221 chips on expansion boards.
 * Only CONFIG1..CONFIG6 (bus timings) appear here; CONFIG7 is computed by
 * enable_gpmc_net_config() from the base address and size. */
static const u32 gpmc_lan_config[] = {
	NET_LAN9221_GPMC_CONFIG1,
	NET_LAN9221_GPMC_CONFIG2,
	NET_LAN9221_GPMC_CONFIG3,
	NET_LAN9221_GPMC_CONFIG4,
	NET_LAN9221_GPMC_CONFIG5,
	NET_LAN9221_GPMC_CONFIG6,
	/*CONFIG7- computed as params */
};
/*
 * Routine: setup_net_chip
 * Description: Setting up the configuration GPMC registers specific to the
 * Ethernet hardware. Maps the LAN9221 on GPMC CS5 at 0x2C000000 and
 * pulses its reset line.
 */
static void setup_net_chip(void)
{
	enable_gpmc_net_config(gpmc_lan_config, &gpmc_cfg->cs[5], 0x2C000000,
			       GPMC_SIZE_16M);

	/* Make GPIO SMSC_NRESET as output pin and send reset pulse */
	/* high-low-high with 1us between edges, mirroring the wifi reset */
	if (!gpio_request(SMSC_NRESET, "")) {
		gpio_direction_output(SMSC_NRESET, 0);
		gpio_set_value(SMSC_NRESET, 1);
		udelay(1);
		gpio_set_value(SMSC_NRESET, 0);
		udelay(1);
		gpio_set_value(SMSC_NRESET, 1);
	}
}
#endif
/*
 * board_eth_init - register the on-board SMSC LAN9221 ethernet controller
 *
 * @return result of smc911x_initialize(), or 0 when CONFIG_SMC911X is unset
 */
int board_eth_init(bd_t *bis)
{
	int rc = 0;
#ifdef CONFIG_SMC911X
	rc = smc911x_initialize(0, CONFIG_SMC911X_BASE);
#endif
	return rc;
}
#ifdef CONFIG_USB_EHCI
/* USB host wiring: port 1 is routed to an external PHY, ports 2/3 unused. */
static struct omap_usbhs_board_data usbhs_bdata = {
	.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
	.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
};
/*
 * ehci_hcd_init - set up clocks for the USB host and start the OMAP EHCI
 *
 * Enables the UTMI port-1 clock, routes sys_clk (divided by 2) onto
 * auxiliary clock 3, activates the alternate clock source, then hands
 * over to the generic OMAP EHCI initialization.
 *
 * @return 0 on success, negative error from omap_ehci_hcd_init() otherwise
 */
int ehci_hcd_init(int index, enum usb_init_type init,
		struct ehci_hccr **hccr, struct ehci_hcor **hcor)
{
	int ret;
	unsigned int utmi_clk;
	u32 auxclk, altclksrc;

	/* Now we can enable our port clocks */
	utmi_clk = readl((void *)CM_L3INIT_HSUSBHOST_CLKCTRL);
	utmi_clk |= HSUSBHOST_CLKCTRL_CLKSEL_UTMI_P1_MASK;
	setbits_le32((void *)CM_L3INIT_HSUSBHOST_CLKCTRL, utmi_clk);

	auxclk = readl(&scrm->auxclk3);
	/* Select sys_clk */
	auxclk &= ~AUXCLK_SRCSELECT_MASK;
	auxclk |= AUXCLK_SRCSELECT_SYS_CLK << AUXCLK_SRCSELECT_SHIFT;
	/* Set the divisor to 2 */
	auxclk &= ~AUXCLK_CLKDIV_MASK;
	auxclk |= AUXCLK_CLKDIV_2 << AUXCLK_CLKDIV_SHIFT;
	/* Request auxiliary clock #3 */
	auxclk |= AUXCLK_ENABLE_MASK;
	writel(auxclk, &scrm->auxclk3);

	altclksrc = readl(&scrm->altclksrc);
	/* Activate alternate system clock supplier */
	altclksrc &= ~ALTCLKSRC_MODE_MASK;
	altclksrc |= ALTCLKSRC_MODE_ACTIVE;
	/* enable clocks */
	altclksrc |= ALTCLKSRC_ENABLE_INT_MASK | ALTCLKSRC_ENABLE_EXT_MASK;
	writel(altclksrc, &scrm->altclksrc);

	ret = omap_ehci_hcd_init(index, &usbhs_bdata, hccr, hcor);
	if (ret < 0)
		return ret;

	return 0;
}
/* Tear down the OMAP EHCI controller; index is unused by the OMAP driver. */
int ehci_hcd_stop(int index)
{
	return omap_ehci_hcd_stop();
}
#endif
/*
 * get_board_rev() - get board revision
 *
 * Returns a fixed revision code (0x20); this board provides no runtime
 * revision detection.
 */
u32 get_board_rev(void)
{
	return 0x20;
}
| {
"pile_set_name": "Github"
} |
/* eslint-disable no-console */
/* eslint-disable no-param-reassign */
// this is the current snapshot that is being sent to the snapshots array.
// Snapshot node: mirrors one component (or one piece of hook state) in the
// tree that is sent to the snapshots array.
class Tree {
  /**
   * @param {*} component - the component instance, the literal 'root', or —
   *   when useStateInstead is true — a raw state value.
   * @param {boolean} useStateInstead - build a state-only node (used by getCopy).
   * @param {string} [name] - display name for state-only nodes.
   */
  constructor(component, useStateInstead = false, name) {
    if (useStateInstead) {
      // state-only node: carries a snapshot of state plus a display name
      this.state = component;
      this.name = name;
    } else if (component === 'root') {
      // special case: the root gets a sentinel state and a setState that
      // simply invokes the callback immediately
      this.component = { state: 'root', setState: (partial, callback) => callback() };
    } else {
      this.component = component;
    }
    this.children = [];
    // DEV: print() is available for debugging; calling it here is not useful
    // since children are always empty at construction time.
  }

  // Wrap a component in a new Tree node and attach it under this node.
  appendChild(component) {
    const node = new Tree(component);
    this.children.push(node);
    return node;
  }

  // Deep-copies only the state of each component into a fresh state-only tree.
  getCopy(copy = new Tree('root', true)) {
    copy.children = this.children.map((child) => {
      const snapshot = child.component.state || child.component.traversed;
      return new Tree(snapshot, true, child.component.constructor.name);
    });
    // recurse so each copied child receives its own children
    this.children.forEach((child, index) => {
      child.getCopy(copy.children[index]);
    });
    return copy;
  }

  // Dump the tree structure to the console for debugging.
  // DEV: output shape may need revisiting for useState components.
  print() {
    console.log("current tree structure for *this : ", this);
    const childStates = ['children: ', ...this.children.map(
      (child) => child.state || child.component.state,
    )];
    if (this.name) console.log("this.name if exists: ", this.name);
    const ownState = this.state || this.component.state;
    if (childStates.length === 1) {
      console.log(`children length 1. ${this.state ? `this.state: ` : `this.component.state: `}`, ownState);
    } else {
      console.log(`children length !== 1. ${this.state ? `this.state: ` : `this.component.state, children: `}`, ownState, ...childStates);
    }
    this.children.forEach((child) => child.print());
  }
}
module.exports = Tree;
| {
"pile_set_name": "Github"
} |
// Weather-icon glyph codepoints for the Beaufort wind scale (forces 0-11);
// values map to the icon font's private-use-area characters.
$wind-beaufort-0: "\f0b7";
$wind-beaufort-1: "\f0b8";
$wind-beaufort-2: "\f0b9";
$wind-beaufort-3: "\f0ba";
$wind-beaufort-4: "\f0bb";
$wind-beaufort-5: "\f0bc";
$wind-beaufort-6: "\f0bd";
$wind-beaufort-7: "\f0be";
$wind-beaufort-8: "\f0bf";
$wind-beaufort-9: "\f0c0";
$wind-beaufort-10: "\f0c1";
$wind-beaufort-11: "\f0c2";
$wind-beaufort-12: "\f0c3"; | {
"pile_set_name": "Github"
} |
import request from '@/utils/request';
/**
 * Fetch the full list of dashboards.
 * @returns {Promise<*>} response of GET /api/dashboards
 */
export async function listDashboards() {
  return request('/api/dashboards');
}
/**
 * Fetch a single dashboard by id.
 * @param {string|number} id - dashboard identifier, interpolated into the URL.
 * @returns {Promise<*>} response of GET /api/dashboards/:id
 */
export async function queryDashboard(id) {
  return request(`/api/dashboards/${id}`);
}
/**
 * Create a new dashboard.
 * @param {Object} params - dashboard fields to persist as the request body.
 * @returns {Promise<*>} response of POST /api/dashboards
 */
export async function createDashboard(params) {
  // BUG FIX: `const { restParams } = params` read a (nonexistent)
  // `restParams` property, so the request was sent with `body: undefined`.
  // Object-rest forwards every field of `params` instead.
  const { ...restParams } = params;
  return request(`/api/dashboards`, {
    method: 'POST',
    body: restParams,
  });
}
/**
 * Delete a dashboard by id.
 * @param {string|number} id - dashboard identifier, interpolated into the URL.
 * @returns {Promise<*>} response of DELETE /api/dashboards/:id
 */
export async function deleteDashboard(id) {
  return request(`/api/dashboards/${id}`, {
    method: 'DELETE',
  });
}
| {
"pile_set_name": "Github"
} |
package io.quarkus.hibernate.reactive.panache.test;
import javax.persistence.Entity;
import io.quarkus.hibernate.reactive.panache.PanacheEntity;
/**
 * Minimal JPA entity used by the Hibernate Reactive Panache tests.
 * Extends PanacheEntity, which supplies the generated id field.
 */
@Entity
public class MyOtherEntity extends PanacheEntity {
    // Public field per the Panache active-record pattern (accessors are
    // generated at build time).
    public String name;
}
| {
"pile_set_name": "Github"
} |
(ns plastic.worker.editor.model.report
(:refer-clojure :exclude [merge])
(:require [plastic.logging :refer-macros [log info warn error group group-end]]))
; -------------------------------------------------------------------------------------------------------------------
; a report is used to collect information on nodes affected by rewriting operation
; for UI highlighting purposes
;; Builds a report map holding four vectors of node ids, one per change kind.
;; The zero-arity form produces an empty report.
(defn make
  ([] (make [] [] [] []))
  ([modified added removed moved]
   ;; each slot must already be a vector; list-accepting callers go through
   ;; the make-*-list helpers, which coerce with vec
   {:pre [(vector? modified)
          (vector? added)
          (vector? removed)
          (vector? moved)]}
   {:modified modified
    :added added
    :removed removed
    :moved moved}))
;; All single-slot constructors share the same shape: three empty vectors and
;; one populated slot. Build the four-vector argument list once and vary only
;; which slot (0=modified, 1=added, 2=removed, 3=moved) receives the ids.
(defn- make-single-slot-report [slot ids]
  (apply make (assoc [[] [] [] []] slot (vec ids))))

;; single-node constructors
(defn make-modified [node-id]
  (make-single-slot-report 0 [node-id]))

(defn make-added [node-id]
  (make-single-slot-report 1 [node-id]))

(defn make-removed [node-id]
  (make-single-slot-report 2 [node-id]))

(defn make-moved [node-id]
  (make-single-slot-report 3 [node-id]))

;; list constructors (accept any seqable of ids)
(defn make-modified-list [ids]
  (make-single-slot-report 0 ids))

(defn make-added-list [ids]
  (make-single-slot-report 1 ids))

(defn make-removed-list [ids]
  (make-single-slot-report 2 ids))

(defn make-moved-list [ids]
  (make-single-slot-report 3 ids))
;; Combines two reports by concatenating each of their id vectors, slot by
;; slot, preserving order (ids from report precede those from new-report).
(defn merge [report new-report]
  (letfn [(combine [slot]
            (vec (concat (slot report) (slot new-report))))]
    (make (combine :modified)
          (combine :added)
          (combine :removed)
          (combine :moved))))
| {
"pile_set_name": "Github"
} |
$primary: #3759D7 !default; //the base text color from which the rest of the theme derives

//Main Theme Variables
$backgroundColor: #fff !default; //background color of tabulator
$borderColor:#fff !default; //border to tabulator
$textSize:16px !default; //table text size

//header theming
$headerBackgroundColor:#fff !default; //header background color
$headerTextColor:$primary !default; //header text colour
$headerBorderColor:#fff !default; //header border color
$headerSeperatorColor:$primary !default; //header bottom separator color
$headerMargin:4px !default; //padding round header

//column header arrows
$sortArrowActive: $primary !default;
$sortArrowInactive: lighten($primary, 30%) !default;

//row theming
$rowBackgroundColor:#f3f3f3 !default; //table row background color
$rowAltBackgroundColor:#fff !default; //table row background color (even rows)
$rowBorderColor:#fff !default; //table border color
$rowTextColor:#333 !default; //table text color
$rowHoverBackground:#bbb !default; //row background color on hover

$rowSelectedBackground: #9ABCEA !default; //row background color when selected
$rowSelectedBackgroundHover: #769BCC !default;//row background color when selected and hovered

$editBoxColor:#1D68CD !default; //border color for edit boxes
$errorColor:#dd0000 !default; //error indication

//footer theming
$footerBackgroundColor:#fff !default; //footer background color
$footerTextColor:$primary !default; //footer text colour
$footerBorderColor:#aaa !default; //footer border color
$footerSeperatorColor:#999 !default; //footer bottom separator color
$footerActiveColor:$primary !default; //footer bottom active text color

$handleWidth:10px !default; //width of the row handle
$handleColor: $primary !default; //color for odd numbered rows
$handleColorAlt: lighten($primary, 10%) !default; //color for even numbered rows
//Tabulator Containing Element
.tabulator{
position: relative;
border: 1px solid $borderColor;
background-color: $backgroundColor;
overflow:hidden;
font-size:$textSize;
text-align: left;
-webkit-transform: translatez(0);
-moz-transform: translatez(0);
-ms-transform: translatez(0);
-o-transform: translatez(0);
transform: translatez(0);
&[tabulator-layout="fitDataFill"]{
.tabulator-tableHolder{
.tabulator-table{
min-width:100%;
}
}
}
&.tabulator-block-select{
user-select: none;
}
//column header containing element
.tabulator-header{
position:relative;
box-sizing: border-box;
width:100%;
border-bottom:3px solid $headerSeperatorColor;
margin-bottom:4px;
background-color: $headerBackgroundColor;
color: $headerTextColor;
font-weight:bold;
white-space: nowrap;
overflow:hidden;
-moz-user-select: none;
-khtml-user-select: none;
-webkit-user-select: none;
-o-user-select: none;
padding-left:$handleWidth;
font-size: 1.1em;
//individual column header element
.tabulator-col{
display:inline-block;
position:relative;
box-sizing:border-box;
border-right:2px solid $headerBorderColor;
background-color: $headerBackgroundColor;
text-align:left;
vertical-align: bottom;
overflow: hidden;
&.tabulator-moving{
position: absolute;
border:1px solid $headerSeperatorColor;
background:darken($headerBackgroundColor, 10%);
pointer-events: none;
}
//hold content of column header
.tabulator-col-content{
box-sizing:border-box;
position: relative;
padding:4px;
//hold title of column header
.tabulator-col-title{
box-sizing:border-box;
width: 100%;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
vertical-align:bottom;
//element to hold title editor
.tabulator-title-editor{
box-sizing: border-box;
width: 100%;
border:1px solid $primary;
padding:1px;
background: #fff;
font-size: 1em;
color: $primary;
}
}
//column sorter arrow
.tabulator-arrow{
display: inline-block;
position: absolute;
top:9px;
right:8px;
width: 0;
height: 0;
border-left: 6px solid transparent;
border-right: 6px solid transparent;
border-bottom: 6px solid $sortArrowInactive;
}
}
//complex header column group
&.tabulator-col-group{
//gelement to hold sub columns in column group
.tabulator-col-group-cols{
position:relative;
display: flex;
border-top:2px solid $headerSeperatorColor;
overflow: hidden;
.tabulator-col:last-child{
margin-right:-1px;
}
}
}
//hide left resize handle on first column
&:first-child{
.tabulator-col-resize-handle.prev{
display: none;
}
}
//placeholder element for sortable columns
&.ui-sortable-helper{
position: absolute;
background-color: darken($headerBackgroundColor, 10%) !important;
border:1px solid $headerBorderColor;
}
//header filter containing element
.tabulator-header-filter{
position: relative;
box-sizing: border-box;
margin-top:2px;
width:100%;
text-align: center;
//styling adjustment for inbuilt editors
textarea{
height:auto !important;
}
svg{
margin-top: 3px;
}
input{
&::-ms-clear {
width : 0;
height: 0;
}
}
}
//styling child elements for sortable columns
&.tabulator-sortable{
.tabulator-col-title{
padding-right:25px;
}
&:hover{
cursor:pointer;
background-color:darken($headerBackgroundColor, 10%);
}
&[aria-sort="none"]{
.tabulator-col-content .tabulator-arrow{
border-top: none;
border-bottom: 6px solid $sortArrowInactive;
}
}
&[aria-sort="asc"]{
.tabulator-col-content .tabulator-arrow{
border-top: none;
border-bottom: 6px solid $sortArrowActive;
}
}
&[aria-sort="desc"]{
.tabulator-col-content .tabulator-arrow{
border-top: 6px solid $sortArrowActive;
border-bottom: none;
}
}
}
&.tabulator-col-vertical{
.tabulator-col-content{
.tabulator-col-title{
writing-mode: vertical-rl;
text-orientation: mixed;
display:flex;
align-items:center;
justify-content:center;
}
}
&.tabulator-col-vertical-flip{
.tabulator-col-title{
transform: rotate(180deg);
}
}
&.tabulator-sortable{
.tabulator-col-title{
padding-right:0;
padding-top:20px;
}
&.tabulator-col-vertical-flip{
.tabulator-col-title{
padding-right:0;
padding-bottom:20px;
}
}
.tabulator-arrow{
right:calc(50% - 6px);
}
}
}
}
.tabulator-frozen{
display: inline-block;
position: absolute;
// background-color: inherit;
z-index: 10;
&.tabulator-frozen-left{
padding-left: $handleWidth;
border-right:2px solid $rowBorderColor;
}
&.tabulator-frozen-right{
border-left:2px solid $rowBorderColor;
}
}
.tabulator-calcs-holder{
box-sizing:border-box;
min-width:400%;
border-top:2px solid $headerSeperatorColor !important;
background:lighten($headerBackgroundColor, 5%) !important;
.tabulator-row{
padding-left: 0 !important;
background:lighten($headerBackgroundColor, 5%) !important;
.tabulator-col-resize-handle{
display: none;
}
.tabulator-cell{
background:none;
}
}
border-top:1px solid $rowBorderColor;
border-bottom:1px solid $headerBorderColor;
overflow: hidden;
}
.tabulator-frozen-rows-holder{
min-width:400%;
&:empty{
display: none;
}
}
}
//scrolling element to hold table
.tabulator-tableHolder{
position:relative;
width:100%;
white-space: nowrap;
overflow:auto;
-webkit-overflow-scrolling: touch;
&:focus{
outline: none;
}
//default placeholder element
.tabulator-placeholder{
box-sizing:border-box;
display: flex;
align-items:center;
&[tabulator-render-mode="virtual"]{
position: absolute;
top:0;
left:0;
height:100%;
}
width:100%;
span{
display: inline-block;
margin:0 auto;
padding:10px;
color:$primary;
font-weight: bold;
font-size: 20px;
}
}
//element to hold table rows
.tabulator-table{
position:relative;
display:inline-block;
background-color:$rowBackgroundColor;
white-space: nowrap;
overflow:visible;
color:$rowTextColor;
.tabulator-row{
&.tabulator-calcs{
font-weight: bold;
background:darken($rowAltBackgroundColor, 5%) !important;
&.tabulator-calcs-top{
border-bottom:2px solid $headerSeperatorColor;
}
&.tabulator-calcs-bottom{
border-top:2px solid $headerSeperatorColor;
}
}
}
}
}
//column resize handles
.tabulator-col-resize-handle{
position:absolute;
right:0;
top:0;
bottom:0;
width:5px;
&.prev{
left:0;
right:auto;
}
&:hover{
cursor:ew-resize;
}
}
//footer element
.tabulator-footer{
padding:5px 10px;
border-top:1px solid $footerSeperatorColor;
background-color: $footerBackgroundColor;
text-align:right;
color: $footerTextColor;
font-weight:bold;
white-space:nowrap;
user-select:none;
-moz-user-select: none;
-khtml-user-select: none;
-webkit-user-select: none;
-o-user-select: none;
.tabulator-calcs-holder{
box-sizing:border-box;
width:calc(100% + 20px);
margin:-5px -10px 5px -10px;
text-align: left;
background:lighten($footerBackgroundColor, 5%) !important;
border-top:3px solid $headerSeperatorColor !important;
border-bottom:2px solid $headerSeperatorColor !important;
.tabulator-row{
background:lighten($footerBackgroundColor, 5%) !important;
.tabulator-col-resize-handle{
display: none;
}
.tabulator-cell{
background:none;
}
}
border-bottom:1px solid $rowBorderColor;
border-top:1px solid $rowBorderColor;
overflow: hidden;
&:only-child{
margin-bottom:-5px;
border-bottom:none;
border-bottom:none !important;
}
}
//pagination container element
.tabulator-pages{
margin:0 7px;
}
//pagination button
.tabulator-page{
display:inline-block;
margin:0 2px;
border:1px solid $footerBorderColor;
border-radius:3px;
padding:2px 5px;
background:rgba(255,255,255,.2);
color: $footerTextColor;
font-family:inherit;
font-weight:inherit;
font-size:inherit;
&.active{
color:$footerActiveColor;
}
&:disabled{
opacity:.5;
}
&:not(.disabled){
&:hover{
cursor:pointer;
background:rgba(0,0,0,.2);
color:#fff;
}
}
}
}
//holding div that contains loader and covers tabulator element to prevent interaction
.tabulator-loader{
position:absolute;
display: flex;
align-items:center;
top:0;
left:0;
z-index:100;
height:100%;
width:100%;
background:rgba(0,0,0,.4);
text-align:center;
//loading message element
.tabulator-loader-msg{
display:inline-block;
margin:0 auto;
padding:10px 20px;
border-radius:10px;
background:#fff;
font-weight:bold;
font-size:16px;
//loading message
&.tabulator-loading{
border:4px solid #333;
color:#000;
}
//error message
&.tabulator-error{
border:4px solid #D00;
color:#590000;
}
}
}
}
//row element
.tabulator-row{
    position: relative;
    box-sizing: border-box; //duplicate declaration removed
    min-height:$textSize + ($headerMargin * 2);
    background-color: $handleColor;
    padding-left: $handleWidth !important; //leave room for the row handle
    margin-bottom: 2px;

    //zebra stripe even rows
    &:nth-child(even){
        background-color: $handleColorAlt;

        .tabulator-cell{
            background-color: $rowAltBackgroundColor;
        }
    }

    &.tabulator-selectable:hover{
        cursor: pointer;

        .tabulator-cell{
            background-color:$rowHoverBackground;
        }
    }

    &.tabulator-selected{
        .tabulator-cell{
            background-color:$rowSelectedBackground;
        }
    }

    &.tabulator-selected:hover{
        .tabulator-cell{
            background-color:$rowSelectedBackgroundHover;
            cursor: pointer;
        }
    }

    //row while being dragged to a new position
    &.tabulator-moving{
        position: absolute;
        border-top:1px solid $rowBorderColor;
        border-bottom:1px solid $rowBorderColor;
        pointer-events: none !important;
        z-index:15;
    }

    //row resize handles
    .tabulator-row-resize-handle{
        position:absolute;
        right:0;
        bottom:0;
        left:0;
        height:5px;

        &.prev{
            top:0;
            bottom:auto;
        }

        &:hover{
            cursor:ns-resize;
        }
    }

    //frozen column cells
    .tabulator-frozen{
        display: inline-block;
        position: absolute;
        background-color: inherit;
        z-index: 10;

        &.tabulator-frozen-left{
            padding-left: $handleWidth;
            border-right:2px solid $rowBorderColor;
        }

        &.tabulator-frozen-right{
            border-left:2px solid $rowBorderColor;
        }
    }

    //holder for content of columns hidden by responsive layout
    .tabulator-responsive-collapse{
        box-sizing:border-box;
        padding:5px;
        border-top:1px solid $rowBorderColor;
        border-bottom:1px solid $rowBorderColor;

        &:empty{
            display:none;
        }

        table{
            font-size:$textSize;

            tr{
                td{
                    position: relative;

                    &:first-of-type{
                        padding-right:10px;
                    }
                }
            }
        }
    }

    //cell element
    .tabulator-cell{
        display:inline-block;
        position: relative;
        box-sizing:border-box;
        padding:6px 4px;
        border-right:2px solid $rowBorderColor;
        vertical-align:middle;
        white-space:nowrap;
        overflow:hidden;
        text-overflow:ellipsis;
        background-color: $rowBackgroundColor;

        &.tabulator-editing{
            border:1px solid $editBoxColor;
            padding: 0;

            input, select{
                border:1px;
                background:transparent;
            }
        }

        &.tabulator-validation-fail{
            border:1px solid $errorColor;

            input, select{
                border:1px;
                background:transparent;
                color: $errorColor;
            }
        }

        //hide left resize handle on first column
        &:first-child{
            .tabulator-col-resize-handle.prev{
                display: none;
            }
        }

        //movable row handle
        &.tabulator-row-handle{
            display: inline-flex;
            align-items:center;
            -moz-user-select: none;
            -khtml-user-select: none;
            -webkit-user-select: none;
            -o-user-select: none;

            //handle holder
            .tabulator-row-handle-box{
                width:80%;

                //Hamburger element
                .tabulator-row-handle-bar{
                    width:100%;
                    height:3px;
                    margin-top:2px;
                    background:#666;
                }
            }
        }

        //data-tree branch line
        .tabulator-data-tree-branch{
            display:inline-block;
            vertical-align:middle;
            height:9px;
            width:7px;
            margin-top:-9px;
            margin-right:5px;
            border-bottom-left-radius:1px;
            border-left:2px solid $rowBorderColor;
            border-bottom:2px solid $rowBorderColor;
        }

        //data-tree expand/collapse toggle
        .tabulator-data-tree-control{
            display:inline-flex;
            justify-content:center;
            align-items:center;
            vertical-align:middle;
            height:11px;
            width:11px;
            margin-right:5px;
            border:1px solid $rowTextColor;
            border-radius:2px;
            background:rgba(0, 0, 0, .1);
            overflow:hidden;

            &:hover{
                cursor:pointer;
                background:rgba(0, 0, 0, .2);
            }

            //"minus" glyph (horizontal bar only)
            .tabulator-data-tree-control-collapse{
                display:inline-block;
                position: relative;
                height: 7px;
                width: 1px;
                background: transparent;

                &:after {
                    position: absolute;
                    content: "";
                    left: -3px;
                    top: 3px;
                    height: 1px;
                    width: 7px;
                    background: $rowTextColor;
                }
            }

            //"plus" glyph (vertical bar plus horizontal bar)
            .tabulator-data-tree-control-expand{
                display:inline-block;
                position: relative;
                height: 7px;
                width: 1px;
                background: $rowTextColor;

                &:after {
                    position: absolute;
                    content: "";
                    left: -3px;
                    top: 3px;
                    height: 1px;
                    width: 7px;
                    background: $rowTextColor;
                }
            }
        }

        //round toggle button for responsive collapsed content
        .tabulator-responsive-collapse-toggle{
            display: inline-flex;
            align-items:center;
            justify-content:center;
            -moz-user-select: none;
            -khtml-user-select: none;
            -webkit-user-select: none;
            -o-user-select: none;
            height:15px;
            width:15px;
            border-radius:20px;
            background:#666;
            color:$rowBackgroundColor;
            font-weight:bold;
            font-size:1.1em;

            &:hover{
                opacity:.7;
            }

            &.open{
                .tabulator-responsive-collapse-toggle-close{
                    display:initial;
                }

                .tabulator-responsive-collapse-toggle-open{
                    display:none;
                }
            }

            .tabulator-responsive-collapse-toggle-close{
                display:none;
            }
        }
    }

    //row grouping element
    &.tabulator-group{
        box-sizing:border-box;
        border-bottom:2px solid $primary;
        border-top:2px solid $primary;
        padding:5px;
        padding-left:10px;
        background:lighten($primary, 20%);
        font-weight:bold;
        color:#fff; //was "fff", an invalid colour value
        margin-bottom: 2px;
        min-width: 100%;

        &:hover{
            cursor:pointer;
            background-color:rgba(0,0,0,.1);
        }

        //arrow points down while the group is expanded
        &.tabulator-group-visible{
            .tabulator-arrow{
                margin-right:10px;
                border-left: 6px solid transparent;
                border-right: 6px solid transparent;
                border-top: 6px solid $sortArrowActive;
                border-bottom: 0;
            }
        }

        //indent group headers by nesting level (20px per level)
        &.tabulator-group-level-1{
            .tabulator-arrow{
                margin-left:20px;
            }
        }

        &.tabulator-group-level-2{
            .tabulator-arrow{
                margin-left:40px;
            }
        }

        &.tabulator-group-level-3{
            .tabulator-arrow{
                margin-left:60px;
            }
        }

        &.tabulator-group-level-4{
            .tabulator-arrow{
                margin-left:80px;
            }
        }

        &.tabulator-group-level-5{
            .tabulator-arrow{
                margin-left:100px;
            }
        }

        //sorting arrow (points right while collapsed)
        .tabulator-arrow{
            display: inline-block;
            width: 0;
            height: 0;
            margin-right:16px;
            border-top: 6px solid transparent;
            border-bottom: 6px solid transparent;
            border-right: 0;
            border-left: 6px solid $sortArrowActive;
            vertical-align:middle;
        }

        span{
            margin-left:10px;
            color:$primary;
        }
    }
}
//dropdown list used by the select cell editor
.tabulator-edit-select-list{
    position: absolute;
    display:inline-block;
    box-sizing:border-box;
    max-height:200px;
    background:$rowBackgroundColor;
    border:1px solid $rowBorderColor;
    font-size:$textSize;
    overflow-y:auto;
    -webkit-overflow-scrolling: touch; //momentum scrolling on iOS
    z-index: 10000; //sit above the table and any frozen rows/columns

    //individual selectable option
    .tabulator-edit-select-list-item{
        padding:4px;
        color:$rowTextColor;

        //option highlighted via keyboard navigation
        &.active{
            color:$rowBackgroundColor;
            background:$editBoxColor;
        }

        &:hover{
            cursor:pointer;
            color:$rowBackgroundColor;
            background:$editBoxColor;
        }
    }

    //non-selectable option-group header
    .tabulator-edit-select-list-group{
        border-bottom:1px solid $rowBorderColor;
        padding:4px;
        padding-top:6px;
        color:$rowTextColor;
        font-weight:bold;
    }
}
"pile_set_name": "Github"
} |
//This package is copied from Go library text/template.
//The original private functions eq, ge, gt, le, lt, and ne
//are exported as public functions.
package template
import (
"bytes"
"errors"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// Exported aliases for the comparison helpers below. The originals in
// text/template are private; this copy re-exports them for external callers.
var Equal = eq
var GreaterEqual = ge
var Greater = gt
var LessEqual = le
var Less = lt
var NotEqual = ne
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
// The signature requirement is enforced by goodFunc when the map is installed.
type FuncMap map[string]interface{}
// builtins lists the functions available to every template by default.
var builtins = FuncMap{
	"and":      and,
	"call":     call,
	"html":     HTMLEscaper,
	"index":    index,
	"js":       JSEscaper,
	"len":      length,
	"not":      not,
	"or":       or,
	"print":    fmt.Sprint,
	"printf":   fmt.Sprintf,
	"println":  fmt.Sprintln,
	"urlquery": URLQueryEscaper,

	// Comparisons
	"eq": eq, // ==
	"ge": ge, // >=
	"gt": gt, // >
	"le": le, // <=
	"lt": lt, // <
	"ne": ne, // !=
}

// builtinFuncs caches the reflect.Value form of builtins so findFunction
// does not have to convert on every lookup.
var builtinFuncs = createValueFuncs(builtins)
// createValueFuncs turns a FuncMap into a map[string]reflect.Value.
// It panics (via addValueFuncs) on non-function values or bad signatures.
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
	valueMap := map[string]reflect.Value{}
	addValueFuncs(valueMap, funcMap)
	return valueMap
}
// addValueFuncs adds to out the functions in in, converting them to
// reflect.Values. It panics if an entry is not a function or if its result
// signature is not accepted by goodFunc.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
	for name, fn := range in {
		fnValue := reflect.ValueOf(fn)
		if fnValue.Kind() != reflect.Func {
			panic("value for " + name + " not a function")
		}
		if !goodFunc(fnValue.Type()) {
			panic(fmt.Errorf("can't install method/function %q with %d results", name, fnValue.Type().NumOut()))
		}
		out[name] = fnValue
	}
}
// addFuncs copies every entry of in into out. It does no checking of the
// input - call addValueFuncs first.
func addFuncs(out, in FuncMap) {
	for key, value := range in {
		out[key] = value
	}
}
// goodFunc checks that the function or method has the right result signature:
// exactly one result, or two results where the second is an error.
func goodFunc(typ reflect.Type) bool {
	switch typ.NumOut() {
	case 1:
		return true
	case 2:
		return typ.Out(1) == errorType
	default:
		return false
	}
}
// findFunction looks a function up by name in the global builtin map.
// It reports whether a valid function was found.
func findFunction(name string) (reflect.Value, bool) {
	if fn, ok := builtinFuncs[name]; ok && fn.IsValid() {
		return fn, true
	}
	return reflect.Value{}, false
}
// Indexing.

// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
// Pointers are dereferenced at every step (via indirect); indexing through
// a nil pointer is an error.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
	v := reflect.ValueOf(item)
	for _, i := range indices {
		index := reflect.ValueOf(i)
		var isNil bool
		if v, isNil = indirect(v); isNil {
			return nil, fmt.Errorf("index of nil pointer")
		}
		switch v.Kind() {
		case reflect.Array, reflect.Slice, reflect.String:
			// Sequence types: the index must be an integer within range.
			var x int64
			switch index.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				x = index.Int()
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				x = int64(index.Uint())
			default:
				return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
			}
			if x < 0 || x >= int64(v.Len()) {
				return nil, fmt.Errorf("index out of range: %d", x)
			}
			v = v.Index(int(x))
		case reflect.Map:
			// An untyped nil key becomes the zero value of the map's key type.
			if !index.IsValid() {
				index = reflect.Zero(v.Type().Key())
			}
			if !index.Type().AssignableTo(v.Type().Key()) {
				return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
			}
			if x := v.MapIndex(index); x.IsValid() {
				v = x
			} else {
				// Missing key: yield the zero value of the element type,
				// matching Go's own map-indexing semantics.
				v = reflect.Zero(v.Type().Elem())
			}
		default:
			return nil, fmt.Errorf("can't index item of type %s", v.Type())
		}
	}
	return v.Interface(), nil
}
// Length

// length returns the length of the item, with an error if it has no defined
// length. Pointers are dereferenced first; a nil pointer is an error.
func length(item interface{}) (int, error) {
	v, isNil := indirect(reflect.ValueOf(item))
	if isNil {
		return 0, fmt.Errorf("len of nil pointer")
	}
	switch v.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
		return v.Len(), nil
	}
	return 0, fmt.Errorf("len of type %s", v.Type())
}
// Function invocation

// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
// Arguments are checked for assignability to the function's parameter types,
// with variadic parameters matched against the variadic element type.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
	v := reflect.ValueOf(fn)
	typ := v.Type()
	if typ.Kind() != reflect.Func {
		return nil, fmt.Errorf("non-function of type %s", typ)
	}
	if !goodFunc(typ) {
		return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
	}
	numIn := typ.NumIn()
	var dddType reflect.Type
	if typ.IsVariadic() {
		// A variadic function needs at least the fixed parameters.
		if len(args) < numIn-1 {
			return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
		}
		dddType = typ.In(numIn - 1).Elem()
	} else {
		if len(args) != numIn {
			return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
		}
	}
	argv := make([]reflect.Value, len(args))
	for i, arg := range args {
		value := reflect.ValueOf(arg)
		// Compute the expected type. Clumsy because of variadics.
		var argType reflect.Type
		if !typ.IsVariadic() || i < numIn-1 {
			argType = typ.In(i)
		} else {
			argType = dddType
		}
		// An untyped nil argument becomes the zero value of nilable types.
		if !value.IsValid() && canBeNil(argType) {
			value = reflect.Zero(argType)
		}
		if !value.Type().AssignableTo(argType) {
			return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
		}
		argv[i] = value
	}
	result := v.Call(argv)
	// Unpack a non-nil trailing error result into the error return.
	if len(result) == 2 && !result[1].IsNil() {
		return result[0].Interface(), result[1].Interface().(error)
	}
	return result[0].Interface(), nil
}
// Boolean logic.

// truth reports the template truth value of a, ignoring any error
// from isTrue (invalid values are simply false).
func truth(a interface{}) bool {
	t, _ := isTrue(reflect.ValueOf(a))
	return t
}
// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
	current := arg0
	for _, next := range args {
		if !truth(current) {
			return current
		}
		current = next
	}
	return current
}
// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
	current := arg0
	for _, next := range args {
		if truth(current) {
			return current
		}
		current = next
	}
	return current
}
// not returns the Boolean negation of its argument.
// An invalid value (untyped nil) negates to true.
func not(arg interface{}) (truth bool) {
	truth, _ = isTrue(reflect.ValueOf(arg))
	return !truth
}
// Comparison.

// TODO: Perhaps allow comparison between signed and unsigned integers.

var (
	errBadComparisonType = errors.New("invalid type for comparison")
	errBadComparison     = errors.New("incompatible types for comparison")
	errNoComparison      = errors.New("missing argument for comparison")
)

// kind is the coarse category used to decide which comparison to apply.
type kind int

const (
	invalidKind kind = iota
	boolKind
	complexKind
	intKind
	floatKind
	integerKind // NOTE(review): appears unused in this file - TODO confirm before removing
	stringKind
	uintKind
)
// basicKind maps a value's reflect.Kind to the coarse comparison kind,
// or errBadComparisonType for types that cannot be compared.
// Relies on the documented ordering of reflect.Kind constants
// (Int..Int64 and Uint..Uintptr are contiguous ranges).
func basicKind(v reflect.Value) (kind, error) {
	k := v.Kind()
	switch {
	case k == reflect.Bool:
		return boolKind, nil
	case k >= reflect.Int && k <= reflect.Int64:
		return intKind, nil
	case k >= reflect.Uint && k <= reflect.Uintptr:
		return uintKind, nil
	case k == reflect.Float32 || k == reflect.Float64:
		return floatKind, nil
	case k == reflect.Complex64 || k == reflect.Complex128:
		return complexKind, nil
	case k == reflect.String:
		return stringKind, nil
	}
	return invalidKind, errBadComparisonType
}
// eq evaluates the comparison a == b || a == c || ...
// It returns true as soon as any right-hand argument equals arg1.
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
	v1 := reflect.ValueOf(arg1)
	k1, err := basicKind(v1)
	if err != nil {
		return false, err
	}
	if len(arg2) == 0 {
		return false, errNoComparison
	}
	for _, arg := range arg2 {
		v2 := reflect.ValueOf(arg)
		k2, err := basicKind(v2)
		if err != nil {
			return false, err
		}
		truth := false
		if k1 != k2 {
			// Special case: Can compare integer values regardless of type's sign.
			// A negative signed value can never equal an unsigned one.
			switch {
			case k1 == intKind && k2 == uintKind:
				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
			case k1 == uintKind && k2 == intKind:
				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
			default:
				return false, errBadComparison
			}
		} else {
			switch k1 {
			case boolKind:
				truth = v1.Bool() == v2.Bool()
			case complexKind:
				truth = v1.Complex() == v2.Complex()
			case floatKind:
				truth = v1.Float() == v2.Float()
			case intKind:
				truth = v1.Int() == v2.Int()
			case stringKind:
				truth = v1.String() == v2.String()
			case uintKind:
				truth = v1.Uint() == v2.Uint()
			default:
				// Unreachable: basicKind only returns the kinds above.
				panic("invalid kind")
			}
		}
		if truth {
			return true, nil
		}
	}
	return false, nil
}
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
	// != is the inverse of ==.
	equal, err := eq(arg1, arg2)
	return !equal, err
}

// lt evaluates the comparison a < b.
// Bools and complex numbers are not ordered and yield errBadComparisonType.
func lt(arg1, arg2 interface{}) (bool, error) {
	v1 := reflect.ValueOf(arg1)
	k1, err := basicKind(v1)
	if err != nil {
		return false, err
	}
	v2 := reflect.ValueOf(arg2)
	k2, err := basicKind(v2)
	if err != nil {
		return false, err
	}
	truth := false
	if k1 != k2 {
		// Special case: Can compare integer values regardless of type's sign.
		switch {
		case k1 == intKind && k2 == uintKind:
			// A negative signed value is less than any unsigned value.
			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
		case k1 == uintKind && k2 == intKind:
			// An unsigned value is never less than a negative signed value.
			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
		default:
			return false, errBadComparison
		}
	} else {
		switch k1 {
		case boolKind, complexKind:
			return false, errBadComparisonType
		case floatKind:
			truth = v1.Float() < v2.Float()
		case intKind:
			truth = v1.Int() < v2.Int()
		case stringKind:
			truth = v1.String() < v2.String()
		case uintKind:
			truth = v1.Uint() < v2.Uint()
		default:
			// Unreachable: basicKind only returns the kinds above.
			panic("invalid kind")
		}
	}
	return truth, nil
}

// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
	// <= is < or ==.
	lessThan, err := lt(arg1, arg2)
	if lessThan || err != nil {
		return lessThan, err
	}
	return eq(arg1, arg2)
}

// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
	// > is the inverse of <=.
	lessOrEqual, err := le(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessOrEqual, nil
}

// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
	// >= is the inverse of <.
	lessThan, err := lt(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessThan, nil
}
// HTML escaping.

// Pre-built replacement byte slices for the five HTML-significant characters.
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
// Runs of non-special bytes are written in single chunks; errors from w are ignored.
func HTMLEscape(w io.Writer, b []byte) {
	last := 0
	for i, c := range b {
		var html []byte
		switch c {
		case '"':
			html = htmlQuot
		case '\'':
			html = htmlApos
		case '&':
			html = htmlAmp
		case '<':
			html = htmlLt
		case '>':
			html = htmlGt
		default:
			continue
		}
		// Flush the unescaped run preceding this special byte, then its escape.
		w.Write(b[last:i])
		w.Write(html)
		last = i + 1
	}
	// Flush the trailing unescaped run.
	w.Write(b[last:])
}
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
	// Avoid allocation if we can.
	if !strings.ContainsAny(s, `'"&<>`) {
		return s
	}
	var buf bytes.Buffer
	HTMLEscape(&buf, []byte(s))
	return buf.String()
}
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
	return HTMLEscapeString(evalArgs(args))
}
// JavaScript escaping.

// Pre-built replacement byte slices for JavaScript string escaping.
var (
	jsLowUni = []byte(`\u00`) // prefix for \u00XX control-character escapes
	hex      = []byte("0123456789ABCDEF")

	jsBackslash = []byte(`\\`)
	jsApos      = []byte(`\'`)
	jsQuot      = []byte(`\"`)
	jsLt        = []byte(`\x3C`)
	jsGt        = []byte(`\x3E`)
)
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
// ASCII specials get short escapes, other ASCII controls get \u00XX, and
// non-printable multi-byte runes get \uXXXX.
func JSEscape(w io.Writer, b []byte) {
	last := 0
	for i := 0; i < len(b); i++ {
		c := b[i]

		if !jsIsSpecial(rune(c)) {
			// fast path: nothing to do
			continue
		}
		// Flush the unescaped run preceding this byte.
		w.Write(b[last:i])

		if c < utf8.RuneSelf {
			// Quotes, slashes and angle brackets get quoted.
			// Control characters get written as \u00XX.
			switch c {
			case '\\':
				w.Write(jsBackslash)
			case '\'':
				w.Write(jsApos)
			case '"':
				w.Write(jsQuot)
			case '<':
				w.Write(jsLt)
			case '>':
				w.Write(jsGt)
			default:
				// Emit \u00 followed by the high and low hex nibbles of c.
				w.Write(jsLowUni)
				t, b := c>>4, c&0x0f
				w.Write(hex[t : t+1])
				w.Write(hex[b : b+1])
			}
		} else {
			// Unicode rune.
			r, size := utf8.DecodeRune(b[i:])
			if unicode.IsPrint(r) {
				w.Write(b[i : i+size])
			} else {
				fmt.Fprintf(w, "\\u%04X", r)
			}
			// Skip the remaining bytes of this rune.
			i += size - 1
		}
		last = i + 1
	}
	// Flush the trailing unescaped run.
	w.Write(b[last:])
}
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
	// Avoid allocation if we can.
	if strings.IndexFunc(s, jsIsSpecial) == -1 {
		return s
	}
	buf := new(bytes.Buffer)
	JSEscape(buf, []byte(s))
	return buf.String()
}
func jsIsSpecial(r rune) bool {
switch r {
case '\\', '\'', '"', '<', '>':
return true
}
return r < ' ' || utf8.RuneSelf <= r
}
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
	return JSEscapeString(evalArgs(args))
}

// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
	return url.QueryEscape(evalArgs(args))
}
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
//	fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
	ok := false
	var s string
	// Fast path for simple common case.
	if len(args) == 1 {
		s, ok = args[0].(string)
	}
	if !ok {
		for i, arg := range args {
			a, ok := printableValue(reflect.ValueOf(arg))
			if ok {
				args[i] = a
			} // else let fmt do its thing
		}
		s = fmt.Sprint(args...)
	}
	return s
}
| {
"pile_set_name": "Github"
} |
//===-- InstructionSnippetGenerator.h ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Defines helper classes to generate code snippets, in particular register
/// assignment.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_TOOLS_LLVM_EXEGESIS_INSTRUCTIONSNIPPETGENERATOR_H
#define LLVM_TOOLS_LLVM_EXEGESIS_INSTRUCTIONSNIPPETGENERATOR_H
#include "OperandGraph.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include <functional>
#include <set>
#include <vector>
namespace exegesis {
// A Variable represents a set of possible values that we need to choose from.
// It may represent one or more explicit operands that are tied together, or one
// implicit operand.
class Variable final {
public:
  // Whether the operand(s) behind this variable are read / written / registers.
  bool IsUse = false;
  bool IsDef = false;
  bool IsReg = false;

  // Lists all the explicit operand indices that are tied to this variable.
  // Empty if Variable represents an implicit operand.
  llvm::SmallVector<size_t, 8> ExplicitOperands;

  // - In case of explicit operands, PossibleRegisters is the expansion of the
  // operands's RegClass registers. Please note that tied together explicit
  // operands share the same RegClass.
  // - In case of implicit operands, PossibleRegisters is a singleton MCPhysReg.
  llvm::SmallSetVector<llvm::MCPhysReg, 16> PossibleRegisters;

  // Prints a debug representation to OS.
  // If RegInfo is null, register names won't get resolved.
  void print(llvm::raw_ostream &OS, const llvm::MCRegisterInfo *RegInfo) const;
};
// Builds a model of implicit and explicit operands for InstrDesc into
// Variables. Registers in ReservedRegs are excluded from the possible choices.
llvm::SmallVector<Variable, 8>
getVariables(const llvm::MCRegisterInfo &RegInfo,
             const llvm::MCInstrDesc &InstrDesc,
             const llvm::BitVector &ReservedRegs);

// A simple object to represent a Variable assignment.
struct VariableAssignment {
  VariableAssignment(size_t VarIdx, llvm::MCPhysReg AssignedReg);

  size_t VarIdx;
  llvm::MCPhysReg AssignedReg;

  bool operator==(const VariableAssignment &) const;
  bool operator<(const VariableAssignment &) const;
};

// An AssignmentChain is a set of assignments realizing a dependency chain.
// We inherit from std::set to leverage uniqueness of elements.
using AssignmentChain = std::set<VariableAssignment>;

// Debug function to print an assignment chain.
void dumpAssignmentChain(const llvm::MCRegisterInfo &RegInfo,
                         const AssignmentChain &Chain);

// Inserts Variables into a graph representing register aliasing and finds all
// the possible dependency chains for this instruction, i.e. all the possible
// assignments of operands that would make execution of the instruction
// sequential.
std::vector<AssignmentChain>
computeSequentialAssignmentChains(const llvm::MCRegisterInfo &RegInfo,
                                  llvm::ArrayRef<Variable> Vars);

// Selects a random configuration leading to a dependency chain.
// The result is a vector of the same size as `Vars`.
// `RandomIndexForSize` is a functor giving a random value in [0, arg[.
std::vector<llvm::MCPhysReg>
getRandomAssignment(llvm::ArrayRef<Variable> Vars,
                    llvm::ArrayRef<AssignmentChain> Chains,
                    const std::function<size_t(size_t)> &RandomIndexForSize);

// Finds an assignment of registers to variables such that no two variables are
// assigned the same register.
// The result is a vector of the same size as `Vars`, or `{}` if the
// assignment is not feasible.
std::vector<llvm::MCPhysReg>
getExclusiveAssignment(llvm::ArrayRef<Variable> Vars);

// Finds a greedy assignment of registers to variables. Each variable gets
// assigned the first possible register that is not already assigned to a
// previous variable. If there is no such register, the variable gets assigned
// the first possible register.
// The result is a vector of the same size as `Vars`, or `{}` if the
// assignment is not feasible.
std::vector<llvm::MCPhysReg>
getGreedyAssignment(llvm::ArrayRef<Variable> Vars);

// Generates an LLVM MCInst with the previously computed variables.
// Immediate values are set to 1.
llvm::MCInst generateMCInst(const llvm::MCInstrDesc &InstrDesc,
                            llvm::ArrayRef<Variable> Vars,
                            llvm::ArrayRef<llvm::MCPhysReg> VarRegs);
} // namespace exegesis
#endif // LLVM_TOOLS_LLVM_EXEGESIS_INSTRUCTIONSNIPPETGENERATOR_H
| {
"pile_set_name": "Github"
} |
<?php
/**
* LICENSE: The MIT License (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* https://github.com/azure/azure-storage-php/LICENSE
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* PHP version 5
*
* @category Microsoft
* @package MicrosoftAzure\Storage\Tests\Unit\Common\Internal\Middlewares
* @author Azure Storage PHP SDK <[email protected]>
* @copyright 2017 Microsoft Corporation
* @license https://github.com/azure/azure-storage-php/LICENSE
* @link https://github.com/azure/azure-storage-php
*/
namespace MicrosoftAzure\Storage\Tests\Unit\Common\Internal\Middlewares;
use MicrosoftAzure\Storage\Common\Internal\Middlewares\CommonRequestMiddleware;
use MicrosoftAzure\Storage\Common\Internal\Authentication\SharedKeyAuthScheme;
use MicrosoftAzure\Storage\Common\Internal\Resources;
use MicrosoftAzure\Storage\Tests\Framework\ReflectionTestBase;
use GuzzleHttp\Exception\RequestException;
use GuzzleHttp\Psr7\Request;
/**
* Unit tests for class CommonRequestMiddleware
*
* @category Microsoft
* @package MicrosoftAzure\Storage\Tests\Unit\Common\Internal\Middlewares
* @author Azure Storage PHP SDK <[email protected]>
* @copyright 2017 Microsoft Corporation
* @license https://github.com/azure/azure-storage-php/LICENSE
* @link https://github.com/azure/azure-storage-php
*/
class CommonRequestMiddlewareTest extends ReflectionTestBase
{
    /**
     * Verifies that onRequest:
     * - adds every custom header passed to the middleware constructor,
     * - produces the same Authorization header as signing the request directly,
     * - adds a Date header falling inside the test's execution time window.
     */
    public function testOnRequest()
    {
        // Setup
        $beginTime = time();
        $headers = self::getTestHeaderArray();
        $authScheme = new SharedKeyAuthScheme('accountname', 'accountkey');
        // Construct
        $middleware = new CommonRequestMiddleware($authScheme, '2016-05-31', '', $headers);
        // onRequest is protected, so invoke it through reflection.
        $onRequest = self::getMethod('onRequest', $middleware);
        $request = new Request('GET', 'http://www.bing.com');
        // Apply middleware
        $newRequest = $onRequest->invokeArgs($middleware, array($request));
        // Prepare expected
        $savedHeaders = array();
        foreach ($newRequest->getHeaders() as $key => $value) {
            $savedHeaders[$key] = $value[0];
        }
        // Re-sign the request (minus the Authorization header the middleware
        // added) to compute the expected signature independently.
        $requestToSign = $newRequest->withoutHeader(Resources::AUTHENTICATION);
        $signedRequest = $authScheme->signRequest($requestToSign);
        // Assert
        $this->assertTrue(
            (array_intersect($savedHeaders, $headers) === $headers),
            'Did not add proper headers.'
        );
        $this->assertTrue(
            $signedRequest->getHeaders() === $newRequest->getHeaders(),
            'Failed to create same signed request.'
        );
        // The Date header must have been stamped between $beginTime and now.
        $endTime = time();
        $requestTime = strtotime($newRequest->getHeaders()[Resources::DATE][0]);
        $this->assertTrue(
            $requestTime >= $beginTime && $requestTime <= $endTime,
            'Did not add proper date header.'
        );
    }

    /**
     * Fixture: arbitrary custom headers the middleware should pass through.
     */
    private static function getTestHeaderArray()
    {
        return array(
            'testKey1' => 'testValue1',
            'testKey2' => 'testValue2',
            'testKey3' => 'testValue3',
        );
    }
}
| {
"pile_set_name": "Github"
} |
context("xy_to_data")

# x is a numeric vector
test_that("x is a numeric vector and y is null", {
  # With only a numeric x, indices become x and the values become y.
  data <- xy_to_data(x = 10:1, y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 10:1, row.names = as.character(1:10)))
  # Unique names() of x are used as row names.
  data <- xy_to_data(x = setNames(10:1, letters[1:10]), y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 10:1, row.names = letters[1:10]))
  # duplicate names() are not used as rownames
  data <- xy_to_data(x = setNames(10:1, rep("a", 10)), y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 10:1, row.names = as.character(1:10)))
})
test_that("x is a numeric vector and y is a numeric vector", {
  data <- xy_to_data(x = 10:1, y = 1:10)
  expect_equal(data, data.frame(x = 10:1, y = 1:10, row.names = as.character(1:10)))
  # Mismatched lengths are rejected.
  expect_error(xy_to_data(x = 10:1, y = 1:5), "x has 10 elements, but y has 5")
})

test_that("x is a numeric vector and y is a character vector", {
  data <- xy_to_data(x = 10:1, y = letters[1:10])
  expect_equal(data, data.frame(x = 10:1, y = letters[1:10], row.names = as.character(1:10)))
  data <- xy_to_data(x = 10:1, y = as.character(1:10))
  expect_equal(data, data.frame(x = 10:1, y = as.character(1:10), row.names = as.character(1:10)))
})

test_that("x is a numeric vector and y is a factor", {
  data <- xy_to_data(x = 10:1, y = factor(letters[1:10]))
  expect_equal(data, data.frame(x = 10:1, y = factor(letters[1:10]), row.names = as.character(1:10)))
  data <- xy_to_data(x = 10:1, y = factor(as.character(1:10)))
  expect_equal(data, data.frame(x = 10:1, y = factor(as.character(1:10)), row.names = as.character(1:10)))
})
# x is a character vector
test_that("x is a character vector and y is null", {
  expect_error(xy_to_data(x = c("a", "b", "c"), y = NULL), "y cannot be NULL when x is a character vector or a factor")
})

test_that("x is a character vector and y is a numeric vector", {
  data <- xy_to_data(x = letters[1:10], y = 1:10)
  expect_equal(data, data.frame(x = letters[1:10], y = 1:10, row.names = as.character(1:10)))
  data <- xy_to_data(x = as.character(1:10), y = 1:10)
  expect_equal(data, data.frame(x = as.character(1:10), y = 1:10, row.names = as.character(1:10)))
  # Names of x become row names.
  data <- xy_to_data(x = setNames(as.character(1:10), letters[1:10]), y = 1:10)
  expect_equal(data, data.frame(x = as.character(1:10), y = 1:10, row.names = letters[1:10]))
})

test_that("x is a character vector and y is a character vector", {
  data <- xy_to_data(x = letters[1:10], y = as.character(1:10))
  expect_equal(data, data.frame(x = letters[1:10], y = as.character(1:10), row.names = as.character(1:10)))
  data <- xy_to_data(x = as.character(1:10), y = letters[1:10])
  expect_equal(data, data.frame(x = as.character(1:10), y = letters[1:10], row.names = as.character(1:10)))
})

test_that("x is a character vector and y is a factor", {
  data <- xy_to_data(x = letters[1:10], y = factor(1:10))
  expect_equal(data, data.frame(x = letters[1:10], y = factor(1:10), row.names = as.character(1:10)))
  data <- xy_to_data(x = as.character(1:10), y = factor(letters[1:10]))
  expect_equal(data, data.frame(x = as.character(1:10), y = factor(letters[1:10]), row.names = as.character(1:10)))
})

test_that("x is a character vector and y is a data frame with as many numeric columns as elements in x", {
  # Columns of y are stacked; x labels each column's run of values.
  data <- xy_to_data(x = letters[1:3], y = data.frame("1" = 1:10, "2" = 11:20, "3" = 21:30))
  expect_equal(data, data.frame(x = rep(letters[1:3], each = 10), y = 1:30, row.names = as.character(1:30)))
  expect_error(xy_to_data(x = letters[1:3], y = data.frame("1" = 1:10, "2" = 11:20)), "x has 3 elements, but y has 2 columns")
})

test_that("x is a character vector and y is a matrix with as many numeric columns as elements in x", {
  data <- xy_to_data(x = letters[1:3], y = matrix(1:30, ncol = 3))
  expect_equal(data, data.frame(x = rep(letters[1:3], each = 10), y = 1:30, row.names = as.character(1:30)))
  expect_error(xy_to_data(x = letters[1:3], y = matrix(1:30, ncol = 2)), "x has 3 elements, but y has 2 columns")
})
# x is a factor
test_that("x is a factor and y is null", {
  # Fixed: this test previously passed a character vector, duplicating the
  # character-x test above instead of exercising the factor branch.
  expect_error(xy_to_data(x = factor(c("a", "b", "c")), y = NULL), "y cannot be NULL when x is a character vector or a factor")
})

test_that("x is a factor and y is a numeric vector", {
  data <- xy_to_data(x = factor(letters[1:10]), y = 1:10)
  expect_equal(data, data.frame(x = factor(letters[1:10]), y = 1:10, row.names = as.character(1:10)))
  data <- xy_to_data(x = factor(as.character(1:10)), y = 1:10)
  expect_equal(data, data.frame(x = factor(as.character(1:10)), y = 1:10, row.names = as.character(1:10)))
})

test_that("x is a factor and y is a character vector", {
  data <- xy_to_data(x = factor(letters[1:10]), y = as.character(1:10))
  expect_equal(data, data.frame(x = factor(letters[1:10]), y = as.character(1:10), row.names = as.character(1:10)))
  data <- xy_to_data(x = factor(as.character(1:10)), y = letters[1:10])
  expect_equal(data, data.frame(x = factor(as.character(1:10)), y = letters[1:10], row.names = as.character(1:10)))
})

test_that("x is a factor and y is a factor", {
  data <- xy_to_data(x = factor(letters[1:10]), y = factor(as.character(1:10)))
  expect_equal(data, data.frame(x = factor(letters[1:10]), y = factor(as.character(1:10)), row.names = as.character(1:10)))
  data <- xy_to_data(x = factor(as.character(1:10)), y = factor(letters[1:10]))
  expect_equal(data, data.frame(x = factor(as.character(1:10)), y = factor(letters[1:10]), row.names = as.character(1:10)))
})

test_that("x is a factor and y is a data frame with as many numeric columns as elements in x", {
  # Columns of y are stacked; x labels each column's run of values.
  data <- xy_to_data(x = factor(letters[1:3]), y = data.frame("1" = 1:10, "2" = 11:20, "3" = 21:30))
  expect_equal(data, data.frame(x = factor(rep(letters[1:3], each = 10)), y = 1:30, row.names = as.character(1:30)))
  expect_error(xy_to_data(x = factor(letters[1:3]), y = data.frame("1" = 1:10, "2" = 11:20)), "x has 3 elements, but y has 2 columns")
})

test_that("x is a factor and y is a matrix with as many numeric columns as elements in x", {
  data <- xy_to_data(x = factor(letters[1:3]), y = matrix(1:30, ncol = 3))
  expect_equal(data, data.frame(x = factor(rep(letters[1:3], each = 10)), y = 1:30, row.names = as.character(1:30)))
  expect_error(xy_to_data(x = factor(letters[1:3]), y = matrix(1:30, ncol = 2)), "x has 3 elements, but y has 2 columns")
})
# x is a data frame
test_that("x is a data frame with only one column", {
  expect_error(xy_to_data(x = data.frame(a=1:10), y = NULL), "When x is a dataframe or a matrix, it must contain at least two columns")
})

test_that("x is a data frame with two columns and y is null", {
  # First column becomes x, second becomes y; row names are preserved.
  data <- xy_to_data(x = data.frame(a = factor(as.character(1:10)), b = 1:10), y = NULL)
  expect_equal(data, data.frame(x = factor(as.character(1:10)), y = 1:10, row.names = as.character(1:10)))
  data <- xy_to_data(x = data.frame(a = factor(as.character(1:10)), b = 1:10, row.names = letters[1:10]), y = NULL)
  expect_equal(data, data.frame(x = factor(as.character(1:10)), y = 1:10, row.names = letters[1:10]))
})

test_that("x is a data frame with three numeric columns or more and y is null", {
  # All-numeric data frames are stacked with column names as x.
  data <- xy_to_data(x = data.frame(a = 1:10, b = 11:20, c = 21:30), y = NULL)
  expect_equal(data, data.frame(x = rep(c("a","b","c"), each = 10), y = 1:30, row.names = as.character(1:30)))
  # ignore the extra columns
  expect_message(xy_to_data(x = data.frame(a = letters[1:10], b = 11:20, c = 21:30), y = NULL), "x is not numeric and it has more than two columns, using the first two: a, b")
})

# x is a matrix
test_that("x is a matrix with only one column", {
  expect_error(xy_to_data(x = cbind(1:10), y = NULL), "When x is a dataframe or a matrix, it must contain at least two columns")
})

test_that("x is a matrix with at least two columns and y is null", {
  data <- xy_to_data(x = matrix(1:20, nrow = 10, ncol = 2), y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 11:20, row.names = as.character(1:10)))
  # Matrix row names are carried over to the result.
  mat <- matrix(1:20, nrow = 10, ncol = 2)
  rownames(mat) <- letters[1:10]
  data <- xy_to_data(x = mat, y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 11:20, row.names = letters[1:10]))
})
# x is a list
test_that("x is a list with only one element", {
  expect_error(xy_to_data(x = list(1:10), y = NULL), "When x is a list, it must contain at least two elements")
})

test_that("x is a list with at least two elements and y is null", {
  data <- xy_to_data(x = list(a = 1:10, b = 11:20), y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 11:20, row.names = as.character(1:10)))
  # extra elements are ignored
  data <- xy_to_data(x = list(a = 1:10, b = 11:20, c = 21:30), y = NULL)
  expect_equal(data, data.frame(x = 1:10, y = 11:20, row.names = as.character(1:10)))
  # Fixed: the expected-message string was previously inside the xy_to_data()
  # call, so expect_error never actually checked the message.
  expect_error(xy_to_data(x = list(a = 1:10, b = list(1:5)), y = NULL), "The first two elements of x have different lengths")
})
| {
"pile_set_name": "Github"
} |
function main(x0) {
var x1 = function(x2) {
return x2
};
var x3 = {'_1' : 1,'_2' : 2};
var x4 = x1(x3);
return x4
}
| {
"pile_set_name": "Github"
} |
<?php
return [
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
| the validator class. Some of these rules have multiple versions such
| such as the size rules. Feel free to tweak each of these messages.
|
*/
"accepted" => ":attribute deve essere accettato.",
"active_url" => ":attribute non è un URL valido.",
"after" => ":attribute deve essere una data maggiore di :date.",
"alpha" => ":attribute può contenere solo lettere.",
"alpha_dash" => ":attribute può contenere solo lettere, numeri e trattini.",
"alpha_num" => ":attribute può contenere solo lettere e numeri.",
"array" => ":attribute deve essere un array.",
"before" => ":attribute deve essere una data minore di :date.",
"between" => [
"numeric" => ":attribute deve essere compreso tra :min e :max.",
"file" => ":attribute deve essere compreso tra :min e :max kilobytes.",
"string" => ":attribute deve essere compreso tra :min e :max caratteri.",
"array" => ":attribute deve avere tra :min e :max elementi.",
],
"confirmed" => "La conferma :attribute non corrisponde.",
"date" => ":attribute non è una data valida.",
"date_format" => ":attribute non corrisponde al formato :format.",
"different" => ":attribute e :other devono essere diversi.",
"digits" => ":attribute deve essere di :digits cifre.",
"digits_between" => ":attribute deve essere tra :min e :max cifre.",
"email" => "Il formato di :attribute non è valido.",
"exists" => "Il valore di :attribute non è valido.",
"image" => ":attribute deve essere un'immagine.",
"in" => "Il valore di :attribute non è valido.",
"integer" => ":attribute deve essere un numero interno.",
"ip" => ":attribute deve essere un indirizzo IP valido.",
"max" => [
"numeric" => ":attribute non può essere maggiore di :max.",
"file" => ":attribute non può essere maggiore di :max kilobytes.",
"string" => ":attribute non può essere maggiore di :max caratteri.",
"array" => ":attribute non può avere più di :max elementi.",
],
"mimes" => ":attribute deve essere un file di tipo: :values.",
"extensions" => ":attribute deve avere un estensione: :values.",
"min" => [
"numeric" => ":attribute deve essere almeno :min.",
"file" => ":attribute deve essere almeno :min kilobytes.",
"string" => ":attribute deve essere almeno :min caratteri.",
"array" => ":attribute deve avere almeno :min elementi.",
],
"not_in" => "Il valore di :attribute non è valido.",
"numeric" => ":attribute deve essere un numero.",
"regex" => "Il formato di :attribute non è valido.",
"required" => "Il campo :attribute è obbligatorio.",
"required_if" => "Il campo :attribute è obbligatorio quando :other è :value.",
"required_with" => "Il campo :attribute è obbligatorio quando :values è presente.",
"required_without" => "Il campo :attribute è obbligatorio quando :values non è presente.",
"same" => ":attribute e :other devono corrispondere.",
"size" => [
"numeric" => ":attribute deve essere :size.",
"file" => ":attribute deve essere :size kilobytes.",
"string" => ":attribute deve essere :size caratteri.",
"array" => ":attribute deve contenere :size elementi.",
],
"unique" => ":attribute è già presente.",
"url" => "Il formato di :attribute non è valido.",
/*
|--------------------------------------------------------------------------
| Custom Validation Language Lines
|--------------------------------------------------------------------------
|
| Here you may specify custom validation messages for attributes using the
| convention "attribute.rule" to name the lines. This makes it quick to
| specify a specific custom language line for a given attribute rule.
|
*/
'custom' => [],
/*
|--------------------------------------------------------------------------
| Custom Validation Attributes
|--------------------------------------------------------------------------
|
| The following language lines are used to swap attribute place-holders
| with something more reader friendly such as E-Mail Address instead
| of "email". This simply helps us make messages a little cleaner.
|
*/
'attributes' => [],
];
| {
"pile_set_name": "Github"
} |
.text
.globl anonymous_function
.type anonymous_function, @function
#! file-offset 0
#! rip-offset 0
#! capacity 17 bytes
# Text # Line RIP Bytes Opcode
.anonymous_function: # 0 0 OPC=0
addb $0x1, %al # 1 0 2 OPC=44
retq # 2 0x2 1 OPC=1978
.size anonymous_function, .-anonymous_function
| {
"pile_set_name": "Github"
} |
<ion-card button style="cursor: pointer;" (click)="presentModal()" *ngIf=" edge">
<ng-container *ngIf="(edge.currentData | async)['channel'] as currentData">
<ion-item lines="full" color="light">
<ion-avatar>
<ion-img src="assets/img/evcs.png"></ion-img>
</ion-avatar>
<ion-label *ngIf="config.properties.alias as alias" style="padding-left: 20px;">
{{ alias }}
</ion-label>
<ion-label *ngIf="!config.properties.alias" style="padding-left: 20px;" translate>
Edge.Index.Widgets.EVCS.chargingStationCluster
</ion-label>
</ion-item>
<ion-label *ngIf="currentData[componentId + '/State'] != 3">
<ion-card-content>
<table class="full_width">
<tr>
<td style="width:50%" translate>Edge.Index.Widgets.EVCS.amountOfChargingStations</td>
<td style="width:15%"></td>
<td style="width:45%" class="align_right">
{{ evcssInCluster.length }}
</td>
</tr>
<tr>
<td style="width:50%">
<ion-label *ngIf="!isEvcsCluster" translate>Edge.Index.Widgets.EVCS.totalChargingPower
</ion-label>
</td>
<td style="width:15%"></td>
<td style="width:45%" class="align_right">
{{ currentData[componentId + '/ChargePower'] | number:'1.0-0' }} W
</td>
</tr>
</table>
</ion-card-content>
</ion-label>
<ion-label *ngIf="currentData[componentId + '/State'] == 3">
<ion-item class="normalFontSize">
<table>
<tr>
<td translate>
Edge.Index.Widgets.EVCS.clusterConfigError
</td>
</tr>
</table>
</ion-item>
</ion-label>
</ng-container>
</ion-card> | {
"pile_set_name": "Github"
} |
/*
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
*/
package remote
import (
"encoding/json"
"io/ioutil"
"net"
"net/http"
"sync"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
)
/*
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type Server struct {
listener net.Listener
reporters []reporters.Reporter
alives []func() bool
lock *sync.Mutex
beforeSuiteData types.RemoteBeforeSuiteData
parallelTotal int
counter int
}
//Create a new server, automatically selecting a port
func NewServer(parallelTotal int) (*Server, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &Server{
listener: listener,
lock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
parallelTotal: parallelTotal,
}, nil
}
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *Server) Start() {
httpServer := &http.Server{}
mux := http.NewServeMux()
httpServer.Handler = mux
//streaming endpoints
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
mux.HandleFunc("/SpecWillRun", server.specWillRun)
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
//synchronization endpoints
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
mux.HandleFunc("/counter", server.handleCounter)
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
go httpServer.Serve(server.listener)
}
//Stop the server
func (server *Server) Close() {
server.listener.Close()
}
//The address the server can be reached it. Pass this into the `ForwardingReporter`.
func (server *Server) Address() string {
return "http://" + server.listener.Addr().String()
}
//
// Streaming Endpoints
//
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *Server) readAll(request *http.Request) []byte {
defer request.Body.Close()
body, _ := ioutil.ReadAll(request.Body)
return body
}
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
server.reporters = reporters
}
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var data struct {
Config config.GinkgoConfigType `json:"config"`
Summary *types.SuiteSummary `json:"suite-summary"`
}
json.Unmarshal(body, &data)
for _, reporter := range server.reporters {
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
}
}
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var setupSummary *types.SetupSummary
json.Unmarshal(body, &setupSummary)
for _, reporter := range server.reporters {
reporter.BeforeSuiteDidRun(setupSummary)
}
}
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var setupSummary *types.SetupSummary
json.Unmarshal(body, &setupSummary)
for _, reporter := range server.reporters {
reporter.AfterSuiteDidRun(setupSummary)
}
}
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var specSummary *types.SpecSummary
json.Unmarshal(body, &specSummary)
for _, reporter := range server.reporters {
reporter.SpecWillRun(specSummary)
}
}
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var specSummary *types.SpecSummary
json.Unmarshal(body, &specSummary)
for _, reporter := range server.reporters {
reporter.SpecDidComplete(specSummary)
}
}
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var suiteSummary *types.SuiteSummary
json.Unmarshal(body, &suiteSummary)
for _, reporter := range server.reporters {
reporter.SpecSuiteDidEnd(suiteSummary)
}
}
//
// Synchronization Endpoints
//
func (server *Server) RegisterAlive(node int, alive func() bool) {
server.lock.Lock()
defer server.lock.Unlock()
server.alives[node-1] = alive
}
func (server *Server) nodeIsAlive(node int) bool {
server.lock.Lock()
defer server.lock.Unlock()
alive := server.alives[node-1]
if alive == nil {
return true
}
return alive()
}
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
if request.Method == "POST" {
dec := json.NewDecoder(request.Body)
dec.Decode(&(server.beforeSuiteData))
} else {
beforeSuiteData := server.beforeSuiteData
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
}
enc := json.NewEncoder(writer)
enc.Encode(beforeSuiteData)
}
}
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
afterSuiteData := types.RemoteAfterSuiteData{
CanRun: true,
}
for i := 2; i <= server.parallelTotal; i++ {
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
}
enc := json.NewEncoder(writer)
enc.Encode(afterSuiteData)
}
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
c := spec_iterator.Counter{}
server.lock.Lock()
c.Index = server.counter
server.counter = server.counter + 1
server.lock.Unlock()
json.NewEncoder(writer).Encode(c)
}
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
writer.Write([]byte(""))
}
| {
"pile_set_name": "Github"
} |
package railo.runtime.listener;
import java.util.ArrayList;
import java.util.List;
import railo.commons.io.res.Resource;
import railo.commons.io.res.util.ResourceUtil;
import railo.runtime.type.util.ArrayUtil;
public class JavaSettingsImpl implements JavaSettings {
private final Resource[] resources;
private Resource[] resourcesTranslated;
private final boolean loadCFMLClassPath;
private final boolean reloadOnChange;
private final int watchInterval;
private final String[] watchedExtensions;
public JavaSettingsImpl(){
this.resources=new Resource[0];
this.loadCFMLClassPath=false;
this.reloadOnChange=false;
this.watchInterval=60;
this.watchedExtensions=new String[]{"jar","class"};
}
public JavaSettingsImpl(Resource[] resources, Boolean loadCFMLClassPath,boolean reloadOnChange, int watchInterval, String[] watchedExtensions) {
this.resources=resources;
this.loadCFMLClassPath=loadCFMLClassPath;
this.reloadOnChange=reloadOnChange;
this.watchInterval=watchInterval;
this.watchedExtensions=watchedExtensions;
}
@Override
public Resource[] getResources() {
return resources;
}
// FUTURE add to interface
public Resource[] getResourcesTranslated() {
if(resourcesTranslated==null) {
List<Resource> list=new ArrayList<Resource>();
_getResourcesTranslated(list,resources, true);
resourcesTranslated=list.toArray(new Resource[list.size()]);
}
return resourcesTranslated;
}
public static void _getResourcesTranslated(List<Resource> list, Resource[] resources, boolean deep) {
if(ArrayUtil.isEmpty(resources)) return;
for(int i=0;i<resources.length;i++){
if(resources[i].isFile()) {
if(ResourceUtil.getExtension(resources[i], "").equalsIgnoreCase("jar"))
list.add(resources[i]);
}
else if(deep && resources[i].isDirectory()){
list.add(resources[i]); // add as possible classes dir
_getResourcesTranslated(list,resources[i].listResources(),false);
}
}
}
@Override
public boolean loadCFMLClassPath() {
return loadCFMLClassPath;
}
@Override
public boolean reloadOnChange() {
return reloadOnChange;
}
@Override
public int watchInterval() {
return watchInterval;
}
@Override
public String[] watchedExtensions() {
return watchedExtensions;
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<div xmlns:c="http://java.sun.com/jsp/jstl/core" xmlns:field="urn:jsptagdir:/WEB-INF/tags/form/fields" xmlns:form="urn:jsptagdir:/WEB-INF/tags/form" xmlns:jsp="http://java.sun.com/JSP/Page" xmlns:spring="http://www.springframework.org/tags" version="2.0">
<jsp:directive.page contentType="text/html;charset=UTF-8"/>
<jsp:output omit-xml-declaration="yes"/>
<form:create id="fc_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit" modelAttribute="hisAutorisatiebesluit" path="/hisautorisatiebesluits" render="${empty dependencies}" z="D82++JvotJukKgZ/VenyD/bDsVI=">
<field:select field="autorisatiebesluit" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_autorisatiebesluit" itemValue="id" items="${autorisatiebesluits}" path="/autorisatiebesluits" z="8cUIPy03FQX4cHCrzyF86ACtAms="/>
<field:select field="actieinh" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_actieinh" itemValue="id" items="${acties}" path="/acties" z="W9+8nVSSLQv/lO2VlWucNXZ2+Dw="/>
<field:select field="actieverval" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_actieverval" itemValue="id" items="${acties}" path="/acties" z="WRmwi92C9f4hQZdIf7lJi3tS7CA="/>
<field:datetime dateTimePattern="${hisAutorisatiebesluit_tsreg_date_format}" field="tsreg" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_tsreg" z="eRF1Pmim8XhseXpWqyQ+R2pYnXc="/>
<field:datetime dateTimePattern="${hisAutorisatiebesluit_tsverval_date_format}" field="tsverval" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_tsverval" z="qZt0OvW7gfbcYNCx5Oyk1YpO4R8="/>
<field:checkbox field="indingetrokken" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_indingetrokken" z="gF6g6yNfbHd4XFQhzXtZnR4x0YY="/>
<field:input field="datbesluit" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_datbesluit" required="true" validationMessageCode="field_invalid_number" z="/sIyuBvuSxMP3n37aUYyq8qnk7s="/>
<field:input field="datingang" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_datingang" required="true" validationMessageCode="field_invalid_number" z="atDl4M6Mut0RR32WJq65BBQ2rE8="/>
<field:input field="dateinde" id="c_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit_dateinde" validationMessageCode="field_invalid_number" z="HHw/r22E2vAdcpL1hb2hi04NXKE="/>
</form:create>
<form:dependency dependencies="${dependencies}" id="d_nl_bzk_brp_model_data_autaut_HisAutorisatiebesluit" render="${not empty dependencies}" z="R95fNaHnlK4h5G6siMUD+PxZ73Y="/>
</div>
| {
"pile_set_name": "Github"
} |
/******************************************************************************
* Product: Adempiere ERP & CRM Smart Business Solution *
* Copyright (C) 1999-2006 ComPiere, Inc. All Rights Reserved. *
* This program is free software; you can redistribute it and/or modify it *
* under the terms version 2 of the GNU General Public License as published *
* by the Free Software Foundation. This program is distributed in the hope *
* that it will be useful, but WITHOUT ANY WARRANTY; without even the implied *
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. *
* For the text or an alternative of this public license, you may reach us *
* ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA *
* or via [email protected] or http://www.compiere.org/license.html *
*****************************************************************************/
package org.apache.ecs.xhtml;
import org.apache.ecs.Element;
import org.apache.ecs.KeyEvents;
import org.apache.ecs.MouseEvents;
import org.apache.ecs.MultiPartElement;
import org.apache.ecs.Printable;
/**
* This class creates an <object> tag.
*
* @version $Id: object.java,v 1.2 2006/07/30 00:54:02 jjanke Exp $
* @author <a href="mailto:[email protected]">Stephan Nagy</a>
* @author <a href="mailto:[email protected]">Jon S. Stevens</a>
* @author <a href="mailto:[email protected]">Bojan Smojver</a>
*/
public class object extends MultiPartElement
implements Printable, MouseEvents, KeyEvents
{
/**
*
*/
private static final long serialVersionUID = 6131951963043441532L;
/**
* Private initializer.
*/
{
setElementType ("object");
setCase (LOWERCASE);
setAttributeQuote (true);
}
/**
* Default constructor. Creates the <object/> element.<br>
* use set* methods.
*/
public object ()
{
}
/**
* Sets the declare attribute. (declare this object but don't instantiate
* it.
*
* @param declare
* declare on or off
*/
public object setDeclare (boolean declare)
{
if (declare)
addAttribute ("declare", "declare");
else
removeAttribute ("declare");
return (this);
}
/**
* Identifies an implementation.
*
* @param url
* location of classid.
*/
public object setClassId (String url)
{
addAttribute ("classid", url);
return (this);
}
/**
* Sets the Internet content type for the code.
*
* @param codetype
* Sets the Internet content type for the code.
*/
public object setCodeType (String codetype)
{
addAttribute ("codetype", codetype);
return (this);
}
/**
* Determines the base path to resolve relative urls specified by classid.
*
* @param url
* base path to resolve relative urls specified by classid.
*/
public object setCodeBase (String url)
{
addAttribute ("codebase", url);
return (this);
}
/**
* This attribute specifies the location of the data to be rendered.
*
* @param url
* this attribute specifies the location of the data to be
* rendered.
*/
public object setData (String url)
{
addAttribute ("data", url);
return (this);
}
/**
* This attribute specifies the Internet Media Type for the data specified
* by data.<br>
* This should be a mime type.
*
* @param type
* a mime type for the data specifed by the data attribute.
*/
public object setType (String type)
{
addAttribute ("type", type);
return (this);
}
/**
* Space seperated archive list.
*
* @param url
* Space seperate archive list.
*/
// Anyone know what the hell this is? the spec is rather vague in its
// definition.
public object setArchive (String url)
{
addAttribute ("archive", url);
return (this);
}
/**
* Message to show while the object is loading.
*
* @param cdata
* the message to show while the object is loading.
*/
public object setStandBy (String cdata)
{
addAttribute ("standby", cdata);
return (this);
}
/**
* Suggested link border width.
*
* @param border
* suggested link border width.
*/
public object setBorder (String border)
{
addAttribute ("border", border);
return (this);
}
/**
* Suggested link border width.
*
* @param border
* suggested link border width.
*/
public object setBorder (int border)
{
addAttribute ("border", Integer.toString (border));
return (this);
}
/**
* Suggested link border width.
*
* @param border
* suggested link border width.
*/
public object setBorder (double border)
{
addAttribute ("border", Double.toString (border));
return (this);
}
/**
* Suggested height of object.
*
* @param height
* suggested link height.
*/
public object setHeight (String height)
{
addAttribute ("height", height);
return (this);
}
/**
* Suggested height of object.
*
* @param height
* suggested link height.
*/
public object setHeight (int height)
{
addAttribute ("height", Integer.toString (height));
return (this);
}
/**
* Suggested height of object.
*
* @param height
* suggested link height.
*/
public object setHeight (double height)
{
addAttribute ("height", Double.toString (height));
return (this);
}
/**
* Suggested width of object.
*
* @param width
* suggested link width.
*/
public object setWidth (String width)
{
addAttribute ("width", width);
return (this);
}
/**
* Suggested width of object.
*
* @param width
* suggested link width.
*/
public object setWidth (int width)
{
addAttribute ("width", Integer.toString (width));
return (this);
}
/**
* Suggested width of object.
*
* @param width
* suggested link width.
*/
public object setWidth (double width)
{
addAttribute ("width", Double.toString (width));
return (this);
}
/**
* Suggested horizontal gutter.
*
* @param hspace
* suggested horizontal gutter.
*/
public object setHSpace (String hspace)
{
addAttribute ("hspace", hspace);
return (this);
}
/**
* Suggested horizontal gutter.
*
* @param hspace
* suggested horizontal gutter.
*/
public object setHSpace (int hspace)
{
addAttribute ("hspace", Integer.toString (hspace));
return (this);
}
/**
* Suggested horizontal gutter.
*
* @param hspace
* suggested horizontal gutter.
*/
public object setHSpace (double hspace)
{
addAttribute ("hspace", Double.toString (hspace));
return (this);
}
/**
* Suggested vertical gutter.
*
* @param vspace
* suggested vertical gutter.
*/
public object setVSpace (String vspace)
{
addAttribute ("vspace", vspace);
return (this);
}
/**
* Suggested vertical gutter.
*
* @param vspace
* suggested vertical gutter.
*/
public object setVSpace (int vspace)
{
addAttribute ("vspace", Integer.toString (vspace));
return (this);
}
/**
* Suggested vertical gutter.
*
* @param vspace
* suggested vertical gutter.
*/
public object setVSpace (double vspace)
{
addAttribute ("vspace", Double.toString (vspace));
return (this);
}
/**
* Set the horizontal or vertical alignment of this object.<br>
* Convience variables are in the AlignTypes interface.
*
* @param alignment
* Set the horizontal or vertical alignment of this object.<br>
* Convience variables are in the AlignTypes interface.
*/
public object setAlign (String alignment)
{
addAttribute ("align", alignment);
return (this);
}
/**
* Location of image map to use.
*
* @param url
* location of image map to use.
*/
public object setUseMap (String url)
{
addAttribute ("usemap", url);
return (this);
}
/**
* Object has shaped hypertext links.
*
* @param shape
* does the object have shaped hypertext links?
*/
public object setShapes (boolean shape)
{
if (shape)
addAttribute ("shapes", "shapes");
else
removeAttribute ("shapes");
return (this);
}
/**
* Set the name of this object.
*
* @param name
* set the name of this object.
*/
public object setName (String name)
{
addAttribute ("name", name);
return (this);
}
/**
* Set the elements position in the tabbing order.
*
* @param number
* set the elements position in the tabbing order.
*/
public object setTabIndex (int number)
{
addAttribute ("tabindex", Integer.toString (number));
return (this);
}
/**
* Set the elements position in the tabbing order.
*
* @param number
* set the elements position in the tabbing order.
*/
public object setTabIndex (String number)
{
addAttribute ("tabindex", number);
return (this);
}
/**
* Sets the lang="" and xml:lang="" attributes
*
* @param lang
* the lang="" and xml:lang="" attributes
*/
public Element setLang (String lang)
{
addAttribute ("lang", lang);
addAttribute ("xml:lang", lang);
return this;
}
/**
* Adds an Element to the element.
*
* @param hashcode
* name of element for hash table
* @param element
* Adds an Element to the element.
*/
public object addElement (String hashcode, Element element)
{
addElementToRegistry (hashcode, element);
return (this);
}
/**
* Adds an Element to the element.
*
* @param hashcode
* name of element for hash table
* @param element
* Adds an Element to the element.
*/
public object addElement (String hashcode, String element)
{
addElementToRegistry (hashcode, element);
return (this);
}
/**
* Add an element to the element
*
* @param element
* a string representation of the element
*/
public object addElement (String element)
{
addElementToRegistry (element);
return (this);
}
/**
* Add an element to the element
*
* @param element
* an element to add
*/
public object addElement (Element element)
{
addElementToRegistry (element);
return (this);
}
/**
* Removes an Element from the element.
*
* @param hashcode
* the name of the element to be removed.
*/
public object removeElement (String hashcode)
{
removeElementFromRegistry (hashcode);
return (this);
}
/**
* The onclick event occurs when the pointing device button is clicked over
* an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnClick (String script)
{
addAttribute ("onclick", script);
}
/**
* The ondblclick event occurs when the pointing device button is double
* clicked over an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnDblClick (String script)
{
addAttribute ("ondblclick", script);
}
/**
* The onmousedown event occurs when the pointing device button is pressed
* over an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnMouseDown (String script)
{
addAttribute ("onmousedown", script);
}
/**
* The onmouseup event occurs when the pointing device button is released
* over an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnMouseUp (String script)
{
addAttribute ("onmouseup", script);
}
/**
* The onmouseover event occurs when the pointing device is moved onto an
* element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnMouseOver (String script)
{
addAttribute ("onmouseover", script);
}
/**
* The onmousemove event occurs when the pointing device is moved while it
* is over an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnMouseMove (String script)
{
addAttribute ("onmousemove", script);
}
/**
* The onmouseout event occurs when the pointing device is moved away from
* an element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnMouseOut (String script)
{
addAttribute ("onmouseout", script);
}
/**
* The onkeypress event occurs when a key is pressed and released over an
* element. This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnKeyPress (String script)
{
addAttribute ("onkeypress", script);
}
/**
* The onkeydown event occurs when a key is pressed down over an element.
* This attribute may be used with most elements.
*
* @param script The script
*/
public void setOnKeyDown (String script)
{
addAttribute ("onkeydown", script);
}
/**
* The onkeyup event occurs when a key is released over an element. This
* attribute may be used with most elements.
*
* @param script The script
*/
public void setOnKeyUp (String script)
{
addAttribute ("onkeyup", script);
}
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="generator" content="pandoc">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<title>ros-update(1)</title>
<style type="text/css">code{white-space: pre;}</style>
<link rel="stylesheet" href="../ros.css">
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
</head>
<body>
<header>
<h1 class="title">ros-update(1)</h1>
<ul>
<li class="author">Roswell Project Team</li>
</ul>
</header>
<p>ros-update - Update system installed from vcs</p>
<h1 id="synopsis">Synopsis</h1>
<ul>
<li><strong>ros update</strong> system [system ...]</li>
<li><strong>ros update</strong> method [params ...]</li>
</ul>
<h1 id="description">Description</h1>
<dl>
<dt>system</dt>
<dd><p>a name specifying a system.</p>
</dd>
<dt>method</dt>
<dd><p>currently supported method is <code>git</code>.</p>
</dd>
</dl>
<h1 id="update-system">Update system</h1>
<p>When system are already installed and the system has <code>.git</code> in the system directory it invoke <code>git pull</code> and <code>ros install</code> the system.</p>
<h1 id="see-also">SEE ALSO</h1>
<p><em><a href="ros.html">ros</a></em>(1), <em><a href="ros-install.html">ros-install</a></em>(1) _</p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>master</artifactId>
<groupId>org.sakaiproject</groupId>
<version>22-SNAPSHOT</version>
<relativePath>../../master/pom.xml</relativePath>
</parent>
<name>wiki</name>
<groupId>org.sakaiproject</groupId>
<artifactId>wiki</artifactId>
<packaging>war</packaging>
<dependencies>
<dependency>
<groupId>org.sakaiproject.kernel</groupId>
<artifactId>sakai-kernel-util</artifactId>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>org.sakaiproject.kernel</groupId>
<artifactId>sakai-kernel-api</artifactId>
</dependency>
</dependencies>
<build>
<resources />
<!-- java source -->
</build>
</project>
| {
"pile_set_name": "Github"
} |
<?php
/**
* This file is part of Phraseanet
*
* (c) 2005-2016 Alchemy
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Alchemy\Phrasea\Collection\Reference;
/**
 * Repository contract for looking up and persisting CollectionReference
 * entities, which map a databox/collection pair to its application base id.
 */
interface CollectionReferenceRepository
{
/**
 * Fetches every collection reference known to the application,
 * across all databoxes.
 *
 * @return CollectionReference[]
 */
public function findAll();
/**
 * Fetches all collection references belonging to a single databox.
 *
 * @param int $databoxId
 * @return CollectionReference[]
 */
public function findAllByDatabox($databoxId);
/**
 * Fetches a single reference by its base id.
 *
 * @param int $baseId
 * @return CollectionReference|null Null when no reference has this base id.
 */
public function find($baseId);
/**
 * Fetches the references matching any of the given base ids.
 *
 * @param int[] $baseIds
 * @return CollectionReference[] May contain fewer entries than $baseIds.
 */
public function findMany(array $baseIds);
/**
 * Fetches a reference by its databox id / collection id pair.
 *
 * @param int $databoxId
 * @param int $collectionId
 * @return CollectionReference|null Null when the pair is unknown.
 */
public function findByCollectionId($databoxId, $collectionId);
/**
 * Find Collection references having at least one Order Master
 *
 * @param array<int>|null $baseIdsSubset Restrict search to a subset of base ids.
 * @return CollectionReference[]
 */
public function findHavingOrderMaster(array $baseIdsSubset = null);
/**
 * Persists the given reference (insert or update).
 *
 * @param CollectionReference $reference
 * @return void
 */
public function save(CollectionReference $reference);
/**
 * Removes the given reference from storage.
 *
 * @param CollectionReference $reference
 * @return void
 */
public function delete(CollectionReference $reference);
}
| {
"pile_set_name": "Github"
} |
package com.tencent.mm.plugin.brandservice.ui.timeline.preload;
import a.l;
import com.tencent.matrix.trace.core.AppMethodBeat;
@l(dWo = {1, 1, 13}, dWp = {"\u0000\u0012\n\u0002\u0018\u0002\n\u0002\u0010\u0000\n\u0002\b\u0002\n\u0002\u0010\u0002\n\u0000\bÆ\u0002\u0018\u00002\u00020\u0001B\u0007\b\u0002¢\u0006\u0002\u0010\u0002J\b\u0010\u0003\u001a\u00020\u0004H\u0007¨\u0006\u0005"}, dWq = {"Lcom/tencent/mm/plugin/brandservice/ui/timeline/preload/AppMsgContextEx;", "", "()V", "testClear", "", "plugin-brandservice_release"})
/*
 * Decompiled Kotlin object; the metadata annotation names it
 * com.tencent.mm.plugin.brandservice.ui.timeline.preload.AppMsgContextEx
 * with a single public method "testClear" (compiled here as aWM()).
 * AppMethodBeat.i/o pairs are Matrix trace instrumentation inserted by
 * tooling and carry no business logic.
 */
public final class a {
// Kotlin singleton INSTANCE field for the AppMsgContextEx object.
public static final a jQI = new a();
static {
AppMethodBeat.i(14779);
AppMethodBeat.o(14779);
}
// Private constructor: instances only created via the singleton field.
private a() {
}
/*
 * testClear(): iterates j = 1 .. b.jQK (inclusive) and calls
 * b.gK(j).clear() on each. NOTE(review): b.gK(j) presumably returns a
 * per-index cache/collection being emptied — confirm against class b.
 * The manual while-loop with an equality break is how the Kotlin
 * compiler lowers a LongRange for-loop; it also means the body runs
 * at least once whenever b.jQK >= 1.
 */
public static final void aWM() {
AppMethodBeat.i(14778);
long aWQ = (long) b.jQK;
if (1 <= aWQ) {
long j = 1;
while (true) {
b.gK(j).clear();
if (j == aWQ) {
break;
}
j++;
}
}
AppMethodBeat.o(14778);
}
}
| {
"pile_set_name": "Github"
} |
var compactable = require('../compactable');
// True when property2 is a component of property1 — either listed directly
// in property1's metadata, or (unless `shallow` is set) reachable through
// one of property1's shorthand sub-components.
function isComponentOf(property1, property2, shallow) {
  if (isDirectComponentOf(property1, property2))
    return true;

  if (shallow)
    return false;

  var descriptor = compactable[property1.name];
  return !!descriptor.shorthandComponents && isSubComponentOf(property1, property2);
}
// True when property2's name appears in property1's `components` list in
// the compactable metadata.
function isDirectComponentOf(property1, property2) {
  var meta = compactable[property1.name];

  if (!('components' in meta))
    return false;

  return meta.components.indexOf(property2.name) > -1;
}
// True when any of property1's component properties has property2 as a
// direct component (i.e. property2 is a "grandchild" of property1).
function isSubComponentOf(property1, property2) {
  var components = property1.components;

  for (var i = 0; i < components.length; i++) {
    if (isDirectComponentOf(components[i], property2))
      return true;
  }

  return false;
}
module.exports = isComponentOf;
| {
"pile_set_name": "Github"
} |
/* ssl/s3_pkt.c */
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* [email protected].
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* ([email protected]). This product includes software written by Tim
* Hudson ([email protected]).
*
*/
#include <stdio.h>
#include <errno.h>
#define USE_SOCKETS
#include "ssl_locl.h"
#include <openssl/evp.h>
#include <openssl/buffer.h>
static int do_ssl3_write(SSL *s, int type, const unsigned char *buf,
unsigned int len, int create_empty_fragment);
static int ssl3_get_record(SSL *s);
/*
 * Fill s->packet with data read from the underlying read BIO.
 *
 * extend == 0: start a fresh n-byte packet at the front of the read
 * buffer; extend == 1: grow the current packet by another n bytes.
 * 'max' is an upper bound for opportunistic read-ahead (only honoured
 * when s->read_ahead is set).
 *
 * Returns n on success, or the (<= 0) BIO result on error/non-blocking IO.
 */
int ssl3_read_n(SSL *s, int n, int max, int extend)
{
/* If extend == 0, obtain new n-byte packet; if extend == 1, increase
* packet by another n bytes.
* The packet will be in the sub-array of s->s3->rbuf.buf specified
* by s->packet and s->packet_length.
* (If s->read_ahead is set, 'max' bytes may be stored in rbuf
* [plus s->packet_length bytes if extend == 1].)
*/
int i,len,left;
long align=0;
unsigned char *pkt;
SSL3_BUFFER *rb;
if (n <= 0) return n;
rb = &(s->s3->rbuf);
/* Lazily allocate the read buffer on first use. */
if (rb->buf == NULL)
if (!ssl3_setup_read_buffer(s))
return -1;
left = rb->left;
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
/* Compute offset so the record *payload* (after the 5-byte header)
* lands on an SSL3_ALIGN_PAYLOAD boundary. */
align = (long)rb->buf + SSL3_RT_HEADER_LENGTH;
align = (-align)&(SSL3_ALIGN_PAYLOAD-1);
#endif
if (!extend)
{
/* start with empty packet ... */
if (left == 0)
rb->offset = align;
else if (align != 0 && left >= SSL3_RT_HEADER_LENGTH)
{
/* check if next packet length is large
* enough to justify payload alignment... */
pkt = rb->buf + rb->offset;
if (pkt[0] == SSL3_RT_APPLICATION_DATA
&& (pkt[3]<<8|pkt[4]) >= 128)
{
/* Note that even if packet is corrupted
* and its length field is insane, we can
* only be led to wrong decision about
* whether memmove will occur or not.
* Header values has no effect on memmove
* arguments and therefore no buffer
* overrun can be triggered. */
memmove (rb->buf+align,pkt,left);
rb->offset = align;
}
}
s->packet = rb->buf + rb->offset;
s->packet_length = 0;
/* ... now we can act as if 'extend' was set */
}
/* For DTLS/UDP reads should not span multiple packets
* because the read operation returns the whole packet
* at once (as long as it fits into the buffer). */
if (SSL_version(s) == DTLS1_VERSION || SSL_version(s) == DTLS1_BAD_VER)
{
if (left > 0 && n > left)
n = left;
}
/* if there is enough in the buffer from a previous read, take some */
if (left >= n)
{
s->packet_length+=n;
rb->left=left-n;
rb->offset+=n;
return(n);
}
/* else we need to read more data */
len = s->packet_length;
pkt = rb->buf+align;
/* Move any available bytes to front of buffer:
* 'len' bytes already pointed to by 'packet',
* 'left' extra ones at the end */
if (s->packet != pkt) /* len > 0 */
{
memmove(pkt, s->packet, len+left);
s->packet = pkt;
rb->offset = len + align;
}
/* Sanity check: the request must fit in the (possibly grown) buffer. */
if (n > (int)(rb->len - rb->offset)) /* does not happen */
{
SSLerr(SSL_F_SSL3_READ_N,ERR_R_INTERNAL_ERROR);
return -1;
}
if (!s->read_ahead)
/* ignore max parameter */
max = n;
else
{
if (max < n)
max = n;
if (max > (int)(rb->len - rb->offset))
max = rb->len - rb->offset;
}
while (left < n)
{
/* Now we have len+left bytes at the front of s->s3->rbuf.buf
* and need to read in more until we have len+n (up to
* len+max if possible) */
clear_sys_error();
if (s->rbio != NULL)
{
s->rwstate=SSL_READING;
i=BIO_read(s->rbio,pkt+len+left, max-left);
}
else
{
SSLerr(SSL_F_SSL3_READ_N,SSL_R_READ_BIO_NOT_SET);
i = -1;
}
if (i <= 0)
{
/* Remember partial progress so a retry can resume, and
* optionally release an empty buffer back to the pool. */
rb->left = left;
if (s->mode & SSL_MODE_RELEASE_BUFFERS &&
SSL_version(s) != DTLS1_VERSION && SSL_version(s) != DTLS1_BAD_VER)
if (len+left == 0)
ssl3_release_read_buffer(s);
return(i);
}
left+=i;
/* reads should *never* span multiple packets for DTLS because
* the underlying transport protocol is message oriented as opposed
* to byte oriented as in the TLS case. */
if (SSL_version(s) == DTLS1_VERSION || SSL_version(s) == DTLS1_BAD_VER)
{
if (n > left)
n = left; /* makes the while condition false */
}
}
/* done reading, now the book-keeping */
rb->offset += n;
rb->left = left - n;
s->packet_length += n;
s->rwstate=SSL_NOTHING;
return(n);
}
/* Call this to get a new input record.
* It will return <= 0 if more data is needed, normally due to an error
* or non-blocking IO.
* When it finishes, one packet has been decoded and can be found in
* ssl->s3->rrec.type - is the type of record
* ssl->s3->rrec.data, - data
* ssl->s3->rrec.length, - number of bytes
*/
/* used only by ssl3_read_bytes */
/*
 * Read, decrypt, MAC-check and (optionally) decompress one SSL3/TLS
 * record into s->s3->rrec. Returns 1 when a non-empty record is ready,
 * <= 0 on error or when the underlying BIO would block. Empty records
 * are consumed silently (loops back to 'again').
 */
static int ssl3_get_record(SSL *s)
{
int ssl_major,ssl_minor,al;
int enc_err,n,i,ret= -1;
SSL3_RECORD *rr;
SSL_SESSION *sess;
unsigned char *p;
unsigned char md[EVP_MAX_MD_SIZE];
short version;
int mac_size;
int clear=0;
size_t extra;
/* Single flag for both padding and MAC failures so that one uniform
 * alert is sent regardless of which check failed (timing/oracle
 * countermeasure, see comment at the alert site below). */
int decryption_failed_or_bad_record_mac = 0;
unsigned char *mac = NULL;
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
long align=SSL3_ALIGN_PAYLOAD;
#else
long align=0;
#endif
rr= &(s->s3->rrec);
sess=s->session;
if (s->options & SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER)
extra=SSL3_RT_MAX_EXTRA;
else
extra=0;
if (!(SSL_get_mode(s) & SSL_MODE_SMALL_BUFFERS) &&
extra && !s->s3->init_extra)
{
/* An application error: SLS_OP_MICROSOFT_BIG_SSLV3_BUFFER
* set after ssl3_setup_buffers() was done */
SSLerr(SSL_F_SSL3_GET_RECORD, ERR_R_INTERNAL_ERROR);
return -1;
}
again:
/* check if we have the header */
if ( (s->rstate != SSL_ST_READ_BODY) ||
(s->packet_length < SSL3_RT_HEADER_LENGTH))
{
n=ssl3_read_n(s, SSL3_RT_HEADER_LENGTH, s->s3->rbuf.len, 0);
if (n <= 0) return(n); /* error or non-blocking */
s->rstate=SSL_ST_READ_BODY;
p=s->packet;
/* Pull apart the header into the SSL3_RECORD */
rr->type= *(p++);
ssl_major= *(p++);
ssl_minor= *(p++);
version=(ssl_major<<8)|ssl_minor;
n2s(p,rr->length);
#if 0
fprintf(stderr, "Record type=%d, Length=%d\n", rr->type, rr->length);
#endif
/* Lets check version */
if (!s->first_packet)
{
if (version != s->version)
{
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_WRONG_VERSION_NUMBER);
if ((s->version & 0xFF00) == (version & 0xFF00))
/* Send back error using their minor version number :-) */
s->version = (unsigned short)version;
al=SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
}
if ((version>>8) != SSL3_VERSION_MAJOR)
{
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_WRONG_VERSION_NUMBER);
goto err;
}
/* If we receive a valid record larger than the current buffer size,
* allocate some memory for it.
*/
if (rr->length > s->s3->rbuf.len - SSL3_RT_HEADER_LENGTH - align)
{
if ((p=OPENSSL_realloc(s->s3->rbuf.buf, rr->length + SSL3_RT_HEADER_LENGTH + align))==NULL)
{
SSLerr(SSL_F_SSL3_GET_RECORD,ERR_R_MALLOC_FAILURE);
goto err;
}
s->s3->rbuf.buf=p;
s->s3->rbuf.len=rr->length + SSL3_RT_HEADER_LENGTH + align;
s->packet= &(s->s3->rbuf.buf[0]);
}
if (rr->length > s->s3->rbuf.len - SSL3_RT_HEADER_LENGTH)
{
al=SSL_AD_RECORD_OVERFLOW;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_PACKET_LENGTH_TOO_LONG);
goto f_err;
}
/* now s->rstate == SSL_ST_READ_BODY */
}
/* s->rstate == SSL_ST_READ_BODY, get and decode the data */
if (rr->length > s->packet_length-SSL3_RT_HEADER_LENGTH)
{
/* now s->packet_length == SSL3_RT_HEADER_LENGTH */
i=rr->length;
n=ssl3_read_n(s,i,i,1);
if (n <= 0) return(n); /* error or non-blocking io */
/* now n == rr->length,
* and s->packet_length == SSL3_RT_HEADER_LENGTH + rr->length */
}
s->rstate=SSL_ST_READ_HEADER; /* set state for later operations */
/* At this point, s->packet_length == SSL3_RT_HEADER_LNGTH + rr->length,
* and we have that many bytes in s->packet
*/
rr->input= &(s->packet[SSL3_RT_HEADER_LENGTH]);
/* ok, we can now read from 's->packet' data into 'rr'
* rr->input points at rr->length bytes, which
* need to be copied into rr->data by either
* the decryption or by the decompression
* When the data is 'copied' into the rr->data buffer,
* rr->input will be pointed at the new buffer */
/* We now have - encrypted [ MAC [ compressed [ plain ] ] ]
* rr->length bytes of encrypted compressed stuff. */
/* check is not needed I believe */
if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH+extra)
{
al=SSL_AD_RECORD_OVERFLOW;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_ENCRYPTED_LENGTH_TOO_LONG);
goto f_err;
}
/* decrypt in place in 'rr->input' */
rr->data=rr->input;
enc_err = s->method->ssl3_enc->enc(s,0);
if (enc_err <= 0)
{
if (enc_err == 0)
/* SSLerr() and ssl3_send_alert() have been called */
goto err;
/* Otherwise enc_err == -1, which indicates bad padding
* (rec->length has not been changed in this case).
* To minimize information leaked via timing, we will perform
* the MAC computation anyway. */
decryption_failed_or_bad_record_mac = 1;
}
#ifdef TLS_DEBUG
printf("dec %d\n",rr->length);
{ unsigned int z; for (z=0; z<rr->length; z++) printf("%02X%c",rr->data[z],((z+1)%16)?' ':'\n'); }
printf("\n");
#endif
/* r->length is now the compressed data plus mac */
/* 'clear' means no cipher/MAC is active yet (e.g. during the initial
 * handshake before ChangeCipherSpec). */
if ( (sess == NULL) ||
(s->enc_read_ctx == NULL) ||
(EVP_MD_CTX_md(s->read_hash) == NULL))
clear=1;
if (!clear)
{
/* !clear => s->read_hash != NULL => mac_size != -1 */
mac_size=EVP_MD_CTX_size(s->read_hash);
OPENSSL_assert(mac_size >= 0);
if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH+extra+mac_size)
{
#if 0 /* OK only for stream ciphers (then rr->length is visible from ciphertext anyway) */
al=SSL_AD_RECORD_OVERFLOW;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_PRE_MAC_LENGTH_TOO_LONG);
goto f_err;
#else
decryption_failed_or_bad_record_mac = 1;
#endif
}
/* check the MAC for rr->input (it's in mac_size bytes at the tail) */
if (rr->length >= (unsigned int)mac_size)
{
rr->length -= mac_size;
mac = &rr->data[rr->length];
}
else
{
/* record (minus padding) is too short to contain a MAC */
#if 0 /* OK only for stream ciphers */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_LENGTH_TOO_SHORT);
goto f_err;
#else
decryption_failed_or_bad_record_mac = 1;
rr->length = 0;
#endif
}
/* MAC is always computed, even after earlier failures, so the
 * time taken does not reveal which check failed. */
i=s->method->ssl3_enc->mac(s,md,0);
if (i < 0 || mac == NULL || memcmp(md, mac, (size_t)mac_size) != 0)
{
decryption_failed_or_bad_record_mac = 1;
}
}
if (decryption_failed_or_bad_record_mac)
{
/* A separate 'decryption_failed' alert was introduced with TLS 1.0,
* SSL 3.0 only has 'bad_record_mac'. But unless a decryption
* failure is directly visible from the ciphertext anyway,
* we should not reveal which kind of error occured -- this
* might become visible to an attacker (e.g. via a logfile) */
al=SSL_AD_BAD_RECORD_MAC;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC);
goto f_err;
}
/* r->length is now just compressed */
if (s->expand != NULL)
{
if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH+extra)
{
al=SSL_AD_RECORD_OVERFLOW;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_COMPRESSED_LENGTH_TOO_LONG);
goto f_err;
}
if (!ssl3_do_uncompress(s))
{
al=SSL_AD_DECOMPRESSION_FAILURE;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_BAD_DECOMPRESSION);
goto f_err;
}
}
if (rr->length > SSL3_RT_MAX_PLAIN_LENGTH+extra)
{
al=SSL_AD_RECORD_OVERFLOW;
SSLerr(SSL_F_SSL3_GET_RECORD,SSL_R_DATA_LENGTH_TOO_LONG);
goto f_err;
}
rr->off=0;
/* So at this point the following is true
* ssl->s3->rrec.type is the type of record
* ssl->s3->rrec.length == number of bytes in record
* ssl->s3->rrec.off == offset to first valid byte
* ssl->s3->rrec.data == where to take bytes from, increment
* after use :-).
*/
/* we have pulled in a full packet so zero things */
s->packet_length=0;
/* just read a 0 length packet */
if (rr->length == 0) goto again;
#if 0
fprintf(stderr, "Ultimate Record type=%d, Length=%d\n", rr->type, rr->length);
#endif
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
return(ret);
}
/*
 * Decompress the current read record (ssl->s3->rrec) into its 'comp'
 * buffer and point rr->data at the decompressed bytes.
 * Returns 1 on success, 0 when COMP_expand_block reports an error.
 * Compiled to a no-op returning 1 when compression support is disabled.
 */
int ssl3_do_uncompress(SSL *ssl)
{
#ifndef OPENSSL_NO_COMP
SSL3_RECORD *rec = &(ssl->s3->rrec);
int expanded;

expanded = COMP_expand_block(ssl->expand, rec->comp,
SSL3_RT_MAX_PLAIN_LENGTH, rec->data, (int)rec->length);
if (expanded < 0)
return(0);

rec->length = expanded;
rec->data = rec->comp;
#endif
return(1);
}
/*
 * Compress the current write record (ssl->s3->wrec) in place: the
 * compressed bytes are produced into wr->data and wr->input is
 * repointed at them. Returns 1 on success, 0 when COMP_compress_block
 * reports an error. A no-op returning 1 without compression support.
 */
int ssl3_do_compress(SSL *ssl)
{
#ifndef OPENSSL_NO_COMP
SSL3_RECORD *rec = &(ssl->s3->wrec);
int compressed;

compressed = COMP_compress_block(ssl->compress, rec->data,
SSL3_RT_MAX_COMPRESSED_LENGTH,
rec->input, (int)rec->length);
if (compressed < 0)
return(0);

rec->length = compressed;
rec->input = rec->data;
#endif
return(1);
}
/* Call this to write data in records of type 'type'
* It will return <= 0 if not all data has been sent or non-blocking IO.
*/
/*
 * Write 'len' bytes of application/handshake data as one or more records
 * of the given 'type'. Progress across non-blocking retries is kept in
 * s->s3->wnum, so a retried call resumes where the previous one stopped.
 * Returns the number of bytes written, or <= 0 on error/would-block.
 */
int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len)
{
const unsigned char *buf=buf_;
unsigned int tot,n,nw;
int i;
unsigned int max_plain_length;
s->rwstate=SSL_NOTHING;
/* Resume from any bytes already written in a previous (retried) call. */
tot=s->s3->wnum;
s->s3->wnum=0;
/* Drive the handshake to completion first if one is in progress. */
if (SSL_in_init(s) && !s->in_handshake)
{
i=s->handshake_func(s);
if (i < 0) return(i);
if (i == 0)
{
SSLerr(SSL_F_SSL3_WRITE_BYTES,SSL_R_SSL_HANDSHAKE_FAILURE);
return -1;
}
}
n=(len-tot);
for (;;)
{
/* Pick the per-record plaintext cap for this fragment. */
if (type == SSL3_RT_APPLICATION_DATA && (SSL_get_mode(s) & SSL_MODE_SMALL_BUFFERS))
max_plain_length = SSL3_RT_DEFAULT_PLAIN_LENGTH;
else
max_plain_length = s->max_send_fragment;
if (n > max_plain_length)
nw = max_plain_length;
else
nw=n;
i=do_ssl3_write(s, type, &(buf[tot]), nw, 0);
if (i <= 0)
{
/* Stash progress for the retry and propagate the error. */
s->s3->wnum=tot;
return i;
}
if ((i == (int)n) ||
(type == SSL3_RT_APPLICATION_DATA &&
(s->mode & SSL_MODE_ENABLE_PARTIAL_WRITE)))
{
/* next chunk of data should get another prepended empty fragment
* in ciphersuites with known-IV weakness: */
s->s3->empty_fragment_done = 0;
return tot+i;
}
n-=i;
tot+=i;
}
}
/*
 * Build and send a single record of 'type' containing 'len' bytes of
 * 'buf': compress, MAC, encrypt into the write buffer, then hand off to
 * ssl3_write_pending(). When 'create_empty_fragment' is set (used only
 * by the recursive call below), the record is an empty fragment that is
 * buffered but not sent — the CBC known-IV countermeasure.
 * Returns bytes accepted (> 0), 0, or a negative error/would-block code.
 */
static int do_ssl3_write(SSL *s, int type, const unsigned char *buf,
unsigned int len, int create_empty_fragment)
{
unsigned char *p,*plen;
int i,mac_size,clear=0;
int prefix_len=0;
long align=0;
SSL3_RECORD *wr;
SSL3_BUFFER *wb=&(s->s3->wbuf);
SSL_SESSION *sess;
/* Lazily allocate the write buffer on first use. */
if (wb->buf == NULL)
if (!ssl3_setup_write_buffer(s))
return -1;
/* first check if there is a SSL3_BUFFER still being written
* out. This will happen with non blocking IO */
if (wb->left != 0)
return(ssl3_write_pending(s,type,buf,len));
/* If we have an alert to send, lets send it */
if (s->s3->alert_dispatch)
{
i=s->method->ssl_dispatch_alert(s);
if (i <= 0)
return(i);
/* if it went, fall through and send more stuff */
}
if (len == 0 && !create_empty_fragment)
return 0;
wr= &(s->s3->wrec);
sess=s->session;
/* 'clear' means no cipher/MAC is active yet for the write direction. */
if ( (sess == NULL) ||
(s->enc_write_ctx == NULL) ||
(EVP_MD_CTX_md(s->write_hash) == NULL))
clear=1;
if (clear)
mac_size=0;
else
{
mac_size=EVP_MD_CTX_size(s->write_hash);
if (mac_size < 0)
goto err;
}
/* 'create_empty_fragment' is true only when this function calls itself */
if (!clear && !create_empty_fragment && !s->s3->empty_fragment_done)
{
/* countermeasure against known-IV weakness in CBC ciphersuites
* (see http://www.openssl.org/~bodo/tls-cbc.txt) */
if (s->s3->need_empty_fragments && type == SSL3_RT_APPLICATION_DATA)
{
/* recursive function call with 'create_empty_fragment' set;
* this prepares and buffers the data for an empty fragment
* (these 'prefix_len' bytes are sent out later
* together with the actual payload) */
prefix_len = do_ssl3_write(s, type, buf, 0, 1);
if (prefix_len <= 0)
goto err;
if (prefix_len >
(SSL3_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD))
{
/* insufficient space */
SSLerr(SSL_F_DO_SSL3_WRITE, ERR_R_INTERNAL_ERROR);
goto err;
}
}
s->s3->empty_fragment_done = 1;
}
/* resize if necessary to hold the data. */
if (len + SSL3_RT_DEFAULT_WRITE_OVERHEAD > wb->len)
{
if ((p=OPENSSL_realloc(wb->buf, len + SSL3_RT_DEFAULT_WRITE_OVERHEAD))==NULL)
{
SSLerr(SSL_F_DO_SSL3_WRITE,ERR_R_MALLOC_FAILURE);
goto err;
}
wb->buf = p;
wb->len = len + SSL3_RT_DEFAULT_WRITE_OVERHEAD;
}
/* Position 'p' where this record's header will be written. */
if (create_empty_fragment)
{
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
/* extra fragment would be couple of cipher blocks,
* which would be multiple of SSL3_ALIGN_PAYLOAD, so
* if we want to align the real payload, then we can
* just pretent we simply have two headers. */
align = (long)wb->buf + 2*SSL3_RT_HEADER_LENGTH;
align = (-align)&(SSL3_ALIGN_PAYLOAD-1);
#endif
p = wb->buf + align;
wb->offset = align;
}
else if (prefix_len)
{
p = wb->buf + wb->offset + prefix_len;
}
else
{
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
align = (long)wb->buf + SSL3_RT_HEADER_LENGTH;
align = (-align)&(SSL3_ALIGN_PAYLOAD-1);
#endif
p = wb->buf + align;
wb->offset = align;
}
/* write the header */
*(p++)=type&0xff;
wr->type=type;
*(p++)=(s->version>>8);
*(p++)=s->version&0xff;
/* field where we are to write out packet length */
plen=p;
p+=2;
/* lets setup the record stuff. */
wr->data=p;
wr->length=(int)len;
wr->input=(unsigned char *)buf;
/* we now 'read' from wr->input, wr->length bytes into
* wr->data */
/* first we compress */
if (s->compress != NULL)
{
if (!ssl3_do_compress(s))
{
SSLerr(SSL_F_DO_SSL3_WRITE,SSL_R_COMPRESSION_FAILURE);
goto err;
}
}
else
{
memcpy(wr->data,wr->input,wr->length);
wr->input=wr->data;
}
/* we should still have the output to wr->data and the input
* from wr->input. Length should be wr->length.
* wr->data still points in the wb->buf */
/* Append the MAC directly after the (compressed) payload. */
if (mac_size != 0)
{
if (s->method->ssl3_enc->mac(s,&(p[wr->length]),1) < 0)
goto err;
wr->length+=mac_size;
wr->input=p;
wr->data=p;
}
/* ssl3_enc can only have an error on read */
s->method->ssl3_enc->enc(s,1);
/* record length after mac and block padding */
s2n(wr->length,plen);
/* we should now have
* wr->data pointing to the encrypted data, which is
* wr->length long */
wr->type=type; /* not needed but helps for debugging */
wr->length+=SSL3_RT_HEADER_LENGTH;
if (create_empty_fragment)
{
/* we are in a recursive call;
* just return the length, don't write out anything here
*/
return wr->length;
}
/* now let's set up wb */
wb->left = prefix_len + wr->length;
/* memorize arguments so that ssl3_write_pending can detect bad write retries later */
s->s3->wpend_tot=len;
s->s3->wpend_buf=buf;
s->s3->wpend_type=type;
s->s3->wpend_ret=len;
/* we now just need to write the buffer */
return ssl3_write_pending(s,type,buf,len);
err:
return -1;
}
/* if s->s3->wbuf.left != 0, we need to call this */
int ssl3_write_pending(SSL *s, int type, const unsigned char *buf,
unsigned int len)
{
int i;
SSL3_BUFFER *wb=&(s->s3->wbuf);
/* XXXX */
if ((s->s3->wpend_tot > (int)len)
|| ((s->s3->wpend_buf != buf) &&
!(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER))
|| (s->s3->wpend_type != type))
{
SSLerr(SSL_F_SSL3_WRITE_PENDING,SSL_R_BAD_WRITE_RETRY);
return(-1);
}
for (;;)
{
clear_sys_error();
if (s->wbio != NULL)
{
s->rwstate=SSL_WRITING;
i=BIO_write(s->wbio,
(char *)&(wb->buf[wb->offset]),
(unsigned int)wb->left);
}
else
{
SSLerr(SSL_F_SSL3_WRITE_PENDING,SSL_R_BIO_NOT_SET);
i= -1;
}
if (i == wb->left)
{
wb->left=0;
wb->offset+=i;
if (s->mode & SSL_MODE_RELEASE_BUFFERS &&
SSL_version(s) != DTLS1_VERSION && SSL_version(s) != DTLS1_BAD_VER)
ssl3_release_write_buffer(s);
s->rwstate=SSL_NOTHING;
return(s->s3->wpend_ret);
}
else if (i <= 0) {
if (s->version == DTLS1_VERSION ||
s->version == DTLS1_BAD_VER) {
/* For DTLS, just drop it. That's kind of the whole
point in using a datagram service */
wb->left = 0;
}
return(i);
}
wb->offset+=i;
wb->left-=i;
}
}
/* Return up to 'len' payload bytes received in 'type' records.
* 'type' is one of the following:
*
* - SSL3_RT_HANDSHAKE (when ssl3_get_message calls us)
* - SSL3_RT_APPLICATION_DATA (when ssl3_read calls us)
* - 0 (during a shutdown, no data has to be returned)
*
* If we don't have stored data to work from, read a SSL/TLS record first
* (possibly multiple records if we still don't have anything to return).
*
* This function must handle any surprises the peer may have for us, such as
* Alert records (e.g. close_notify), ChangeCipherSpec records (not really
* a surprise, but handled as if it were), or renegotiation requests.
* Also if record payloads contain fragments too small to process, we store
* them until there is enough for the respective protocol (the record protocol
* may use arbitrary fragmentation and even interleaving):
* Change cipher spec protocol
* just 1 byte needed, no need for keeping anything stored
* Alert protocol
* 2 bytes needed (AlertLevel, AlertDescription)
* Handshake protocol
* 4 bytes needed (HandshakeType, uint24 length) -- we just have
* to detect unexpected Client Hello and Hello Request messages
* here, anything else is handled by higher layers
* Application data protocol
* none of our business
*/
int ssl3_read_bytes(SSL *s, int type, unsigned char *buf, int len, int peek)
{
int al,i,j,ret;
unsigned int n;
SSL3_RECORD *rr;
void (*cb)(const SSL *ssl,int type2,int val)=NULL;
if (s->s3->rbuf.buf == NULL) /* Not initialized yet */
if (!ssl3_setup_read_buffer(s))
return(-1);
if ((type && (type != SSL3_RT_APPLICATION_DATA) && (type != SSL3_RT_HANDSHAKE) && type) ||
(peek && (type != SSL3_RT_APPLICATION_DATA)))
{
SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
return -1;
}
if ((type == SSL3_RT_HANDSHAKE) && (s->s3->handshake_fragment_len > 0))
/* (partially) satisfy request from storage */
{
unsigned char *src = s->s3->handshake_fragment;
unsigned char *dst = buf;
unsigned int k;
/* peek == 0 */
n = 0;
while ((len > 0) && (s->s3->handshake_fragment_len > 0))
{
*dst++ = *src++;
len--; s->s3->handshake_fragment_len--;
n++;
}
/* move any remaining fragment bytes: */
for (k = 0; k < s->s3->handshake_fragment_len; k++)
s->s3->handshake_fragment[k] = *src++;
return n;
}
/* Now s->s3->handshake_fragment_len == 0 if type == SSL3_RT_HANDSHAKE. */
if (!s->in_handshake && SSL_in_init(s))
{
/* type == SSL3_RT_APPLICATION_DATA */
i=s->handshake_func(s);
if (i < 0) return(i);
if (i == 0)
{
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_SSL_HANDSHAKE_FAILURE);
return(-1);
}
}
start:
s->rwstate=SSL_NOTHING;
/* s->s3->rrec.type - is the type of record
* s->s3->rrec.data, - data
* s->s3->rrec.off, - offset into 'data' for next read
* s->s3->rrec.length, - number of bytes. */
rr = &(s->s3->rrec);
/* get new packet if necessary */
if ((rr->length == 0) || (s->rstate == SSL_ST_READ_BODY))
{
ret=ssl3_get_record(s);
if (ret <= 0) return(ret);
}
/* we now have a packet which can be read and processed */
if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec,
* reset by ssl3_get_finished */
&& (rr->type != SSL3_RT_HANDSHAKE))
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_DATA_BETWEEN_CCS_AND_FINISHED);
goto f_err;
}
/* If the other end has shut down, throw anything we read away
* (even in 'peek' mode) */
if (s->shutdown & SSL_RECEIVED_SHUTDOWN)
{
rr->length=0;
s->rwstate=SSL_NOTHING;
return(0);
}
if (type == rr->type) /* SSL3_RT_APPLICATION_DATA or SSL3_RT_HANDSHAKE */
{
/* make sure that we are not getting application data when we
* are doing a handshake for the first time */
if (SSL_in_init(s) && (type == SSL3_RT_APPLICATION_DATA) &&
(s->enc_read_ctx == NULL))
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_APP_DATA_IN_HANDSHAKE);
goto f_err;
}
if (len <= 0) return(len);
if ((unsigned int)len > rr->length)
n = rr->length;
else
n = (unsigned int)len;
memcpy(buf,&(rr->data[rr->off]),n);
if (!peek)
{
rr->length-=n;
rr->off+=n;
if (rr->length == 0)
{
s->rstate=SSL_ST_READ_HEADER;
rr->off=0;
if (s->mode & SSL_MODE_RELEASE_BUFFERS)
ssl3_release_read_buffer(s);
}
}
return(n);
}
/* If we get here, then type != rr->type; if we have a handshake
* message, then it was unexpected (Hello Request or Client Hello). */
/* In case of record types for which we have 'fragment' storage,
* fill that so that we can process the data at a fixed place.
*/
{
unsigned int dest_maxlen = 0;
unsigned char *dest = NULL;
unsigned int *dest_len = NULL;
if (rr->type == SSL3_RT_HANDSHAKE)
{
dest_maxlen = sizeof s->s3->handshake_fragment;
dest = s->s3->handshake_fragment;
dest_len = &s->s3->handshake_fragment_len;
}
else if (rr->type == SSL3_RT_ALERT)
{
dest_maxlen = sizeof s->s3->alert_fragment;
dest = s->s3->alert_fragment;
dest_len = &s->s3->alert_fragment_len;
}
if (dest_maxlen > 0)
{
n = dest_maxlen - *dest_len; /* available space in 'dest' */
if (rr->length < n)
n = rr->length; /* available bytes */
/* now move 'n' bytes: */
while (n-- > 0)
{
dest[(*dest_len)++] = rr->data[rr->off++];
rr->length--;
}
if (*dest_len < dest_maxlen)
goto start; /* fragment was too small */
}
}
/* s->s3->handshake_fragment_len == 4 iff rr->type == SSL3_RT_HANDSHAKE;
* s->s3->alert_fragment_len == 2 iff rr->type == SSL3_RT_ALERT.
* (Possibly rr is 'empty' now, i.e. rr->length may be 0.) */
/* If we are a client, check for an incoming 'Hello Request': */
if ((!s->server) &&
(s->s3->handshake_fragment_len >= 4) &&
(s->s3->handshake_fragment[0] == SSL3_MT_HELLO_REQUEST) &&
(s->session != NULL) && (s->session->cipher != NULL))
{
s->s3->handshake_fragment_len = 0;
if ((s->s3->handshake_fragment[1] != 0) ||
(s->s3->handshake_fragment[2] != 0) ||
(s->s3->handshake_fragment[3] != 0))
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_BAD_HELLO_REQUEST);
goto f_err;
}
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE, s->s3->handshake_fragment, 4, s, s->msg_callback_arg);
if (SSL_is_init_finished(s) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS) &&
!s->s3->renegotiate)
{
ssl3_renegotiate(s);
if (ssl3_renegotiate_check(s))
{
i=s->handshake_func(s);
if (i < 0) return(i);
if (i == 0)
{
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_SSL_HANDSHAKE_FAILURE);
return(-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY))
{
if (s->s3->rbuf.left == 0) /* no read-ahead left? */
{
BIO *bio;
/* In the case where we try to read application data,
* but we trigger an SSL handshake, we return -1 with
* the retry option set. Otherwise renegotiation may
* cause nasty problems in the blocking world */
s->rwstate=SSL_READING;
bio=SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return(-1);
}
}
}
}
/* we either finished a handshake or ignored the request,
* now try again to obtain the (application) data we were asked for */
goto start;
}
/* If we are a server and get a client hello when renegotiation isn't
* allowed send back a no renegotiation alert and carry on.
* WARNING: experimental code, needs reviewing (steve)
*/
if (s->server &&
SSL_is_init_finished(s) &&
!s->s3->send_connection_binding &&
(s->version > SSL3_VERSION) &&
(s->s3->handshake_fragment_len >= 4) &&
(s->s3->handshake_fragment[0] == SSL3_MT_CLIENT_HELLO) &&
(s->session != NULL) && (s->session->cipher != NULL) &&
!(s->ctx->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
{
/*s->s3->handshake_fragment_len = 0;*/
rr->length = 0;
ssl3_send_alert(s,SSL3_AL_WARNING, SSL_AD_NO_RENEGOTIATION);
goto start;
}
if (s->s3->alert_fragment_len >= 2)
{
int alert_level = s->s3->alert_fragment[0];
int alert_descr = s->s3->alert_fragment[1];
s->s3->alert_fragment_len = 0;
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_ALERT, s->s3->alert_fragment, 2, s, s->msg_callback_arg);
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
if (cb != NULL)
{
j = (alert_level << 8) | alert_descr;
cb(s, SSL_CB_READ_ALERT, j);
}
if (alert_level == 1) /* warning */
{
s->s3->warn_alert = alert_descr;
if (alert_descr == SSL_AD_CLOSE_NOTIFY)
{
s->shutdown |= SSL_RECEIVED_SHUTDOWN;
return(0);
}
/* This is a warning but we receive it if we requested
* renegotiation and the peer denied it. Terminate with
* a fatal alert because if application tried to
* renegotiatie it presumably had a good reason and
* expects it to succeed.
*
* In future we might have a renegotiation where we
* don't care if the peer refused it where we carry on.
*/
else if (alert_descr == SSL_AD_NO_RENEGOTIATION)
{
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_NO_RENEGOTIATION);
goto f_err;
}
}
else if (alert_level == 2) /* fatal */
{
char tmp[16];
s->rwstate=SSL_NOTHING;
s->s3->fatal_alert = alert_descr;
SSLerr(SSL_F_SSL3_READ_BYTES, SSL_AD_REASON_OFFSET + alert_descr);
BIO_snprintf(tmp,sizeof tmp,"%d",alert_descr);
ERR_add_error_data(2,"SSL alert number ",tmp);
s->shutdown|=SSL_RECEIVED_SHUTDOWN;
SSL_CTX_remove_session(s->ctx,s->session);
return(0);
}
else
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_UNKNOWN_ALERT_TYPE);
goto f_err;
}
goto start;
}
if (s->shutdown & SSL_SENT_SHUTDOWN) /* but we have not received a shutdown */
{
s->rwstate=SSL_NOTHING;
rr->length=0;
return(0);
}
if (rr->type == SSL3_RT_CHANGE_CIPHER_SPEC)
{
/* 'Change Cipher Spec' is just a single byte, so we know
* exactly what the record payload has to look like */
if ( (rr->length != 1) || (rr->off != 0) ||
(rr->data[0] != SSL3_MT_CCS))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_BAD_CHANGE_CIPHER_SPEC);
goto f_err;
}
/* Check we have a cipher to change to */
if (s->s3->tmp.new_cipher == NULL)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_CCS_RECEIVED_EARLY);
goto f_err;
}
rr->length=0;
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_CHANGE_CIPHER_SPEC, rr->data, 1, s, s->msg_callback_arg);
s->s3->change_cipher_spec=1;
if (!ssl3_do_change_cipher_spec(s))
goto err;
else
goto start;
}
/* Unexpected handshake message (Client Hello, or protocol violation) */
if ((s->s3->handshake_fragment_len >= 4) && !s->in_handshake)
{
if (((s->state&SSL_ST_MASK) == SSL_ST_OK) &&
!(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS))
{
#if 0 /* worked only because C operator preferences are not as expected (and
* because this is not really needed for clients except for detecting
* protocol violations): */
s->state=SSL_ST_BEFORE|(s->server)
?SSL_ST_ACCEPT
:SSL_ST_CONNECT;
#else
s->state = s->server ? SSL_ST_ACCEPT : SSL_ST_CONNECT;
#endif
s->new_session=1;
}
i=s->handshake_func(s);
if (i < 0) return(i);
if (i == 0)
{
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_SSL_HANDSHAKE_FAILURE);
return(-1);
}
if (!(s->mode & SSL_MODE_AUTO_RETRY))
{
if (s->s3->rbuf.left == 0) /* no read-ahead left? */
{
BIO *bio;
/* In the case where we try to read application data,
* but we trigger an SSL handshake, we return -1 with
* the retry option set. Otherwise renegotiation may
* cause nasty problems in the blocking world */
s->rwstate=SSL_READING;
bio=SSL_get_rbio(s);
BIO_clear_retry_flags(bio);
BIO_set_retry_read(bio);
return(-1);
}
}
goto start;
}
switch (rr->type)
{
default:
#ifndef OPENSSL_NO_TLS
/* TLS just ignores unknown message types */
if (s->version == TLS1_VERSION)
{
rr->length = 0;
goto start;
}
#endif
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_UNEXPECTED_RECORD);
goto f_err;
case SSL3_RT_CHANGE_CIPHER_SPEC:
case SSL3_RT_ALERT:
case SSL3_RT_HANDSHAKE:
/* we already handled all of these, with the possible exception
* of SSL3_RT_HANDSHAKE when s->in_handshake is set, but that
* should not happen when type != rr->type */
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,ERR_R_INTERNAL_ERROR);
goto f_err;
case SSL3_RT_APPLICATION_DATA:
/* At this point, we were expecting handshake data,
* but have application data. If the library was
* running inside ssl3_read() (i.e. in_read_app_data
* is set) and it makes sense to read application data
* at this point (session renegotiation not yet started),
* we will indulge it.
*/
if (s->s3->in_read_app_data &&
(s->s3->total_renegotiations != 0) &&
((
(s->state & SSL_ST_CONNECT) &&
(s->state >= SSL3_ST_CW_CLNT_HELLO_A) &&
(s->state <= SSL3_ST_CR_SRVR_HELLO_A)
) || (
(s->state & SSL_ST_ACCEPT) &&
(s->state <= SSL3_ST_SW_HELLO_REQ_A) &&
(s->state >= SSL3_ST_SR_CLNT_HELLO_A)
)
))
{
s->s3->in_read_app_data=2;
return(-1);
}
else
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_READ_BYTES,SSL_R_UNEXPECTED_RECORD);
goto f_err;
}
}
/* not reached */
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
return(-1);
}
/*
 * Switch this connection's READ direction over to the pending cipher state
 * after a ChangeCipherSpec message has been received.
 *
 * Returns 1 on success, 0 on error (an SSLerr is raised for the
 * CCS-received-early case; other failures come from the enc-method hooks).
 */
int ssl3_do_change_cipher_spec(SSL *s)
	{
	int i;
#ifdef OPENSSL_NO_NEXTPROTONEG
	const char *sender;
	int slen;
#endif

	/* Pick the read-side direction constant matching our role
	 * (server reads the client's keys and vice versa). */
	if (s->state & SSL_ST_ACCEPT)
		i=SSL3_CHANGE_CIPHER_SERVER_READ;
	else
		i=SSL3_CHANGE_CIPHER_CLIENT_READ;

	/* The key block is derived lazily, on the first CCS processed for
	 * this handshake. */
	if (s->s3->tmp.key_block == NULL)
		{
		if (s->session == NULL)
			{
			/* might happen if dtls1_read_bytes() calls this */
			SSLerr(SSL_F_SSL3_DO_CHANGE_CIPHER_SPEC,SSL_R_CCS_RECEIVED_EARLY);
			return (0);
			}

		/* Commit the negotiated cipher to the session before deriving
		 * key material from it. */
		s->session->cipher=s->s3->tmp.new_cipher;
		if (!s->method->ssl3_enc->setup_key_block(s)) return(0);
		}

	/* Install the pending read cipher state. */
	if (!s->method->ssl3_enc->change_cipher_state(s,i))
		return(0);

#ifdef OPENSSL_NO_NEXTPROTONEG
	/* we have to record the message digest at
	 * this point so we can get it before we read
	 * the finished message */
	/* NOTE(review): this branch is compiled only when NPN support is
	 * disabled; with NPN enabled the peer Finished digest is presumably
	 * recorded elsewhere — confirm against the NPN code path. */
	if (s->state & SSL_ST_CONNECT)
		{
		/* As a client we will verify the server's Finished label. */
		sender=s->method->ssl3_enc->server_finished_label;
		slen=s->method->ssl3_enc->server_finished_label_len;
		}
	else
		{
		/* As a server we will verify the client's Finished label. */
		sender=s->method->ssl3_enc->client_finished_label;
		slen=s->method->ssl3_enc->client_finished_label_len;
		}

	/* Snapshot the handshake digest the peer's Finished must match. */
	s->s3->tmp.peer_finish_md_len = s->method->ssl3_enc->final_finish_mac(s,
		sender,slen,s->s3->tmp.peer_finish_md);
#endif

	return(1);
	}
/*
 * Queue an SSL3/TLS alert of the given level and description and try to
 * dispatch it immediately.  Returns the dispatch result, or -1 when the
 * alert could not be sent right away (it remains queued for later).
 */
int ssl3_send_alert(SSL *s, int level, int desc)
	{
	/* Translate the generic alert code into this protocol version's value. */
	desc = s->method->ssl3_enc->alert_value(desc);

	/* SSL 3.0 has no protocol_version alert; downgrade to handshake_failure. */
	if (s->version == SSL3_VERSION && desc == SSL_AD_PROTOCOL_VERSION)
		desc = SSL_AD_HANDSHAKE_FAILURE;

	if (desc < 0)
		return -1;

	/* A fatal alert invalidates the session, so drop it from the cache. */
	if ((level == 2) && (s->session != NULL))
		SSL_CTX_remove_session(s->ctx, s->session);

	s->s3->send_alert[0] = level;
	s->s3->send_alert[1] = desc;
	s->s3->alert_dispatch = 1;

	if (s->s3->wbuf.left != 0)
		{
		/* Data is still being written out; the alert will be
		 * dispatched once the write buffer drains. */
		return -1;
		}

	/* Write buffer is empty: dispatch the alert now. */
	return s->method->ssl_dispatch_alert(s);
	}
/*
 * Actually write the queued alert record.  On a failed/short write the
 * alert is re-queued for a later attempt; on success a fatal alert is
 * flushed and the message/info callbacks are notified.  Returns the
 * result of do_ssl3_write().
 */
int ssl3_dispatch_alert(SSL *s)
	{
	int written, code;
	void (*cb)(const SSL *ssl, int type, int val) = NULL;

	s->s3->alert_dispatch = 0;
	written = do_ssl3_write(s, SSL3_RT_ALERT, &s->s3->send_alert[0], 2, 0);
	if (written <= 0)
		{
		/* Could not hand the record to the BIO; mark it for retry. */
		s->s3->alert_dispatch = 1;
		return written;
		}

	/* Alert sent to BIO.  If it is important, flush it now.  If the
	 * message does not get sent due to non-blocking IO, we will not
	 * worry too much. */
	if (s->s3->send_alert[0] == SSL3_AL_FATAL)
		(void)BIO_flush(s->wbio);

	if (s->msg_callback)
		s->msg_callback(1, s->version, SSL3_RT_ALERT, s->s3->send_alert, 2, s, s->msg_callback_arg);

	/* Prefer the per-connection info callback over the context-wide one. */
	if (s->info_callback != NULL)
		cb = s->info_callback;
	else if (s->ctx->info_callback != NULL)
		cb = s->ctx->info_callback;

	if (cb != NULL)
		{
		/* Pack level and description into one value, as for read alerts. */
		code = (s->s3->send_alert[0] << 8) | s->s3->send_alert[1];
		cb(s, SSL_CB_WRITE_ALERT, code);
		}

	return written;
	}
| {
"pile_set_name": "Github"
} |
// Patch-style module augmentation: merging this declaration into the
// Observable<T> interface makes the `expand` operator visible on
// Observable instances; the call signature itself is defined in
// '../../operator/expand' (ExpandSignature).
import { ExpandSignature } from '../../operator/expand';

declare module '../../Observable' {
  interface Observable<T> {
    // Signature re-exported from the operator implementation module.
    expand: ExpandSignature<T>;
  }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright (C) 2013 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<resources xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
<string name="app_label" msgid="3701846017049540910">"Shell"</string>
<string name="bugreport_notification_channel" msgid="2574150205913861141">"Қате туралы есептер"</string>
<string name="bugreport_in_progress_title" msgid="4311705936714972757">"<xliff:g id="ID">#%d</xliff:g> қате туралы есебі жасалуда"</string>
<string name="bugreport_finished_title" msgid="4429132808670114081">"<xliff:g id="ID">#%d</xliff:g> қате туралы есебі жазып алынды"</string>
<string name="bugreport_updating_title" msgid="4423539949559634214">"Қате туралы есепке мәліметтер қосылуда"</string>
<string name="bugreport_updating_wait" msgid="3322151947853929470">"Күте тұрыңыз…"</string>
<string name="bugreport_finished_text" product="watch" msgid="1223616207145252689">"Көп ұзамай қате туралы есеп телефон экранына шығады"</string>
<string name="bugreport_finished_text" product="tv" msgid="5758325479058638893">"Қате туралы есепті бөлісу үшін таңдаңыз"</string>
<string name="bugreport_finished_text" product="default" msgid="8353769438382138847">"Қате туралы есепті бөлісу үшін түртіңіз"</string>
<string name="bugreport_finished_pending_screenshot_text" product="tv" msgid="2343263822812016950">"Қате туралы есепті скриншотсыз бөлісу үшін таңдаңыз немесе скриншот түсіріліп болғанша күтіңіз"</string>
<string name="bugreport_finished_pending_screenshot_text" product="watch" msgid="1474435374470177193">"Қате туралы есепті скриншотсыз бөлісу үшін түртіңіз немесе скриншот сақталып болғанша күтіңіз"</string>
<string name="bugreport_finished_pending_screenshot_text" product="default" msgid="1474435374470177193">"Қате туралы есепті скриншотсыз бөлісу үшін түртіңіз немесе скриншот сақталып болғанша күтіңіз"</string>
<string name="bugreport_confirm" msgid="5917407234515812495">"Қате туралы есептерде жүйенің түрлі журнал файлдарының деректері қамтылады. Оларда сіз құпия деп есептейтін деректер (мысалы, қолданбаны пайдалану және орналасқан жер деректері) болуы мүмкін. Қате туралы есептерді тек сенімді адамдармен және қолданбалармен бөлісіңіз."</string>
<string name="bugreport_confirm_dont_repeat" msgid="6179945398364357318">"Қайтадан көрсетілмесін"</string>
<string name="bugreport_storage_title" msgid="5332488144740527109">"Қате туралы есептер"</string>
<string name="bugreport_unreadable_text" msgid="586517851044535486">"Қате туралы есеп файлын оқу мүмкін болмады"</string>
<string name="bugreport_add_details_to_zip_failed" msgid="1302931926486712371">"Қате туралы есеп мәліметтері zip файлына салынбады"</string>
<string name="bugreport_unnamed" msgid="2800582406842092709">"атаусыз"</string>
<string name="bugreport_info_action" msgid="2158204228510576227">"Мәліметтер"</string>
<string name="bugreport_screenshot_action" msgid="8677781721940614995">"Скриншот"</string>
<string name="bugreport_screenshot_taken" msgid="5684211273096253120">"Скриншот сәтті түсірілді."</string>
<string name="bugreport_screenshot_failed" msgid="5853049140806834601">"Скриншот түсіру мүмкін болмады."</string>
<string name="bugreport_info_dialog_title" msgid="1355948594292983332">"<xliff:g id="ID">#%d</xliff:g> қате туралы есебі туралы мәліметтер"</string>
<string name="bugreport_info_name" msgid="4414036021935139527">"Файл атауы"</string>
<string name="bugreport_info_title" msgid="2306030793918239804">"Қатенің атауы"</string>
<string name="bugreport_info_description" msgid="5072835127481627722">"Қате туралы жиынтық мәліметтер"</string>
<string name="save" msgid="4781509040564835759">"Сақтау"</string>
<string name="bugreport_intent_chooser_title" msgid="7605709494790894076">"Қате туралы есеп бөлістірілсін бе?"</string>
</resources>
| {
"pile_set_name": "Github"
} |
package native
import "strconv"
// Client caps - borrowed from GoMySQL
const (
_CLIENT_LONG_PASSWORD = 1 << iota // new more secure passwords
_CLIENT_FOUND_ROWS // Found instead of affected rows
_CLIENT_LONG_FLAG // Get all column flags
_CLIENT_CONNECT_WITH_DB // One can specify db on connect
_CLIENT_NO_SCHEMA // Don't allow database.table.column
_CLIENT_COMPRESS // Can use compression protocol
_CLIENT_ODBC // Odbc client
_CLIENT_LOCAL_FILES // Can use LOAD DATA LOCAL
_CLIENT_IGNORE_SPACE // Ignore spaces before '('
_CLIENT_PROTOCOL_41 // New 4.1 protocol
_CLIENT_INTERACTIVE // This is an interactive client
_CLIENT_SSL // Switch to SSL after handshake
_CLIENT_IGNORE_SIGPIPE // IGNORE sigpipes
_CLIENT_TRANSACTIONS // Client knows about transactions
_CLIENT_RESERVED // Old flag for 4.1 protocol
_CLIENT_SECURE_CONN // New 4.1 authentication
_CLIENT_MULTI_STATEMENTS // Enable/disable multi-stmt support
_CLIENT_MULTI_RESULTS // Enable/disable multi-results
)
// Commands - borrowed from GoMySQL.
//
// Command codes (COM_*) of the MySQL client/server protocol, sent by the
// client at the start of a command packet.
const (
	_COM_QUIT                = 0x01
	_COM_INIT_DB             = 0x02
	_COM_QUERY               = 0x03
	_COM_FIELD_LIST          = 0x04
	_COM_CREATE_DB           = 0x05
	_COM_DROP_DB             = 0x06
	_COM_REFRESH             = 0x07
	_COM_SHUTDOWN            = 0x08
	_COM_STATISTICS          = 0x09
	_COM_PROCESS_INFO        = 0x0a
	_COM_CONNECT             = 0x0b
	_COM_PROCESS_KILL        = 0x0c
	_COM_DEBUG               = 0x0d
	_COM_PING                = 0x0e
	_COM_TIME                = 0x0f
	_COM_DELAYED_INSERT      = 0x10
	_COM_CHANGE_USER         = 0x11
	_COM_BINLOG_DUMP         = 0x12
	_COM_TABLE_DUMP          = 0x13
	_COM_CONNECT_OUT         = 0x14
	_COM_REGISTER_SLAVE      = 0x15
	// Prepared-statement commands follow.
	_COM_STMT_PREPARE        = 0x16
	_COM_STMT_EXECUTE        = 0x17
	_COM_STMT_SEND_LONG_DATA = 0x18
	_COM_STMT_CLOSE          = 0x19
	_COM_STMT_RESET          = 0x1a
	_COM_SET_OPTION          = 0x1b
	_COM_STMT_FETCH          = 0x1c
)
// MySQL protocol types.
//
// mymysql uses only some of them to send data to the MySQL server.  The
// types it uses are marked with a comment containing the mymysql type that
// maps to them.
const (
	MYSQL_TYPE_DECIMAL     = 0x00
	MYSQL_TYPE_TINY        = 0x01 // int8, uint8, bool
	MYSQL_TYPE_SHORT       = 0x02 // int16, uint16
	MYSQL_TYPE_LONG        = 0x03 // int32, uint32
	MYSQL_TYPE_FLOAT       = 0x04 // float32
	MYSQL_TYPE_DOUBLE      = 0x05 // float64
	MYSQL_TYPE_NULL        = 0x06 // nil
	MYSQL_TYPE_TIMESTAMP   = 0x07 // Timestamp
	MYSQL_TYPE_LONGLONG    = 0x08 // int64, uint64
	MYSQL_TYPE_INT24       = 0x09
	MYSQL_TYPE_DATE        = 0x0a // Date
	MYSQL_TYPE_TIME        = 0x0b // Time
	MYSQL_TYPE_DATETIME    = 0x0c // time.Time
	MYSQL_TYPE_YEAR        = 0x0d
	MYSQL_TYPE_NEWDATE     = 0x0e
	MYSQL_TYPE_VARCHAR     = 0x0f
	MYSQL_TYPE_BIT         = 0x10
	MYSQL_TYPE_NEWDECIMAL  = 0xf6
	MYSQL_TYPE_ENUM        = 0xf7
	MYSQL_TYPE_SET         = 0xf8
	MYSQL_TYPE_TINY_BLOB   = 0xf9
	MYSQL_TYPE_MEDIUM_BLOB = 0xfa
	MYSQL_TYPE_LONG_BLOB   = 0xfb
	MYSQL_TYPE_BLOB        = 0xfc // Blob
	MYSQL_TYPE_VAR_STRING  = 0xfd // []byte
	MYSQL_TYPE_STRING      = 0xfe // string
	MYSQL_TYPE_GEOMETRY    = 0xff

	// High bit of a 16-bit type/flags value; presumably OR-ed into the
	// type to mark unsigned variants — confirm against the field codecs.
	MYSQL_UNSIGNED_MASK = uint16(1 << 15)
)

// Mapping of MySQL types to (preferred) protocol types.  Use it if you
// create your own Raw value.
//
// Comments contain the corresponding types used by mymysql.  The string
// type may be replaced by the []byte type and vice versa.  []byte is the
// native type for sending on a network, so any string is converted to it
// before sending; for better performance use []byte directly.
const (
	// Client send and receive, mymysql representation for send / receive
	TINYINT   = MYSQL_TYPE_TINY      // int8 / int8
	SMALLINT  = MYSQL_TYPE_SHORT     // int16 / int16
	INT       = MYSQL_TYPE_LONG      // int32 / int32
	BIGINT    = MYSQL_TYPE_LONGLONG  // int64 / int64
	FLOAT     = MYSQL_TYPE_FLOAT     // float32 / float32
	DOUBLE    = MYSQL_TYPE_DOUBLE    // float64 / float32
	TIME      = MYSQL_TYPE_TIME      // Time / Time
	DATE      = MYSQL_TYPE_DATE      // Date / Date
	DATETIME  = MYSQL_TYPE_DATETIME  // time.Time / time.Time
	TIMESTAMP = MYSQL_TYPE_TIMESTAMP // Timestamp / time.Time
	CHAR      = MYSQL_TYPE_STRING    // string / []byte
	BLOB      = MYSQL_TYPE_BLOB      // Blob / []byte
	NULL      = MYSQL_TYPE_NULL      // nil

	// Client send only, mymysql representation for send
	OUT_TEXT      = MYSQL_TYPE_STRING // string
	OUT_VARCHAR   = MYSQL_TYPE_STRING // string
	OUT_BINARY    = MYSQL_TYPE_BLOB   // Blob
	OUT_VARBINARY = MYSQL_TYPE_BLOB   // Blob

	// Client receive only, mymysql representation for receive
	IN_MEDIUMINT  = MYSQL_TYPE_LONG        // int32
	IN_YEAR       = MYSQL_TYPE_SHORT       // int16
	IN_BINARY     = MYSQL_TYPE_STRING      // []byte
	IN_VARCHAR    = MYSQL_TYPE_VAR_STRING  // []byte
	IN_VARBINARY  = MYSQL_TYPE_VAR_STRING  // []byte
	IN_TINYBLOB   = MYSQL_TYPE_TINY_BLOB   // []byte
	IN_TINYTEXT   = MYSQL_TYPE_TINY_BLOB   // []byte
	IN_TEXT       = MYSQL_TYPE_BLOB        // []byte
	IN_MEDIUMBLOB = MYSQL_TYPE_MEDIUM_BLOB // []byte
	IN_MEDIUMTEXT = MYSQL_TYPE_MEDIUM_BLOB // []byte
	IN_LONGBLOB   = MYSQL_TYPE_LONG_BLOB   // []byte
	IN_LONGTEXT   = MYSQL_TYPE_LONG_BLOB   // []byte

	// MySQL 5.x specific
	IN_DECIMAL = MYSQL_TYPE_NEWDECIMAL // TODO
	IN_BIT     = MYSQL_TYPE_BIT        // []byte
)
// Flags - borrowed from GoMySQL.
//
// Column definition flags returned with result-set field metadata; each
// constant is a single bit (1 << iota).
const (
	_FLAG_NOT_NULL = 1 << iota
	_FLAG_PRI_KEY
	_FLAG_UNIQUE_KEY
	_FLAG_MULTIPLE_KEY
	_FLAG_BLOB
	_FLAG_UNSIGNED
	_FLAG_ZEROFILL
	_FLAG_BINARY
	_FLAG_ENUM
	_FLAG_AUTO_INCREMENT
	_FLAG_TIMESTAMP
	_FLAG_SET
	_FLAG_NO_DEFAULT_VALUE
)
// _SIZE_OF_INT and _INT_TYPE describe how Go's platform-dependent int maps
// onto the MySQL wire protocol on this platform; both are fixed once at
// package initialization and never change afterwards.
var (
	_SIZE_OF_INT int
	_INT_TYPE    uint16
)

// init derives the MySQL protocol type and byte width corresponding to the
// native int size (32- or 64-bit); any other size is unsupported.
func init() {
	bits := strconv.IntSize
	if bits == 32 {
		_INT_TYPE = MYSQL_TYPE_LONG
		_SIZE_OF_INT = 4
	} else if bits == 64 {
		_INT_TYPE = MYSQL_TYPE_LONGLONG
		_SIZE_OF_INT = 8
	} else {
		panic("bad int size")
	}
}
| {
"pile_set_name": "Github"
} |
package ingester
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/weaveworks/common/httpgrpc"
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/user"
"google.golang.org/grpc"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/cortexproject/cortex/pkg/util/test"
"github.com/cortexproject/cortex/pkg/util/validation"
)
// TestIngester_v2Push exercises the blocks-storage (v2) Push path end to
// end: it pushes series and metadata, reads back what was really ingested
// via v2Query / MetricsMetadata, and asserts on the Prometheus metrics the
// ingester exposes — including the soft-failure paths (out-of-order,
// out-of-bounds and duplicate-timestamp samples).
func TestIngester_v2Push(t *testing.T) {
	// A single series ({__name__="test"}) is reused by all test cases.
	metricLabelAdapters := []client.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
	metricLabels := client.FromLabelAdaptersToLabels(metricLabelAdapters)

	// Metric families compared against expectedMetrics in every case;
	// additionalMetrics extends this list per case.
	metricNames := []string{
		"cortex_ingester_ingested_samples_total",
		"cortex_ingester_ingested_samples_failures_total",
		"cortex_ingester_memory_series",
		"cortex_ingester_memory_users",
		"cortex_ingester_memory_series_created_total",
		"cortex_ingester_memory_series_removed_total",
		"cortex_discarded_samples_total",
		"cortex_ingester_active_series",
	}
	userID := "test"

	// Table-driven cases.  reqs are pushed in order; only the LAST request
	// is allowed (and expected) to return expectedErr.
	tests := map[string]struct {
		reqs                     []*client.WriteRequest
		expectedErr              error
		expectedIngested         []client.TimeSeries
		expectedMetadataIngested []*client.MetricMetadata
		expectedMetrics          string
		additionalMetrics        []string
		disableActiveSeries      bool
	}{
		"should succeed on valid series and metadata": {
			reqs: []*client.WriteRequest{
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 1, TimestampMs: 9}},
					[]*client.MetricMetadata{
						{MetricName: "metric_name_1", Help: "a help for metric_name_1", Unit: "", Type: client.COUNTER},
					},
					client.API),
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 2, TimestampMs: 10}},
					[]*client.MetricMetadata{
						{MetricName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: client.GAUGE},
					},
					client.API),
			},
			expectedErr: nil,
			expectedIngested: []client.TimeSeries{
				{Labels: metricLabelAdapters, Samples: []client.Sample{{Value: 1, TimestampMs: 9}, {Value: 2, TimestampMs: 10}}},
			},
			// Order is irrelevant here: compared with ElementsMatch below.
			expectedMetadataIngested: []*client.MetricMetadata{
				{MetricName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: client.GAUGE},
				{MetricName: "metric_name_1", Help: "a help for metric_name_1", Unit: "", Type: client.COUNTER},
			},
			additionalMetrics: []string{
				// Metadata.
				"cortex_ingester_memory_metadata",
				"cortex_ingester_memory_metadata_created_total",
				"cortex_ingester_ingested_metadata_total",
				"cortex_ingester_ingested_metadata_failures_total",
			},
			expectedMetrics: `
# HELP cortex_ingester_ingested_metadata_failures_total The total number of metadata that errored on ingestion.
# TYPE cortex_ingester_ingested_metadata_failures_total counter
cortex_ingester_ingested_metadata_failures_total 0
# HELP cortex_ingester_ingested_metadata_total The total number of metadata ingested.
# TYPE cortex_ingester_ingested_metadata_total counter
cortex_ingester_ingested_metadata_total 2
# HELP cortex_ingester_memory_metadata The current number of metadata in memory.
# TYPE cortex_ingester_memory_metadata gauge
cortex_ingester_memory_metadata 2
# HELP cortex_ingester_memory_metadata_created_total The total number of metadata that were created per user
# TYPE cortex_ingester_memory_metadata_created_total counter
cortex_ingester_memory_metadata_created_total{user="test"} 2
# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
# TYPE cortex_ingester_ingested_samples_total counter
cortex_ingester_ingested_samples_total 2
# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
# TYPE cortex_ingester_ingested_samples_failures_total counter
cortex_ingester_ingested_samples_failures_total 0
# HELP cortex_ingester_memory_users The current number of users in memory.
# TYPE cortex_ingester_memory_users gauge
cortex_ingester_memory_users 1
# HELP cortex_ingester_memory_series The current number of series in memory.
# TYPE cortex_ingester_memory_series gauge
cortex_ingester_memory_series 1
# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
# TYPE cortex_ingester_memory_series_created_total counter
cortex_ingester_memory_series_created_total{user="test"} 1
# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
# TYPE cortex_ingester_memory_series_removed_total counter
cortex_ingester_memory_series_removed_total{user="test"} 0
# HELP cortex_ingester_active_series Number of currently active series per user.
# TYPE cortex_ingester_active_series gauge
cortex_ingester_active_series{user="test"} 1
`,
		},
		"successful push, active series disabled": {
			disableActiveSeries: true,
			reqs: []*client.WriteRequest{
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 1, TimestampMs: 9}},
					nil,
					client.API),
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 2, TimestampMs: 10}},
					nil,
					client.API),
			},
			expectedErr: nil,
			expectedIngested: []client.TimeSeries{
				{Labels: metricLabelAdapters, Samples: []client.Sample{{Value: 1, TimestampMs: 9}, {Value: 2, TimestampMs: 10}}},
			},
			expectedMetrics: `
# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
# TYPE cortex_ingester_ingested_samples_total counter
cortex_ingester_ingested_samples_total 2
# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
# TYPE cortex_ingester_ingested_samples_failures_total counter
cortex_ingester_ingested_samples_failures_total 0
# HELP cortex_ingester_memory_users The current number of users in memory.
# TYPE cortex_ingester_memory_users gauge
cortex_ingester_memory_users 1
# HELP cortex_ingester_memory_series The current number of series in memory.
# TYPE cortex_ingester_memory_series gauge
cortex_ingester_memory_series 1
# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
# TYPE cortex_ingester_memory_series_created_total counter
cortex_ingester_memory_series_created_total{user="test"} 1
# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
# TYPE cortex_ingester_memory_series_removed_total counter
cortex_ingester_memory_series_removed_total{user="test"} 0
`,
		},
		"should soft fail on sample out of order": {
			reqs: []*client.WriteRequest{
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 2, TimestampMs: 10}},
					nil,
					client.API),
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 1, TimestampMs: 9}},
					nil,
					client.API),
			},
			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(errors.Wrapf(storage.ErrOutOfOrderSample, "series=%s, timestamp=%s", metricLabels.String(), model.Time(9).Time().UTC().Format(time.RFC3339Nano)), userID).Error()),
			expectedIngested: []client.TimeSeries{
				{Labels: metricLabelAdapters, Samples: []client.Sample{{Value: 2, TimestampMs: 10}}},
			},
			expectedMetrics: `
# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
# TYPE cortex_ingester_ingested_samples_total counter
cortex_ingester_ingested_samples_total 1
# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
# TYPE cortex_ingester_ingested_samples_failures_total counter
cortex_ingester_ingested_samples_failures_total 1
# HELP cortex_ingester_memory_users The current number of users in memory.
# TYPE cortex_ingester_memory_users gauge
cortex_ingester_memory_users 1
# HELP cortex_ingester_memory_series The current number of series in memory.
# TYPE cortex_ingester_memory_series gauge
cortex_ingester_memory_series 1
# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
# TYPE cortex_ingester_memory_series_created_total counter
cortex_ingester_memory_series_created_total{user="test"} 1
# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
# TYPE cortex_ingester_memory_series_removed_total counter
cortex_ingester_memory_series_removed_total{user="test"} 0
# HELP cortex_discarded_samples_total The total number of samples that were discarded.
# TYPE cortex_discarded_samples_total counter
cortex_discarded_samples_total{reason="sample-out-of-order",user="test"} 1
# HELP cortex_ingester_active_series Number of currently active series per user.
# TYPE cortex_ingester_active_series gauge
cortex_ingester_active_series{user="test"} 1
`,
		},
		"should soft fail on sample out of bound": {
			reqs: []*client.WriteRequest{
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 2, TimestampMs: 1575043969}},
					nil,
					client.API),
				// Second sample is one day older than the first: outside
				// the accepted time window.
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 1, TimestampMs: 1575043969 - (86400 * 1000)}},
					nil,
					client.API),
			},
			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(errors.Wrapf(storage.ErrOutOfBounds, "series=%s, timestamp=%s", metricLabels.String(), model.Time(1575043969-(86400*1000)).Time().UTC().Format(time.RFC3339Nano)), userID).Error()),
			expectedIngested: []client.TimeSeries{
				{Labels: metricLabelAdapters, Samples: []client.Sample{{Value: 2, TimestampMs: 1575043969}}},
			},
			expectedMetrics: `
# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
# TYPE cortex_ingester_ingested_samples_total counter
cortex_ingester_ingested_samples_total 1
# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
# TYPE cortex_ingester_ingested_samples_failures_total counter
cortex_ingester_ingested_samples_failures_total 1
# HELP cortex_ingester_memory_users The current number of users in memory.
# TYPE cortex_ingester_memory_users gauge
cortex_ingester_memory_users 1
# HELP cortex_ingester_memory_series The current number of series in memory.
# TYPE cortex_ingester_memory_series gauge
cortex_ingester_memory_series 1
# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
# TYPE cortex_ingester_memory_series_created_total counter
cortex_ingester_memory_series_created_total{user="test"} 1
# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
# TYPE cortex_ingester_memory_series_removed_total counter
cortex_ingester_memory_series_removed_total{user="test"} 0
# HELP cortex_discarded_samples_total The total number of samples that were discarded.
# TYPE cortex_discarded_samples_total counter
cortex_discarded_samples_total{reason="sample-out-of-bounds",user="test"} 1
# HELP cortex_ingester_active_series Number of currently active series per user.
# TYPE cortex_ingester_active_series gauge
cortex_ingester_active_series{user="test"} 1
`,
		},
		"should soft fail on two different sample values at the same timestamp": {
			reqs: []*client.WriteRequest{
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 2, TimestampMs: 1575043969}},
					nil,
					client.API),
				client.ToWriteRequest(
					[]labels.Labels{metricLabels},
					[]client.Sample{{Value: 1, TimestampMs: 1575043969}},
					nil,
					client.API),
			},
			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(errors.Wrapf(storage.ErrDuplicateSampleForTimestamp, "series=%s, timestamp=%s", metricLabels.String(), model.Time(1575043969).Time().UTC().Format(time.RFC3339Nano)), userID).Error()),
			expectedIngested: []client.TimeSeries{
				{Labels: metricLabelAdapters, Samples: []client.Sample{{Value: 2, TimestampMs: 1575043969}}},
			},
			expectedMetrics: `
# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
# TYPE cortex_ingester_ingested_samples_total counter
cortex_ingester_ingested_samples_total 1
# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
# TYPE cortex_ingester_ingested_samples_failures_total counter
cortex_ingester_ingested_samples_failures_total 1
# HELP cortex_ingester_memory_users The current number of users in memory.
# TYPE cortex_ingester_memory_users gauge
cortex_ingester_memory_users 1
# HELP cortex_ingester_memory_series The current number of series in memory.
# TYPE cortex_ingester_memory_series gauge
cortex_ingester_memory_series 1
# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
# TYPE cortex_ingester_memory_series_created_total counter
cortex_ingester_memory_series_created_total{user="test"} 1
# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
# TYPE cortex_ingester_memory_series_removed_total counter
cortex_ingester_memory_series_removed_total{user="test"} 0
# HELP cortex_discarded_samples_total The total number of samples that were discarded.
# TYPE cortex_discarded_samples_total counter
cortex_discarded_samples_total{reason="new-value-for-timestamp",user="test"} 1
# HELP cortex_ingester_active_series Number of currently active series per user.
# TYPE cortex_ingester_active_series gauge
cortex_ingester_active_series{user="test"} 1
`,
		},
	}

	for testName, testData := range tests {
		t.Run(testName, func(t *testing.T) {
			// Fresh registry per test so metric assertions only see this
			// ingester; DiscardedSamples is global and must be reset.
			registry := prometheus.NewRegistry()
			registry.MustRegister(validation.DiscardedSamples)
			validation.DiscardedSamples.Reset()

			// Create a mocked ingester
			cfg := defaultIngesterTestConfig()
			cfg.LifecyclerConfig.JoinAfter = 0
			cfg.ActiveSeriesMetricsEnabled = !testData.disableActiveSeries

			i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, registry)
			require.NoError(t, err)
			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
			defer cleanup()

			ctx := user.InjectOrgID(context.Background(), userID)

			// Wait until the ingester is ACTIVE
			test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
				return i.lifecycler.GetState()
			})

			// Push timeseries
			for idx, req := range testData.reqs {
				_, err := i.v2Push(ctx, req)

				// We expect no error on any request except the last one
				// which may error (and in that case we assert on it)
				if idx < len(testData.reqs)-1 {
					assert.NoError(t, err)
				} else {
					assert.Equal(t, testData.expectedErr, err)
				}
			}

			// Read back samples to see what has been really ingested
			res, err := i.v2Query(ctx, &client.QueryRequest{
				StartTimestampMs: math.MinInt64,
				EndTimestampMs:   math.MaxInt64,
				Matchers:         []*client.LabelMatcher{{Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}},
			})
			require.NoError(t, err)
			require.NotNil(t, res)
			assert.Equal(t, testData.expectedIngested, res.Timeseries)

			// Read back metadata to see what has been really ingested.
			mres, err := i.MetricsMetadata(ctx, &client.MetricsMetadataRequest{})

			require.NoError(t, err)
			require.NotNil(t, res)

			// Order is never guaranteed.
			assert.ElementsMatch(t, testData.expectedMetadataIngested, mres.Metadata)

			// Update active series for metrics check.
			if !testData.disableActiveSeries {
				i.v2UpdateActiveSeries()
			}

			// Append additional metrics to assert on.
			mn := append(metricNames, testData.additionalMetrics...)

			// Check tracked Prometheus metrics
			err = testutil.GatherAndCompare(registry, strings.NewReader(testData.expectedMetrics), mn...)
			assert.NoError(t, err)
		})
	}
}
// TestIngester_v2Push_ShouldHandleTheCaseTheCachedReferenceIsInvalid verifies
// that pushes still succeed when the per-series reference cache holds a stale
// (invalid) series reference.
func TestIngester_v2Push_ShouldHandleTheCaseTheCachedReferenceIsInvalid(t *testing.T) {
	metricLabelAdapters := []client.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
	metricLabels := client.FromLabelAdaptersToLabels(metricLabelAdapters)

	// Start a mocked ingester configured to join the ring immediately.
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0

	ing, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	ctx := user.InjectOrgID(context.Background(), userID)

	// Wait until the ingester has joined the ring as ACTIVE.
	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
		return ing.lifecycler.GetState()
	})

	// Seed the reference cache with a bogus reference for the series we push.
	db, err := ing.getOrCreateTSDB(userID, false)
	require.NoError(t, err)
	require.NotNil(t, db)
	db.refCache.SetRef(time.Now(), metricLabels, 12345)

	// Push the same series three times with increasing timestamps, poisoning
	// the cached reference before each push. The first push goes through
	// "initAppender", the following ones through "headAppender"; they return
	// ErrNotFound differently, so both AddFast recovery paths are exercised.
	for n := 1; n <= 3; n++ {
		req := client.ToWriteRequest(
			[]labels.Labels{metricLabels},
			[]client.Sample{{Value: float64(n), TimestampMs: int64(n)}},
			nil,
			client.API)

		_, err := ing.v2Push(ctx, req)
		require.NoError(t, err)

		db.refCache.SetRef(time.Now(), metricLabels, 12345)
	}

	// All three samples must have been ingested despite the stale references.
	res, err := ing.v2Query(ctx, &client.QueryRequest{
		StartTimestampMs: math.MinInt64,
		EndTimestampMs:   math.MaxInt64,
		Matchers:         []*client.LabelMatcher{{Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}},
	})
	require.NoError(t, err)
	require.NotNil(t, res)
	assert.Equal(t, []client.TimeSeries{
		{Labels: metricLabelAdapters, Samples: []client.Sample{
			{Value: 1, TimestampMs: 1},
			{Value: 2, TimestampMs: 2},
			{Value: 3, TimestampMs: 3},
		}},
	}, res.Timeseries)
}
// TestIngester_v2Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario pushes
// the same series for two different tenants and asserts that the ingester's
// own Prometheus metrics (ingested samples, memory series/users, active
// series) are tracked both globally and per tenant.
func TestIngester_v2Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *testing.T) {
	metricLabelAdapters := []client.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
	metricLabels := client.FromLabelAdaptersToLabels(metricLabelAdapters)
	// Only these metric families are compared against the expected exposition below.
	metricNames := []string{
		"cortex_ingester_ingested_samples_total",
		"cortex_ingester_ingested_samples_failures_total",
		"cortex_ingester_memory_series",
		"cortex_ingester_memory_users",
		"cortex_ingester_memory_series_created_total",
		"cortex_ingester_memory_series_removed_total",
		"cortex_ingester_active_series",
	}
	registry := prometheus.NewRegistry()
	// Create a mocked ingester
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, registry)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()
	// Wait until the ingester is ACTIVE
	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	// Push timeseries for each user: two requests per tenant, one sample each
	// (2 tenants x 2 samples = 4 ingested samples expected below).
	for _, userID := range []string{"test-1", "test-2"} {
		reqs := []*client.WriteRequest{
			client.ToWriteRequest(
				[]labels.Labels{metricLabels},
				[]client.Sample{{Value: 1, TimestampMs: 9}},
				nil,
				client.API),
			client.ToWriteRequest(
				[]labels.Labels{metricLabels},
				[]client.Sample{{Value: 2, TimestampMs: 10}},
				nil,
				client.API),
		}
		for _, req := range reqs {
			ctx := user.InjectOrgID(context.Background(), userID)
			_, err := i.v2Push(ctx, req)
			require.NoError(t, err)
		}
	}
	// Update active series for metrics check.
	i.v2UpdateActiveSeries()
	// Check tracked Prometheus metrics
	expectedMetrics := `
		# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
		# TYPE cortex_ingester_ingested_samples_total counter
		cortex_ingester_ingested_samples_total 4
		# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
		# TYPE cortex_ingester_ingested_samples_failures_total counter
		cortex_ingester_ingested_samples_failures_total 0
		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 2
		# HELP cortex_ingester_memory_series The current number of series in memory.
		# TYPE cortex_ingester_memory_series gauge
		cortex_ingester_memory_series 2
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="test-1"} 1
		cortex_ingester_memory_series_created_total{user="test-2"} 1
		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="test-1"} 0
		cortex_ingester_memory_series_removed_total{user="test-2"} 0
		# HELP cortex_ingester_active_series Number of currently active series per user.
		# TYPE cortex_ingester_active_series gauge
		cortex_ingester_active_series{user="test-1"} 1
		cortex_ingester_active_series{user="test-2"} 1
	`
	assert.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(expectedMetrics), metricNames...))
}
// TestIngester_v2Push_DecreaseInactiveSeries verifies that series which have
// not received samples for longer than ActiveSeriesMetricsIdleTimeout are
// dropped from the cortex_ingester_active_series gauge on the next update.
// NOTE(review): this test is timing-based (100ms idle timeout + 200ms sleep)
// and could be flaky on a heavily loaded machine.
func TestIngester_v2Push_DecreaseInactiveSeries(t *testing.T) {
	metricLabelAdapters := []client.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
	metricLabels := client.FromLabelAdaptersToLabels(metricLabelAdapters)
	metricNames := []string{
		"cortex_ingester_memory_series_created_total",
		"cortex_ingester_memory_series_removed_total",
		"cortex_ingester_active_series",
	}
	registry := prometheus.NewRegistry()
	// Create a mocked ingester with a very short active-series idle timeout,
	// so the series pushed below become inactive quickly.
	cfg := defaultIngesterTestConfig()
	cfg.ActiveSeriesMetricsIdleTimeout = 100 * time.Millisecond
	cfg.LifecyclerConfig.JoinAfter = 0
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, registry)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()
	// Wait until the ingester is ACTIVE
	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	// Push timeseries for each user
	for _, userID := range []string{"test-1", "test-2"} {
		reqs := []*client.WriteRequest{
			client.ToWriteRequest(
				[]labels.Labels{metricLabels},
				[]client.Sample{{Value: 1, TimestampMs: 9}},
				nil,
				client.API),
			client.ToWriteRequest(
				[]labels.Labels{metricLabels},
				[]client.Sample{{Value: 2, TimestampMs: 10}},
				nil,
				client.API),
		}
		for _, req := range reqs {
			ctx := user.InjectOrgID(context.Background(), userID)
			_, err := i.v2Push(ctx, req)
			require.NoError(t, err)
		}
	}
	// Wait a bit to make series inactive (set to 100ms above).
	time.Sleep(200 * time.Millisecond)
	// Update active series for metrics check. This will remove inactive series.
	i.v2UpdateActiveSeries()
	// Check tracked Prometheus metrics: series are still in memory (created=1,
	// removed=0) but no longer counted as active.
	expectedMetrics := `
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="test-1"} 1
		cortex_ingester_memory_series_created_total{user="test-2"} 1
		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="test-1"} 0
		cortex_ingester_memory_series_removed_total{user="test-2"} 0
		# HELP cortex_ingester_active_series Number of currently active series per user.
		# TYPE cortex_ingester_active_series gauge
		cortex_ingester_active_series{user="test-1"} 0
		cortex_ingester_active_series{user="test-2"} 0
	`
	assert.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(expectedMetrics), metricNames...))
}
// Benchmark_Ingester_v2PushOnOutOfBoundsSamplesWithHighConcurrency measures
// the push path under heavy concurrency when every sample is rejected as
// out-of-bounds (timestamp 0, before the TSDB min time set by the first push).
// NOTE(review): the workload is fixed-size — b.N is never used, so `go test
// -bench` timing is per fixed batch, not per op; confirm this is intentional.
func Benchmark_Ingester_v2PushOnOutOfBoundsSamplesWithHighConcurrency(b *testing.B) {
	const (
		numSamplesPerRequest = 1000
		numRequestsPerClient = 10
		numConcurrentClients = 10000
	)
	registry := prometheus.NewRegistry()
	ctx := user.InjectOrgID(context.Background(), userID)
	// Create a mocked ingester
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0
	ingester, cleanup, err := newIngesterMockWithTSDBStorage(cfg, registry)
	require.NoError(b, err)
	require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester))
	defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck
	defer cleanup()
	// Wait until the ingester is ACTIVE
	test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
		return ingester.lifecycler.GetState()
	})
	// Push a single time series to set the TSDB min time.
	metricLabelAdapters := []client.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
	metricLabels := client.FromLabelAdaptersToLabels(metricLabelAdapters)
	currTimeReq := client.ToWriteRequest(
		[]labels.Labels{metricLabels},
		[]client.Sample{{Value: 1, TimestampMs: util.TimeToMillis(time.Now())}},
		nil,
		client.API)
	_, err = ingester.v2Push(ctx, currTimeReq)
	require.NoError(b, err)
	// Prepare a request containing out of bound samples (TimestampMs: 0 is
	// older than the min time established above, so every sample fails).
	metrics := make([]labels.Labels, 0, numSamplesPerRequest)
	samples := make([]client.Sample, 0, numSamplesPerRequest)
	for i := 0; i < numSamplesPerRequest; i++ {
		metrics = append(metrics, metricLabels)
		samples = append(samples, client.Sample{Value: float64(i), TimestampMs: 0})
	}
	outOfBoundReq := client.ToWriteRequest(metrics, samples, nil, client.API)
	// Run the benchmark: all clients block on the start channel so they begin
	// pushing at the same instant.
	wg := sync.WaitGroup{}
	wg.Add(numConcurrentClients)
	start := make(chan struct{})
	for c := 0; c < numConcurrentClients; c++ {
		go func() {
			defer wg.Done()
			<-start
			for n := 0; n < numRequestsPerClient; n++ {
				ingester.v2Push(ctx, outOfBoundReq) // nolint:errcheck
			}
		}()
	}
	b.ResetTimer()
	close(start)
	wg.Wait()
}
// Test_Ingester_v2LabelNames pushes a few series and verifies that the
// ingester returns the full set of label names seen across all of them.
func Test_Ingester_v2LabelNames(t *testing.T) {
	fixtures := []struct {
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
	}
	expectedNames := []string{"__name__", "status", "route"}

	// Start the ingester and wait for it to become ACTIVE in the ring.
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return ing.lifecycler.GetState()
	})

	// Ingest every fixture series.
	ctx := user.InjectOrgID(context.Background(), "test")
	for _, f := range fixtures {
		req, _, _ := mockWriteRequest(f.lbls, f.value, f.timestamp)
		_, err := ing.v2Push(ctx, req)
		require.NoError(t, err)
	}

	// Read the label names back; ordering is not guaranteed.
	res, err := ing.v2LabelNames(ctx, &client.LabelNamesRequest{})
	require.NoError(t, err)
	assert.ElementsMatch(t, expectedNames, res.LabelNames)
}
// Test_Ingester_v2LabelValues pushes a few series and verifies the values
// returned for each label name, including an unknown label name which must
// yield an empty result.
func Test_Ingester_v2LabelValues(t *testing.T) {
	fixtures := []struct {
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
	}
	expected := map[string][]string{
		"__name__": {"test_1", "test_2"},
		"status":   {"200", "500"},
		"route":    {"get_user"},
		"unknown":  {},
	}

	// Start the ingester and wait for it to become ACTIVE in the ring.
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return ing.lifecycler.GetState()
	})

	// Ingest every fixture series.
	ctx := user.InjectOrgID(context.Background(), "test")
	for _, f := range fixtures {
		req, _, _ := mockWriteRequest(f.lbls, f.value, f.timestamp)
		_, err := ing.v2Push(ctx, req)
		require.NoError(t, err)
	}

	// Query the values of each label name; ordering is not guaranteed.
	for labelName, expectedValues := range expected {
		res, err := ing.v2LabelValues(ctx, &client.LabelValuesRequest{LabelName: labelName})
		require.NoError(t, err)
		assert.ElementsMatch(t, expectedValues, res.LabelValues)
	}
}
// Test_Ingester_v2Query pushes a fixed set of series and then runs a
// table-driven set of queries exercising every matcher type (==, !=, =~, !~),
// matcher combinations, and time-range filtering.
func Test_Ingester_v2Query(t *testing.T) {
	series := []struct {
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
	}
	tests := map[string]struct {
		from     int64
		to       int64
		matchers []*client.LabelMatcher
		expected []client.TimeSeries
	}{
		"should return an empty response if no metric matches": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "unknown"},
			},
			expected: []client.TimeSeries{},
		},
		"should filter series by == matcher": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[0].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 100000}}},
				{Labels: client.FromLabelsToLabelAdapters(series[1].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 110000}}},
			},
		},
		"should filter series by != matcher": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.NOT_EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[2].lbls), Samples: []client.Sample{{Value: 2, TimestampMs: 200000}}},
			},
		},
		"should filter series by =~ matcher": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: ".*_1"},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[0].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 100000}}},
				{Labels: client.FromLabelsToLabelAdapters(series[1].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 110000}}},
			},
		},
		"should filter series by !~ matcher": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.REGEX_NO_MATCH, Name: model.MetricNameLabel, Value: ".*_1"},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[2].lbls), Samples: []client.Sample{{Value: 2, TimestampMs: 200000}}},
			},
		},
		"should filter series by multiple matchers": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatcher{
				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
				{Type: client.REGEX_MATCH, Name: "status", Value: "5.."},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[1].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 110000}}},
			},
		},
		"should filter series by matcher and time range": {
			// The [100000, 100000] range includes only the first fixture sample.
			from: 100000,
			to:   100000,
			matchers: []*client.LabelMatcher{
				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
			},
			expected: []client.TimeSeries{
				{Labels: client.FromLabelsToLabelAdapters(series[0].lbls), Samples: []client.Sample{{Value: 1, TimestampMs: 100000}}},
			},
		},
	}
	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()
	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	// Push series
	ctx := user.InjectOrgID(context.Background(), "test")
	for _, series := range series {
		req, _, _ := mockWriteRequest(series.lbls, series.value, series.timestamp)
		_, err := i.v2Push(ctx, req)
		require.NoError(t, err)
	}
	// Run tests
	for testName, testData := range tests {
		t.Run(testName, func(t *testing.T) {
			req := &client.QueryRequest{
				StartTimestampMs: testData.from,
				EndTimestampMs:   testData.to,
				Matchers:         testData.matchers,
			}
			res, err := i.v2Query(ctx, req)
			require.NoError(t, err)
			assert.ElementsMatch(t, testData.expected, res.Timeseries)
		})
	}
}
// TestIngester_v2Query_ShouldNotCreateTSDBIfDoesNotExists ensures that a
// read-only query for a tenant with no data does not lazily create a TSDB.
func TestIngester_v2Query_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) {
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	// Query a tenant that has never pushed any series.
	tenantID := "test"
	ctx := user.InjectOrgID(context.Background(), tenantID)

	res, err := ing.v2Query(ctx, &client.QueryRequest{})
	require.NoError(t, err)
	assert.Equal(t, &client.QueryResponse{}, res)

	// The read path must not have created a per-tenant TSDB.
	_, tsdbCreated := ing.TSDBState.dbs[tenantID]
	assert.False(t, tsdbCreated)
}
// TestIngester_v2LabelValues_ShouldNotCreateTSDBIfDoesNotExists ensures that
// a label-values request for a tenant with no data does not lazily create a TSDB.
func TestIngester_v2LabelValues_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) {
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	// Query a tenant that has never pushed any series.
	tenantID := "test"
	ctx := user.InjectOrgID(context.Background(), tenantID)

	res, err := ing.v2LabelValues(ctx, &client.LabelValuesRequest{})
	require.NoError(t, err)
	assert.Equal(t, &client.LabelValuesResponse{}, res)

	// The read path must not have created a per-tenant TSDB.
	_, tsdbCreated := ing.TSDBState.dbs[tenantID]
	assert.False(t, tsdbCreated)
}
// TestIngester_v2LabelNames_ShouldNotCreateTSDBIfDoesNotExists ensures that
// a label-names request for a tenant with no data does not lazily create a TSDB.
func TestIngester_v2LabelNames_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) {
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()

	// Query a tenant that has never pushed any series.
	tenantID := "test"
	ctx := user.InjectOrgID(context.Background(), tenantID)

	res, err := ing.v2LabelNames(ctx, &client.LabelNamesRequest{})
	require.NoError(t, err)
	assert.Equal(t, &client.LabelNamesResponse{}, res)

	// The read path must not have created a per-tenant TSDB.
	_, tsdbCreated := ing.TSDBState.dbs[tenantID]
	assert.False(t, tsdbCreated)
}
// TestIngester_v2Push_ShouldNotCreateTSDBIfNotInActiveState verifies that a
// push received while the ingester is not ACTIVE is rejected and does not
// create a per-tenant TSDB.
func TestIngester_v2Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) {
	// Configure the lifecycler to not immediately join the ring, to make sure
	// the ingester will NOT be in the ACTIVE state when we'll push samples.
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 10 * time.Second

	ing, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
	defer cleanup()
	require.Equal(t, ring.PENDING, ing.lifecycler.GetState())

	tenantID := "test"
	ctx := user.InjectOrgID(context.Background(), tenantID)

	// The push must fail with the incompatible-state error and no response.
	res, err := ing.v2Push(ctx, &client.WriteRequest{})
	assert.Equal(t, wrapWithUser(fmt.Errorf(errTSDBCreateIncompatibleState, "PENDING"), tenantID).Error(), err.Error())
	assert.Nil(t, res)

	// No TSDB must have been created for the tenant.
	_, tsdbCreated := ing.TSDBState.dbs[tenantID]
	assert.False(t, tsdbCreated)
}
// TestIngester_getOrCreateTSDB_ShouldNotAllowToCreateTSDBIfIngesterStateIsNotActive
// checks that getOrCreateTSDB() refuses to create a new per-tenant TSDB while
// the ingester is in PENDING, JOINING or LEAVING state, and allows it in ACTIVE.
func TestIngester_getOrCreateTSDB_ShouldNotAllowToCreateTSDBIfIngesterStateIsNotActive(t *testing.T) {
	tests := map[string]struct {
		state       ring.IngesterState
		expectedErr error
	}{
		"not allow to create TSDB if in PENDING state": {
			state:       ring.PENDING,
			expectedErr: fmt.Errorf(errTSDBCreateIncompatibleState, ring.PENDING),
		},
		"not allow to create TSDB if in JOINING state": {
			state:       ring.JOINING,
			expectedErr: fmt.Errorf(errTSDBCreateIncompatibleState, ring.JOINING),
		},
		"not allow to create TSDB if in LEAVING state": {
			state:       ring.LEAVING,
			expectedErr: fmt.Errorf(errTSDBCreateIncompatibleState, ring.LEAVING),
		},
		"allow to create TSDB if in ACTIVE state": {
			state:       ring.ACTIVE,
			expectedErr: nil,
		},
	}
	for testName, testData := range tests {
		t.Run(testName, func(t *testing.T) {
			// Long JoinAfter keeps the ingester in PENDING so each test case
			// can drive the state transitions itself.
			cfg := defaultIngesterTestConfig()
			cfg.LifecyclerConfig.JoinAfter = 60 * time.Second
			i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
			require.NoError(t, err)
			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
			defer cleanup()
			// Switch ingester state to the expected one in the test.
			// NOTE(review): LEAVING is apparently not reachable directly from
			// PENDING, so the chain walks through ACTIVE first — confirm with
			// the lifecycler's allowed transitions.
			if i.lifecycler.GetState() != testData.state {
				var stateChain []ring.IngesterState
				if testData.state == ring.LEAVING {
					stateChain = []ring.IngesterState{ring.ACTIVE, ring.LEAVING}
				} else {
					stateChain = []ring.IngesterState{testData.state}
				}
				for _, s := range stateChain {
					err = i.lifecycler.ChangeState(context.Background(), s)
					require.NoError(t, err)
				}
			}
			db, err := i.getOrCreateTSDB("test", false)
			assert.Equal(t, testData.expectedErr, err)
			if testData.expectedErr != nil {
				assert.Nil(t, db)
			} else {
				assert.NotNil(t, db)
			}
		})
	}
}
// Test_Ingester_v2MetricsForLabelMatchers pushes a fixed set of series
// (including two whose FastFingerprint collide) and checks the series
// returned for various matcher sets: single/multiple matchers, overlapping
// matchers (deduplication), time-range handling, and fingerprint collisions.
func Test_Ingester_v2MetricsForLabelMatchers(t *testing.T) {
	fixtures := []struct {
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
		// The two following series have the same FastFingerprint=e002a3a451262627
		{labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
		{labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
	}
	tests := map[string]struct {
		from     int64
		to       int64
		matchers []*client.LabelMatchers
		expected []*client.Metric
	}{
		"should return an empty response if no metric match": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatchers{{
				Matchers: []*client.LabelMatcher{
					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "unknown"},
				},
			}},
			expected: []*client.Metric{},
		},
		"should filter metrics by single matcher": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatchers{{
				Matchers: []*client.LabelMatcher{
					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
				},
			}},
			expected: []*client.Metric{
				{Labels: client.FromLabelsToLabelAdapters(fixtures[0].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[1].lbls)},
			},
		},
		"should filter metrics by multiple matchers": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatchers{
				{
					Matchers: []*client.LabelMatcher{
						{Type: client.EQUAL, Name: "status", Value: "200"},
					},
				},
				{
					Matchers: []*client.LabelMatcher{
						{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_2"},
					},
				},
			},
			expected: []*client.Metric{
				{Labels: client.FromLabelsToLabelAdapters(fixtures[0].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[2].lbls)},
			},
		},
		"should NOT filter metrics by time range to always return known metrics even when queried for older time ranges": {
			from: 100,
			to:   1000,
			matchers: []*client.LabelMatchers{{
				Matchers: []*client.LabelMatcher{
					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
				},
			}},
			expected: []*client.Metric{
				{Labels: client.FromLabelsToLabelAdapters(fixtures[0].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[1].lbls)},
			},
		},
		"should not return duplicated metrics on overlapping matchers": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatchers{
				{
					Matchers: []*client.LabelMatcher{
						{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
					},
				},
				{
					Matchers: []*client.LabelMatcher{
						{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
					},
				},
			},
			expected: []*client.Metric{
				{Labels: client.FromLabelsToLabelAdapters(fixtures[0].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[1].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[2].lbls)},
			},
		},
		"should return all matching metrics even if their FastFingerprint collide": {
			from: math.MinInt64,
			to:   math.MaxInt64,
			matchers: []*client.LabelMatchers{{
				Matchers: []*client.LabelMatcher{
					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "collision"},
				},
			}},
			expected: []*client.Metric{
				{Labels: client.FromLabelsToLabelAdapters(fixtures[3].lbls)},
				{Labels: client.FromLabelsToLabelAdapters(fixtures[4].lbls)},
			},
		},
	}
	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()
	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	// Push fixtures
	ctx := user.InjectOrgID(context.Background(), "test")
	for _, series := range fixtures {
		req, _, _ := mockWriteRequest(series.lbls, series.value, series.timestamp)
		_, err := i.v2Push(ctx, req)
		require.NoError(t, err)
	}
	// Run tests
	for testName, testData := range tests {
		// Capture the loop variable for the subtest closure.
		testData := testData
		t.Run(testName, func(t *testing.T) {
			req := &client.MetricsForLabelMatchersRequest{
				StartTimestampMs: testData.from,
				EndTimestampMs:   testData.to,
				MatchersSet:      testData.matchers,
			}
			res, err := i.v2MetricsForLabelMatchers(ctx, req)
			require.NoError(t, err)
			assert.ElementsMatch(t, testData.expected, res.Metric)
		})
	}
}
// Test_Ingester_v2MetricsForLabelMatchers_Deduplication verifies that series
// matched by more than one matchers set appear only once in the response.
func Test_Ingester_v2MetricsForLabelMatchers_Deduplication(t *testing.T) {
	const (
		userID    = "test"
		numSeries = 100000
	)

	now := util.TimeToMillis(time.Now())
	ing := createIngesterWithSeries(t, userID, numSeries, now)
	ctx := user.InjectOrgID(context.Background(), "test")

	// The second matchers set is a subset of the first, so many series match
	// both sets; the response must still contain each series exactly once.
	req := &client.MetricsForLabelMatchersRequest{
		StartTimestampMs: now,
		EndTimestampMs:   now,
		MatchersSet: []*client.LabelMatchers{
			{Matchers: []*client.LabelMatcher{
				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
			}},
			{Matchers: []*client.LabelMatcher{
				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*0"},
			}},
		},
	}

	res, err := ing.v2MetricsForLabelMatchers(ctx, req)
	require.NoError(t, err)
	require.Len(t, res.GetMetric(), numSeries)
}
// Benchmark_Ingester_v2MetricsForLabelMatchers measures the series-lookup
// path against an ingester preloaded with 100k series.
func Benchmark_Ingester_v2MetricsForLabelMatchers(b *testing.B) {
	const (
		userID    = "test"
		numSeries = 100000
	)

	now := util.TimeToMillis(time.Now())
	ing := createIngesterWithSeries(b, userID, numSeries, now)
	ctx := user.InjectOrgID(context.Background(), "test")

	b.ResetTimer()

	for iter := 0; iter < b.N; iter++ {
		req := &client.MetricsForLabelMatchersRequest{
			StartTimestampMs: now,
			EndTimestampMs:   now,
			MatchersSet: []*client.LabelMatchers{{Matchers: []*client.LabelMatcher{
				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
			}}},
		}

		res, err := ing.v2MetricsForLabelMatchers(ctx, req)
		require.NoError(b, err)
		require.Len(b, res.GetMetric(), numSeries)
	}
}
// createIngesterWithSeries starts a mocked ingester and pushes numSeries
// distinct series for the given user, each with a single sample at the given
// timestamp. Cleanup is registered via t.Cleanup().
func createIngesterWithSeries(t testing.TB, userID string, numSeries int, timestamp int64) *Ingester {
	const maxBatchSize = 1000

	// Create and start the ingester; stop it when the test finishes.
	ing, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
	t.Cleanup(func() {
		require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing))
		cleanup()
	})

	// Wait until it has joined the ring as ACTIVE.
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return ing.lifecycler.GetState()
	})

	// Push the series in batches to keep each write request reasonably sized.
	ctx := user.InjectOrgID(context.Background(), userID)
	for offset := 0; offset < numSeries; offset += maxBatchSize {
		batchSize := util.Min(maxBatchSize, numSeries-offset)

		// One metric and one sample per series in this batch.
		metrics := make([]labels.Labels, 0, batchSize)
		samples := make([]client.Sample, 0, batchSize)
		for idx := 0; idx < batchSize; idx++ {
			metrics = append(metrics, labels.Labels{
				{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", offset+idx)},
			})
			samples = append(samples, client.Sample{
				TimestampMs: timestamp,
				Value:       1,
			})
		}

		_, err := ing.v2Push(ctx, client.ToWriteRequest(metrics, samples, nil, client.API))
		require.NoError(t, err)
	}

	return ing
}
// TestIngester_v2QueryStream pushes one series and reads it back over the
// gRPC QueryStream endpoint, asserting that exactly one time series is
// streamed and matches the expected response built by mockWriteRequest.
func TestIngester_v2QueryStream(t *testing.T) {
	// Create ingester.
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()
	// Wait until it's ACTIVE.
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	// Push series.
	ctx := user.InjectOrgID(context.Background(), userID)
	lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}}
	req, _, expectedResponse := mockWriteRequest(lbls, 123000, 456)
	_, err = i.v2Push(ctx, req)
	require.NoError(t, err)
	// Create a GRPC server used to query back the data. The stream interceptor
	// propagates the org ID from the request metadata to the handler.
	serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor))
	defer serv.GracefulStop()
	client.RegisterIngesterServer(serv, i)
	// Port 0 lets the OS pick a free port.
	listener, err := net.Listen("tcp", "localhost:0")
	require.NoError(t, err)
	go func() {
		require.NoError(t, serv.Serve(listener))
	}()
	// Query back the series using GRPC streaming.
	c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig())
	require.NoError(t, err)
	defer c.Close()
	s, err := c.QueryStream(ctx, &client.QueryRequest{
		StartTimestampMs: 0,
		EndTimestampMs:   200000,
		Matchers: []*client.LabelMatcher{{
			Type:  client.EQUAL,
			Name:  model.MetricNameLabel,
			Value: "foo",
		}},
	})
	require.NoError(t, err)
	// Drain the stream until EOF, counting series across all responses.
	count := 0
	var lastResp *client.QueryStreamResponse
	for {
		resp, err := s.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		count += len(resp.Timeseries)
		lastResp = resp
	}
	require.Equal(t, 1, count)
	require.Equal(t, expectedResponse, lastResp)
}
// TestIngester_v2QueryStreamManySamples pushes three series with a large
// number of samples and verifies that the streaming query API splits the
// result into multiple messages (batched by response size) without losing
// any series or samples.
func TestIngester_v2QueryStreamManySamples(t *testing.T) {
	// Create ingester.
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()

	// Wait until it's ACTIVE.
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push series.
	ctx := user.InjectOrgID(context.Background(), userID)

	const samplesCount = 100000
	samples := make([]client.Sample, 0, samplesCount)

	// NOTE(review): the loop variable i shadows the ingester i inside this
	// loop only; the pushes below refer to the ingester again.
	for i := 0; i < samplesCount; i++ {
		samples = append(samples, client.Sample{
			Value:       float64(i),
			TimestampMs: int64(i),
		})
	}

	// 10k samples encode to around 140 KiB.
	_, err = i.v2Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "1"}}, samples[0:10000]))
	require.NoError(t, err)

	// 100k samples encode to around 1.4 MiB.
	_, err = i.v2Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "2"}}, samples))
	require.NoError(t, err)

	// 50k samples encode to around 716 KiB.
	_, err = i.v2Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "3"}}, samples[0:50000]))
	require.NoError(t, err)

	// Create a GRPC server used to query back the data.
	serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor))
	defer serv.GracefulStop()
	client.RegisterIngesterServer(serv, i)

	listener, err := net.Listen("tcp", "localhost:0")
	require.NoError(t, err)

	go func() {
		require.NoError(t, serv.Serve(listener))
	}()

	// Query back the series using GRPC streaming.
	c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig())
	require.NoError(t, err)
	defer c.Close()

	s, err := c.QueryStream(ctx, &client.QueryRequest{
		StartTimestampMs: 0,
		EndTimestampMs:   samplesCount + 1,
		Matchers: []*client.LabelMatcher{{
			Type:  client.EQUAL,
			Name:  model.MetricNameLabel,
			Value: "foo",
		}},
	})
	require.NoError(t, err)

	// Tally messages, series and samples across the whole stream.
	recvMsgs := 0
	series := 0
	totalSamples := 0

	for {
		resp, err := s.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		require.True(t, len(resp.Timeseries) > 0) // No empty messages.

		recvMsgs++
		series += len(resp.Timeseries)

		for _, ts := range resp.Timeseries {
			totalSamples += len(ts.Samples)
		}
	}

	// As ingester doesn't guarantee sorting of series, we can get 2 (10k + 50k in first, 100k in second)
	// or 3 messages (small series first, 100k second, small series last).
	require.True(t, 2 <= recvMsgs && recvMsgs <= 3)
	require.Equal(t, 3, series)
	require.Equal(t, 10000+50000+samplesCount, totalSamples)
}
// writeRequestSingleSeries builds a write request carrying exactly one time
// series with the given labels and samples, sourced from the API.
func writeRequestSingleSeries(lbls labels.Labels, samples []client.Sample) *client.WriteRequest {
	series := client.TimeSeries{
		Labels:  client.FromLabelsToLabelAdapters(lbls),
		Samples: samples,
	}

	return &client.WriteRequest{
		Source:     client.API,
		Timeseries: []client.PreallocTimeseries{{TimeSeries: &series}},
	}
}
// mockQueryStreamServer is a no-op implementation of the ingester's
// QueryStream server stream, used to benchmark v2QueryStream without
// network or serialization overhead on the receive side.
type mockQueryStreamServer struct {
	grpc.ServerStream
	ctx context.Context // returned by Context(); carries the org ID
}
// Send discards the response, always succeeding.
func (m *mockQueryStreamServer) Send(response *client.QueryStreamResponse) error {
	return nil
}
// Context returns the context injected at construction time.
func (m *mockQueryStreamServer) Context() context.Context {
	return m.ctx
}
// BenchmarkIngester_v2QueryStream measures v2QueryStream over 100 series with
// 1000 samples each, streaming into a no-op mock server so only the ingester
// side is benchmarked.
func BenchmarkIngester_v2QueryStream(b *testing.B) {
	// Create ingester.
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(b, err)
	require.NoError(b, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()

	// Wait until it's ACTIVE.
	test.Poll(b, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push series.
	ctx := user.InjectOrgID(context.Background(), userID)

	const samplesCount = 1000
	samples := make([]client.Sample, 0, samplesCount)

	// NOTE(review): the loop variable i shadows the ingester i inside this
	// loop only.
	for i := 0; i < samplesCount; i++ {
		samples = append(samples, client.Sample{
			Value:       float64(i),
			TimestampMs: int64(i),
		})
	}

	const seriesCount = 100
	for s := 0; s < seriesCount; s++ {
		_, err = i.v2Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: strconv.Itoa(s)}}, samples))
		require.NoError(b, err)
	}

	// One request matching all pushed series; the mock stream discards output.
	req := &client.QueryRequest{
		StartTimestampMs: 0,
		EndTimestampMs:   samplesCount + 1,
		Matchers: []*client.LabelMatcher{{
			Type:  client.EQUAL,
			Name:  model.MetricNameLabel,
			Value: "foo",
		}},
	}

	mockStream := &mockQueryStreamServer{ctx: ctx}

	b.ResetTimer()

	for ix := 0; ix < b.N; ix++ {
		err := i.v2QueryStream(req, mockStream)
		require.NoError(b, err)
	}
}
// mockWriteRequest builds a single-sample write request for the given labels,
// together with the query response and query-stream response that reading the
// same data back is expected to produce.
func mockWriteRequest(lbls labels.Labels, value float64, timestampMs int64) (*client.WriteRequest, *client.QueryResponse, *client.QueryStreamResponse) {
	samples := []client.Sample{{TimestampMs: timestampMs, Value: value}}
	writeReq := client.ToWriteRequest([]labels.Labels{lbls}, samples, nil, client.API)

	// Expected response when querying the pushed data back.
	queryRes := &client.QueryResponse{
		Timeseries: []client.TimeSeries{
			{
				Labels:  client.FromLabelsToLabelAdapters(lbls),
				Samples: samples,
			},
		},
	}

	// Expected response when querying via the streaming API.
	queryStreamRes := &client.QueryStreamResponse{
		Timeseries: []client.TimeSeries{
			{
				Labels:  client.FromLabelsToLabelAdapters(lbls),
				Samples: samples,
			},
		},
	}

	return writeReq, queryRes, queryStreamRes
}
// newIngesterMockWithTSDBStorage creates a v2 (blocks) ingester backed by a
// fresh temporary TSDB directory. On success it returns the ingester and a
// cleanup function the caller must invoke (e.g. via defer or t.Cleanup) to
// remove the temporary directory. On error the temporary directory is removed
// here and a nil cleanup is returned.
func newIngesterMockWithTSDBStorage(ingesterCfg Config, registerer prometheus.Registerer) (*Ingester, func(), error) {
	// Create a temporary directory for TSDB.
	tempDir, err := ioutil.TempDir("", "tsdb")
	if err != nil {
		return nil, nil, err
	}

	// Create a cleanup function that the caller should call with defer.
	cleanup := func() {
		os.RemoveAll(tempDir)
	}

	ingester, err := newIngesterMockWithTSDBStorageAndLimits(ingesterCfg, defaultLimitsTestConfig(), tempDir, registerer)
	if err != nil {
		// Don't leak the temporary directory: callers typically abort on a
		// non-nil error without ever invoking the cleanup function.
		cleanup()
		return nil, nil, err
	}

	return ingester, cleanup, nil
}
// newIngesterMockWithTSDBStorageAndLimits creates a v2 (blocks) ingester with
// the given limits, storing its TSDB under dir. The bucket is configured as an
// S3 backend pointing at localhost; tests using this helper are not expected
// to actually reach the bucket.
func newIngesterMockWithTSDBStorageAndLimits(ingesterCfg Config, limits validation.Limits, dir string, registerer prometheus.Registerer) (*Ingester, error) {
	overrides, err := validation.NewOverrides(limits, nil)
	if err != nil {
		return nil, err
	}

	// Enable the blocks storage engine on top of the provided directory.
	ingesterCfg.BlocksStorageEnabled = true
	ingesterCfg.BlocksStorageConfig.TSDB.Dir = dir
	ingesterCfg.BlocksStorageConfig.Bucket.Backend = "s3"
	ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost"

	return NewV2(ingesterCfg, defaultClientTestConfig(), overrides, registerer)
}
// TestIngester_v2LoadTSDBOnStartup verifies which per-user TSDBs the ingester
// opens at startup given different on-disk layouts: empty user dirs and a
// missing root dir must not create TSDBs, while populated user dirs must.
func TestIngester_v2LoadTSDBOnStartup(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		setup func(*testing.T, string) // prepares the TSDB root dir before startup
		check func(*testing.T, *Ingester) // asserts on the started ingester
	}{
		"empty user dir": {
			setup: func(t *testing.T, dir string) {
				require.NoError(t, os.Mkdir(filepath.Join(dir, "user0"), 0700))
			},
			check: func(t *testing.T, i *Ingester) {
				require.Empty(t, i.getTSDB("user0"), "tsdb created for empty user dir")
			},
		},
		"empty tsdbs": {
			setup: func(t *testing.T, dir string) {},
			check: func(t *testing.T, i *Ingester) {
				require.Zero(t, len(i.TSDBState.dbs), "user tsdb's were created on empty dir")
			},
		},
		"missing tsdb dir": {
			setup: func(t *testing.T, dir string) {
				require.NoError(t, os.Remove(dir))
			},
			check: func(t *testing.T, i *Ingester) {
				require.Zero(t, len(i.TSDBState.dbs), "user tsdb's were created on missing dir")
			},
		},
		"populated user dirs with unpopulated": {
			setup: func(t *testing.T, dir string) {
				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
				require.NoError(t, os.Mkdir(filepath.Join(dir, "user2"), 0700))
			},
			check: func(t *testing.T, i *Ingester) {
				require.NotNil(t, i.getTSDB("user0"), "tsdb not created for non-empty user dir")
				require.NotNil(t, i.getTSDB("user1"), "tsdb not created for non-empty user dir")
				require.Empty(t, i.getTSDB("user2"), "tsdb created for empty user dir")
			},
		},
	}

	for name, test := range tests {
		// Capture range variables for the parallel-safe closure.
		testName := name
		testData := test

		t.Run(testName, func(t *testing.T) {
			clientCfg := defaultClientTestConfig()
			limits := defaultLimitsTestConfig()

			overrides, err := validation.NewOverrides(limits, nil)
			require.NoError(t, err)

			// Create a temporary directory for TSDB
			tempDir, err := ioutil.TempDir("", "tsdb")
			require.NoError(t, err)
			defer os.RemoveAll(tempDir)

			ingesterCfg := defaultIngesterTestConfig()
			ingesterCfg.BlocksStorageEnabled = true
			ingesterCfg.BlocksStorageConfig.TSDB.Dir = tempDir
			ingesterCfg.BlocksStorageConfig.Bucket.Backend = "s3"
			ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost"

			// setup the tsdbs dir
			testData.setup(t, tempDir)

			ingester, err := NewV2(ingesterCfg, clientCfg, overrides, nil)
			require.NoError(t, err)
			require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester))
			defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck

			testData.check(t, ingester)
		})
	}
}
// TestIngester_shipBlocks verifies that shipBlocks invokes the shipper's Sync
// exactly once per tenant, for every open per-user TSDB.
func TestIngester_shipBlocks(t *testing.T) {
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0
	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()

	// Wait until it's ACTIVE
	test.Poll(t, 10*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Create the TSDB for 3 users and then replace the shipper with the mocked one
	mocks := []*shipperMock{}
	for _, userID := range []string{"user-1", "user-2", "user-3"} {
		userDB, err := i.getOrCreateTSDB(userID, false)
		require.NoError(t, err)
		require.NotNil(t, userDB)

		m := &shipperMock{}
		m.On("Sync", mock.Anything).Return(0, nil)
		mocks = append(mocks, m)

		userDB.shipper = m
	}

	// Ship blocks and assert on the mocked shipper
	i.shipBlocks(context.Background())

	for _, m := range mocks {
		m.AssertNumberOfCalls(t, "Sync", 1)
	}
}
// shipperMock is a testify mock standing in for the per-user TSDB shipper,
// used to assert on Sync calls without uploading any block.
type shipperMock struct {
	mock.Mock
}
// Sync mocks Shipper.Sync(), returning the values configured via m.On("Sync", ...).
func (m *shipperMock) Sync(ctx context.Context) (uploaded int, err error) {
	args := m.Called(ctx)
	return args.Int(0), args.Error(1)
}
// TestIngester_flushing verifies the three ways a head flush + ship can be
// triggered: ingester shutdown (with flush-on-shutdown enabled), the shutdown
// HTTP handler, and the flush HTTP handler. In each case the head must end up
// compacted and the mocked shipper must see exactly one Sync call.
func TestIngester_flushing(t *testing.T) {
	for name, tc := range map[string]struct {
		setupIngester func(cfg *Config)
		action        func(t *testing.T, i *Ingester, m *shipperMock)
	}{
		"ingesterShutdown": {
			setupIngester: func(cfg *Config) {
				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = true
				// Keep TSDBs open so the head can be inspected after stop.
				cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown = true
			},

			action: func(t *testing.T, i *Ingester, m *shipperMock) {
				pushSingleSample(t, i)

				// Nothing shipped yet.
				m.AssertNumberOfCalls(t, "Sync", 0)

				// Shutdown ingester. This triggers flushing of the block.
				require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))

				verifyCompactedHead(t, i, true)

				// Verify that block has been shipped.
				m.AssertNumberOfCalls(t, "Sync", 1)
			},
		},

		"shutdownHandler": {
			setupIngester: func(cfg *Config) {
				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
				cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown = true
			},

			action: func(t *testing.T, i *Ingester, m *shipperMock) {
				pushSingleSample(t, i)

				// Nothing shipped yet.
				m.AssertNumberOfCalls(t, "Sync", 0)

				i.ShutdownHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/shutdown", nil))

				verifyCompactedHead(t, i, true)
				m.AssertNumberOfCalls(t, "Sync", 1)
			},
		},

		"flushHandler": {
			setupIngester: func(cfg *Config) {
				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
			},

			action: func(t *testing.T, i *Ingester, m *shipperMock) {
				pushSingleSample(t, i)

				// Nothing shipped yet.
				m.AssertNumberOfCalls(t, "Sync", 0)

				i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/shutdown", nil))

				// Flush handler only triggers compactions, but doesn't wait for them to finish. Let's wait for a moment, and then verify.
				time.Sleep(1 * time.Second)
				verifyCompactedHead(t, i, true)
				m.AssertNumberOfCalls(t, "Sync", 1)
			},
		},
	} {
		t.Run(name, func(t *testing.T) {
			cfg := defaultIngesterTestConfig()
			cfg.LifecyclerConfig.JoinAfter = 0
			cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
			cfg.BlocksStorageConfig.TSDB.ShipInterval = 1 * time.Minute // Long enough to not be reached during the test.

			if tc.setupIngester != nil {
				tc.setupIngester(&cfg)
			}

			// Create ingester
			i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
			require.NoError(t, err)
			t.Cleanup(cleanup)

			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
			t.Cleanup(func() {
				_ = services.StopAndAwaitTerminated(context.Background(), i)
			})

			// Wait until it's ACTIVE
			test.Poll(t, 10*time.Millisecond, ring.ACTIVE, func() interface{} {
				return i.lifecycler.GetState()
			})

			// mock user's shipper
			m := mockUserShipper(t, i)
			m.On("Sync", mock.Anything).Return(0, nil)

			tc.action(t, i, m)
		})
	}
}
// TestIngester_ForFlush verifies the "for flusher" mode: after a normal
// ingester is stopped without shipping, a flusher-mode ingester created over
// the same data dir must reload the WAL, compact the head on Flush() and ship
// the resulting block.
func TestIngester_ForFlush(t *testing.T) {
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0
	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
	cfg.BlocksStorageConfig.TSDB.ShipInterval = 10 * time.Minute // Long enough to not be reached during the test.

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
	require.NoError(t, err)
	t.Cleanup(cleanup)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 10*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// mock user's shipper
	m := mockUserShipper(t, i)
	m.On("Sync", mock.Anything).Return(0, nil)

	// Push some data.
	pushSingleSample(t, i)

	// Stop ingester.
	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))

	// Nothing shipped yet.
	m.AssertNumberOfCalls(t, "Sync", 0)

	// Restart ingester in "For Flusher" mode. We reuse the same config (esp. same dir)
	i, err = NewV2ForFlusher(i.cfg, nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))

	// Re-mock the shipper on the new ingester instance.
	m = mockUserShipper(t, i)
	m.On("Sync", mock.Anything).Return(0, nil)

	// Our single sample should be reloaded from WAL
	verifyCompactedHead(t, i, false)
	i.Flush()

	// Head should be empty after flushing.
	verifyCompactedHead(t, i, true)

	// Verify that block has been shipped.
	m.AssertNumberOfCalls(t, "Sync", 1)

	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
}
// mockUserShipper replaces the shipper of the default test user's TSDB
// (creating the TSDB if needed) with a fresh mock, and returns the mock.
func mockUserShipper(t *testing.T, i *Ingester) *shipperMock {
	userDB, err := i.getOrCreateTSDB(userID, false)
	require.NoError(t, err)
	require.NotNil(t, userDB)

	shipper := &shipperMock{}
	userDB.shipper = shipper
	return shipper
}
// Test_Ingester_v2UserStats pushes a few series for one tenant and verifies
// the per-user stats endpoint reports the expected API ingestion rate and
// series count.
func Test_Ingester_v2UserStats(t *testing.T) {
	series := []struct {
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
	}

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push series
	ctx := user.InjectOrgID(context.Background(), "test")

	for _, series := range series {
		req, _, _ := mockWriteRequest(series.lbls, series.value, series.timestamp)
		_, err := i.v2Push(ctx, req)
		require.NoError(t, err)
	}

	// force update statistics
	for _, db := range i.TSDBState.dbs {
		db.ingestedAPISamples.tick()
		db.ingestedRuleSamples.tick()
	}

	// Read back the per-user stats.
	res, err := i.v2UserStats(ctx, &client.UserStatsRequest{})
	require.NoError(t, err)

	assert.InDelta(t, 0.2, res.ApiIngestionRate, 0.0001)
	assert.InDelta(t, float64(0), res.RuleIngestionRate, 0.0001)
	assert.Equal(t, uint64(3), res.NumSeries)
}
// Test_Ingester_v2AllUserStats pushes series for two tenants and verifies the
// all-users stats endpoint reports per-tenant ingestion rates and series
// counts for both of them.
func Test_Ingester_v2AllUserStats(t *testing.T) {
	series := []struct {
		user      string
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000},
	}

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(defaultIngesterTestConfig(), nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	defer cleanup()

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push each series under its own tenant.
	for _, series := range series {
		ctx := user.InjectOrgID(context.Background(), series.user)
		req, _, _ := mockWriteRequest(series.lbls, series.value, series.timestamp)
		_, err := i.v2Push(ctx, req)
		require.NoError(t, err)
	}

	// force update statistics
	for _, db := range i.TSDBState.dbs {
		db.ingestedAPISamples.tick()
		db.ingestedRuleSamples.tick()
	}

	// Read back stats for all users.
	res, err := i.v2AllUserStats(context.Background(), &client.UserStatsRequest{})
	require.NoError(t, err)

	expect := []*client.UserIDStatsResponse{
		{
			UserId: "user-1",
			Data: &client.UserStatsResponse{
				IngestionRate:     0.2,
				NumSeries:         3,
				ApiIngestionRate:  0.2,
				RuleIngestionRate: 0,
			},
		},
		{
			UserId: "user-2",
			Data: &client.UserStatsResponse{
				IngestionRate:     0.13333333333333333,
				NumSeries:         2,
				ApiIngestionRate:  0.13333333333333333,
				RuleIngestionRate: 0,
			},
		},
	}
	assert.ElementsMatch(t, expect, res.Stats)
}
// TestIngesterCompactIdleBlock verifies head compaction driven by the idle
// timeout: a freshly-pushed head is not compacted, becomes compacted once idle
// longer than HeadCompactionIdleTimeout, and keeps accepting new pushes
// afterwards. Series created/removed metrics are checked at each step.
func TestIngesterCompactIdleBlock(t *testing.T) {
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0
	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
	cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = 1 * time.Hour      // Long enough to not be reached during the test.
	cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout = 1 * time.Second // Testing this.

	r := prometheus.NewRegistry()

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, r)
	require.NoError(t, err)
	t.Cleanup(cleanup)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 10*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	pushSingleSample(t, i)

	// Head is not idle yet, so this compaction must be a no-op.
	i.compactBlocks(context.Background(), false)
	verifyCompactedHead(t, i, false)
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1
		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 0
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName))

	// wait one second -- TSDB is now idle.
	time.Sleep(cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout)

	// Idle timeout reached: this compaction must compact the head.
	i.compactBlocks(context.Background(), false)
	verifyCompactedHead(t, i, true)
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1
		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 1
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName))

	// Pushing another sample still works.
	pushSingleSample(t, i)
	verifyCompactedHead(t, i, false)

	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 2
		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 1
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName))
}
// verifyCompactedHead asserts whether the default test user's TSDB head is
// compacted (i.e. holds zero in-memory series).
func verifyCompactedHead(t *testing.T, i *Ingester, expected bool) {
	db := i.getTSDB(userID)
	require.NotNil(t, db)

	headIsEmpty := db.Head().NumSeries() == 0
	require.Equal(t, expected, headIsEmpty)
}
// pushSingleSample pushes one sample (metric "test", value 0, timestamp now)
// for the default test user and asserts the push succeeded.
func pushSingleSample(t *testing.T, i *Ingester) {
	lbls := labels.Labels{{Name: labels.MetricName, Value: "test"}}
	req, _, _ := mockWriteRequest(lbls, 0, util.TimeToMillis(time.Now()))

	_, err := i.v2Push(user.InjectOrgID(context.Background(), userID), req)
	require.NoError(t, err)
}
// TestHeadCompactionOnStartup pre-builds a TSDB whose head spans ~24h of data
// (compaction disabled), then starts the ingester over it and verifies the
// oversized head gets compacted on startup: the remaining head covers at most
// one block range (2h) and 11 blocks have been cut.
func TestHeadCompactionOnStartup(t *testing.T) {
	// Create a temporary directory for TSDB
	tempDir, err := ioutil.TempDir("", "tsdb")
	require.NoError(t, err)
	t.Cleanup(func() {
		os.RemoveAll(tempDir)
	})

	// Build TSDB for user, with data covering 24 hours.
	{
		// Number of full chunks, 12 chunks for 24hrs.
		numFullChunks := 12
		chunkRange := 2 * time.Hour.Milliseconds()

		userDir := filepath.Join(tempDir, userID)
		require.NoError(t, os.Mkdir(userDir, 0700))

		db, err := tsdb.Open(userDir, nil, nil, &tsdb.Options{
			RetentionDuration: int64(time.Hour * 25 / time.Millisecond),
			NoLockfile:        true,
			MinBlockDuration:  chunkRange,
			MaxBlockDuration:  chunkRange,
		})
		require.NoError(t, err)

		// Keep everything in the head: no blocks must exist on disk yet.
		db.DisableCompactions()
		head := db.Head()

		l := labels.Labels{{Name: "n", Value: "v"}}
		for i := 0; i < numFullChunks; i++ {
			// Not using db.Appender() as it checks for compaction.
			app := head.Appender(context.Background())
			_, err := app.Add(l, int64(i)*chunkRange+1, 9.99)
			require.NoError(t, err)
			_, err = app.Add(l, int64(i+1)*chunkRange, 9.99)
			require.NoError(t, err)
			require.NoError(t, app.Commit())
		}

		// Sanity-check the fixture: >23h of head data and zero blocks.
		dur := time.Duration(head.MaxTime()-head.MinTime()) * time.Millisecond
		require.True(t, dur > 23*time.Hour)
		require.Equal(t, 0, len(db.Blocks()))
		require.NoError(t, db.Close())
	}

	clientCfg := defaultClientTestConfig()
	limits := defaultLimitsTestConfig()

	overrides, err := validation.NewOverrides(limits, nil)
	require.NoError(t, err)

	ingesterCfg := defaultIngesterTestConfig()
	ingesterCfg.BlocksStorageEnabled = true
	ingesterCfg.BlocksStorageConfig.TSDB.Dir = tempDir
	ingesterCfg.BlocksStorageConfig.Bucket.Backend = "s3"
	ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost"
	ingesterCfg.BlocksStorageConfig.TSDB.Retention = 2 * 24 * time.Hour // Make sure that no newly created blocks are deleted.

	ingester, err := NewV2(ingesterCfg, clientCfg, overrides, nil)
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester))
	defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck

	db := ingester.getTSDB(userID)
	require.NotNil(t, db)

	// After startup compaction the head covers at most one 2h block range,
	// and the rest of the data has been cut into 11 blocks.
	h := db.Head()
	dur := time.Duration(h.MaxTime()-h.MinTime()) * time.Millisecond
	require.True(t, dur <= 2*time.Hour)
	require.Equal(t, 11, len(db.Blocks()))
}
// TestIngester_CloseTSDBsOnShutdown verifies that stopping the ingester closes
// and removes the per-user TSDBs from memory.
func TestIngester_CloseTSDBsOnShutdown(t *testing.T) {
	cfg := defaultIngesterTestConfig()
	cfg.LifecyclerConfig.JoinAfter = 0

	// Create ingester
	i, cleanup, err := newIngesterMockWithTSDBStorage(cfg, nil)
	require.NoError(t, err)
	t.Cleanup(cleanup)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 10*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push some data.
	pushSingleSample(t, i)

	db := i.getTSDB(userID)
	require.NotNil(t, db)

	// Stop ingester.
	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))

	// Verify that DB is no longer in memory, but was closed
	db = i.getTSDB(userID)
	require.Nil(t, db)
}
| {
"pile_set_name": "Github"
} |
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
*
* (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
* (C) 2016 by Pablo Neira Ayuso <[email protected]>
*
* Author: Harald Welte <[email protected]>
* Pablo Neira Ayuso <[email protected]>
* Andreas Schultz <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>
/* An active session for the subscriber. One entry exists per PDP context;
 * it is linked into both the TID/TEI hash table and the MS-address hash
 * table of the owning gtp_dev, and freed via RCU.
 */
struct pdp_ctx {
	struct hlist_node	hlist_tid;	/* node in gtp_dev->tid_hash */
	struct hlist_node	hlist_addr;	/* node in gtp_dev->addr_hash */

	/* Version-specific tunnel identifiers. */
	union {
		u64		tid;
		struct {
			u64	tid;		/* GTPv0 64bit tunnel ID */
			u16	flow;		/* GTPv0 flow label */
		} v0;
		struct {
			u32	i_tei;		/* GTPv1 ingress tunnel endpoint ID */
			u32	o_tei;		/* GTPv1 egress tunnel endpoint ID */
		} v1;
	} u;
	u8			gtp_version;	/* GTP_V0 or GTP_V1 */
	u16			af;		/* address family of the MS (AF_INET) */

	struct in_addr		ms_addr_ip4;	/* IP address of the mobile subscriber */
	struct in_addr		peer_addr_ip4;	/* IP address of the remote GSN peer */

	struct sock		*sk;		/* UDP encap socket used for tx */
	struct net_device	*dev;		/* gtp net_device this context belongs to */

	atomic_t		tx_seq;		/* next tx sequence number */
	struct rcu_head		rcu_head;	/* deferred free under RCU */
};
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;		/* node in per-netns gtp_dev_list */

	struct sock		*sk0;		/* GTPv0 UDP encap socket */
	struct sock		*sk1u;		/* GTPv1-U UDP encap socket */

	struct net_device	*dev;		/* the gtp net_device itself */

	unsigned int		role;		/* GGSN or SGSN role of this device */

	unsigned int		hash_size;	/* number of buckets in both hashes */
	struct hlist_head	*tid_hash;	/* pdp_ctx lookup by TID/TEI */
	struct hlist_head	*addr_hash;	/* pdp_ctx lookup by MS IPv4 address */
};
/* Key for per-network-namespace gtp_net storage. */
static unsigned int gtp_net_id	__read_mostly;

/* Per-netns state: the list of gtp devices in this namespace. */
struct gtp_net {
	struct list_head gtp_dev_list;
};

/* Seed for the hash functions below.
 * NOTE(review): its initialization is not visible in this chunk.
 */
static u32 gtp_h_initval;

static void pdp_context_delete(struct pdp_ctx *pctx);
/* Hash a GTPv0 64bit TID for tid_hash bucket selection.
 * The u64 is read as two u32 words via pointer cast; the result depends on
 * host endianness, which is fine since it is only used locally for hashing.
 */
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}
/* Hash a GTPv1-U 32bit TEI for tid_hash bucket selection. */
static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}
/* Hash a mobile subscriber IPv4 address for addr_hash bucket selection. */
static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}
/* Resolve a PDP context structure based on the 64bit TID.
 * Walks the tid_hash bucket under RCU; caller must hold rcu_read_lock().
 * Returns NULL when no GTPv0 context matches.
 */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI.
 * Walks the tid_hash bucket under RCU; caller must hold rcu_read_lock().
 * Matches against the ingress TEI of GTPv1 contexts only.
 */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS.
 * Walks the addr_hash bucket under RCU; caller must hold rcu_read_lock().
 */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}
/* Check that the inner IPv4 header (at offset hdrlen into the skb) belongs to
 * this PDP context's mobile subscriber. In SGSN role the MS is the packet's
 * destination; in GGSN role it is the source. Returns false when the skb is
 * too short to contain an inner IPv4 header.
 */
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;
	else
		return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber. Only IPv4 inner packets are supported;
 * any other protocol fails the check.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}
/* Decapsulate a matched tunnel packet and hand the inner packet to the stack
 * via the gtp net_device, updating per-cpu rx stats.
 * Returns 1 to pass the skb up to the encap socket (MS check failed),
 * -1 to drop, 0 on successful decapsulation (same convention as the
 * encap_recv handlers below).
 */
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role)
{
	struct pcpu_sw_netstats *stats;

	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
		return -1;

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = pctx->dev;

	stats = this_cpu_ptr(pctx->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);
	return 0;
}
/* GTPv0 receive path.
 * 1 means pass up to the stack, -1 means drop and 0 means decapsulated.
 */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	/* Top 3 bits of the flags field carry the GTP version. */
	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	/* Only tunneled PDUs are decapsulated; signalling goes to userspace. */
	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
/* GTPv1-U receive path; same return convention as gtp0_udp_encap_recv():
 * 1 pass up to the stack, -1 drop, 0 decapsulated.
 */
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	/* Top 3 bits of the flags field carry the GTP version. */
	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the optional 4-byte field (sequence
	 * number, N-PDU number, next extension header type) is present in
	 * the header as a whole.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	/* Re-read the header: pskb_may_pull() may have reallocated skb data. */
	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
/* Disable GTP encapsulation on a UDP socket: clear the encap type, detach
 * the gtp_dev from sk_user_data and drop the reference taken when the
 * socket was enabled in gtp_encap_enable_socket().
 */
static void gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	gtp = rcu_dereference_sk_user_data(sk);
	if (gtp) {
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		sock_put(sk);
	}
}
/* NULL-tolerant wrapper around gtp_encap_destroy(). */
static void gtp_encap_disable_sock(struct sock *sk)
{
	if (sk)
		gtp_encap_destroy(sk);
}
/* Tear down encapsulation on both the GTPv0 and the GTPv1-U socket. */
static void gtp_encap_disable(struct gtp_dev *gtp)
{
	gtp_encap_disable_sock(gtp->sk0);
	gtp_encap_disable_sock(gtp->sk1u);
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	/* Dispatch on the encap type configured on the socket, not on the
	 * packet contents; the version check happens in the per-type handler.
	 */
	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	/* Map the per-handler convention (1/0/-1) onto the UDP encap
	 * contract: drops are consumed here, so -1 becomes 0.
	 */
	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}
/* ndo_init: allocate the per-CPU RX/TX statistics for the gtp device. */
static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
/* ndo_uninit: release the encap sockets and the per-CPU statistics. */
static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}
/* Resolve an IPv4 route towards the GTP peer, keyed on the encap socket's
 * binding (device, source address, TOS, protocol).
 */
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}
/* Prepend a GTPv0 header to the outgoing payload. */
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	/* Per-context sequence number, wrapping at 16 bits. */
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	/* N-PDU number and spare bytes carry the "all ones" filler value. */
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}
/* Prepend a GTPv1-U header to the outgoing payload. */
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2	1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1	1  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	/* Outgoing tunnel endpoint identifier from the PDP context. */
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}
/* Per-packet transmit state collected by gtp_build_skb_ip4() and consumed
 * by gtp_dev_xmit() when handing the skb to the UDP tunnel layer.
 */
struct gtp_pktinfo {
	struct sock		*sk;	/* encap socket for this PDP ctx */
	struct iphdr		*iph;	/* inner IPv4 header */
	struct flowi4		fl4;	/* resolved flow towards the peer */
	struct rtable		*rt;	/* route towards the peer */
	struct pdp_ctx		*pctx;	/* matching PDP context */
	struct net_device	*dev;	/* gtp netdevice */
	__be16			gtph_port; /* GTP0_PORT or GTP1U_PORT */
};
/* Push the version-appropriate GTP header and record the UDP port to use. */
static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}
/* Fill the transmit pktinfo; gtph_port is set later by gtp_push_header(). */
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}
/* Prepare an outgoing IPv4 packet for GTP encapsulation: resolve the PDP
 * context from the inner address, route towards the peer, enforce PMTU and
 * push the GTP header. On success the pktinfo holds everything
 * gtp_dev_xmit() needs to emit the UDP tunnel packet.
 */
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	/* In SGSN role the mobile station is the *source* of the packet. */
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SSGN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	/* A route pointing back at ourselves would loop forever. */
	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SSGN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	skb_dst_drop(skb);

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		/* Account for the outer IP/UDP/GTP overhead we will add. */
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);

	/* DF set and the packet does not fit: tell the sender via ICMP. */
	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}
/* ndo_start_xmit: encapsulate the egress packet and hand it to the UDP
 * tunnel layer. Only IPv4 payloads are supported.
 */
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		/* Same port for source and destination; checksum disabled,
		 * no xnet accounting.
		 */
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    true, false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Netdevice callbacks for the gtp virtual interface. */
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};
/* rtnl_link_ops setup hook: configure the gtp device as a queue-less,
 * ARP-less point-to-point interface with room for the largest header.
 */
static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume largest header, ie. GTPv0. */
	dev->needed_headroom	= LL_MAX_HEADER +
				  sizeof(struct iphdr) +
				  sizeof(struct udphdr) +
				  sizeof(struct gtp0_header);
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct gtp_dev *gtp;
struct gtp_net *gn;
int hashsize, err;
if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
return -EINVAL;
gtp = netdev_priv(dev);
err = gtp_encap_enable(gtp, data);
if (err < 0)
return err;
if (!data[IFLA_GTP_PDP_HASHSIZE])
hashsize = 1024;
else
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
goto out_encap;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
goto out_hashtable;
}
gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(>p->list, &gn->gtp_dev_list);
netdev_dbg(dev, "registered new GTP interface\n");
return 0;
out_hashtable:
gtp_hashtable_free(gtp);
out_encap:
gtp_encap_disable(gtp);
return err;
}
/* rtnl_link_ops dellink hook: undo everything gtp_newlink() set up. */
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	gtp_hashtable_free(gtp);
	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}
/* Netlink policy for the IFLA_GTP_* link attributes. */
static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
};
/* rtnl_link_ops validate hook: a gtp link must carry attribute data at
 * all; the per-attribute checks are done later in gtp_newlink().
 */
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	return data ? 0 : -EINVAL;
}
/* Size of the attributes gtp_fill_info() emits for one device. */
static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
}
/* Dump the device configuration (currently only the hash size). */
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* rtnetlink glue for "ip link add ... type gtp". */
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};
/* Allocate the two PDP lookup tables (by MS address and by tunnel id).
 * Returns 0 on success, -ENOMEM if either allocation fails.
 */
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	/* kmalloc_array() checks for multiplication overflow, which the
	 * previous open-coded kmalloc(sizeof(head) * hsize) did not; the
	 * size comes from a user-supplied netlink attribute.
	 */
	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}
/* Delete every remaining PDP context, wait for readers, then release the
 * hashtable arrays themselves.
 */
static void gtp_hashtable_free(struct gtp_dev *gtp)
{
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	/* Contexts are freed via call_rcu(); wait so the kfree()s below
	 * cannot race with lookups still walking the chains.
	 */
	synchronize_rcu();
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}
/* Turn a userspace-supplied UDP socket fd into a GTP encap socket.
 * Returns the socket (with an extra reference held), NULL if the fd does
 * not resolve, or an ERR_PTR for an unusable socket.
 */
static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return NULL;
	}

	/* NOTE(review): only the protocol is checked here; the socket type
	 * (SOCK_DGRAM) and sock->sk being non-NULL are not verified —
	 * confirm against the callers whether that can be violated.
	 */
	if (sock->sk->sk_protocol != IPPROTO_UDP) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	/* Refuse a socket that is already claimed by another encap user. */
	if (rcu_dereference_sk_user_data(sock->sk)) {
		sk = ERR_PTR(-EBUSY);
		goto out_sock;
	}

	/* Reference dropped again in gtp_encap_destroy(). */
	sk = sock->sk;
	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_sock:
	sockfd_put(sock);
	return sk;
}
/* Parse the link attributes and enable the GTPv0/GTPv1-U encap sockets.
 * On success the sockets and the device role are stored in @gtp.
 */
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;
	unsigned int role = GTP_ROLE_GGSN;

	/* Validate the role *before* enabling any socket. The previous
	 * ordering returned -EINVAL for an out-of-range role after the
	 * sockets had already been enabled, leaking their references.
	 */
	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN)
			return -EINVAL;
	}

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			if (sk0)
				gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;
	gtp->role = role;

	return 0;
}
/* Resolve the gtp device referenced by the GTPA_LINK (and optionally
 * GTPA_NET_NS_FD) attributes; NULL if the namespace or device cannot be
 * found, or the ifindex does not name a gtp device.
 */
static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	/* The netdev_ops pointer doubles as a "is this a gtp device" tag. */
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}
/* Populate a PDP context from validated GTPA_* netlink attributes. */
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		/* Separate incoming and outgoing tunnel endpoint ids. */
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}
/* Create a new PDP context (or update an existing one for the same mobile
 * station address) and link it into both lookup hashtables.
 */
static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
			struct genl_info *info)
{
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	struct pdp_ctx *pctx;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;

	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		/* Honour the netlink create semantics: EXCL forbids
		 * touching an existing entry, REPLACE is not implemented.
		 */
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return 0;
	}

	pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
	if (pctx == NULL)
		return -ENOMEM;

	/* Context keeps a reference on the encap socket; dropped in
	 * pdp_context_free().
	 */
	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguosly identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return 0;
}
/* RCU callback: drop the socket reference taken in ipv4_pdp_add() and
 * release the context memory.
 */
static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}
/* Unlink a PDP context from both hashtables; freeing is deferred until
 * after a grace period so concurrent RCU lookups stay safe.
 */
static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}
/* GTP_CMD_NEWPDP handler: validate the attribute set for the requested
 * GTP version, pick the matching encap socket and add/update the context.
 */
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	/* Each version has its own mandatory tunnel-id attributes. */
	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rcu_read_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* The version also selects which encap socket carries the tunnel. */
	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	err = ipv4_pdp_add(gtp, sk, info);

out_unlock:
	rcu_read_unlock();
	return err;
}
/* Look up a PDP context on the device named by GTPA_LINK, either by mobile
 * station address or by version-specific tunnel identifier.
 */
static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}
/* Resolve a PDP context from netlink attributes. Only lookup via
 * GTPA_LINK is supported; a NULL lookup result is converted to -ENOENT so
 * callers can uniformly test the return value with IS_ERR().
 */
static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx = ERR_PTR(-EINVAL);

	if (nla[GTPA_LINK]) {
		pctx = gtp_find_pdp_by_link(net, nla);
		if (!pctx)
			pctx = ERR_PTR(-ENOENT);
	}

	return pctx;
}
/* GTP_CMD_DELPDP handler: find the referenced context and delete it. */
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}
static struct genl_family gtp_genl_family;
/* Serialize one PDP context into a generic netlink message. */
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	/* Version-specific tunnel identification. */
	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
/* GTP_CMD_GETPDP (doit) handler: look up one context and unicast it back
 * to the requester.
 */
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	/* GFP_ATOMIC: we are inside the RCU read-side section. */
	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}
/* GTP_CMD_GETPDP (dumpit) handler: walk every device in the namespace and
 * every tid-hash chain, resuming from the position saved in cb->args when
 * the previous dump message filled up.
 *
 * NOTE(review): cb->args[2] stores a raw struct gtp_dev pointer across
 * dump invocations without holding a reference; if the device is removed
 * between calls this looks like a potential use-after-free — confirm
 * against the locking rules of netlink dumps / later upstream reworks.
 */
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	struct net *net = sock_net(skb->sk);
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	unsigned long tid = cb->args[1];
	int i, k = cb->args[0], ret;
	struct pdp_ctx *pctx;

	/* args[4] flags a completed dump. */
	if (cb->args[4])
		return 0;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		/* Skip devices until we reach the one we stopped at. */
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = k; i < gtp->hash_size; i++) {
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
				/* Skip contexts until the saved tid. */
				if (tid && tid != pctx->u.tid)
					continue;
				else
					tid = 0;

				ret = gtp_genl_fill_info(skb,
							 NETLINK_CB(cb->skb).portid,
							 cb->nlh->nlmsg_seq,
							 cb->nlh->nlmsg_type, pctx);
				if (ret < 0) {
					/* Message full: remember where to
					 * resume on the next invocation.
					 */
					cb->args[0] = i;
					cb->args[1] = pctx->u.tid;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
			}
		}
	}
	cb->args[4] = 1;
out:
	return skb->len;
}
/* Netlink policy for the GTPA_* generic netlink attributes. */
static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};
/* Generic netlink command table; all commands require CAP_NET_ADMIN. */
static const struct genl_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.doit = gtp_genl_new_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.doit = gtp_genl_del_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
/* The "gtp" generic netlink family; netns-aware. */
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= gtp_genl_ops,
	.n_ops		= ARRAY_SIZE(gtp_genl_ops),
};
/* Per-netns init: start with an empty list of gtp devices. */
static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}
/* Per-netns exit: batch-unregister every gtp device in this namespace. */
static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
/* Per-network-namespace state registration. */
static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};
/* Module init: seed the hash salt, then register the rtnl link type, the
 * generic netlink family and the pernet hooks, unwinding on failure.
 */
static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	/* Message fixed: used to read "error loading GTP module loaded". */
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);
/* Module exit: unregister in the reverse order of gtp_init(). */
static void __exit gtp_fini(void)
{
	unregister_pernet_subsys(&gtp_net_ops);
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Collections.Generic;
using Microsoft.Build.Collections;
using Xunit;
namespace Microsoft.Build.UnitTests
{
public class HashTableUtilityTests
{
    /// <summary>
    /// Missing unittest found by mutation testing.
    /// REASON TEST WASN'T ORIGINALLY PRESENT: HashTableUtility was not a separate class and
    /// there was no way to hit this case through BatchingEngine directly because it never
    /// calls Compare() with unequal numbers of items.
    ///
    /// This test ensures that hashtable with unequal numbers of items are considered not
    /// equivalent.
    /// </summary>
    [Fact]
    public void Regress_Mutation_HashtablesWithDifferentCountsAreNotEquivalent()
    {
        var smaller = new Dictionary<string, string>();
        smaller["a"] = "x"; // <---------- Must be the same in both hashtables.

        var larger = new Dictionary<string, string>();
        larger["a"] = "x"; // <---------- Must be the same in both hashtables.
        larger["b"] = "y";

        // The table with fewer entries compares as "less than" the larger one.
        Assert.True(HashTableUtility.Compare(smaller, larger) < 0);
        Assert.True(HashTableUtility.Compare(larger, smaller) > 0);
    }

    /// <summary>
    /// Exercises HashTableUtility.Compare ordering semantics: equal tables
    /// yield 0, and the first differing value determines the sign.
    /// </summary>
    [Fact]
    public void HashtableComparisons()
    {
        var left = new Dictionary<string, string>();
        var right = new Dictionary<string, string>();
        Assert.Equal(0, HashTableUtility.Compare(left, right));

        left["a"] = "x";
        right["a"] = "x";
        Assert.Equal(0, HashTableUtility.Compare(left, right));

        left["b"] = "y";
        left["c"] = "z";
        right["b"] = "y";
        right["c"] = "z";
        Assert.Equal(0, HashTableUtility.Compare(left, right));

        // "j" sorts before "y", so 'left' now compares lower.
        left["b"] = "j";
        Assert.True(HashTableUtility.Compare(left, right) < 0);

        right["b"] = "j";
        right["c"] = "k";
        Assert.True(HashTableUtility.Compare(left, right) > 0);

        // Null values participate in the comparison as the lowest values.
        left["a"] = null;
        left["c"] = "k";
        Assert.True(HashTableUtility.Compare(left, right) < 0);

        right["a"] = null;
        Assert.Equal(0, HashTableUtility.Compare(left, right));
    }
}
}
| {
"pile_set_name": "Github"
} |
from __future__ import print_function, absolute_import
import unittest, random
import pandas as pd, numpy as np
class BasePandasExtensionsTester(unittest.TestCase):
    """Base fixture for pandas-extension tests.

    Seeds the RNGs for determinism and provides ``close``/``eq`` assertion
    helpers that accept dicts, DataFrames/Series, or plain sequences.
    """

    def setUp(self):
        # Deterministic tests: fix both the stdlib and the numpy RNG.
        random.seed(0)
        np.random.seed(0)

    @staticmethod
    def _as_array(o):
        """Coerce dict -> DataFrame, pandas object -> ndarray, sequence -> ndarray."""
        if type(o) is dict:
            o = pd.DataFrame(o)
        if hasattr(o, 'values'):
            o = o.values
        if not isinstance(o, np.ndarray):
            # Bug fix: the original called np.array(o) and discarded the
            # result, leaving non-ndarray inputs unconverted.
            o = np.array(o)
        return o

    def close(self, o1, o2):
        """Assert o1 and o2 are element-wise equal to 3 decimal places."""
        np.testing.assert_almost_equal(
            self._as_array(o1), self._as_array(o2), 3)

    def eq(self, o1, o2):
        """Assert o1 and o2 are exactly element-wise equal."""
        np.testing.assert_array_equal(
            self._as_array(o1), self._as_array(o2))
"pile_set_name": "Github"
} |
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
#![forbid(unsafe_code)]
extern crate clap;
extern crate regex;
extern crate filters;
#[macro_use] extern crate log;
#[macro_use] extern crate anyhow;
extern crate resiter;
extern crate libimagrt;
extern crate libimagerror;
extern crate libimagstore;
extern crate libimagwiki;
extern crate libimagentryedit;
extern crate libimagentrylink;
extern crate libimagutil;
use std::io::Write;
use anyhow::Result;
use anyhow::Context;
use anyhow::Error;
use clap::App;
use resiter::AndThen;
use libimagrt::runtime::Runtime;
use libimagrt::application::ImagApplication;
use libimagentryedit::edit::{Edit, EditHeader};
use libimagwiki::store::WikiStore;
use libimagwiki::entry::WikiEntry;
mod ui;
/// Marker enum for implementing ImagApplication on
///
/// This is used by binaries crates to execute business logic
/// or to build a CLI completion.
pub enum ImagWiki {}
impl ImagApplication for ImagWiki {
    /// Entry point: dispatch the selected subcommand to the free functions
    /// below. The wiki to operate on defaults to "default" when the user
    /// does not pass --wikiname.
    fn run(rt: Runtime) -> Result<()> {
        let wiki_name = rt.cli().value_of("wikiname").unwrap_or("default");
        trace!("wiki_name = {}", wiki_name);
        trace!("calling = {:?}", rt.cli().subcommand_name());

        match rt.cli().subcommand_name().ok_or_else(|| anyhow!("No subcommand called"))? {
            "list"        => list(&rt, wiki_name),
            "idof"        => idof(&rt, wiki_name),
            "create"      => create(&rt, wiki_name),
            "create-wiki" => create_wiki(&rt),
            "show"        => show(&rt, wiki_name),
            "delete"      => delete(&rt, wiki_name),
            other         => {
                debug!("Unknown command");
                // Unknown subcommands are forwarded to an external
                // "imag-wiki-<other>" binary, if one exists.
                if rt.handle_unknown_subcommand("imag-wiki", other, rt.cli())?.success() {
                    Ok(())
                } else {
                    Err(anyhow!("Failed to handle unknown subcommand"))
                }
            }
        } // end match scmd
    }

    fn build_cli<'a>(app: App<'a, 'a>) -> App<'a, 'a> {
        ui::build_ui(app)
    }

    fn name() -> &'static str {
        env!("CARGO_PKG_NAME")
    }

    fn description() -> &'static str {
        "Personal wiki"
    }

    fn version() -> &'static str {
        env!("CARGO_PKG_VERSION")
    }
}
/// Print the id of every entry in the wiki, optionally prefixed with the
/// store path when --list-full is given.
fn list(rt: &Runtime, wiki_name: &str) -> Result<()> {
    let scmd = rt.cli().subcommand_matches("list").unwrap(); // safed by clap
    let prefix = if scmd.is_present("list-full") {
        format!("{}/", rt.store().path().display())
    } else {
        String::from("")
    };

    let out = rt.stdout();
    let mut outlock = out.lock();

    rt.store()
        .get_wiki(wiki_name)?
        .ok_or_else(|| anyhow!("No wiki '{}' found", wiki_name))?
        .all_ids()?
        .and_then_ok(|id| writeln!(outlock, "{}{}", prefix, id).map_err(Error::from))
        .collect::<Result<Vec<_>>>()
        .map(|_| ())
}
/// Print the store id of a single named wiki entry, optionally prefixed
/// with the store path when --idof-full is given.
fn idof(rt: &Runtime, wiki_name: &str) -> Result<()> {
    let scmd = rt.cli().subcommand_matches("idof").unwrap(); // safed by clap

    let entryname = scmd
        .value_of("idof-name")
        .map(String::from)
        .unwrap(); // safed by clap

    let out = rt.stdout();
    let mut lock = out.lock();

    rt.store()
        .get_wiki(wiki_name)?
        .ok_or_else(|| anyhow!("No wiki '{}' found", wiki_name))?
        .get_entry(&entryname)?
        .ok_or_else(|| anyhow!("Entry '{}' in wiki '{}' not found!", entryname, wiki_name))
        .and_then(|entry| {
            let id = entry.get_location().clone();
            let prefix = if scmd.is_present("idof-full") {
                format!("{}/", rt.store().path().display())
            } else {
                String::from("")
            };
            writeln!(lock, "{}{}", prefix, id).map_err(Error::from)
        })
}
/// Create a new entry in the wiki, optionally opening an editor for its
/// content (and header), auto-linking it, and printing its id on request.
fn create(rt: &Runtime, wiki_name: &str) -> Result<()> {
    let scmd = rt.cli().subcommand_matches("create").unwrap(); // safed by clap
    let name = String::from(scmd.value_of("create-name").unwrap()); // safe by clap

    let wiki = rt
        .store()
        .get_wiki(&wiki_name)?
        .ok_or_else(|| anyhow!("No wiki '{}' found", wiki_name))?;

    let mut entry = wiki.create_entry(name)?;

    if !scmd.is_present("create-noedit") {
        if scmd.is_present("create-editheader") {
            entry.edit_header_and_content(rt)?;
        } else {
            entry.edit_content(rt)?;
        }
    }

    // Error-message typo fixes: "safe"/"Safed" -> "save"/"Saved".
    if let Err(e) = entry
        .autolink(rt.store())
        .context("Linking has failed. Trying to save the entry now. Please investigate by hand if this succeeds.")
    {
        rt.store().update(&mut entry).context("Saved entry")?;
        return Err(e).map_err(Error::from)
    }

    let id = entry.get_location();

    if scmd.is_present("create-printid") {
        let out = rt.stdout();
        let mut lock = out.lock();

        writeln!(lock, "{}", id)?;
    }

    rt.report_touched(&id).map_err(Error::from)
}
/// Create a new named wiki in the store and report its index entry.
fn create_wiki(rt: &Runtime) -> Result<()> {
    let scmd = rt.cli().subcommand_matches("create-wiki").unwrap(); // safed by clap
    let wiki_name = String::from(scmd.value_of("create-wiki-name").unwrap()); // safe by clap
    let (_, index) = rt.store().create_wiki(&wiki_name)?;
    rt.report_touched(index.get_location()).map_err(Error::from)
}
/// Print location and content of the named wiki entries.
fn show(rt: &Runtime, wiki_name: &str) -> Result<()> {
    use filters::filter::Filter;

    let scmd = rt.cli().subcommand_matches("show").unwrap(); // safed by clap

    struct NameFilter(Option<Vec<String>>);
    impl Filter<String> for NameFilter {
        fn filter(&self, e: &String) -> bool {
            match self.0 {
                Some(ref v) => v.contains(e),
                None        => false,
            }
        }
    }

    // NOTE(review): the filter is built from the very same "show-name"
    // values it is then applied to, so it always accepts every element;
    // it is effectively a no-op kept for structure.
    let namefilter = NameFilter(scmd
        .values_of("show-name")
        .map(|v| v.map(String::from).collect::<Vec<String>>()));

    let wiki = rt
        .store()
        .get_wiki(&wiki_name)?
        .ok_or_else(|| anyhow!("No wiki '{}' found", wiki_name))?;

    let out = rt.stdout();
    let mut outlock = out.lock();

    scmd.values_of("show-name")
        .unwrap() // safe by clap
        .map(String::from)
        .filter(|e| namefilter.filter(e))
        .map(|name| {
            let entry = wiki
                .get_entry(&name)?
                .ok_or_else(|| anyhow!("No wiki entry '{}' found in wiki '{}'", name, wiki_name))?;

            writeln!(outlock, "{}", entry.get_location())?;
            writeln!(outlock, "{}", entry.get_content())?;

            rt.report_touched(entry.get_location()).map_err(Error::from)
        })
        .collect::<Result<Vec<_>>>()
        .map(|_| ())
}
/// Delete an entry from the wiki named `wiki_name`.
///
/// Unless `delete-no-remove-linkings` is given, all links from/to the entry
/// are removed first, so no dangling links are left behind in the store.
fn delete(rt: &Runtime, wiki_name: &str) -> Result<()> {
    use libimagentrylink::linkable::Linkable;

    let scmd = rt.cli().subcommand_matches("delete").unwrap(); // safed by clap
    let entry_name = String::from(scmd.value_of("delete-name").unwrap()); // safe by clap
    let remove_links = !scmd.is_present("delete-no-remove-linkings");

    let wiki = rt
        .store()
        .get_wiki(&wiki_name)?
        .ok_or_else(|| anyhow!("No wiki '{}' found", wiki_name))?;

    if remove_links {
        wiki.get_entry(&entry_name)?
            .ok_or_else(|| anyhow!("No wiki entry '{}' in '{}' found", entry_name, wiki_name))?
            .unlink(rt.store())?;
    }

    wiki.delete_entry(&entry_name)
}
| {
"pile_set_name": "Github"
} |
import { connect } from 'react-redux';
import { checkKeepAlive } from 'platform/user/authentication/actions';
import {
ssoeInbound,
hasCheckedKeepAlive,
ssoeTransactionId,
} from 'platform/user/authentication/selectors';
import {
selectProfile,
isLoggedIn,
isProfileLoading,
} from 'platform/user/selectors';
import { checkAutoSession } from 'platform/utilities/sso';
import { removeLoginAttempted } from 'platform/utilities/sso/loginAttempted';
// Renderless component that kicks off an SSOe auto-session check once per
// page load, unless we are on an auth-related path or the check already ran.
function AutoSSO(props) {
  const {
    useInboundSSOe,
    hasCalledKeepAlive,
    transactionId,
    loggedIn,
    profileLoading,
    profile,
  } = props;

  if (loggedIn) {
    removeLoginAttempted();
  }

  const badPaths = ['auth/login/callback', 'logout'];
  const { pathname } = window.location;
  const onBadPath = badPaths.some(path => pathname.includes(path));

  const shouldCheckSession =
    // avoid race condition where hasSession hasn't been set
    !onBadPath && useInboundSSOe && !profileLoading && !hasCalledKeepAlive;

  if (shouldCheckSession) {
    checkAutoSession(loggedIn, transactionId, profile).then(() => {
      props.checkKeepAlive();
    });
  }

  // Nothing to render; this component exists only for its side effects.
  return null;
}
// Pull everything AutoSSO needs out of the redux store.
const mapStateToProps = state => {
  return {
    loggedIn: isLoggedIn(state),
    profile: selectProfile(state),
    profileLoading: isProfileLoading(state),
    transactionId: ssoeTransactionId(state),
    hasCalledKeepAlive: hasCheckedKeepAlive(state),
    useInboundSSOe: ssoeInbound(state),
  };
};
// Bind the keep-alive action creator to dispatch (object shorthand form).
const mapDispatchToProps = {
  checkKeepAlive,
};
// Connected component is the default export; the bare component is also
// exported for unit testing without a redux store.
export default connect(
  mapStateToProps,
  mapDispatchToProps,
)(AutoSSO);
export { AutoSSO };
| {
"pile_set_name": "Github"
} |
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
// Echo a value via the test host, coercing it to a string with `+ ""` first
// (concatenation, not String(), to keep the original ToPrimitive behavior).
function write(v) {
    var text = v + "";
    WScript.Echo(text);
}
// Intentionally empty: used below only as a function-valued operand in the
// generated `!=` comparison battery.
function foo() {}
write(+0.0 != '');
write(+0.0 != 0xa);
write(+0.0 != 04);
write(+0.0 != 'hello');
write(+0.0 != 'hel' + 'lo');
write(+0.0 != String(''));
write(+0.0 != String('hello'));
write(+0.0 != String('h' + 'ello'));
write(+0.0 != new String(''));
write(+0.0 != new String('hello'));
write(+0.0 != new String('he' + 'llo'));
write(+0.0 != new Object());
write(+0.0 != new Object());
write(+0.0 != [1, 2, 3]);
write(+0.0 != [1 ,2 , 3]);
write(+0.0 != new Array(3));
write(+0.0 != Array(3));
write(+0.0 != new Array(1 ,2 ,3));
write(+0.0 != Array(1));
write(+0.0 != foo);
write(1 != undefined);
write(1 != null);
write(1 != true);
write(1 != false);
write(1 != Boolean(true));
write(1 != Boolean(false));
write(1 != new Boolean(true));
write(1 != new Boolean(false));
write(1 != NaN);
write(1 != +0);
write(1 != -0);
write(1 != 0);
write(1 != 0.0);
write(1 != -0.0);
write(1 != +0.0);
write(1 != 1);
write(1 != 10);
write(1 != 10.0);
write(1 != 10.1);
write(1 != -1);
write(1 != -10);
write(1 != -10.0);
write(1 != -10.1);
write(1 != Number.MAX_VALUE);
write(1 != Number.MIN_VALUE);
write(1 != Number.NaN);
write(1 != Number.POSITIVE_INFINITY);
write(1 != Number.NEGATIVE_INFINITY);
write(1 != new Number(NaN));
write(1 != new Number(+0));
write(1 != new Number(-0));
write(1 != new Number(0));
write(1 != new Number(0.0));
write(1 != new Number(-0.0));
write(1 != new Number(+0.0));
write(1 != new Number(1));
write(1 != new Number(10));
write(1 != new Number(10.0));
write(1 != new Number(10.1));
write(1 != new Number(-1));
write(1 != new Number(-10));
write(1 != new Number(-10.0));
write(1 != new Number(-10.1));
write(1 != new Number(Number.MAX_VALUE));
write(1 != new Number(Number.MIN_VALUE));
write(1 != new Number(Number.NaN));
write(1 != new Number(Number.POSITIVE_INFINITY));
write(1 != new Number(Number.NEGATIVE_INFINITY));
write(1 != '');
write(1 != 0xa);
write(1 != 04);
write(1 != 'hello');
write(1 != 'hel' + 'lo');
write(1 != String(''));
write(1 != String('hello'));
write(1 != String('h' + 'ello'));
write(1 != new String(''));
write(1 != new String('hello'));
write(1 != new String('he' + 'llo'));
write(1 != new Object());
write(1 != new Object());
write(1 != [1, 2, 3]);
write(1 != [1 ,2 , 3]);
write(1 != new Array(3));
write(1 != Array(3));
write(1 != new Array(1 ,2 ,3));
write(1 != Array(1));
write(1 != foo);
write(10 != undefined);
write(10 != null);
write(10 != true);
write(10 != false);
write(10 != Boolean(true));
write(10 != Boolean(false));
write(10 != new Boolean(true));
write(10 != new Boolean(false));
write(10 != NaN);
write(10 != +0);
write(10 != -0);
write(10 != 0);
write(10 != 0.0);
write(10 != -0.0);
write(10 != +0.0);
write(10 != 1);
write(10 != 10);
write(10 != 10.0);
write(10 != 10.1);
write(10 != -1);
write(10 != -10);
write(10 != -10.0);
write(10 != -10.1);
write(10 != Number.MAX_VALUE);
write(10 != Number.MIN_VALUE);
write(10 != Number.NaN);
write(10 != Number.POSITIVE_INFINITY);
write(10 != Number.NEGATIVE_INFINITY);
write(10 != new Number(NaN));
write(10 != new Number(+0));
write(10 != new Number(-0));
write(10 != new Number(0));
write(10 != new Number(0.0));
write(10 != new Number(-0.0));
write(10 != new Number(+0.0));
write(10 != new Number(1));
write(10 != new Number(10));
write(10 != new Number(10.0));
write(10 != new Number(10.1));
write(10 != new Number(-1));
write(10 != new Number(-10));
write(10 != new Number(-10.0));
write(10 != new Number(-10.1));
write(10 != new Number(Number.MAX_VALUE));
write(10 != new Number(Number.MIN_VALUE));
write(10 != new Number(Number.NaN));
write(10 != new Number(Number.POSITIVE_INFINITY));
write(10 != new Number(Number.NEGATIVE_INFINITY));
write(10 != '');
write(10 != 0xa);
write(10 != 04);
write(10 != 'hello');
write(10 != 'hel' + 'lo');
write(10 != String(''));
write(10 != String('hello'));
write(10 != String('h' + 'ello'));
write(10 != new String(''));
write(10 != new String('hello'));
write(10 != new String('he' + 'llo'));
write(10 != new Object());
write(10 != new Object());
write(10 != [1, 2, 3]);
write(10 != [1 ,2 , 3]);
write(10 != new Array(3));
write(10 != Array(3));
write(10 != new Array(1 ,2 ,3));
write(10 != Array(1));
write(10 != foo);
write(10.0 != undefined);
write(10.0 != null);
write(10.0 != true);
write(10.0 != false);
write(10.0 != Boolean(true));
write(10.0 != Boolean(false));
write(10.0 != new Boolean(true));
write(10.0 != new Boolean(false));
write(10.0 != NaN);
write(10.0 != +0);
write(10.0 != -0);
write(10.0 != 0);
write(10.0 != 0.0);
write(10.0 != -0.0);
write(10.0 != +0.0);
write(10.0 != 1);
write(10.0 != 10);
write(10.0 != 10.0);
write(10.0 != 10.1);
write(10.0 != -1);
write(10.0 != -10);
write(10.0 != -10.0);
write(10.0 != -10.1);
write(10.0 != Number.MAX_VALUE);
write(10.0 != Number.MIN_VALUE);
write(10.0 != Number.NaN);
write(10.0 != Number.POSITIVE_INFINITY);
write(10.0 != Number.NEGATIVE_INFINITY);
write(10.0 != new Number(NaN));
write(10.0 != new Number(+0));
write(10.0 != new Number(-0));
write(10.0 != new Number(0));
write(10.0 != new Number(0.0));
write(10.0 != new Number(-0.0));
write(10.0 != new Number(+0.0));
write(10.0 != new Number(1));
write(10.0 != new Number(10));
write(10.0 != new Number(10.0));
write(10.0 != new Number(10.1));
write(10.0 != new Number(-1));
write(10.0 != new Number(-10));
write(10.0 != new Number(-10.0));
write(10.0 != new Number(-10.1));
write(10.0 != new Number(Number.MAX_VALUE));
write(10.0 != new Number(Number.MIN_VALUE));
write(10.0 != new Number(Number.NaN));
write(10.0 != new Number(Number.POSITIVE_INFINITY));
write(10.0 != new Number(Number.NEGATIVE_INFINITY));
write(10.0 != '');
write(10.0 != 0xa);
write(10.0 != 04);
write(10.0 != 'hello');
write(10.0 != 'hel' + 'lo');
write(10.0 != String(''));
write(10.0 != String('hello'));
write(10.0 != String('h' + 'ello'));
write(10.0 != new String(''));
write(10.0 != new String('hello'));
write(10.0 != new String('he' + 'llo'));
write(10.0 != new Object());
write(10.0 != new Object());
write(10.0 != [1, 2, 3]);
write(10.0 != [1 ,2 , 3]);
write(10.0 != new Array(3));
write(10.0 != Array(3));
write(10.0 != new Array(1 ,2 ,3));
write(10.0 != Array(1));
write(10.0 != foo);
write(10.1 != undefined);
write(10.1 != null);
write(10.1 != true);
write(10.1 != false);
write(10.1 != Boolean(true));
write(10.1 != Boolean(false));
write(10.1 != new Boolean(true));
write(10.1 != new Boolean(false));
write(10.1 != NaN);
write(10.1 != +0);
write(10.1 != -0);
write(10.1 != 0);
write(10.1 != 0.0);
write(10.1 != -0.0);
write(10.1 != +0.0);
write(10.1 != 1);
write(10.1 != 10);
write(10.1 != 10.0);
write(10.1 != 10.1);
write(10.1 != -1);
write(10.1 != -10);
write(10.1 != -10.0);
write(10.1 != -10.1);
write(10.1 != Number.MAX_VALUE);
write(10.1 != Number.MIN_VALUE);
write(10.1 != Number.NaN);
write(10.1 != Number.POSITIVE_INFINITY);
write(10.1 != Number.NEGATIVE_INFINITY);
write(10.1 != new Number(NaN));
write(10.1 != new Number(+0));
write(10.1 != new Number(-0));
write(10.1 != new Number(0));
write(10.1 != new Number(0.0));
write(10.1 != new Number(-0.0));
write(10.1 != new Number(+0.0));
write(10.1 != new Number(1));
write(10.1 != new Number(10));
write(10.1 != new Number(10.0));
write(10.1 != new Number(10.1));
write(10.1 != new Number(-1));
write(10.1 != new Number(-10));
write(10.1 != new Number(-10.0));
write(10.1 != new Number(-10.1));
write(10.1 != new Number(Number.MAX_VALUE));
write(10.1 != new Number(Number.MIN_VALUE));
write(10.1 != new Number(Number.NaN));
write(10.1 != new Number(Number.POSITIVE_INFINITY));
write(10.1 != new Number(Number.NEGATIVE_INFINITY));
write(10.1 != '');
write(10.1 != 0xa);
write(10.1 != 04);
write(10.1 != 'hello');
write(10.1 != 'hel' + 'lo');
write(10.1 != String(''));
write(10.1 != String('hello'));
write(10.1 != String('h' + 'ello'));
write(10.1 != new String(''));
write(10.1 != new String('hello'));
write(10.1 != new String('he' + 'llo'));
write(10.1 != new Object());
write(10.1 != new Object());
write(10.1 != [1, 2, 3]);
write(10.1 != [1 ,2 , 3]);
write(10.1 != new Array(3));
write(10.1 != Array(3));
write(10.1 != new Array(1 ,2 ,3));
write(10.1 != Array(1));
write(10.1 != foo);
write(-1 != undefined);
write(-1 != null);
write(-1 != true);
write(-1 != false);
write(-1 != Boolean(true));
write(-1 != Boolean(false));
write(-1 != new Boolean(true));
write(-1 != new Boolean(false));
write(-1 != NaN);
write(-1 != +0);
write(-1 != -0);
write(-1 != 0);
write(-1 != 0.0);
write(-1 != -0.0);
write(-1 != +0.0);
write(-1 != 1);
write(-1 != 10);
write(-1 != 10.0);
write(-1 != 10.1);
write(-1 != -1);
write(-1 != -10);
write(-1 != -10.0);
write(-1 != -10.1);
write(-1 != Number.MAX_VALUE);
write(-1 != Number.MIN_VALUE);
write(-1 != Number.NaN);
write(-1 != Number.POSITIVE_INFINITY);
write(-1 != Number.NEGATIVE_INFINITY);
write(-1 != new Number(NaN));
write(-1 != new Number(+0));
write(-1 != new Number(-0));
write(-1 != new Number(0));
write(-1 != new Number(0.0));
write(-1 != new Number(-0.0));
write(-1 != new Number(+0.0));
write(-1 != new Number(1));
write(-1 != new Number(10));
write(-1 != new Number(10.0));
write(-1 != new Number(10.1));
write(-1 != new Number(-1));
write(-1 != new Number(-10));
write(-1 != new Number(-10.0));
write(-1 != new Number(-10.1));
write(-1 != new Number(Number.MAX_VALUE));
write(-1 != new Number(Number.MIN_VALUE));
write(-1 != new Number(Number.NaN));
write(-1 != new Number(Number.POSITIVE_INFINITY));
write(-1 != new Number(Number.NEGATIVE_INFINITY));
write(-1 != '');
write(-1 != 0xa);
write(-1 != 04);
write(-1 != 'hello');
write(-1 != 'hel' + 'lo');
write(-1 != String(''));
write(-1 != String('hello'));
write(-1 != String('h' + 'ello'));
write(-1 != new String(''));
write(-1 != new String('hello'));
write(-1 != new String('he' + 'llo'));
write(-1 != new Object());
write(-1 != new Object());
write(-1 != [1, 2, 3]);
write(-1 != [1 ,2 , 3]);
write(-1 != new Array(3));
write(-1 != Array(3));
write(-1 != new Array(1 ,2 ,3));
write(-1 != Array(1));
write(-1 != foo);
write(-10 != undefined);
write(-10 != null);
write(-10 != true);
write(-10 != false);
write(-10 != Boolean(true));
write(-10 != Boolean(false));
write(-10 != new Boolean(true));
write(-10 != new Boolean(false));
write(-10 != NaN);
write(-10 != +0);
write(-10 != -0);
write(-10 != 0);
write(-10 != 0.0);
write(-10 != -0.0);
write(-10 != +0.0);
write(-10 != 1);
write(-10 != 10);
write(-10 != 10.0);
write(-10 != 10.1);
write(-10 != -1);
write(-10 != -10);
write(-10 != -10.0);
write(-10 != -10.1);
write(-10 != Number.MAX_VALUE);
write(-10 != Number.MIN_VALUE);
write(-10 != Number.NaN);
write(-10 != Number.POSITIVE_INFINITY);
write(-10 != Number.NEGATIVE_INFINITY);
write(-10 != new Number(NaN));
write(-10 != new Number(+0));
write(-10 != new Number(-0));
write(-10 != new Number(0));
write(-10 != new Number(0.0));
write(-10 != new Number(-0.0));
write(-10 != new Number(+0.0));
write(-10 != new Number(1));
write(-10 != new Number(10));
write(-10 != new Number(10.0));
write(-10 != new Number(10.1));
write(-10 != new Number(-1));
write(-10 != new Number(-10));
write(-10 != new Number(-10.0));
write(-10 != new Number(-10.1));
write(-10 != new Number(Number.MAX_VALUE));
write(-10 != new Number(Number.MIN_VALUE));
write(-10 != new Number(Number.NaN));
write(-10 != new Number(Number.POSITIVE_INFINITY));
write(-10 != new Number(Number.NEGATIVE_INFINITY));
write(-10 != '');
write(-10 != 0xa);
write(-10 != 04);
write(-10 != 'hello');
write(-10 != 'hel' + 'lo');
write(-10 != String(''));
write(-10 != String('hello'));
write(-10 != String('h' + 'ello'));
write(-10 != new String(''));
write(-10 != new String('hello'));
write(-10 != new String('he' + 'llo'));
write(-10 != new Object());
write(-10 != new Object());
write(-10 != [1, 2, 3]);
write(-10 != [1 ,2 , 3]);
write(-10 != new Array(3));
write(-10 != Array(3));
write(-10 != new Array(1 ,2 ,3));
write(-10 != Array(1));
write(-10 != foo);
write(-10.0 != undefined);
write(-10.0 != null);
write(-10.0 != true);
write(-10.0 != false);
write(-10.0 != Boolean(true));
write(-10.0 != Boolean(false));
write(-10.0 != new Boolean(true));
write(-10.0 != new Boolean(false));
write(-10.0 != NaN);
write(-10.0 != +0);
write(-10.0 != -0);
write(-10.0 != 0);
write(-10.0 != 0.0);
write(-10.0 != -0.0);
write(-10.0 != +0.0);
write(-10.0 != 1);
write(-10.0 != 10);
write(-10.0 != 10.0);
write(-10.0 != 10.1);
write(-10.0 != -1);
write(-10.0 != -10);
write(-10.0 != -10.0);
write(-10.0 != -10.1);
write(-10.0 != Number.MAX_VALUE);
write(-10.0 != Number.MIN_VALUE);
write(-10.0 != Number.NaN);
write(-10.0 != Number.POSITIVE_INFINITY);
write(-10.0 != Number.NEGATIVE_INFINITY);
write(-10.0 != new Number(NaN));
write(-10.0 != new Number(+0));
write(-10.0 != new Number(-0));
write(-10.0 != new Number(0));
write(-10.0 != new Number(0.0));
write(-10.0 != new Number(-0.0));
write(-10.0 != new Number(+0.0));
write(-10.0 != new Number(1));
write(-10.0 != new Number(10));
write(-10.0 != new Number(10.0));
write(-10.0 != new Number(10.1));
write(-10.0 != new Number(-1));
write(-10.0 != new Number(-10));
write(-10.0 != new Number(-10.0));
write(-10.0 != new Number(-10.1));
write(-10.0 != new Number(Number.MAX_VALUE));
write(-10.0 != new Number(Number.MIN_VALUE));
write(-10.0 != new Number(Number.NaN));
write(-10.0 != new Number(Number.POSITIVE_INFINITY));
write(-10.0 != new Number(Number.NEGATIVE_INFINITY));
write(-10.0 != '');
write(-10.0 != 0xa);
write(-10.0 != 04);
write(-10.0 != 'hello');
write(-10.0 != 'hel' + 'lo');
write(-10.0 != String(''));
write(-10.0 != String('hello'));
write(-10.0 != String('h' + 'ello'));
write(-10.0 != new String(''));
write(-10.0 != new String('hello'));
write(-10.0 != new String('he' + 'llo'));
write(-10.0 != new Object());
write(-10.0 != new Object());
write(-10.0 != [1, 2, 3]);
write(-10.0 != [1 ,2 , 3]);
write(-10.0 != new Array(3));
write(-10.0 != Array(3));
write(-10.0 != new Array(1 ,2 ,3));
write(-10.0 != Array(1));
write(-10.0 != foo);
write(-10.1 != undefined);
write(-10.1 != null);
write(-10.1 != true);
write(-10.1 != false);
write(-10.1 != Boolean(true));
write(-10.1 != Boolean(false));
write(-10.1 != new Boolean(true));
write(-10.1 != new Boolean(false));
write(-10.1 != NaN);
write(-10.1 != +0);
write(-10.1 != -0);
write(-10.1 != 0);
write(-10.1 != 0.0);
write(-10.1 != -0.0);
write(-10.1 != +0.0);
write(-10.1 != 1);
write(-10.1 != 10);
write(-10.1 != 10.0);
write(-10.1 != 10.1);
write(-10.1 != -1);
write(-10.1 != -10);
write(-10.1 != -10.0);
write(-10.1 != -10.1);
write(-10.1 != Number.MAX_VALUE);
write(-10.1 != Number.MIN_VALUE);
write(-10.1 != Number.NaN);
write(-10.1 != Number.POSITIVE_INFINITY);
write(-10.1 != Number.NEGATIVE_INFINITY);
write(-10.1 != new Number(NaN));
write(-10.1 != new Number(+0));
write(-10.1 != new Number(-0));
write(-10.1 != new Number(0));
write(-10.1 != new Number(0.0));
write(-10.1 != new Number(-0.0));
write(-10.1 != new Number(+0.0));
write(-10.1 != new Number(1));
write(-10.1 != new Number(10));
write(-10.1 != new Number(10.0));
write(-10.1 != new Number(10.1));
write(-10.1 != new Number(-1));
write(-10.1 != new Number(-10));
write(-10.1 != new Number(-10.0));
write(-10.1 != new Number(-10.1));
write(-10.1 != new Number(Number.MAX_VALUE));
write(-10.1 != new Number(Number.MIN_VALUE));
write(-10.1 != new Number(Number.NaN));
write(-10.1 != new Number(Number.POSITIVE_INFINITY));
write(-10.1 != new Number(Number.NEGATIVE_INFINITY));
write(-10.1 != '');
write(-10.1 != 0xa);
write(-10.1 != 04);
write(-10.1 != 'hello');
write(-10.1 != 'hel' + 'lo');
write(-10.1 != String(''));
write(-10.1 != String('hello'));
write(-10.1 != String('h' + 'ello'));
write(-10.1 != new String(''));
write(-10.1 != new String('hello'));
write(-10.1 != new String('he' + 'llo'));
write(-10.1 != new Object());
write(-10.1 != new Object());
write(-10.1 != [1, 2, 3]);
write(-10.1 != [1 ,2 , 3]);
write(-10.1 != new Array(3));
write(-10.1 != Array(3));
write(-10.1 != new Array(1 ,2 ,3));
write(-10.1 != Array(1));
write(-10.1 != foo);
write(Number.MAX_VALUE != undefined);
write(Number.MAX_VALUE != null);
write(Number.MAX_VALUE != true);
write(Number.MAX_VALUE != false);
write(Number.MAX_VALUE != Boolean(true));
write(Number.MAX_VALUE != Boolean(false));
write(Number.MAX_VALUE != new Boolean(true));
write(Number.MAX_VALUE != new Boolean(false));
write(Number.MAX_VALUE != NaN);
write(Number.MAX_VALUE != +0);
write(Number.MAX_VALUE != -0);
write(Number.MAX_VALUE != 0);
write(Number.MAX_VALUE != 0.0);
write(Number.MAX_VALUE != -0.0);
write(Number.MAX_VALUE != +0.0);
write(Number.MAX_VALUE != 1);
write(Number.MAX_VALUE != 10);
write(Number.MAX_VALUE != 10.0);
write(Number.MAX_VALUE != 10.1);
write(Number.MAX_VALUE != -1);
write(Number.MAX_VALUE != -10);
write(Number.MAX_VALUE != -10.0);
write(Number.MAX_VALUE != -10.1);
write(Number.MAX_VALUE != Number.MAX_VALUE);
write(Number.MAX_VALUE != Number.MIN_VALUE);
write(Number.MAX_VALUE != Number.NaN);
write(Number.MAX_VALUE != Number.POSITIVE_INFINITY);
write(Number.MAX_VALUE != Number.NEGATIVE_INFINITY);
write(Number.MAX_VALUE != new Number(NaN));
write(Number.MAX_VALUE != new Number(+0));
write(Number.MAX_VALUE != new Number(-0));
write(Number.MAX_VALUE != new Number(0));
write(Number.MAX_VALUE != new Number(0.0));
write(Number.MAX_VALUE != new Number(-0.0));
write(Number.MAX_VALUE != new Number(+0.0));
write(Number.MAX_VALUE != new Number(1));
write(Number.MAX_VALUE != new Number(10));
write(Number.MAX_VALUE != new Number(10.0));
write(Number.MAX_VALUE != new Number(10.1));
write(Number.MAX_VALUE != new Number(-1));
write(Number.MAX_VALUE != new Number(-10));
write(Number.MAX_VALUE != new Number(-10.0));
write(Number.MAX_VALUE != new Number(-10.1));
write(Number.MAX_VALUE != new Number(Number.MAX_VALUE));
write(Number.MAX_VALUE != new Number(Number.MIN_VALUE));
write(Number.MAX_VALUE != new Number(Number.NaN));
write(Number.MAX_VALUE != new Number(Number.POSITIVE_INFINITY));
write(Number.MAX_VALUE != new Number(Number.NEGATIVE_INFINITY));
write(Number.MAX_VALUE != '');
write(Number.MAX_VALUE != 0xa);
write(Number.MAX_VALUE != 04);
write(Number.MAX_VALUE != 'hello');
write(Number.MAX_VALUE != 'hel' + 'lo');
write(Number.MAX_VALUE != String(''));
write(Number.MAX_VALUE != String('hello'));
write(Number.MAX_VALUE != String('h' + 'ello'));
write(Number.MAX_VALUE != new String(''));
write(Number.MAX_VALUE != new String('hello'));
write(Number.MAX_VALUE != new String('he' + 'llo'));
write(Number.MAX_VALUE != new Object());
write(Number.MAX_VALUE != new Object());
write(Number.MAX_VALUE != [1, 2, 3]);
write(Number.MAX_VALUE != [1 ,2 , 3]);
write(Number.MAX_VALUE != new Array(3));
write(Number.MAX_VALUE != Array(3));
write(Number.MAX_VALUE != new Array(1 ,2 ,3));
write(Number.MAX_VALUE != Array(1));
write(Number.MAX_VALUE != foo);
write(Number.MIN_VALUE != undefined);
write(Number.MIN_VALUE != null);
write(Number.MIN_VALUE != true);
write(Number.MIN_VALUE != false);
write(Number.MIN_VALUE != Boolean(true));
write(Number.MIN_VALUE != Boolean(false));
write(Number.MIN_VALUE != new Boolean(true));
write(Number.MIN_VALUE != new Boolean(false));
write(Number.MIN_VALUE != NaN);
write(Number.MIN_VALUE != +0);
write(Number.MIN_VALUE != -0);
write(Number.MIN_VALUE != 0);
write(Number.MIN_VALUE != 0.0);
write(Number.MIN_VALUE != -0.0);
write(Number.MIN_VALUE != +0.0);
write(Number.MIN_VALUE != 1);
write(Number.MIN_VALUE != 10);
write(Number.MIN_VALUE != 10.0);
write(Number.MIN_VALUE != 10.1);
write(Number.MIN_VALUE != -1);
write(Number.MIN_VALUE != -10);
write(Number.MIN_VALUE != -10.0);
write(Number.MIN_VALUE != -10.1);
write(Number.MIN_VALUE != Number.MAX_VALUE);
write(Number.MIN_VALUE != Number.MIN_VALUE);
write(Number.MIN_VALUE != Number.NaN);
write(Number.MIN_VALUE != Number.POSITIVE_INFINITY);
write(Number.MIN_VALUE != Number.NEGATIVE_INFINITY);
write(Number.MIN_VALUE != new Number(NaN));
write(Number.MIN_VALUE != new Number(+0));
write(Number.MIN_VALUE != new Number(-0));
write(Number.MIN_VALUE != new Number(0));
write(Number.MIN_VALUE != new Number(0.0));
write(Number.MIN_VALUE != new Number(-0.0));
write(Number.MIN_VALUE != new Number(+0.0));
write(Number.MIN_VALUE != new Number(1));
write(Number.MIN_VALUE != new Number(10));
write(Number.MIN_VALUE != new Number(10.0));
write(Number.MIN_VALUE != new Number(10.1));
write(Number.MIN_VALUE != new Number(-1));
write(Number.MIN_VALUE != new Number(-10));
write(Number.MIN_VALUE != new Number(-10.0));
write(Number.MIN_VALUE != new Number(-10.1));
write(Number.MIN_VALUE != new Number(Number.MAX_VALUE));
write(Number.MIN_VALUE != new Number(Number.MIN_VALUE));
write(Number.MIN_VALUE != new Number(Number.NaN));
write(Number.MIN_VALUE != new Number(Number.POSITIVE_INFINITY));
write(Number.MIN_VALUE != new Number(Number.NEGATIVE_INFINITY));
write(Number.MIN_VALUE != '');
write(Number.MIN_VALUE != 0xa);
write(Number.MIN_VALUE != 04);
write(Number.MIN_VALUE != 'hello');
write(Number.MIN_VALUE != 'hel' + 'lo');
write(Number.MIN_VALUE != String(''));
write(Number.MIN_VALUE != String('hello'));
write(Number.MIN_VALUE != String('h' + 'ello'));
write(Number.MIN_VALUE != new String(''));
write(Number.MIN_VALUE != new String('hello'));
write(Number.MIN_VALUE != new String('he' + 'llo'));
write(Number.MIN_VALUE != new Object());
write(Number.MIN_VALUE != new Object());
write(Number.MIN_VALUE != [1, 2, 3]);
write(Number.MIN_VALUE != [1 ,2 , 3]);
write(Number.MIN_VALUE != new Array(3));
write(Number.MIN_VALUE != Array(3));
write(Number.MIN_VALUE != new Array(1 ,2 ,3));
write(Number.MIN_VALUE != Array(1));
write(Number.MIN_VALUE != foo);
write(Number.NaN != undefined);
write(Number.NaN != null);
write(Number.NaN != true);
write(Number.NaN != false);
write(Number.NaN != Boolean(true));
write(Number.NaN != Boolean(false));
write(Number.NaN != new Boolean(true));
write(Number.NaN != new Boolean(false));
write(Number.NaN != NaN);
write(Number.NaN != +0);
write(Number.NaN != -0);
write(Number.NaN != 0);
write(Number.NaN != 0.0);
write(Number.NaN != -0.0);
write(Number.NaN != +0.0);
write(Number.NaN != 1);
write(Number.NaN != 10);
write(Number.NaN != 10.0);
write(Number.NaN != 10.1);
write(Number.NaN != -1);
write(Number.NaN != -10);
write(Number.NaN != -10.0);
write(Number.NaN != -10.1);
write(Number.NaN != Number.MAX_VALUE);
write(Number.NaN != Number.MIN_VALUE);
write(Number.NaN != Number.NaN);
write(Number.NaN != Number.POSITIVE_INFINITY);
write(Number.NaN != Number.NEGATIVE_INFINITY);
write(Number.NaN != new Number(NaN));
write(Number.NaN != new Number(+0));
write(Number.NaN != new Number(-0));
write(Number.NaN != new Number(0));
write(Number.NaN != new Number(0.0));
write(Number.NaN != new Number(-0.0));
write(Number.NaN != new Number(+0.0));
write(Number.NaN != new Number(1));
write(Number.NaN != new Number(10));
write(Number.NaN != new Number(10.0));
write(Number.NaN != new Number(10.1));
write(Number.NaN != new Number(-1));
write(Number.NaN != new Number(-10));
write(Number.NaN != new Number(-10.0));
write(Number.NaN != new Number(-10.1));
write(Number.NaN != new Number(Number.MAX_VALUE));
write(Number.NaN != new Number(Number.MIN_VALUE));
write(Number.NaN != new Number(Number.NaN));
write(Number.NaN != new Number(Number.POSITIVE_INFINITY));
write(Number.NaN != new Number(Number.NEGATIVE_INFINITY));
write(Number.NaN != '');
write(Number.NaN != 0xa);
write(Number.NaN != 04);
write(Number.NaN != 'hello');
write(Number.NaN != 'hel' + 'lo');
write(Number.NaN != String(''));
write(Number.NaN != String('hello'));
write(Number.NaN != String('h' + 'ello'));
write(Number.NaN != new String(''));
write(Number.NaN != new String('hello'));
write(Number.NaN != new String('he' + 'llo'));
write(Number.NaN != new Object());
write(Number.NaN != new Object());
write(Number.NaN != [1, 2, 3]);
write(Number.NaN != [1 ,2 , 3]);
write(Number.NaN != new Array(3));
write(Number.NaN != Array(3));
write(Number.NaN != new Array(1 ,2 ,3));
write(Number.NaN != Array(1));
write(Number.NaN != foo);
write(Number.POSITIVE_INFINITY != undefined);
write(Number.POSITIVE_INFINITY != null);
write(Number.POSITIVE_INFINITY != true);
write(Number.POSITIVE_INFINITY != false);
write(Number.POSITIVE_INFINITY != Boolean(true));
write(Number.POSITIVE_INFINITY != Boolean(false));
write(Number.POSITIVE_INFINITY != new Boolean(true));
write(Number.POSITIVE_INFINITY != new Boolean(false));
write(Number.POSITIVE_INFINITY != NaN);
write(Number.POSITIVE_INFINITY != +0);
write(Number.POSITIVE_INFINITY != -0);
write(Number.POSITIVE_INFINITY != 0);
write(Number.POSITIVE_INFINITY != 0.0);
write(Number.POSITIVE_INFINITY != -0.0);
write(Number.POSITIVE_INFINITY != +0.0);
write(Number.POSITIVE_INFINITY != 1);
write(Number.POSITIVE_INFINITY != 10);
write(Number.POSITIVE_INFINITY != 10.0);
write(Number.POSITIVE_INFINITY != 10.1);
write(Number.POSITIVE_INFINITY != -1);
write(Number.POSITIVE_INFINITY != -10);
write(Number.POSITIVE_INFINITY != -10.0);
write(Number.POSITIVE_INFINITY != -10.1);
write(Number.POSITIVE_INFINITY != Number.MAX_VALUE);
write(Number.POSITIVE_INFINITY != Number.MIN_VALUE);
write(Number.POSITIVE_INFINITY != Number.NaN);
write(Number.POSITIVE_INFINITY != Number.POSITIVE_INFINITY);
write(Number.POSITIVE_INFINITY != Number.NEGATIVE_INFINITY);
write(Number.POSITIVE_INFINITY != new Number(NaN));
write(Number.POSITIVE_INFINITY != new Number(+0));
write(Number.POSITIVE_INFINITY != new Number(-0));
write(Number.POSITIVE_INFINITY != new Number(0));
write(Number.POSITIVE_INFINITY != new Number(0.0));
write(Number.POSITIVE_INFINITY != new Number(-0.0));
write(Number.POSITIVE_INFINITY != new Number(+0.0));
write(Number.POSITIVE_INFINITY != new Number(1));
write(Number.POSITIVE_INFINITY != new Number(10));
write(Number.POSITIVE_INFINITY != new Number(10.0));
write(Number.POSITIVE_INFINITY != new Number(10.1));
write(Number.POSITIVE_INFINITY != new Number(-1));
write(Number.POSITIVE_INFINITY != new Number(-10));
write(Number.POSITIVE_INFINITY != new Number(-10.0));
write(Number.POSITIVE_INFINITY != new Number(-10.1));
write(Number.POSITIVE_INFINITY != new Number(Number.MAX_VALUE));
write(Number.POSITIVE_INFINITY != new Number(Number.MIN_VALUE));
write(Number.POSITIVE_INFINITY != new Number(Number.NaN));
write(Number.POSITIVE_INFINITY != new Number(Number.POSITIVE_INFINITY));
write(Number.POSITIVE_INFINITY != new Number(Number.NEGATIVE_INFINITY));
write(Number.POSITIVE_INFINITY != '');
write(Number.POSITIVE_INFINITY != 0xa);
write(Number.POSITIVE_INFINITY != 04);
write(Number.POSITIVE_INFINITY != 'hello');
write(Number.POSITIVE_INFINITY != 'hel' + 'lo');
write(Number.POSITIVE_INFINITY != String(''));
write(Number.POSITIVE_INFINITY != String('hello'));
write(Number.POSITIVE_INFINITY != String('h' + 'ello'));
write(Number.POSITIVE_INFINITY != new String(''));
write(Number.POSITIVE_INFINITY != new String('hello'));
write(Number.POSITIVE_INFINITY != new String('he' + 'llo'));
write(Number.POSITIVE_INFINITY != new Object());
write(Number.POSITIVE_INFINITY != new Object());
write(Number.POSITIVE_INFINITY != [1, 2, 3]);
write(Number.POSITIVE_INFINITY != [1 ,2 , 3]);
write(Number.POSITIVE_INFINITY != new Array(3));
write(Number.POSITIVE_INFINITY != Array(3));
write(Number.POSITIVE_INFINITY != new Array(1 ,2 ,3));
write(Number.POSITIVE_INFINITY != Array(1));
write(Number.POSITIVE_INFINITY != foo);
write(Number.NEGATIVE_INFINITY != undefined);
write(Number.NEGATIVE_INFINITY != null);
write(Number.NEGATIVE_INFINITY != true);
write(Number.NEGATIVE_INFINITY != false);
write(Number.NEGATIVE_INFINITY != Boolean(true));
write(Number.NEGATIVE_INFINITY != Boolean(false));
write(Number.NEGATIVE_INFINITY != new Boolean(true));
write(Number.NEGATIVE_INFINITY != new Boolean(false));
write(Number.NEGATIVE_INFINITY != NaN);
write(Number.NEGATIVE_INFINITY != +0);
write(Number.NEGATIVE_INFINITY != -0);
write(Number.NEGATIVE_INFINITY != 0);
write(Number.NEGATIVE_INFINITY != 0.0);
write(Number.NEGATIVE_INFINITY != -0.0);
write(Number.NEGATIVE_INFINITY != +0.0);
write(Number.NEGATIVE_INFINITY != 1);
write(Number.NEGATIVE_INFINITY != 10);
write(Number.NEGATIVE_INFINITY != 10.0);
write(Number.NEGATIVE_INFINITY != 10.1);
write(Number.NEGATIVE_INFINITY != -1);
write(Number.NEGATIVE_INFINITY != -10);
write(Number.NEGATIVE_INFINITY != -10.0);
write(Number.NEGATIVE_INFINITY != -10.1);
write(Number.NEGATIVE_INFINITY != Number.MAX_VALUE);
write(Number.NEGATIVE_INFINITY != Number.MIN_VALUE);
write(Number.NEGATIVE_INFINITY != Number.NaN);
write(Number.NEGATIVE_INFINITY != Number.POSITIVE_INFINITY);
write(Number.NEGATIVE_INFINITY != Number.NEGATIVE_INFINITY);
write(Number.NEGATIVE_INFINITY != new Number(NaN));
write(Number.NEGATIVE_INFINITY != new Number(+0));
write(Number.NEGATIVE_INFINITY != new Number(-0));
write(Number.NEGATIVE_INFINITY != new Number(0));
write(Number.NEGATIVE_INFINITY != new Number(0.0));
write(Number.NEGATIVE_INFINITY != new Number(-0.0));
write(Number.NEGATIVE_INFINITY != new Number(+0.0));
write(Number.NEGATIVE_INFINITY != new Number(1));
write(Number.NEGATIVE_INFINITY != new Number(10));
write(Number.NEGATIVE_INFINITY != new Number(10.0));
write(Number.NEGATIVE_INFINITY != new Number(10.1));
write(Number.NEGATIVE_INFINITY != new Number(-1));
write(Number.NEGATIVE_INFINITY != new Number(-10));
write(Number.NEGATIVE_INFINITY != new Number(-10.0));
write(Number.NEGATIVE_INFINITY != new Number(-10.1));
write(Number.NEGATIVE_INFINITY != new Number(Number.MAX_VALUE));
write(Number.NEGATIVE_INFINITY != new Number(Number.MIN_VALUE));
write(Number.NEGATIVE_INFINITY != new Number(Number.NaN));
write(Number.NEGATIVE_INFINITY != new Number(Number.POSITIVE_INFINITY));
write(Number.NEGATIVE_INFINITY != new Number(Number.NEGATIVE_INFINITY));
write(Number.NEGATIVE_INFINITY != '');
write(Number.NEGATIVE_INFINITY != 0xa);
write(Number.NEGATIVE_INFINITY != 04);
write(Number.NEGATIVE_INFINITY != 'hello');
write(Number.NEGATIVE_INFINITY != 'hel' + 'lo');
write(Number.NEGATIVE_INFINITY != String(''));
write(Number.NEGATIVE_INFINITY != String('hello'));
write(Number.NEGATIVE_INFINITY != String('h' + 'ello'));
write(Number.NEGATIVE_INFINITY != new String(''));
write(Number.NEGATIVE_INFINITY != new String('hello'));
write(Number.NEGATIVE_INFINITY != new String('he' + 'llo'));
write(Number.NEGATIVE_INFINITY != new Object());
write(Number.NEGATIVE_INFINITY != new Object());
write(Number.NEGATIVE_INFINITY != [1, 2, 3]);
write(Number.NEGATIVE_INFINITY != [1 ,2 , 3]);
write(Number.NEGATIVE_INFINITY != new Array(3));
write(Number.NEGATIVE_INFINITY != Array(3));
write(Number.NEGATIVE_INFINITY != new Array(1 ,2 ,3));
write(Number.NEGATIVE_INFINITY != Array(1));
write(Number.NEGATIVE_INFINITY != foo);
write(new Number(NaN) != undefined);
write(new Number(NaN) != null);
write(new Number(NaN) != true);
write(new Number(NaN) != false);
write(new Number(NaN) != Boolean(true));
write(new Number(NaN) != Boolean(false));
write(new Number(NaN) != new Boolean(true));
write(new Number(NaN) != new Boolean(false));
write(new Number(NaN) != NaN);
write(new Number(NaN) != +0);
write(new Number(NaN) != -0);
write(new Number(NaN) != 0);
write(new Number(NaN) != 0.0);
write(new Number(NaN) != -0.0);
write(new Number(NaN) != +0.0);
write(new Number(NaN) != 1);
write(new Number(NaN) != 10);
write(new Number(NaN) != 10.0);
write(new Number(NaN) != 10.1);
write(new Number(NaN) != -1);
write(new Number(NaN) != -10);
write(new Number(NaN) != -10.0);
write(new Number(NaN) != -10.1);
write(new Number(NaN) != Number.MAX_VALUE);
write(new Number(NaN) != Number.MIN_VALUE);
write(new Number(NaN) != Number.NaN);
write(new Number(NaN) != Number.POSITIVE_INFINITY);
write(new Number(NaN) != Number.NEGATIVE_INFINITY);
write(new Number(NaN) != new Number(NaN));
write(new Number(NaN) != new Number(+0));
write(new Number(NaN) != new Number(-0));
write(new Number(NaN) != new Number(0));
write(new Number(NaN) != new Number(0.0));
write(new Number(NaN) != new Number(-0.0));
write(new Number(NaN) != new Number(+0.0));
write(new Number(NaN) != new Number(1));
write(new Number(NaN) != new Number(10));
write(new Number(NaN) != new Number(10.0));
write(new Number(NaN) != new Number(10.1));
write(new Number(NaN) != new Number(-1));
write(new Number(NaN) != new Number(-10));
write(new Number(NaN) != new Number(-10.0));
write(new Number(NaN) != new Number(-10.1));
write(new Number(NaN) != new Number(Number.MAX_VALUE));
write(new Number(NaN) != new Number(Number.MIN_VALUE));
write(new Number(NaN) != new Number(Number.NaN));
write(new Number(NaN) != new Number(Number.POSITIVE_INFINITY));
write(new Number(NaN) != new Number(Number.NEGATIVE_INFINITY));
write(new Number(NaN) != '');
write(new Number(NaN) != 0xa);
write(new Number(NaN) != 04);
write(new Number(NaN) != 'hello');
write(new Number(NaN) != 'hel' + 'lo');
write(new Number(NaN) != String(''));
write(new Number(NaN) != String('hello'));
write(new Number(NaN) != String('h' + 'ello'));
write(new Number(NaN) != new String(''));
write(new Number(NaN) != new String('hello'));
write(new Number(NaN) != new String('he' + 'llo'));
write(new Number(NaN) != new Object());
write(new Number(NaN) != new Object());
write(new Number(NaN) != [1, 2, 3]);
write(new Number(NaN) != [1 ,2 , 3]);
write(new Number(NaN) != new Array(3));
write(new Number(NaN) != Array(3));
write(new Number(NaN) != new Array(1 ,2 ,3));
write(new Number(NaN) != Array(1));
write(new Number(NaN) != foo);
write(new Number(+0) != undefined);
write(new Number(+0) != null);
write(new Number(+0) != true);
write(new Number(+0) != false);
write(new Number(+0) != Boolean(true));
write(new Number(+0) != Boolean(false));
write(new Number(+0) != new Boolean(true));
write(new Number(+0) != new Boolean(false));
write(new Number(+0) != NaN);
write(new Number(+0) != +0);
write(new Number(+0) != -0);
write(new Number(+0) != 0);
write(new Number(+0) != 0.0);
write(new Number(+0) != -0.0);
write(new Number(+0) != +0.0);
write(new Number(+0) != 1);
write(new Number(+0) != 10);
write(new Number(+0) != 10.0);
write(new Number(+0) != 10.1);
write(new Number(+0) != -1);
write(new Number(+0) != -10);
write(new Number(+0) != -10.0);
write(new Number(+0) != -10.1);
write(new Number(+0) != Number.MAX_VALUE);
write(new Number(+0) != Number.MIN_VALUE);
write(new Number(+0) != Number.NaN);
write(new Number(+0) != Number.POSITIVE_INFINITY);
write(new Number(+0) != Number.NEGATIVE_INFINITY);
write(new Number(+0) != new Number(NaN));
| {
"pile_set_name": "Github"
} |
/*
* scrollNav
* http://scrollnav.com
*
* Copyright (c) 2013-2016 James Wilson
* Licensed under the MIT license.
*/
/* eslint-disable */
(function($) {
// Animate scrolling to section location
var scroll_to = function(value, speed, offset, animated) {
if ($(value).length > 0) {
var destination = $(value).offset().top;
speed = animated ? speed : 0;
// Add a class to the scrolled-to section
$('.' + S.settings.className + '__focused-section').removeClass(
S.settings.className + '__focused-section'
);
$(value).addClass(S.settings.className + '__focused-section');
$('html:not(:animated),body:not(:animated)').animate(
{ scrollTop: destination - offset },
speed
);
}
};
// Get url hash if one exists
var get_hash = function() {
return window.location.hash;
};
  var S = {
    // Body classes toggled by _set_body_class to reflect the build state
    classes: {
      loading: 'sn-loading',
      failed: 'sn-failed',
      success: 'sn-active'
    },
    // User-overridable options; merged with the `options` argument in init
    defaults: {
      // Selector that marks the start of each section
      sections: 'h2',
      // Optional selector for nested sub-sections (false disables them)
      subSections: false,
      // Element name used to wrap each section
      sectionElem: 'section',
      // Base CSS class (and BEM prefix) for the generated nav
      className: 'scroll-nav',
      // Render the headline above the nav list
      showHeadline: true,
      headlineText: 'Scroll To',
      // Prepend a link back to the content above the first heading
      showTopLink: true,
      topLinkText: 'Top',
      // Scroll distance (px) past the nav before it becomes fixed
      fixedMargin: 40,
      // Offset (px) applied when scrolling and when judging "in view"
      scrollOffset: 40,
      // Animate scrolling (true) or jump instantly (false)
      animated: true,
      // Animation duration in ms
      speed: 500,
      // jQuery insertion method used to place the nav in the document
      insertLocation: 'insertBefore',
      // Enable up/down arrow-key navigation between sections
      arrowKeys: false,
      // Scroll to location.hash once the nav is built
      scrollToHash: true,
      // Optional lifecycle callbacks
      onInit: null,
      onRender: null,
      onDestroy: null,
      onResetPos: null
    },
_set_body_class: function(state) {
// Set and swap our loading hooks to the body
var $body = $('body');
if (state === 'loading') {
$body.addClass(S.classes.loading);
} else if (state === 'success') {
$body.removeClass(S.classes.loading).addClass(S.classes.success);
} else {
$body.removeClass(S.classes.loading).addClass(S.classes.failed);
}
},
    _find_sections: function($el) {
      // Find the html for each section: every heading matching
      // S.settings.sections plus its following siblings up to (but not
      // including) the next matching heading
      var target_elems = S.settings.sections;
      var raw_html = [];
      if (S.settings.showTopLink) {
        // Content before the first heading becomes its own "Top" section
        var $firstElem = $el.children().first();
        if (!$firstElem.is(target_elems)) {
          raw_html.push($firstElem.nextUntil(target_elems).addBack());
        }
      }
      $el.find(target_elems).each(function() {
        raw_html.push(
          $(this)
            .nextUntil(target_elems)
            .addBack()
        );
      });
      // Raw (still unwrapped) jQuery collections, one entry per section
      S.sections = {
        raw: raw_html
      };
    },
    _setup_sections: function(sections) {
      // Wrap each raw section in a S.settings.sectionElem element with a
      // generated id ('scrollNav-N') and record {id, text, sub_sections}
      // for each into S.sections.data
      var section_data = [];
      $(sections).each(function(i) {
        var sub_data = [];
        var $this_section = $(this);
        var section_id = 'scrollNav-' + (i + 1);
        var isFirst = function() {
          return i === 0;
        };
        var hasHeading = function() {
          return !$this_section.eq(0).is(S.settings.sections);
        };
        // The first section is labelled with the "Top" text when it has no
        // heading of its own (content preceding the first matched heading)
        var text =
          S.settings.showTopLink && isFirst() && hasHeading()
            ? S.settings.topLinkText
            : $this_section.filter(S.settings.sections).text();
        $this_section.wrapAll(
          '<' +
            S.settings.sectionElem +
            ' id="' +
            section_id +
            '" class="' +
            S.settings.className +
            '__section" />'
        );
        // Optionally wrap and record nested sub-sections the same way,
        // with ids of the form 'scrollNav-N-M'
        if (S.settings.subSections) {
          var $sub_sections = $this_section.filter(S.settings.subSections);
          if ($sub_sections.length > 0) {
            $sub_sections.each(function(i) {
              var sub_id = section_id + '-' + (i + 1);
              var sub_text = $(this).text();
              var $this_sub = $this_section.filter(
                $(this)
                  .nextUntil($sub_sections)
                  .addBack()
              );
              $this_sub.wrapAll(
                '<div id="' +
                  sub_id +
                  '" class="' +
                  S.settings.className +
                  '__sub-section" />'
              );
              sub_data.push({ id: sub_id, text: sub_text });
            });
          }
        }
        section_data.push({
          id: section_id,
          text: text,
          sub_sections: sub_data
        });
      });
      S.sections.data = section_data;
    },
_tear_down_sections: function(sections) {
$(sections).each(function() {
var sub_sections = this.sub_sections;
$('#' + this.id)
.children()
.unwrap();
if (sub_sections.length > 0) {
$(sub_sections).each(function() {
$('#' + this.id)
.children()
.unwrap();
});
}
});
},
    _setup_nav: function(sections) {
      // Populate an ordered list from the section array we built:
      // <nav><div.__wrapper>[<span.__heading>]<ol.__list>...</ol></div></nav>
      var $headline = $('<span />', {
        class: S.settings.className + '__heading',
        text: S.settings.headlineText
      });
      var $wrapper = $('<div />', {
        class: S.settings.className + '__wrapper'
      });
      var $nav = $('<nav />', {
        class: S.settings.className,
        role: 'navigation'
      });
      var $nav_list = $('<ol />', { class: S.settings.className + '__list' });
      $.each(sections, function(i) {
        // The first item starts out as the active one
        var $item =
          i === 0
            ? $('<li />', {
                class:
                  S.settings.className +
                  '__item ' +
                  S.settings.className +
                  '__item--active active'
              })
            : $('<li />', { class: S.settings.className + '__item' });
        var $link = $('<a />', {
          href: '#' + this.id,
          class: S.settings.className + '__link',
          text: this.text
        });
        var $sub_nav_list;
        // Build a nested <ol> for sub-sections when present
        if (this.sub_sections.length > 0) {
          $item.addClass('is-parent-item');
          $sub_nav_list = $('<ol />', {
            class: S.settings.className + '__sub-list'
          });
          $.each(this.sub_sections, function() {
            var $sub_item = $('<li />', {
              class: S.settings.className + '__sub-item'
            });
            var $sub_link = $('<a />', {
              href: '#' + this.id,
              class: S.settings.className + '__sub-link',
              text: this.text
            });
            $sub_nav_list.append($sub_item.append($sub_link));
          });
        }
        // Appending an undefined $sub_nav_list is a jQuery no-op
        $nav_list.append($item.append($link).append($sub_nav_list));
      });
      if (S.settings.showHeadline) {
        $nav.append($wrapper.append($headline).append($nav_list));
      } else {
        $nav.append($wrapper.append($nav_list));
      }
      // Keep a handle on the (not yet inserted) nav element
      S.nav = $nav;
    },
_insert_nav: function() {
// Add the nav to our page
var insert_location = S.settings.insertLocation;
var $insert_target = S.settings.insertTarget;
S.nav[insert_location]($insert_target);
},
_setup_pos: function() {
// Find the offset positions of each section
var $nav = S.nav;
var vp_height = $(window).height();
var nav_offset = $nav.offset().top;
var set_offset = function(section) {
var $this_section = $('#' + section.id);
var this_height = $this_section.height();
section.top_offset = $this_section.offset().top;
section.bottom_offset = section.top_offset + this_height;
};
$.each(S.sections.data, function() {
set_offset(this);
$.each(this.sub_sections, function() {
set_offset(this);
});
});
S.dims = {
vp_height: vp_height,
nav_offset: nav_offset
};
},
    _check_pos: function() {
      // Set nav to fixed after scrolling past the header and add an in-view class to any
      // sections currently within the bounds of our view and active class to the first
      // in-view section
      var $nav = S.nav;
      var win_top = $(window).scrollTop();
      // Visible band, shrunk by scrollOffset at both edges
      var boundry_top = win_top + S.settings.scrollOffset;
      var boundry_bottom = win_top + S.dims.vp_height - S.settings.scrollOffset;
      var sections_active = [];
      var sub_sections_active = [];
      if (win_top > S.dims.nav_offset - S.settings.fixedMargin) {
        $nav.addClass('fixed');
      } else {
        $nav.removeClass('fixed');
      }
      // A section is in view when either edge falls inside the band, or
      // when it spans the entire band
      var in_view = function(section) {
        return (
          (section.top_offset >= boundry_top &&
            section.top_offset <= boundry_bottom) ||
          (section.bottom_offset > boundry_top &&
            section.bottom_offset < boundry_bottom) ||
          (section.top_offset < boundry_top &&
            section.bottom_offset > boundry_bottom)
        );
      };
      $.each(S.sections.data, function() {
        if (in_view(this)) {
          sections_active.push(this);
        }
        $.each(this.sub_sections, function() {
          if (in_view(this)) {
            sub_sections_active.push(this);
          }
        });
      });
      // Clear all state classes first, then re-apply below
      $nav
        .find('.' + S.settings.className + '__item')
        .removeClass(S.settings.className + '__item--active')
        .removeClass('active')
        .removeClass('in-view');
      $nav
        .find('.' + S.settings.className + '__sub-item')
        .removeClass(S.settings.className + '__sub-item--active')
        .removeClass('active')
        .removeClass('in-view');
      // First in-view section gets the active classes; the rest only in-view
      $.each(sections_active, function(i) {
        if (i === 0) {
          $nav
            .find('a[href="#' + this.id + '"]')
            .parents('.' + S.settings.className + '__item')
            .addClass(S.settings.className + '__item--active')
            .addClass('active')
            .addClass('in-view');
        } else {
          $nav
            .find('a[href="#' + this.id + '"]')
            .parents('.' + S.settings.className + '__item')
            .addClass('in-view');
        }
      });
      // Expose the active list for the keyboard navigation handler
      S.sections.active = sections_active;
      $.each(sub_sections_active, function(i) {
        if (i === 0) {
          $nav
            .find('a[href="#' + this.id + '"]')
            .parents('.' + S.settings.className + '__sub-item')
            .addClass(S.settings.className + '__sub-item--active')
            .addClass('active')
            .addClass('in-view');
        } else {
          $nav
            .find('a[href="#' + this.id + '"]')
            .parents('.' + S.settings.className + '__sub-item')
            .addClass('in-view');
        }
      });
    },
    _init_scroll_listener: function() {
      // Set a scroll listener to update the fixed and active classes
      $(window).on('scroll.scrollNav', function() {
        S._check_pos();
      });
    },
    _rm_scroll_listeners: function() {
      // Unbind only our namespaced scroll handler
      $(window).off('scroll.scrollNav');
    },
    _init_resize_listener: function() {
      // Set a resize listener to update position values and the fixed and active classes
      $(window).on('resize.scrollNav', function() {
        S._setup_pos();
        S._check_pos();
      });
    },
    _rm_resize_listener: function() {
      // Unbind only our namespaced resize handler
      $(window).off('resize.scrollNav');
    },
_init_click_listener: function() {
// Scroll to section on click
$('.' + S.settings.className)
.find('a')
.on('click.scrollNav', function(e) {
e.preventDefault();
var value = $(this).attr('href');
var speed = S.settings.speed;
var offset = S.settings.scrollOffset;
var animated = S.settings.animated;
scroll_to(value, speed, offset, animated);
});
},
_rm_click_listener: function() {
$('.' + S.settings.className)
.find('a')
.off('click.scrollNav');
},
_init_keyboard_listener: function(sections) {
// Scroll to section on arrow key press
if (S.settings.arrowKeys) {
$(document).on('keydown.scrollNav', function(e) {
if (e.keyCode === 40 || e.keyCode === 38) {
var findSection = function(key) {
var i = 0;
var l = sections.length;
for (i; i < l; i++) {
if (sections[i].id === S.sections.active[0].id) {
var array_offset = key === 40 ? i + 1 : i - 1;
var id =
sections[array_offset] === undefined
? undefined
: sections[array_offset].id;
return id;
}
}
};
var target_section = findSection(e.keyCode);
if (target_section !== undefined) {
e.preventDefault();
var value = '#' + target_section;
var speed = S.settings.speed;
var offset = S.settings.scrollOffset;
var animated = S.settings.animated;
scroll_to(value, speed, offset, animated);
}
}
});
}
},
_rm_keyboard_listener: function() {
$(document).off('keydown.scrollNav');
},
    init: function(options) {
      // Build the nav for each matched element; returns the jQuery set
      // (chainable). Fails gracefully with a console message and the
      // 'sn-failed' body class when required elements are missing.
      return this.each(function() {
        var $el = $(this);
        // Merge default settings with user defined options
        S.settings = $.extend({}, S.defaults, options);
        // If the insert target isn't set, use the initialized element
        S.settings.insertTarget = S.settings.insertTarget
          ? $(S.settings.insertTarget)
          : $el;
        if ($el.length > 0) {
          // Initialize
          // Fire custom init callback
          if (S.settings.onInit) {
            S.settings.onInit.call(this);
          }
          S._set_body_class('loading');
          S._find_sections($el);
          if ($el.find(S.settings.sections).length > 0) {
            // BUILD!!!!
            S._setup_sections(S.sections.raw);
            S._setup_nav(S.sections.data);
            if (S.settings.insertTarget.length > 0) {
              //Add to page
              S._insert_nav();
              S._setup_pos();
              S._check_pos();
              S._init_scroll_listener();
              S._init_resize_listener();
              S._init_click_listener();
              S._init_keyboard_listener(S.sections.data);
              S._set_body_class('success');
              if (S.settings.scrollToHash) {
                // NOTE(review): called with a single argument, so speed/
                // offset/animated are undefined here — confirm scroll_to
                // tolerates that
                scroll_to(get_hash());
              }
              // Fire custom render callback
              if (S.settings.onRender) {
                S.settings.onRender.call(this);
              }
            } else {
              console.log(
                'Build failed, scrollNav could not find "' +
                  S.settings.insertTarget +
                  '"'
              );
              S._set_body_class('failed');
            }
          } else {
            console.log(
              'Build failed, scrollNav could not find any "' +
                S.settings.sections +
                's" inside of "' +
                $el.selector +
                '"'
            );
            S._set_body_class('failed');
          }
        } else {
          console.log(
            'Build failed, scrollNav could not find "' + $el.selector + '"'
          );
          S._set_body_class('failed');
        }
      });
    },
    destroy: function() {
      // Reverse everything init did; returns the jQuery set (chainable)
      return this.each(function() {
        // Unbind event listeners
        S._rm_scroll_listeners();
        S._rm_resize_listener();
        S._rm_click_listener();
        S._rm_keyboard_listener();
        // Remove any of the loading hooks
        $('body').removeClass('sn-loading sn-active sn-failed');
        // Remove the nav from the dom
        $('.' + S.settings.className).remove();
        // Teardown sections
        S._tear_down_sections(S.sections.data);
        // Fire custom destroy callback
        if (S.settings.onDestroy) {
          S.settings.onDestroy.call(this);
        }
        // Remove the saved settings
        // NOTE(review): settings is reset to an array while init builds an
        // object — confirm nothing reads S.settings after destroy
        S.settings = [];
        S.sections = undefined;
      });
    },
    resetPos: function() {
      // Public helper: re-measure section offsets (e.g. after content
      // changes) and refresh the fixed/active classes
      S._setup_pos();
      S._check_pos();
      // Fire custom reset position callback
      if (S.settings.onResetPos) {
        S.settings.onResetPos.call(this);
      }
    }
};
$.fn.scrollNav = function() {
var options;
var method = arguments[0];
if (S[method]) {
// Method exists, so use it
method = S[method];
options = Array.prototype.slice.call(arguments, 1);
} else if (typeof method === 'object' || !method) {
// No method passed, default to init
method = S.init;
options = arguments;
} else {
// Method doesn't exist
$.error('Method ' + method + ' does not exist in the scrollNav plugin');
return this;
}
return method.apply(this, options);
};
})(jQuery);
| {
"pile_set_name": "Github"
} |
---
layout: page
title: Capture HTML output
parent_title: What Else Can I Do
permalink: /what-else-can-i-do/capture-html-output.html
modification_time: 2015-08-05T12:00:21+00:00
---
One way of outputting a webpage to mPDF without rewriting your scripts too much is to buffer the output:
```php
<?php
// Require composer autoload
require_once __DIR__ . '/vendor/autoload.php';
$mpdf = new \Mpdf\Mpdf();
// Buffer the following html with PHP so we can store it to a variable later
ob_start();
// This is where your script would normally output the HTML using echo or print
echo '<div>Generate your content</div>';
// Now collect the output buffer into a variable
$html = ob_get_contents();
ob_end_clean();
// send the captured HTML from the output buffer to the mPDF class for processing
$mpdf->WriteHTML($html);
$mpdf->Output();
```
- <a href="{{ "/reference/mpdf-functions/construct.html" | prepend: site.baseurl }}" markdown="1">\Mpdf\Mpdf()</a> - Initialise an instance of mPDF class, and specify configuration
- <a href="{{ "/reference/mpdf-functions/writehtml.html" | prepend: site.baseurl }}">WriteHTML()</a> - Write HTML to the document
- <a href="{{ "/reference/mpdf-functions/output.html" | prepend: site.baseurl }}">Output()</a> - Finalise and output the document | {
"pile_set_name": "Github"
} |
heterocl\.tvm\.build\_module module
===================================
.. automodule:: heterocl.tvm.build_module
:members:
:undoc-members:
:show-inheritance:
| {
"pile_set_name": "Github"
} |
/*
* DXVA2 HW acceleration
*
* copyright (c) 2010 Laurent Aimar
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DXVA_INTERNAL_H
#define AVCODEC_DXVA_INTERNAL_H
#define COBJMACROS
#include "config.h"
#include "dxva2.h"
#if HAVE_DXVA_H
#include <dxva.h>
#endif
#include "avcodec.h"
#include "mpegvideo.h"
/* Helpers shared by the per-codec DXVA2 hwaccel implementations.
 * NOTE(review): summaries below are based on the declarations only;
 * confirm details against the implementation in dxva2.c. */

/* Return the hardware surface backing a decoded frame. */
void *ff_dxva2_get_surface(const AVFrame *frame);

/* Return the index of the frame's surface within the decoder's
 * surface array held by the dxva_context. */
unsigned ff_dxva2_get_surface_index(const struct dxva_context *,
                                    const AVFrame *frame);

/* Fill a decode buffer description of the given type with `size` bytes
 * of `data` and submit it to the accelerator. */
int ff_dxva2_commit_buffer(AVCodecContext *, struct dxva_context *,
                           DXVA2_DecodeBufferDesc *,
                           unsigned type, const void *data, unsigned size,
                           unsigned mb_count);

/* Common end-of-frame path: commit picture parameters (`pp`), quant
 * matrices (`qm`) and — via the codec-supplied callback — the bitstream
 * and slice-control buffers. */
int ff_dxva2_common_end_frame(AVCodecContext *, AVFrame *,
                              const void *pp, unsigned pp_size,
                              const void *qm, unsigned qm_size,
                              int (*commit_bs_si)(AVCodecContext *,
                                                  DXVA2_DecodeBufferDesc *bs,
                                                  DXVA2_DecodeBufferDesc *slice));
#endif /* AVCODEC_DXVA_INTERNAL_H */
| {
"pile_set_name": "Github"
} |
#include "macros.inc"
test_suite shift
/* Apply one shift via \prefix\()_set and verify the result via
   \prefix\()_ver for a given destination/source register pair. */
.macro test_shift prefix, dst, src, v, imm
\prefix\()_set \dst, \src, \v, \imm
\prefix\()_ver \dst, \v, \imm
.endm
/* Exercise both distinct (a3 <- a2) and in-place (a2 <- a2) forms. */
.macro test_shift_sd prefix, v, imm
test_shift \prefix, a3, a2, \v, \imm
test_shift \prefix, a2, a2, \v, \imm
.endm
/* Shift amounts around bit/byte/halfword boundaries for immediate forms. */
.macro tests_imm_shift prefix, v
test_shift_sd \prefix, \v, 1
test_shift_sd \prefix, \v, 2
test_shift_sd \prefix, \v, 7
test_shift_sd \prefix, \v, 8
test_shift_sd \prefix, \v, 15
test_shift_sd \prefix, \v, 16
test_shift_sd \prefix, \v, 31
.endm
/* Register-controlled shifts additionally cover amounts 0 and 32. */
.macro tests_shift prefix, v
test_shift_sd \prefix, \v, 0
tests_imm_shift \prefix, \v
test_shift_sd \prefix, \v, 32
.endm
/* slli: shift left logical by immediate. */
.macro slli_set dst, src, v, imm
movi \src, \v
slli \dst, \src, \imm
.endm
.macro slli_ver dst, v, imm
mov a2, \dst
movi a3, ((\v) << (\imm)) & 0xffffffff
assert eq, a2, a3
.endm
test slli
tests_imm_shift slli, 0xa3c51249
test_end
/* srai: arithmetic right shift by immediate; the verifier sign-extends
   the expected value by hand (OR with a mask derived from bit 31). */
.macro srai_set dst, src, v, imm
movi \src, \v
srai \dst, \src, \imm
.endm
.macro srai_ver dst, v, imm
mov a2, \dst
.if (\imm)
movi a3, (((\v) >> (\imm)) & 0xffffffff) | \
~((((\v) & 0x80000000) >> ((\imm) - 1)) - 1)
.else
movi a3, \v
.endif
assert eq, a2, a3
.endm
test srai
tests_imm_shift srai, 0x49a3c512
tests_imm_shift srai, 0xa3c51249
test_end
/* srli: logical right shift by immediate. */
.macro srli_set dst, src, v, imm
movi \src, \v
srli \dst, \src, \imm
.endm
.macro srli_ver dst, v, imm
mov a2, \dst
movi a3, (((\v) >> (\imm)) & 0xffffffff)
assert eq, a2, a3
.endm
test srli
tests_imm_shift srli, 0x49a3c512
tests_imm_shift srli, 0xa3c51249
test_end
/* sll: shift amount taken from SAR, set either via ssl or directly via
   wsr (which stores 32 - amount for left shifts). */
.macro sll_set dst, src, v, imm
movi a2, \imm
ssl a2
movi \src, \v
sll \dst, \src
.endm
.macro sll_sar_set dst, src, v, imm
movi a2, 32 - \imm
wsr a2, sar
movi \src, \v
sll \dst, \src
.endm
.macro sll_ver dst, v, imm
slli_ver \dst, \v, (\imm) & 0x1f
.endm
.macro sll_sar_ver dst, v, imm
slli_ver \dst, \v, \imm
.endm
test sll
tests_shift sll, 0xa3c51249
tests_shift sll_sar, 0xa3c51249
test_end
/* srl: logical right shift with amount from SAR (via ssr or wsr). */
.macro srl_set dst, src, v, imm
movi a2, \imm
ssr a2
movi \src, \v
srl \dst, \src
.endm
.macro srl_sar_set dst, src, v, imm
movi a2, \imm
wsr a2, sar
movi \src, \v
srl \dst, \src
.endm
.macro srl_ver dst, v, imm
srli_ver \dst, \v, (\imm) & 0x1f
.endm
.macro srl_sar_ver dst, v, imm
srli_ver \dst, \v, \imm
.endm
test srl
tests_shift srl, 0xa3c51249
tests_shift srl_sar, 0xa3c51249
tests_shift srl, 0x49a3c512
tests_shift srl_sar, 0x49a3c512
test_end
/* sra: arithmetic right shift with amount from SAR. */
.macro sra_set dst, src, v, imm
movi a2, \imm
ssr a2
movi \src, \v
sra \dst, \src
.endm
.macro sra_sar_set dst, src, v, imm
movi a2, \imm
wsr a2, sar
movi \src, \v
sra \dst, \src
.endm
.macro sra_ver dst, v, imm
srai_ver \dst, \v, (\imm) & 0x1f
.endm
.macro sra_sar_ver dst, v, imm
srai_ver \dst, \v, \imm
.endm
test sra
tests_shift sra, 0xa3c51249
tests_shift sra_sar, 0xa3c51249
tests_shift sra, 0x49a3c512
tests_shift sra_sar, 0x49a3c512
test_end
/* src: funnel shift — extract 32 bits from the 64-bit pair a4:src,
   shifted right by the SAR amount; \v is a 64-bit test pattern. */
.macro src_set dst, src, v, imm
movi a2, \imm
ssr a2
movi \src, (\v) & 0xffffffff
movi a4, (\v) >> 32
src \dst, a4, \src
.endm
.macro src_sar_set dst, src, v, imm
movi a2, \imm
wsr a2, sar
movi \src, (\v) & 0xffffffff
movi a4, (\v) >> 32
src \dst, a4, \src
.endm
.macro src_ver dst, v, imm
src_sar_ver \dst, \v, (\imm) & 0x1f
.endm
.macro src_sar_ver dst, v, imm
mov a2, \dst
movi a3, ((\v) >> (\imm)) & 0xffffffff
assert eq, a2, a3
.endm
test src
tests_shift src, 0xa3c51249215c3a94
tests_shift src_sar, 0xa3c51249215c3a94
test_end
test_suite_end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2015 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef REG_AIC_H
#define REG_AIC_H
#define AR_SM_BASE 0xa200
#define AR_SM1_BASE 0xb200
#define AR_AGC_BASE 0x9e00
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4)
#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8)
#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
#define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0)
#define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4)
#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
#define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60)
#define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64)
/* AIC fields */
#define AR_PHY_AIC_MON_ENABLE 0x80000000
#define AR_PHY_AIC_MON_ENABLE_S 31
#define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000
#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24
#define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000
#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17
#define AR_PHY_AIC_F_WLAN 0x0001FC00
#define AR_PHY_AIC_F_WLAN_S 10
#define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200
#define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9
#define AR_PHY_AIC_CAL_ENABLE 0x00000100
#define AR_PHY_AIC_CAL_ENABLE_S 8
#define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE
#define AR_PHY_AIC_BTTX_PWR_THR_S 1
#define AR_PHY_AIC_ENABLE 0x00000001
#define AR_PHY_AIC_ENABLE_S 0
#define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000
#define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20
#define AR_PHY_AIC_BT_IDLE_CFG 0x00080000
#define AR_PHY_AIC_BT_IDLE_CFG_S 19
#define AR_PHY_AIC_STDBY_COND 0x00060000
#define AR_PHY_AIC_STDBY_COND_S 17
#define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800
#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11
#define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700
#define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8
#define AR_PHY_AIC_RSSI_MAX 0x000000F0
#define AR_PHY_AIC_RSSI_MAX_S 4
#define AR_PHY_AIC_RSSI_MIN 0x0000000F
#define AR_PHY_AIC_RSSI_MIN_S 0
#define AR_PHY_AIC_RADIO_DELAY 0x7F000000
#define AR_PHY_AIC_RADIO_DELAY_S 24
#define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000
#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20
#define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000
#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15
#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000
#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13
#define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00
#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10
#define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200
#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9
#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100
#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8
#define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF
#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0
#define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000
#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20
#define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000
#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13
#define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000
#define AR_PHY_AIC_MON_PWR_EST_LONG_S 12
#define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00
#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10
#define AR_PHY_AIC_MON_PERF_THR 0x000003E0
#define AR_PHY_AIC_MON_PERF_THR_S 5
#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018
#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3
#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006
#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1
#define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001
#define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0
#define AR_PHY_AIC_MON_DONE 0x80000000
#define AR_PHY_AIC_MON_DONE_S 31
#define AR_PHY_AIC_MON_ACTIVE 0x40000000
#define AR_PHY_AIC_MON_ACTIVE_S 30
#define AR_PHY_AIC_MEAS_COUNT 0x3F000000
#define AR_PHY_AIC_MEAS_COUNT_S 24
#define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000
#define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18
#define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800
#define AR_PHY_AIC_CAL_HOP_COUNT_S 11
#define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0
#define AR_PHY_AIC_CAL_VALID_COUNT_S 4
#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008
#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3
#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004
#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2
#define AR_PHY_AIC_CAL_DONE 0x00000002
#define AR_PHY_AIC_CAL_DONE_S 1
#define AR_PHY_AIC_CAL_ACTIVE 0x00000001
#define AR_PHY_AIC_CAL_ACTIVE_S 0
#define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000
#define AR_PHY_AIC_MEAS_MAG_MIN_S 22
#define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000
#define AR_PHY_AIC_MON_STALE_COUNT_S 15
#define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00
#define AR_PHY_AIC_MON_HOP_COUNT_S 8
#define AR_PHY_AIC_CAL_AIC_SM 0x000000F8
#define AR_PHY_AIC_CAL_AIC_SM_S 3
#define AR_PHY_AIC_SM 0x00000007
#define AR_PHY_AIC_SM_S 0
#define AR_PHY_AIC_SRAM_VALID 0x00000001
#define AR_PHY_AIC_SRAM_VALID_S 0
#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E
#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1
#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080
#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7
#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00
#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8
#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000
#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14
#define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000
#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15
#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000
#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13
#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00
#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9
#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8
#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3
#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006
#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1
#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001
#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0
#endif /* REG_AIC_H */
| {
"pile_set_name": "Github"
} |
"""
End-to-end tests.
"""
from __future__ import absolute_import, division, print_function
import pickle
import pytest
import six
from hypothesis import given
from hypothesis.strategies import booleans
import attr
from attr._compat import TYPE
from attr._make import NOTHING, Attribute
from attr.exceptions import FrozenInstanceError
@attr.s
class C1(object):
    """Fixture: two mandatory attributes; ``x`` validated as ``int``."""
    x = attr.ib(validator=attr.validators.instance_of(int))
    y = attr.ib()
@attr.s(slots=True)
class C1Slots(object):
    """Slotted twin of ``C1``."""
    x = attr.ib(validator=attr.validators.instance_of(int))
    y = attr.ib()
# Module-level default used by C2/C2Slots below.
foo = None
@attr.s()
class C2(object):
    """Fixture: defaulted attributes (``foo`` and a list factory)."""
    x = attr.ib(default=foo)
    y = attr.ib(default=attr.Factory(list))
@attr.s(slots=True)
class C2Slots(object):
    """Slotted twin of ``C2``."""
    x = attr.ib(default=foo)
    y = attr.ib(default=attr.Factory(list))
@attr.s
class Super(object):
    """Base fixture with an attribute and a plain method."""
    x = attr.ib()
    def meth(self):
        """Return ``x``; shows plain methods survive ``@attr.s``."""
        return self.x
@attr.s(slots=True)
class SuperSlots(object):
    """Slotted twin of ``Super``."""
    x = attr.ib()
    def meth(self):
        """Return ``x``."""
        return self.x
@attr.s
class Sub(Super):
    """Subclass adding ``y`` on top of ``Super``."""
    y = attr.ib()
@attr.s(slots=True)
class SubSlots(SuperSlots):
    """Slotted subclass adding ``y``."""
    y = attr.ib()
@attr.s(frozen=True, slots=True)
class Frozen(object):
    """Frozen, slotted fixture."""
    x = attr.ib()
@attr.s
class SubFrozen(Frozen):
    """Non-frozen subclass of a frozen class."""
    y = attr.ib()
@attr.s(frozen=True, slots=False)
class FrozenNoSlots(object):
    """Frozen fixture that keeps ``__dict__`` storage."""
    x = attr.ib()
class Meta(type):
    """Metaclass fixture for metaclass-preservation checks."""
    pass
@attr.s
@six.add_metaclass(Meta)
class WithMeta(object):
    """attrs class carrying a custom metaclass (dict-based)."""
    pass
@attr.s(slots=True)
@six.add_metaclass(Meta)
class WithMetaSlots(object):
    """attrs class carrying a custom metaclass (slots-based)."""
    pass
# Class built programmatically via attr.make_class.
FromMakeClass = attr.make_class("FromMakeClass", ["x"])
class TestDarkMagic(object):
    """
    Integration tests.
    """
    @pytest.mark.parametrize("cls", [C2, C2Slots])
    def test_fields(self, cls):
        """
        `attr.fields` works.
        """
        assert (
            Attribute(name="x", default=foo, validator=None,
                      repr=True, cmp=True, hash=None, init=True),
            Attribute(name="y", default=attr.Factory(list), validator=None,
                      repr=True, cmp=True, hash=None, init=True),
        ) == attr.fields(cls)

    @pytest.mark.parametrize("cls", [C1, C1Slots])
    def test_asdict(self, cls):
        """
        `attr.asdict` works.
        """
        assert {
            "x": 1,
            "y": 2,
        } == attr.asdict(cls(x=1, y=2))

    @pytest.mark.parametrize("cls", [C1, C1Slots])
    def test_validator(self, cls):
        """
        `instance_of` raises `TypeError` on type mismatch.
        """
        with pytest.raises(TypeError) as e:
            cls("1", 2)

        # Using C1 explicitly, since slot classes don't support this.
        assert (
            "'x' must be <{type} 'int'> (got '1' that is a <{type} "
            "'str'>).".format(type=TYPE),
            attr.fields(C1).x, int, "1",
        ) == e.value.args

    @given(booleans())
    def test_renaming(self, slots):
        """
        Private members are renamed but only in `__init__`.
        """
        @attr.s(slots=slots)
        class C3(object):
            _x = attr.ib()

        # repr keeps the underscore; __init__ accepts the stripped name.
        assert "C3(_x=1)" == repr(C3(x=1))

    @given(booleans(), booleans())
    def test_programmatic(self, slots, frozen):
        """
        `attr.make_class` works.
        """
        PC = attr.make_class("PC", ["a", "b"], slots=slots, frozen=frozen)
        assert (
            Attribute(name="a", default=NOTHING, validator=None,
                      repr=True, cmp=True, hash=None, init=True),
            Attribute(name="b", default=NOTHING, validator=None,
                      repr=True, cmp=True, hash=None, init=True),
        ) == attr.fields(PC)

    @pytest.mark.parametrize("cls", [Sub, SubSlots])
    def test_subclassing_with_extra_attrs(self, cls):
        """
        Sub-classing (where the subclass has extra attrs) does what you'd hope
        for.
        """
        obj = object()
        i = cls(x=obj, y=2)
        assert i.x is i.meth() is obj
        assert i.y == 2
        if cls is Sub:
            assert "Sub(x={obj}, y=2)".format(obj=obj) == repr(i)
        else:
            assert "SubSlots(x={obj}, y=2)".format(obj=obj) == repr(i)

    @pytest.mark.parametrize("base", [Super, SuperSlots])
    def test_subclass_without_extra_attrs(self, base):
        """
        Sub-classing (where the subclass does not have extra attrs) still
        behaves the same as a subclass with extra attrs.
        """
        class Sub2(base):
            pass

        obj = object()
        i = Sub2(x=obj)
        assert i.x is i.meth() is obj
        assert "Sub2(x={obj})".format(obj=obj) == repr(i)

    @pytest.mark.parametrize("frozen_class", [
        Frozen,  # has slots=True
        attr.make_class("FrozenToo", ["x"], slots=False, frozen=True),
    ])
    def test_frozen_instance(self, frozen_class):
        """
        Frozen instances can't be modified (easily).
        """
        frozen = frozen_class(1)

        with pytest.raises(FrozenInstanceError) as e:
            frozen.x = 2

        with pytest.raises(FrozenInstanceError) as e:
            del frozen.x

        # `e` here holds the error raised by the `del` above.
        assert e.value.args[0] == "can't set attribute"
        assert 1 == frozen.x

    @pytest.mark.parametrize("cls",
                             [C1, C1Slots, C2, C2Slots, Super, SuperSlots,
                              Sub, SubSlots, Frozen, FrozenNoSlots,
                              FromMakeClass])
    @pytest.mark.parametrize("protocol",
                             range(2, pickle.HIGHEST_PROTOCOL + 1))
    def test_pickle_attributes(self, cls, protocol):
        """
        Pickling/un-pickling of Attribute instances works.
        """
        for attribute in attr.fields(cls):
            assert attribute == pickle.loads(pickle.dumps(attribute, protocol))

    @pytest.mark.parametrize("cls",
                             [C1, C1Slots, C2, C2Slots, Super, SuperSlots,
                              Sub, SubSlots, Frozen, FrozenNoSlots,
                              FromMakeClass])
    @pytest.mark.parametrize("protocol",
                             range(2, pickle.HIGHEST_PROTOCOL + 1))
    def test_pickle_object(self, cls, protocol):
        """
        Pickle object serialization works on all kinds of attrs classes.
        """
        # Two-attribute fixtures need two init args; the rest take one.
        if len(attr.fields(cls)) == 2:
            obj = cls(123, 456)
        else:
            obj = cls(123)
        assert repr(obj) == repr(pickle.loads(pickle.dumps(obj, protocol)))

    def test_subclassing_frozen_gives_frozen(self):
        """
        The frozen-ness of classes is inherited.  Subclasses of frozen classes
        are also frozen and can be instantiated.
        """
        i = SubFrozen("foo", "bar")

        assert i.x == "foo"
        assert i.y == "bar"

    @pytest.mark.parametrize("cls", [WithMeta, WithMetaSlots])
    def test_metaclass_preserved(self, cls):
        """
        Metaclass data is preserved.
        """
        assert Meta == type(cls)

    def test_default_decorator(self):
        """
        Default decorator sets the default and the respective method gets
        called.
        """
        @attr.s
        class C(object):
            x = attr.ib(default=1)
            y = attr.ib()

            @y.default
            def compute(self):
                return self.x + 1

        # y defaults to x + 1 == 2, so C() equals C(1, 2).
        assert C(1, 2) == C()

    @pytest.mark.parametrize("slots", [True, False])
    @pytest.mark.parametrize("frozen", [True, False])
    def test_attrib_overwrite(self, slots, frozen):
        """
        Subclasses can overwrite attributes of their superclass.
        """
        @attr.s(slots=slots, frozen=frozen)
        class SubOverwrite(Super):
            x = attr.ib(default=attr.Factory(list))

        assert SubOverwrite([]) == SubOverwrite()

    def test_dict_patch_class(self):
        """
        dict-classes are never replaced.
        """
        class C(object):
            x = attr.ib()

        C_new = attr.s(C)

        assert C_new is C

    def test_hash_by_id(self):
        """
        With dict classes, hashing by ID is active for hash=False even on
        Python 3.  This is incorrect behavior but we have to retain it for
        backward compatibility.
        """
        @attr.s(hash=False)
        class HashByIDBackwardCompat(object):
            x = attr.ib()

        assert (
            hash(HashByIDBackwardCompat(1)) != hash(HashByIDBackwardCompat(1))
        )

        @attr.s(hash=False, cmp=False)
        class HashByID(object):
            x = attr.ib()

        assert hash(HashByID(1)) != hash(HashByID(1))

        @attr.s(hash=True)
        class HashByValues(object):
            x = attr.ib()

        assert hash(HashByValues(1)) == hash(HashByValues(1))

    def test_handles_different_defaults(self):
        """
        Unhashable defaults + subclassing values work.
        """
        @attr.s
        class Unhashable(object):
            pass

        @attr.s
        class C(object):
            x = attr.ib(default=Unhashable())

        @attr.s
        class D(C):
            pass

    @pytest.mark.parametrize("slots", [True, False])
    def test_hash_false_cmp_false(self, slots):
        """
        hash=False and cmp=False make a class hashable by ID.
        """
        @attr.s(hash=False, cmp=False, slots=slots)
        class C(object):
            pass

        assert hash(C()) != hash(C())

    def test_overwrite_super(self):
        """
        Super classes can overwrite each other and the attributes are added
        in the order they are defined.
        """
        @attr.s
        class C(object):
            c = attr.ib(default=100)
            x = attr.ib(default=1)
            b = attr.ib(default=23)

        @attr.s
        class D(C):
            a = attr.ib(default=42)
            x = attr.ib(default=2)
            d = attr.ib(default=3.14)

        @attr.s
        class E(D):
            y = attr.ib(default=3)
            z = attr.ib(default=4)

        # Overwritten x moves to its redefinition point in D.
        assert "E(c=100, b=23, a=42, x=2, d=3.14, y=3, z=4)" == repr(E())

    @pytest.mark.parametrize("base_slots", [True, False])
    @pytest.mark.parametrize("sub_slots", [True, False])
    @pytest.mark.parametrize("base_frozen", [True, False])
    @pytest.mark.parametrize("sub_frozen", [True, False])
    @pytest.mark.parametrize("base_converter", [True, False])
    @pytest.mark.parametrize("sub_converter", [True, False])
    def test_frozen_slots_combo(self, base_slots, sub_slots, base_frozen,
                                sub_frozen, base_converter, sub_converter):
        """
        A class with a single attribute, inheriting from another class
        with a single attribute.
        """
        @attr.s(frozen=base_frozen, slots=base_slots)
        class Base(object):
            a = attr.ib(converter=int if base_converter else None)

        @attr.s(frozen=sub_frozen, slots=sub_slots)
        class Sub(Base):
            b = attr.ib(converter=int if sub_converter else None)

        i = Sub("1", "2")

        assert i.a == (1 if base_converter else "1")
        assert i.b == (2 if sub_converter else "2")

        # Frozen-ness of either class in the hierarchy freezes the instance.
        if base_frozen or sub_frozen:
            with pytest.raises(FrozenInstanceError):
                i.a = "2"

            with pytest.raises(FrozenInstanceError):
                i.b = "3"
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mojo/edk/embedder/platform_handle_utils.h"
#include <windows.h>
#include "base/logging.h"
namespace mojo {
namespace edk {
// Duplicates |platform_handle| within the current process using the Win32
// DuplicateHandle API.  Returns an invalid ScopedInternalPlatformHandle on
// failure.  The duplicate keeps the source's access rights
// (DUPLICATE_SAME_ACCESS) and is created inheritable (bInheritHandle=TRUE),
// presumably so it can be passed to child processes -- confirm with callers.
ScopedInternalPlatformHandle DuplicatePlatformHandle(
    InternalPlatformHandle platform_handle) {
  DCHECK(platform_handle.is_valid());
  HANDLE new_handle;
  CHECK_NE(platform_handle.handle, INVALID_HANDLE_VALUE);
  if (!DuplicateHandle(GetCurrentProcess(), platform_handle.handle,
                       GetCurrentProcess(), &new_handle, 0, TRUE,
                       DUPLICATE_SAME_ACCESS))
    return ScopedInternalPlatformHandle();
  DCHECK_NE(new_handle, INVALID_HANDLE_VALUE);
  return ScopedInternalPlatformHandle(InternalPlatformHandle(new_handle));
}
} // namespace edk
} // namespace mojo
| {
"pile_set_name": "Github"
} |
#
# Copyright 2018-2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from ipython_genutils.importstring import import_item
from typing import Type, TypeVar, Any
from .schema import SchemaManager
# Setup forward reference for type hint on return from class factory method. See
# https://stackoverflow.com/questions/39205527/can-you-annotate-return-type-when-value-is-instance-of-cls/39205612#39205612
M = TypeVar('M', bound='Metadata')
class Metadata(object):
    """Base model for a metadata instance managed by MetadataManager.

    Subclasses may override the post_load/pre_save/pre_delete hooks to run
    custom logic around persistence operations.
    """

    # Class-level defaults; each is overwritten per-instance in __init__.
    name = None            # instance identifier; stripped by prepare_write()
    resource = None        # where the instance was loaded from, if anywhere
    display_name = None
    schema_name = None     # name of the schema this instance conforms to
    metadata = {}          # the schema-specific payload
    reason = None          # diagnostic detail (e.g. why a load failed)

    def __init__(self, **kwargs: Any) -> None:
        self.name = kwargs.get('name')
        self.display_name = kwargs.get('display_name')
        self.schema_name = kwargs.get('schema_name')
        self.metadata = kwargs.get('metadata', {})
        self.resource = kwargs.get('resource')
        self.reason = kwargs.get('reason')

    def post_load(self, **kwargs: Any) -> None:
        """Called by MetadataManager after fetching the instance.

           :param kwargs: additional arguments
        """
        pass

    def pre_save(self, **kwargs: Any) -> None:
        """Called by MetadataManager prior to saving the instance.

           :param kwargs: additional arguments
           Keyword Args:
               for_update (bool): indicates if this save operation if for update (True) or create (False)
        """
        pass

    def pre_delete(self, **kwargs: Any) -> None:
        """Called by MetadataManager prior to deleting the instance.

           :param kwargs: additional arguments
        """
        pass

    @classmethod
    def from_dict(cls: Type[M], namespace: str, metadata_dict: dict) -> M:
        """Creates an appropriate instance of Metadata from a dictionary instance """
        # Get the schema and look for metadata_class entry and use that, else Metadata.
        metadata_class_name = 'elyra.metadata.Metadata'
        schema_name = metadata_dict.get('schema_name')
        if schema_name:
            try:
                schema = SchemaManager.instance().get_schema(namespace, schema_name)
                metadata_class_name = schema.get('metadata_class_name', metadata_class_name)
            except Exception:  # just use the default
                pass
        # Resolve the dotted class name to an actual class object.
        metadata_class = import_item(metadata_class_name)
        try:
            instance = metadata_class(**metadata_dict)
            if not isinstance(instance, Metadata):
                raise ValueError("The metadata_class_name ('{}') for schema '{}' must be a subclass of '{}'!".
                                 format(metadata_class_name, schema_name, cls.__name__))
        except TypeError as te:
            # Constructor signature mismatch; surface the same subclass guidance.
            raise ValueError("The metadata_class_name ('{}') for schema '{}' must be a subclass of '{}'!".
                             format(metadata_class_name, schema_name, cls.__name__)) from te
        return instance

    def to_dict(self, trim: bool = False) -> dict:
        # Exclude resource, and reason only if trim is True since we don't want to persist that information.
        # Method prepare_write will be used to remove name prior to writes.
        d = dict(name=self.name, display_name=self.display_name, metadata=self.metadata, schema_name=self.schema_name)
        if not trim:
            if self.resource:
                d['resource'] = self.resource
            if self.reason:
                d['reason'] = self.reason
        return d

    def to_json(self, trim: bool = False) -> str:
        # JSON form of to_dict(); `trim` is forwarded unchanged.
        return json.dumps(self.to_dict(trim=trim), indent=2)

    def prepare_write(self) -> dict:
        """Prepares this instance for storage, stripping name, reason, and resource and converting to a dict"""
        prepared = self.to_dict(trim=True)  # we should also trim 'name' when storing
        prepared.pop('name', None)
        return prepared

    def __repr__(self):
        return self.to_json()
| {
"pile_set_name": "Github"
} |
<?php
/* For licensing terms, see /license.txt */
namespace Chamilo\CoreBundle\Tool;
/**
 * Base class for tools that are scoped to a course.
 */
abstract class AbstractCourseTool extends AbstractTool
{
    /**
     * Whether this tool is a course tool.
     *
     * @return bool always true for course tools
     */
    public function isCourseTool()
    {
        return true;
    }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<assembly manifestVersion="1.0" xmlns="urn:schemas-microsoft-com:asm.v1">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v2">
<security>
<requestedPrivileges xmlns="urn:schemas-microsoft-com:asm.v3">
<requestedExecutionLevel level="asInvoker"
uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
<application xmlns="urn:schemas-microsoft-com:asm.v3">
<windowsSettings xmlns="http://schemas.microsoft.com/SMI/2005/WindowsSettings">
<dpiAware>true</dpiAware>
</windowsSettings>
</application>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
</application>
</compatibility>
</assembly>
| {
"pile_set_name": "Github"
} |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// NetworkingV1beta1Interface exposes the typed clients for the
// networking.k8s.io/v1beta1 resources.
type NetworkingV1beta1Interface interface {
	RESTClient() rest.Interface
	IngressesGetter
	IngressClassesGetter
}

// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group.
type NetworkingV1beta1Client struct {
	restClient rest.Interface
}

// Ingresses returns a client for Ingress resources in the given namespace.
func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface {
	return newIngresses(c, namespace)
}

// IngressClasses returns a client for the cluster-scoped IngressClass resources.
func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface {
	return newIngressClasses(c)
}
// NewForConfig creates a new NetworkingV1beta1Client for the given config.
func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) {
	// Work on a copy so the caller's config is not mutated by defaulting.
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &NetworkingV1beta1Client{client}, nil
}

// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new NetworkingV1beta1Client for the given RESTClient.
func New(c rest.Interface) *NetworkingV1beta1Client {
	return &NetworkingV1beta1Client{c}
}

// setConfigDefaults fills in the group/version, API path, serializer and
// user agent expected by the networking.k8s.io/v1beta1 API.
func setConfigDefaults(config *rest.Config) error {
	gv := v1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// Safe to call on a nil receiver (returns nil).
func (c *NetworkingV1beta1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
| {
"pile_set_name": "Github"
} |
/**
* MegaMek - Copyright (C) 2005 Ben Mazur ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
package megamek.common.weapons;
import java.util.Vector;
import megamek.common.AmmoType;
import megamek.common.Building;
import megamek.common.Entity;
import megamek.common.IGame;
import megamek.common.Mounted;
import megamek.common.RangeType;
import megamek.common.Report;
import megamek.common.TargetRoll;
import megamek.common.Targetable;
import megamek.common.ToHitData;
import megamek.common.WeaponType;
import megamek.common.actions.WeaponAttackAction;
import megamek.common.options.OptionsConstants;
import megamek.server.Server;
/**
* @author Jay Lawson
*/
public class CapitalMissileHandler extends AmmoWeaponHandler {
/**
*
*/
private static final long serialVersionUID = -1618484541772117621L;
boolean advancedPD = false;
    /**
     * Creates a handler that resolves a capital missile attack.
     *
     * @param t the to-hit data for this attack
     * @param w the weapon attack action being resolved
     * @param g the current game
     * @param s the server resolving the attack
     */
    public CapitalMissileHandler(ToHitData t, WeaponAttackAction w, IGame g,
            Server s) {
        super(t, w, g, s);
        // Cache whether StratOps advanced point defense rules are in effect.
        advancedPD = g.getOptions().booleanOption(OptionsConstants.ADVAERORULES_STRATOPS_ADV_POINTDEF);
    }
    /**
     * Resolves a capital missile attack for the given phase: reports the
     * attack and its to-hit roll, applies AMS/point-defense counterfire to
     * the missile, and distributes any resulting damage.
     *
     * @see megamek.common.weapons.AttackHandler#handle(int, java.util.Vector)
     */
    @Override
    public boolean handle(IGame.Phase phase, Vector<Report> vPhaseReport) {
        if (!cares(phase)) {
            return true;
        }
        // Currently always a single attack per handler invocation.
        int numAttacks = 1;

        Entity entityTarget = (target.getTargetType() == Targetable.TYPE_ENTITY) ? (Entity) target
                : null;
        if (entityTarget != null) {
            ae.setLastTarget(entityTarget.getId());
            ae.setLastTargetDisplayName(entityTarget.getDisplayName());
        }

        // Which building takes the damage?
        Building bldg = game.getBoard().getBuildingAt(target.getPosition());
        String number = nweapons > 1 ? " (" + nweapons + ")" : "";

        for (int i = numAttacks; i > 0; i--) {
            // Report weapon attack and its to-hit value.
            Report r = new Report(3115);
            r.indent();
            r.newlines = 0;
            r.subject = subjectId;
            r.add(wtype.getName() + number);
            if (entityTarget != null) {
                if ((wtype.getAmmoType() != AmmoType.T_NA)
                        && (weapon.getLinked() != null)
                        && (weapon.getLinked().getType() instanceof AmmoType)) {
                    AmmoType atype = (AmmoType) weapon.getLinked().getType();
                    if (atype.getMunitionType() != AmmoType.M_STANDARD) {
                        r.messageId = 3116;
                        r.add(atype.getSubMunitionName());
                    }
                }
                r.addDesc(entityTarget);
            } else {
                r.messageId = 3120;
                r.add(target.getDisplayName(), true);
            }
            vPhaseReport.addElement(r);

            // are we a glancing hit? Check for this here, report it later
            if (game.getOptions().booleanOption(OptionsConstants.ADVCOMBAT_TACOPS_GLANCING_BLOWS)) {
                if (game.getOptions().booleanOption(OptionsConstants.ADVAERORULES_AERO_SANITY)) {
                    if (getParentBayHandler() != null) {
                        // Use the to-hit value for the bay handler, otherwise toHit is set to Automatic Success
                        WeaponHandler bayHandler = getParentBayHandler();
                        bGlancing = (roll == bayHandler.toHit.getValue());
                        bLowProfileGlancing = isLowProfileGlancingBlow(entityTarget, bayHandler.toHit);
                    }
                } else {
                    setGlancingBlowFlags(entityTarget);
                }
            }

            // Set Margin of Success/Failure and check for Direct Blows
            if (game.getOptions().booleanOption(OptionsConstants.ADVAERORULES_AERO_SANITY)
                    && getParentBayHandler() != null) {
                // Use the to-hit value for the bay handler, otherwise toHit is set to Automatic Success
                WeaponHandler bayHandler = getParentBayHandler();
                toHit.setMoS(roll - Math.max(2, bayHandler.toHit.getValue()));
            } else {
                toHit.setMoS(roll - Math.max(2, toHit.getValue()));
            }
            bDirect = game.getOptions().booleanOption(OptionsConstants.ADVCOMBAT_TACOPS_DIRECT_BLOW)
                    && ((toHit.getMoS() / 3) >= 1) && (entityTarget != null);

            // Used when using a grounded dropship with individual weapons
            // or a fighter squadron loaded with ASM or Alamo bombs.
            nDamPerHit = calcDamagePerHit();

            // Point Defense fire vs Capital Missiles
            if (game.getOptions().booleanOption(OptionsConstants.ADVAERORULES_AERO_SANITY)
                    && getParentBayHandler() != null) {
                WeaponHandler bayHandler = getParentBayHandler();
                CounterAV = bayHandler.getCounterAV();
            } else {
                // This gets used if you're shooting at an airborne dropship. It can defend with PD bays.
                attackValue = calcAttackValue();
            }
            // CalcAttackValue triggers counterfire, so now we can safely get this
            CapMissileAMSMod = getCapMissileAMSMod();

            // Only do this if the missile wasn't destroyed
            if (CapMissileAMSMod > 0 && CapMissileArmor > 0) {
                toHit.addModifier(CapMissileAMSMod, "Damage from Point Defenses");
                if (roll < toHit.getValue()) {
                    CapMissileMissed = true;
                }
            }

            // Report any AMS bay action against Capital missiles that doesn't destroy them all.
            if (amsBayEngagedCap && CapMissileArmor > 0) {
                r = new Report(3358);
                r.add(CapMissileAMSMod);
                r.subject = subjectId;
                vPhaseReport.addElement(r);
            // Report any PD bay action against Capital missiles that doesn't destroy them all.
            } else if (pdBayEngagedCap && CapMissileArmor > 0) {
                r = new Report(3357);
                r.add(CapMissileAMSMod);
                r.subject = subjectId;
                vPhaseReport.addElement(r);
            }

            if (toHit.getValue() == TargetRoll.IMPOSSIBLE) {
                r = new Report (3135);
                r.subject = subjectId;
                r.add(" " + target.getPosition(), true);
                vPhaseReport.addElement(r);
                return false;
            } else if (toHit.getValue() == TargetRoll.AUTOMATIC_FAIL) {
                r = new Report(3140);
                r.newlines = 0;
                r.subject = subjectId;
                r.add(toHit.getDesc());
                vPhaseReport.addElement(r);
            } else if (toHit.getValue() == TargetRoll.AUTOMATIC_SUCCESS) {
                r = new Report(3145);
                r.newlines = 0;
                r.subject = subjectId;
                r.add(toHit.getDesc());
                vPhaseReport.addElement(r);
            } else {
                // roll to hit
                r = new Report(3150);
                r.newlines = 0;
                r.subject = subjectId;
                r.add(toHit.getValue());
                vPhaseReport.addElement(r);
            }

            // dice have been rolled, thanks
            r = new Report(3155);
            r.newlines = 0;
            r.subject = subjectId;
            r.add(roll);
            vPhaseReport.addElement(r);

            // do we hit?
            bMissed = roll < toHit.getValue();

            // Report Glancing/Direct Blow here because of Capital Missile weirdness
            if (!(amsBayEngagedCap || pdBayEngagedCap)) {
                addGlancingBlowReports(vPhaseReport);

                if (bDirect) {
                    r = new Report(3189);
                    r.subject = ae.getId();
                    r.newlines = 0;
                    vPhaseReport.addElement(r);
                }
            }

            CounterAV = getCounterAV();
            // use this if AMS counterfire destroys all the Capital missiles
            if (amsBayEngagedCap && (CapMissileArmor <= 0)) {
                r = new Report(3356);
                r.indent();
                r.subject = subjectId;
                vPhaseReport.addElement(r);
                nDamPerHit = 0;
            }
            // use this if PD counterfire destroys all the Capital missiles
            if (pdBayEngagedCap && (CapMissileArmor <= 0)) {
                r = new Report(3355);
                r.indent();
                r.subject = subjectId;
                vPhaseReport.addElement(r);
                nDamPerHit = 0;
            }

            // Any necessary PSRs, jam checks, etc.
            // If this boolean is true, don't report
            // the miss later, as we already reported
            // it in doChecks
            boolean missReported = doChecks(vPhaseReport);
            if (missReported) {
                bMissed = true;
            }
            if (bMissed && !missReported) {
                reportMiss(vPhaseReport);
            }

            // Handle damage.
            int nCluster = calcnCluster();
            int id = vPhaseReport.size();
            int hits = calcHits(vPhaseReport);

            if (target.isAirborne() || game.getBoard().inSpace() || ae.usesWeaponBays()) {
                // if we added a line to the phase report for calc hits, remove
                // it now
                while (vPhaseReport.size() > id) {
                    vPhaseReport.removeElementAt(vPhaseReport.size() - 1);
                }
                int[] aeroResults = calcAeroDamage(entityTarget, vPhaseReport);
                hits = aeroResults[0];
                // If our capital missile was destroyed, it shouldn't hit
                if ((amsBayEngagedCap || pdBayEngagedCap) && (CapMissileArmor <= 0)) {
                    hits = 0;
                }
                nCluster = aeroResults[1];
            }

            // Capital missiles shouldn't be able to target buildings, being space-only weapons
            // but if they aren't defined, handleEntityDamage() doesn't work.
            int bldgAbsorbs = 0;

            // We have to adjust the reports on a miss, so they line up
            if (bMissed && id != vPhaseReport.size()) {
                vPhaseReport.get(id - 1).newlines--;
                vPhaseReport.get(id).indent(2);
                vPhaseReport.get(vPhaseReport.size() - 1).newlines++;
            }

            // Make sure the player knows when his attack causes no damage.
            if (nDamPerHit == 0) {
                r = new Report(3365);
                r.subject = subjectId;
                vPhaseReport.addElement(r);
                return false;
            }
            if (!bMissed) {
                // for each cluster of hits, do a chunk of damage
                while (hits > 0) {
                    int nDamage;
                    // targeting a hex for igniting
                    if ((target.getTargetType() == Targetable.TYPE_HEX_IGNITE)
                            || (target.getTargetType() == Targetable.TYPE_BLDG_IGNITE)) {
                        handleIgnitionDamage(vPhaseReport, bldg, hits);
                        return false;
                    }
                    // targeting a hex for clearing
                    if (target.getTargetType() == Targetable.TYPE_HEX_CLEAR) {
                        nDamage = nDamPerHit * hits;
                        handleClearDamage(vPhaseReport, bldg, nDamage);
                        return false;
                    }
                    // Targeting a building.
                    if (target.getTargetType() == Targetable.TYPE_BUILDING) {
                        // The building takes the full brunt of the attack.
                        nDamage = nDamPerHit * hits;
                        handleBuildingDamage(vPhaseReport, bldg, nDamage,
                                target.getPosition());
                        // And we're done!
                        return false;
                    }
                    if (entityTarget != null) {
                        handleEntityDamage(entityTarget, vPhaseReport, bldg, hits,
                                nCluster, bldgAbsorbs);
                        // NOTE(review): creditKill is invoked once per damage
                        // cluster, seemingly regardless of whether the target
                        // was destroyed - confirm this is intended.
                        server.creditKill(entityTarget, ae);
                        hits -= nCluster;
                        firstHit = false;
                    }
                    // NOTE(review): if none of the branches above applies
                    // (a non-entity target that is not hex/building), `hits`
                    // is never decremented and this loop would not terminate.
                } // Handle the next cluster.
            } else if (!bMissed) { // NOTE(review): unreachable - this is the
                // "else" of "if (!bMissed)" so the condition can never be
                // true here; the hex-hit report 3390 below is dead code.
                r = new Report(3390);
                r.subject = subjectId;
                vPhaseReport.addElement(r);
            }
        }
        Report.addNewline(vPhaseReport);
        return false;
    }
    /**
     * Calculate the attack value based on range. As a side effect, this also
     * resolves point-defense counterfire against the missile: it sets
     * CapMissileArmor to the missile's remaining armor after counterfire and
     * recomputes CapMissileAMSMod.
     *
     * @return an <code>int</code> representing the attack value at that range.
     */
    @Override
    protected int calcAttackValue() {
        AmmoType atype = (AmmoType) ammo.getType();
        int av = 0;
        // calcCounterAV() resolves AMS/PD fire directed at this missile.
        double counterAV = calcCounterAV();
        int armor = wtype.getMissileArmor();
        // AR10 munitions
        if (atype != null) {
            if (atype.getAmmoType() == AmmoType.T_AR10) {
                if (atype.hasFlag(AmmoType.F_AR10_KILLER_WHALE)) {
                    av = 4;
                    armor = 40;
                } else if (atype.hasFlag(AmmoType.F_AR10_WHITE_SHARK)) {
                    av = 3;
                    armor = 30;
                } else if (atype.hasFlag(AmmoType.F_PEACEMAKER)) {
                    av = 1000;
                    armor = 40;
                } else if (atype.hasFlag(AmmoType.F_SANTA_ANNA)) {
                    av = 100;
                    armor = 30;
                } else {
                    // default AR10 load (no special flag set)
                    av = 2;
                    armor = 20;
                }
            } else {
                // Non-AR10 missile: read AV from the weapon's range brackets.
                int range = RangeType.rangeBracket(nRange, wtype.getATRanges(),
                        true, false);
                if (range == WeaponType.RANGE_SHORT) {
                    av = wtype.getRoundShortAV();
                } else if (range == WeaponType.RANGE_MED) {
                    av = wtype.getRoundMedAV();
                } else if (range == WeaponType.RANGE_LONG) {
                    av = wtype.getRoundLongAV();
                } else if (range == WeaponType.RANGE_EXT) {
                    av = wtype.getRoundExtAV();
                }
            }
            // Nuclear Warheads for non-AR10 missiles
            if (atype.hasFlag(AmmoType.F_SANTA_ANNA)) {
                av = 100;
            } else if (atype.hasFlag(AmmoType.F_PEACEMAKER)) {
                av = 1000;
            }
            nukeS2S = atype.hasFlag(AmmoType.F_NUCLEAR);
        }
        // For squadrons, total the missile armor for the launched volley
        if (ae.isCapitalFighter()) {
            armor = armor * nweapons;
        }
        // Remaining missile armor after counterfire damage.
        CapMissileArmor = armor - (int) counterAV;
        CapMissileAMSMod = calcCapMissileAMSMod();

        if (bDirect) {
            av = Math.min(av + (toHit.getMoS() / 3), av * 2);
        }
        av = applyGlancingBlowModifier(av, false);
        av = (int) Math.floor(getBracketingMultiplier() * av);
        return av;
    }
    /**
     * Calculate the damage per hit. Mirrors the AR10/nuclear munition table
     * in calcAttackValue(), but without the armor/counterfire bookkeeping.
     *
     * @return an <code>int</code> representing the damage dealt per hit.
     */
    @Override
    protected int calcDamagePerHit() {
        AmmoType atype = (AmmoType) ammo.getType();
        double toReturn = wtype.getDamage(nRange);
        // AR10 munitions
        if (atype != null) {
            if (atype.getAmmoType() == AmmoType.T_AR10) {
                if (atype.hasFlag(AmmoType.F_AR10_KILLER_WHALE)) {
                    toReturn = 4;
                } else if (atype.hasFlag(AmmoType.F_AR10_WHITE_SHARK)) {
                    toReturn = 3;
                } else if (atype.hasFlag(AmmoType.F_PEACEMAKER)) {
                    toReturn = 1000;
                } else if (atype.hasFlag(AmmoType.F_SANTA_ANNA)) {
                    toReturn = 100;
                } else {
                    // default AR10 load (no special flag set)
                    toReturn = 2;
                }
            }
            // Nuclear Warheads for non-AR10 missiles
            if (atype.hasFlag(AmmoType.F_SANTA_ANNA)) {
                toReturn = 100;
            } else if (atype.hasFlag(AmmoType.F_PEACEMAKER)) {
                toReturn = 1000;
            }
            nukeS2S = atype.hasFlag(AmmoType.F_NUCLEAR);
        }
        // we default to direct fire weapons for anti-infantry damage
        if (bDirect) {
            toReturn = Math.min(toReturn + (toHit.getMoS() / 3), toReturn * 2);
        }
        toReturn = applyGlancingBlowModifier(toReturn, false);
        return (int) toReturn;
    }
    /**
     * Converts the accumulated counter-AV into a to-hit modifier:
     * one point per full 10 points of counterfire, rounded up.
     */
    @Override
    protected int calcCapMissileAMSMod() {
        CapMissileAMSMod = (int) Math.ceil(CounterAV / 10.0);
        return CapMissileAMSMod;
    }
    /** Returns the to-hit modifier computed by calcCapMissileAMSMod(). */
    @Override
    protected int getCapMissileAMSMod() {
        return CapMissileAMSMod;
    }
    /** Returns the critical-hit modifier for the currently loaded ammo. */
    @Override
    protected int getCapMisMod() {
        AmmoType atype = (AmmoType) ammo.getType();
        return getCritMod(atype);
    }
    /**
     * Gets the critical-hit modifier for a single capital missile ammo type.
     * Null ammo and the sub-capital types listed first carry no modifier.
     */
    protected int getCritMod(AmmoType atype) {
        if (atype == null || atype.getAmmoType() == AmmoType.T_PIRANHA
                || atype.getAmmoType() == AmmoType.T_AAA_MISSILE
                || atype.getAmmoType() == AmmoType.T_ASEW_MISSILE
                || atype.getAmmoType() == AmmoType.T_LAA_MISSILE) {
            return 0;
        }
        if (atype.getAmmoType() == AmmoType.T_WHITE_SHARK
                || atype.getAmmoType() == AmmoType.T_WHITE_SHARK_T
                || atype.hasFlag(AmmoType.F_AR10_WHITE_SHARK)
                // Santa Anna, per IO rules
                || atype.hasFlag(AmmoType.F_SANTA_ANNA)) {
            return 9;
        } else if (atype.getAmmoType() == AmmoType.T_KRAKEN_T
                || atype.getAmmoType() == AmmoType.T_KRAKENM
                // Peacemaker, per IO rules
                || atype.hasFlag(AmmoType.F_PEACEMAKER)) {
            return 8;
        } else if (atype.getAmmoType() == AmmoType.T_KILLER_WHALE
                || atype.getAmmoType() == AmmoType.T_KILLER_WHALE_T
                || atype.hasFlag(AmmoType.F_AR10_KILLER_WHALE)
                || atype.getAmmoType() == AmmoType.T_MANTA_RAY
                || atype.getAmmoType() == AmmoType.T_ALAMO) {
            return 10;
        } else if (atype.getAmmoType() == AmmoType.T_STINGRAY) {
            return 12;
        } else {
            // Any other capital missile type.
            return 11;
        }
    }
/**
* Checks to see if this point defense/AMS bay can engage a capital missile
* This should return true. Only when handling capital missile attacks can this be false.
*/
protected boolean canEngageCapitalMissile(Mounted counter) {
if (counter.getBayWeapons().size() < 2) {
return false;
} else {
return true;
}
}
    /**
     * Sets the appropriate AMS Bay reporting flag depending on what type of missile this is.
     * For capital missiles, the capital-missile variant of the flag is used.
     */
    @Override
    protected void setAMSBayReportingFlag() {
        amsBayEngagedCap = true;
    }

    /**
     * Sets the appropriate PD Bay reporting flag depending on what type of missile this is.
     * For capital missiles, the capital-missile variant of the flag is used.
     */
    @Override
    protected void setPDBayReportingFlag() {
        pdBayEngagedCap = true;
    }
}
| {
"pile_set_name": "Github"
} |
exiftool.exe -b -EmbeddedImage "FLIRE4.jpg" -w "VIS.png"
exiftool.exe -b -rawthermalimage "FLIRE4.jpg" -w "IR.png"
pause | {
"pile_set_name": "Github"
} |
using FluentAssertions;
using NUnit.Framework;
namespace NzbDrone.Integration.Test.ApiTests
{
    [TestFixture]
    public class HistoryFixture : IntegrationTest
    {
        [Test]
        public void history_should_be_empty()
        {
            // A freshly started instance must report an empty first page of history.
            var history = History.GetPaged(1, 15, "date", "desc");

            history.Records.Count.Should().Be(0);
            history.Page.Should().Be(1);
            history.PageSize.Should().Be(15);
            history.Records.Should().BeEmpty();
        }
    }
} | {
"pile_set_name": "Github"
} |
<?php
use AbuseIO\Models\Permission;
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
class CreatePermissionRoleTable extends Migration
{
    /**
     * Run the migrations. This migration has a later timestamp as it depends on permissions table.
     *
     * @return void
     */
    public function up()
    {
        Schema::create(
            'permission_role',
            function (Blueprint $table) {
                // Columns
                $table->increments('id');
                $table->integer('permission_id')->unsigned();
                $table->integer('role_id')->unsigned();
                $table->timestamps();
                $table->softDeletes();

                // Indexes
                $table->index('permission_id');
                $table->index('role_id');

                // Uniques: a permission may be attached to a role only once.
                $table->unique(['permission_id', 'role_id']);
            }
        );

        $this->addDefaultPermissionRole();
    }

    /**
     * Grant every known permission to the default system administrator role (id 1),
     * recreating the set from scratch on each run.
     *
     * @return void
     */
    public function addDefaultPermissionRole()
    {
        // Always recreate the permissions for the system administrator
        DB::table('permission_role')->where('role_id', '=', '1')->delete();

        // Add all permissions to the default system administrator role (1)
        $permission_role = [];
        foreach (Permission::all() as $permission) {
            $permission_role[] = [
                'permission_id' => $permission->id,
                'role_id' => '1',
                'created_at' => new DateTime(),
                'updated_at' => new DateTime(),
            ];
        }
        DB::table('permission_role')->insert($permission_role);
    }

    /**
     * Reverse the migrations.
     *
     * @return void
     */
    public function down()
    {
        Schema::drop('permission_role');
    }
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: e0bcb7711e193a14cbf50e0e4669856f
timeCreated: 1466270351
licenseType: Pro
NativeFormatImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
/*
* Bug 1264577 - A test case for testing caches of various submodules.
* This test case will load two pages that each page loads various resources
* within the same third party domain for the same originAttributes or different
* originAttributes. And then, it verifies the number of cache entries and
* the originAttributes of loading channels. If these two pages belong to
* the same originAttributes, the number of cache entries for a certain
* resource would be one. Otherwise, it would be two.
*/
const CC = Components.Constructor;
// Proxy service used to register a channel filter that sees every channel.
let protocolProxyService = Cc[
  "@mozilla.org/network/protocol-proxy-service;1"
].getService(Ci.nsIProtocolProxyService);
// Third-party domain whose resources are loaded by both test pages.
const TEST_DOMAIN = "http://example.net";
const TEST_PATH = "/browser/browser/components/originattributes/test/browser/";
const TEST_PAGE = TEST_DOMAIN + TEST_PATH + "file_cache.html";
// One suffix per resource type loaded by the test page; doCheck() counts the
// cache entries of each "file_thirdPartyChild.<suffix>" resource.
let suffixes = [
  "iframe.html",
  "link.css",
  "script.js",
  "img.png",
  "object.png",
  "embed.png",
  "xhr.html",
  "worker.xhr.html",
  "audio.ogg",
  "video.ogv",
  "track.vtt",
  "fetch.html",
  "worker.fetch.html",
  "request.html",
  "worker.request.html",
  "import.js",
  "worker.js",
  "sharedworker.js",
  "font.woff",
];
// A random value for isolating video/audio elements across different tests.
let randomSuffix;
// Drop every cached image (both the chrome and the content image cache) so
// each test run starts from a cold image cache.
function clearAllImageCaches() {
  const imgTools = SpecialPowers.Cc[
    "@mozilla.org/image/tools;1"
  ].getService(SpecialPowers.Ci.imgITools);
  const imageCache = imgTools.getImgCacheForDocument(window.document);
  // Clear the chrome cache first, then the content cache.
  for (const chromeCache of [true, false]) {
    imageCache.clearCache(chromeCache);
  }
}
// Resolves with an array of { uri, idEnhance } records describing every
// cache entry stored for the given nsILoadContextInfo.
function cacheDataForContext(loadContextInfo) {
  return new Promise(resolve => {
    let cacheEntries = [];
    let cacheVisitor = {
      onCacheStorageInfo(num, consumption) {},
      // Collect one record per entry as the visitor walks the storage.
      onCacheEntryInfo(uri, idEnhance) {
        cacheEntries.push({ uri, idEnhance });
      },
      onCacheEntryVisitCompleted() {
        resolve(cacheEntries);
      },
      QueryInterface: ChromeUtils.generateQI(["nsICacheStorageVisitor"]),
    };
    // Visiting the disk cache also visits memory storage so we do not
    // need to use Services.cache2.memoryCacheStorage() here.
    let storage = Services.cache2.diskCacheStorage(loadContextInfo, false);
    storage.asyncVisitStorage(cacheVisitor, true);
  });
}
// Count the cache entries whose URI belongs to `domain` and references the
// third-party child resource named by `fileSuffix`.
let countMatchingCacheEntries = function(cacheEntries, domain, fileSuffix) {
  const needle = "file_thirdPartyChild." + fileSuffix;
  let matches = 0;
  for (const entry of cacheEntries) {
    const spec = entry.uri.asciiSpec;
    if (spec.includes(domain) && spec.includes(needle)) {
      matches++;
    }
  }
  return matches;
};
// Invoke `onChannel` for every channel the browser creates; returns a
// function that unregisters the observer.
function observeChannels(onChannel) {
  // We use a dummy proxy filter to catch all channels, even those that do not
  // generate an "http-on-modify-request" notification, such as link preconnects.
  let proxyFilter = {
    applyFilter(aChannel, aProxy, aCallback) {
      // We have the channel; provide it to the callback.
      onChannel(aChannel);
      // Pass on aProxy unmodified.
      aCallback.onProxyFilterResult(aProxy);
    },
  };
  protocolProxyService.registerChannelFilter(proxyFilter, 0);
  // Return the stop() function:
  return () => protocolProxyService.unregisterChannelFilter(proxyFilter);
}
// Install a channel observer that asserts every example.net channel carries
// the originAttributes expected for the given isolation test mode. Returns
// the function that uninstalls the observer.
function startObservingChannels(aMode) {
  let stopObservingChannels = observeChannels(function(channel) {
    let originalURISpec = channel.originalURI.spec;
    // Only third-party (example.net) loads are relevant to this test.
    if (originalURISpec.includes("example.net")) {
      let loadInfo = channel.loadInfo;
      switch (aMode) {
        case TEST_MODE_FIRSTPARTY:
          // Each load must be keyed to one of the two first-party domains.
          ok(
            loadInfo.originAttributes.firstPartyDomain === "example.com" ||
              loadInfo.originAttributes.firstPartyDomain === "example.org",
            "first party for " +
              originalURISpec +
              " is " +
              loadInfo.originAttributes.firstPartyDomain
          );
          break;
        case TEST_MODE_NO_ISOLATION:
          // Without isolation, attributes must stay at their defaults.
          ok(
            ChromeUtils.isOriginAttributesEqual(
              loadInfo.originAttributes,
              ChromeUtils.fillNonDefaultOriginAttributes()
            ),
            "OriginAttributes for " + originalURISpec + " is default."
          );
          break;
        case TEST_MODE_CONTAINERS:
          // Each load must be keyed to one of the two test containers.
          ok(
            loadInfo.originAttributes.userContextId === 1 ||
              loadInfo.originAttributes.userContextId === 2,
            "userContextId for " +
              originalURISpec +
              " is " +
              loadInfo.originAttributes.userContextId
          );
          break;
        default:
          ok(false, "Unknown test mode.");
      }
    }
  });
  return stopObservingChannels;
}
// Uninstaller for the channel observer installed by doInit(); cleared again
// by doCheck() at the end of each run.
let stopObservingChannels;
// The init function, which clears image and network caches, and generates
// the random value for isolating video and audio elements across different
// test runs.
async function doInit(aMode) {
  // Disable speculative loading and network-state partitioning so cache
  // entry counts reflect only what the test page itself loads.
  await SpecialPowers.pushPrefEnv({
    set: [
      ["network.predictor.enabled", false],
      ["network.predictor.enable-prefetch", false],
      ["privacy.partition.network_state", false],
    ],
  });
  clearAllImageCaches();
  // Start from an empty HTTP cache.
  Services.cache2.clear();
  randomSuffix = Math.random();
  stopObservingChannels = startObservingChannels(aMode);
}
// In the test function, we dynamically generate the video and audio element,
// and assign a random suffix to their URL to isolate them across different
// test runs.
async function doTest(aBrowser) {
  // Values the content task needs: cache-busting suffix and resource prefix.
  let argObj = {
    randomSuffix,
    urlPrefix: TEST_DOMAIN + TEST_PATH,
  };
  await SpecialPowers.spawn(aBrowser, [argObj], async function(arg) {
    content.windowUtils.clearSharedStyleSheetCache();
    let videoURL = arg.urlPrefix + "file_thirdPartyChild.video.ogv";
    let audioURL = arg.urlPrefix + "file_thirdPartyChild.audio.ogg";
    let trackURL = arg.urlPrefix + "file_thirdPartyChild.track.vtt";
    // NOTE(review): URLSuffix is applied to the audio/video sources but not
    // to the track URL, so the .vtt is shared across runs.
    let URLSuffix = "?r=" + arg.randomSuffix;
    // Create the audio and video elements.
    let audio = content.document.createElement("audio");
    let video = content.document.createElement("video");
    let audioSource = content.document.createElement("source");
    let audioTrack = content.document.createElement("track");
    // Append the audio and track element into the body, and wait until they're finished.
    await new content.Promise(resolve => {
      let audioLoaded = false;
      let trackLoaded = false;
      // Resolve only once both the audio "suspend" and the track "load"
      // events have fired.
      let audioListener = () => {
        Assert.ok(true, `Audio suspended: ${audioURL + URLSuffix}`);
        audio.removeEventListener("suspend", audioListener);
        audioLoaded = true;
        if (audioLoaded && trackLoaded) {
          resolve();
        }
      };
      let trackListener = () => {
        Assert.ok(true, `Audio track loaded: ${audioURL + URLSuffix}`);
        audioTrack.removeEventListener("load", trackListener);
        trackLoaded = true;
        if (audioLoaded && trackLoaded) {
          resolve();
        }
      };
      Assert.ok(true, `Loading audio: ${audioURL + URLSuffix}`);
      // Add the event listeners before everything in case we lose events.
      audioTrack.addEventListener("load", trackListener);
      audio.addEventListener("suspend", audioListener);
      // Assign attributes for the audio element.
      audioSource.setAttribute("src", audioURL + URLSuffix);
      audioSource.setAttribute("type", "audio/ogg");
      audioTrack.setAttribute("src", trackURL);
      audioTrack.setAttribute("kind", "subtitles");
      audioTrack.setAttribute("default", true);
      audio.appendChild(audioSource);
      audio.appendChild(audioTrack);
      audio.autoplay = true;
      content.document.body.appendChild(audio);
    });
    // Append the video element into the body, and wait until it's finished.
    await new content.Promise(resolve => {
      let listener = () => {
        Assert.ok(true, `Video suspended: ${videoURL + URLSuffix}`);
        video.removeEventListener("suspend", listener);
        resolve();
      };
      Assert.ok(true, `Loading video: ${videoURL + URLSuffix}`);
      // Add the event listener before everything in case we lose the event.
      video.addEventListener("suspend", listener);
      // Assign attributes for the video element.
      video.setAttribute("src", videoURL + URLSuffix);
      video.setAttribute("type", "video/ogg");
      content.document.body.appendChild(video);
    });
  });
  return 0;
}
// The check function, which checks the number of cache entries.
async function doCheck(aShouldIsolate, aInputA, aInputB) {
  // With isolation in effect each of the two first parties / containers gets
  // its own cache entry per resource; otherwise the entry is shared.
  let expectedEntryCount = aShouldIsolate ? 2 : 1;
  // Every load context the test may have touched: default and private
  // browsing, anonymous, both containers, and both first-party domains
  // (each in normal and private/anonymous mode).
  let loadContextInfos = [
    Services.loadContextInfo.default,
    Services.loadContextInfo.private,
    Services.loadContextInfo.custom(true, {}),
    Services.loadContextInfo.custom(false, { userContextId: 1 }),
    Services.loadContextInfo.custom(true, { userContextId: 1 }),
    Services.loadContextInfo.custom(false, { userContextId: 2 }),
    Services.loadContextInfo.custom(true, { userContextId: 2 }),
    Services.loadContextInfo.custom(false, { firstPartyDomain: "example.com" }),
    Services.loadContextInfo.custom(true, { firstPartyDomain: "example.com" }),
    Services.loadContextInfo.custom(false, { firstPartyDomain: "example.org" }),
    Services.loadContextInfo.custom(true, { firstPartyDomain: "example.org" }),
  ];
  // Gather the cache entries of every context, sequentially as before.
  let data = [];
  for (let info of loadContextInfos) {
    data = data.concat(await cacheDataForContext(info));
  }
  // Each resource type must appear exactly the expected number of times.
  for (let suffix of suffixes) {
    let foundEntryCount = countMatchingCacheEntries(
      data,
      "example.net",
      suffix
    );
    let result = expectedEntryCount === foundEntryCount;
    ok(
      result,
      "Cache entries expected for " +
        suffix +
        ": " +
        expectedEntryCount +
        ", and found " +
        foundEntryCount
    );
  }
  // Tear down the channel observer; doInit() installs a fresh one per run.
  stopObservingChannels();
  stopObservingChannels = undefined;
  return true;
}
// Drive the isolation harness: load TEST_PAGE top-level and in a frame,
// with doInit/doTest/doCheck as the per-mode hooks.
let testArgs = {
  url: TEST_PAGE,
  firstFrameSetting: DEFAULT_FRAME_SETTING,
  secondFrameSetting: [TEST_TYPE_FRAME],
};
IsolationTestTools.runTests(testArgs, doTest, doCheck, doInit);
| {
"pile_set_name": "Github"
} |
from __future__ import with_statement
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from flask_debugtoolbar.panels import DebugPanel
from flask_debugtoolbar.utils import format_fname
_ = lambda x: x
class ThreadTrackingHandler(logging.Handler):
    """A logging handler that stores emitted records per emitting thread."""

    def __init__(self):
        # Per-thread bookkeeping is impossible without the threading module.
        if threading is None:
            raise NotImplementedError("threading module is not available, \
the logging panel cannot be used without it")
        logging.Handler.__init__(self)
        self.records = {}  # a dictionary that maps threads to log records

    def emit(self, record):
        # Append the record to the bucket of the thread that emitted it.
        self.get_records().append(record)

    def get_records(self, thread=None):
        """
        Returns a list of records for the provided thread, or if none is
        provided, returns a list for the current thread.
        """
        if thread is None:
            # current_thread() replaces currentThread(), which was deprecated
            # in Python 3.10 and removed in 3.12 (available since 2.6).
            thread = threading.current_thread()
        if thread not in self.records:
            self.records[thread] = []
        return self.records[thread]

    def clear_records(self, thread=None):
        """Drop all stored records for ``thread`` (default: current thread)."""
        if thread is None:
            thread = threading.current_thread()
        if thread in self.records:
            del self.records[thread]
# Shared handler instance installed on the root logger; guarded by _init_lock.
handler = None
_init_lock = threading.Lock()
def _init_once():
    """Install a ThreadTrackingHandler on the root logger, exactly once."""
    global handler
    if handler is not None:
        return
    with _init_lock:
        # Double-check under the lock: another thread may have completed the
        # initialization while we were waiting to acquire it.
        if handler is not None:
            return
        # Call werkzeug's internal logging to make sure it gets configured
        # before we add our handler. Otherwise werkzeug will see our handler
        # and not configure console logging for the request log.
        # Werkzeug's default log level is INFO so this message probably won't
        # be seen.
        try:
            from werkzeug._internal import _log
        except ImportError:
            pass
        else:
            _log('debug', 'Initializing Flask-DebugToolbar log handler')
        handler = ThreadTrackingHandler()
        logging.root.addHandler(handler)
class LoggingPanel(DebugPanel):
    """Debug-toolbar panel listing log records emitted during a request."""

    name = 'Logging'
    has_content = True

    def process_request(self, request):
        # Lazily install the shared handler, then drop any records left over
        # from a previous request handled on this thread.
        _init_once()
        handler.clear_records()

    def get_and_delete(self):
        """Return the current thread's records and clear its buffer."""
        records = handler.get_records()
        handler.clear_records()
        return records

    def nav_title(self):
        return _("Logging")

    def nav_subtitle(self):
        # FIXME l10n: use ngettext
        num_records = len(handler.get_records())
        return '%s message%s' % (num_records, '' if num_records == 1 else 's')

    def title(self):
        return _('Log Messages')

    def url(self):
        return ''

    def content(self):
        """Render the buffered records (clearing them) into the panel HTML."""
        records = []
        for record in self.get_and_delete():
            records.append({
                'message': record.getMessage(),
                'time': datetime.datetime.fromtimestamp(record.created),
                'level': record.levelname,
                'file': format_fname(record.pathname),
                'file_long': record.pathname,
                'line': record.lineno,
            })
        context = self.context.copy()
        context.update({'records': records})
        return self.render('panels/logger.html', context)
| {
"pile_set_name": "Github"
} |
SET ISO8859-1
TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
SFX S Y 1
SFX S y ies [^aeiou]y
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="SAttachmentForTab">
    <!-- Reusable column list for S_Attachment selects (no <include> in this
         mapper references it yet). -->
    <sql id="sattachmentColumns">RID,ATTACHMENTID,ATTACHMENTNAME,ARCHIVETYPE,ATTACHMENTREMARK,TABLEID,RECORDRID,FILESIZE,FILEPATH,
    LIABDEPT,LIABORG,CREATTIME,CREATORID,MODIFIEDTIME,MODIFIERID,RECORDSTATE,ATTACHMENTSTATE</sql>
    <!-- Paged listing of live attachments (RECORDSTATE='0') with creator
         user name and liable department short form.
         NOTE(review): ${whereCondition} and ${order} are spliced into the
         SQL verbatim; they must come only from trusted code, never from
         user input (SQL injection risk). -->
    <select id="listData" parameterType="hashmap" resultType="hashmap">
        select sde.DEPSHORTFORM as TRANS_LIABDEPT,
        sus.USERNAME as TRANS_CREATORID,
        sam.*
        from S_Attachment sam
        left outer join S_USER sus
        on sus.USERID=sam.CREATORID
        left outer join S_DEPT sde
        on sde.DEPTID=sam.LIABDEPT
        <where>
            <if test="whereCondition!=null and whereCondition!='' ">
                ${whereCondition} and
            </if>
            sam.RECORDSTATE='0'
        </where>
        <if test="order!=null and order!='' ">
            order by ${order}
        </if>
    </select>
    <!-- Row-count companion to listData: same joins and filter, returns NUM.
         NOTE(review): ${whereCondition} is substituted verbatim (see listData). -->
    <select id="totalNum" parameterType="hashmap" resultType="hashmap">
        select count(*) NUM from S_Attachment sam
        left outer join S_USER sus
        on sus.USERID=sam.CREATORID
        left outer join S_DEPT sde
        on sde.DEPTID=sam.LIABDEPT
        <where>
            <if test="whereCondition!=null and whereCondition!='' ">
                ${whereCondition} and
            </if>
            sam.RECORDSTATE='0'
        </where>
    </select>
    <!-- Hard delete of a single attachment row by primary key. -->
    <delete id="deleteData" parameterType="hashmap" >
        delete from S_Attachment where RID = #{RID}
    </delete>
    <!-- Soft delete: set the row's RECORDSTATE flag instead of removing it. -->
    <update id="deleteUpdate" parameterType="hashmap" >
        update S_Attachment set
        RECORDSTATE=#{RECORDSTATE}
        where RID=#{RID}
    </update>
<delete id="updateDataRecordstate" parameterType="hashmap" >
update S_Attachment set RECORDSTATE= '1' where RID = #{RID}
</delete>
    <!-- Detail view of one attachment by RID, with creator and department
         display columns. -->
    <select id="viewDetail" parameterType="hashmap" resultType="hashmap">
        select sde.DEPSHORTFORM as TRANS_LIABDEPT,
        sus.USERNAME as TRANS_CREATORID,
        sam.*
        from S_Attachment sam
        left outer join S_USER sus
        on sus.USERID=sam.CREATORID
        left outer join S_DEPT sde
        on sde.DEPTID=sam.LIABDEPT
        where sam.RID = #{RID}
    </select>
    <!-- Same query as viewDetail but takes the RID as a plain String
         parameter (used when loading the edit form). -->
    <select id="updateDetail" parameterType="String" resultType="hashmap">
        select sde.DEPSHORTFORM as TRANS_LIABDEPT,
        sus.USERNAME as TRANS_CREATORID,
        sam.*
        from S_Attachment sam
        left outer join S_USER sus
        on sus.USERID=sam.CREATORID
        left outer join S_DEPT sde
        on sde.DEPTID=sam.LIABDEPT
        where sam.RID = #{RID}
    </select>
    <!-- Free-form lookup. NOTE(review): ${whereCondition} is substituted
         verbatim - trusted callers only (SQL injection risk). -->
    <select id="viewDetailByCondition" parameterType="hashmap" resultType="hashmap">
        select * from S_Attachment
        <where>
            <if test="whereCondition!=null and whereCondition!='' ">
                ${whereCondition}
            </if>
        </where>
    </select>
    <!-- Insert a new attachment row; all values are bound via #{} placeholders. -->
    <insert id="insertSave" parameterType="hashmap" >
        insert into S_Attachment (RID,ATTACHMENTID,ATTACHMENTNAME,ARCHIVETYPE,ATTACHMENTREMARK,
        TABLEID,RECORDRID,FILESIZE,TRANS_FILESIZE,FILEPATH,LIABDEPT,LIABORG,
        ATTACHMENTSTATE,CREATTIME,CREATORID,MODIFIEDTIME,MODIFIERID,RECORDSTATE)
        values(#{RID},#{ATTACHMENTID},#{ATTACHMENTNAME},#{ARCHIVETYPE},#{ATTACHMENTREMARK},
        #{TABLEID},#{RECORDRID},#{FILESIZE},#{TRANS_FILESIZE},#{FILEPATH},#{LIABDEPT},#{LIABORG},
        #{ATTACHMENTSTATE},#{CREATTIME},#{CREATORID},#{MODIFIEDTIME},#{MODIFIERID},#{RECORDSTATE}
        )
    </insert>
    <!-- Partial update: only non-null parameters are written; the <set>
         element strips the trailing comma automatically. -->
    <update id="updateSave" parameterType="hashmap" >
        update S_Attachment
        <set>
            <if test="ATTACHMENTID != null ">ATTACHMENTID = #{ATTACHMENTID},</if>
            <if test="ATTACHMENTNAME != null ">ATTACHMENTNAME = #{ATTACHMENTNAME},</if>
            <if test="ARCHIVETYPE != null ">ARCHIVETYPE = #{ARCHIVETYPE},</if>
            <if test="ATTACHMENTREMARK != null ">ATTACHMENTREMARK = #{ATTACHMENTREMARK},</if>
            <if test="TABLEID != null ">TABLEID = #{TABLEID},</if>
            <if test="RECORDRID != null ">RECORDRID = #{RECORDRID},</if>
            <if test="LIABDEPT != null ">LIABDEPT = #{LIABDEPT},</if>
            <if test="LIABORG != null ">LIABORG = #{LIABORG},</if>
            <if test="FILESIZE != null ">FILESIZE = #{FILESIZE},</if>
            <if test="FILEPATH != null ">FILEPATH = #{FILEPATH},</if>
            <if test="MODIFIEDTIME != null ">MODIFIEDTIME = #{MODIFIEDTIME},</if>
            <if test="MODIFIERID != null ">MODIFIERID = #{MODIFIERID},</if>
            <if test="RECORDSTATE != null ">RECORDSTATE = #{RECORDSTATE},</if>
            <if test="ATTACHMENTSTATE != null ">ATTACHMENTSTATE = #{ATTACHMENTSTATE}</if>
        </set>
        where RID =#{RID}
    </update>
    <!-- Change every attachment whose state is 'unconfirmed' ('1') to the
         given (confirmed) state. -->
    <update id="confirmFileState" parameterType="hashmap" >
        update S_Attachment set
        ATTACHMENTSTATE = #{ATTACHMENTSTATE}
        where ATTACHMENTSTATE = '1'
    </update>
    <!-- Duplicate check: does an attachment with this record, file path and
         record state already exist? -->
    <select id="isExistAtt" parameterType="hashmap" resultType="hashmap">
        select * from S_Attachment where
        RECORDRID = #{RECORDRID} and FILEPATH = #{FILEPATH} and RECORDSTATE = #{RECORDSTATE}
    </select>
<!-- 根据关键业务主键和关键业务表名更新最后上传人和最后上传时间 -->
<select id="updateUploadData" parameterType="hashmap" resultType="hashmap">
update S_ATTACHMENT set
MODIFIERID = #{MODIFIERID}, MODIFIEDTIME = #{MODIFIEDTIME}
where RECORDRID = #{RECORDRID} and TABLEID = #{TABLEID} and RECORDSTATE = '0'
</select>
    <!-- Fetch all report extension files (ARCHIVETYPE starting with 'A')
         owned by the given business record key. -->
    <select id="getExtFileByBizRid" parameterType="hashmap" resultType="hashmap">
        select ARCHIVETYPE,FILEPATH from S_ATTACHMENT
        <where>
            RECORDSTATE='0' and RECORDRID = #{REPORTDATARID} and ARCHIVETYPE like 'A%'
        </where>
    </select>
</mapper> | {
"pile_set_name": "Github"
} |
//
// GHTestRunner.h
//
// Created by Gabriel Handford on 1/16/09.
// Copyright 2008 Gabriel Handford
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
//! @cond DEV
//
// Portions of this file fall under the following license, marked with:
// GTM_BEGIN : GTM_END
//
// Copyright 2008 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
//
#import "GHTestGroup.h"
#import "GHTestSuite.h"
@class GHTestRunner;
// Delegates can be guaranteed to be notified on the main thread (using #delegateOnMainThread)
@protocol GHTestRunnerDelegate <NSObject>
@optional
- (void)testRunnerDidStart:(GHTestRunner *)runner; // Run started
- (void)testRunner:(GHTestRunner *)runner didStartTest:(id<GHTest>)test; // Test started
- (void)testRunner:(GHTestRunner *)runner didUpdateTest:(id<GHTest>)test; // Test changed
- (void)testRunner:(GHTestRunner *)runner didEndTest:(id<GHTest>)test; // Test finished
- (void)testRunnerDidCancel:(GHTestRunner *)runner; // Run was cancelled
- (void)testRunnerDidEnd:(GHTestRunner *)runner; // Run finished
- (void)testRunner:(GHTestRunner *)runner didLog:(NSString *)message; // Runner logged message
- (void)testRunner:(GHTestRunner *)runner test:(id<GHTest>)test didLog:(NSString *)message; // Test logged message
@end
/*!
 Runs the tests.
 Tests are run a separate thread though delegates are called on the
 main thread by default (see #delegateOnMainThread).
 */
@interface GHTestRunner : NSObject <GHTestDelegate> {
  id<GHTest> test_; // The test to run; Could be a GHTestGroup (suite), GHTestGroup (test case), or GHTest (target/selector)
  NSObject<GHTestRunnerDelegate> *delegate_; // weak
  GHTestOptions options_;
  BOOL running_;
  BOOL cancelling_;
  NSTimeInterval startInterval_;
  NSOperationQueue *operationQueue_; //! If running a suite in operation queue
}
// Runner state exposed to delegates/UI; mirrors the ivars above.
@property (retain) id<GHTest> test;
@property (assign) NSObject<GHTestRunnerDelegate> *delegate; // weak
@property (assign) GHTestOptions options;
@property (readonly) GHTestStats stats;
@property (readonly, getter=isRunning) BOOL running;
@property (readonly, getter=isCancelling) BOOL cancelling;
@property (readonly) NSTimeInterval interval;
@property (retain, nonatomic) NSOperationQueue *operationQueue;
/*!
 Create runner for test.
 @param test
 */
- (id)initWithTest:(id<GHTest>)test;
/*!
 Create runner for all tests.
 @see GHTesting#loadAllTestCases.
 @result Runner
 */
+ (GHTestRunner *)runnerForAllTests;
/*!
 Create runner for test suite.
 @param suite
 @result Runner
 */
+ (GHTestRunner *)runnerForSuite:(GHTestSuite *)suite;
/*!
 Create runner for class and method.
 @param testClassName
 @param methodName
 @result Runner
 */
+ (GHTestRunner *)runnerForTestClassName:(NSString *)testClassName methodName:(NSString *)methodName;
/*!
 Get the runner from the environment.
 If the TEST env is set, then we will only run that test case or test method.
 */
+ (GHTestRunner *)runnerFromEnv;
/*!
 Run the test runner. Usually called from the test main.
 Reads the TEST environment variable and filters on that; or all tests are run.
 @result 0 is success, otherwise the failure count
 */
+ (int)run;
// Runs the tests off the main thread (see the class comment above).
- (void)runInBackground;
/*!
 Start the test runner with the default test.
 @result 0 is success, otherwise the failure count
 */
- (int)runTests;
// Request cancellation of an in-progress run (see cancelling property).
- (void)cancel;
// NOTE(review): presumably toggles use of the operation queue for parallel
// suite runs (see operationQueue) - confirm against the implementation.
- (void)setInParallel:(BOOL)inParallel;
- (BOOL)isInParallel;
/*!
 Write message to console.
 */
- (void)log:(NSString *)message;
@end
| {
"pile_set_name": "Github"
} |
<!-- 24dp x 24dp vector drawable: a single black fill path on a 24x24 viewport. -->
<vector xmlns:android="http://schemas.android.com/apk/res/android"
    android:width="24dp"
    android:height="24dp"
    android:viewportWidth="24.0"
    android:viewportHeight="24.0">
    <path
        android:fillColor="#FF000000"
        android:pathData="M21,4L3,4c-1.1,0 -2,0.9 -2,2v12c0,1.1 0.9,2 2,2h18c1.1,0 1.99,-0.9 1.99,-2L23,6c0,-1.1 -0.9,-2 -2,-2zM19,18L5,18L5,6h14v12z"/>
</vector>
| {
"pile_set_name": "Github"
} |
{
"name": "flash-vue-admin",
"version": "4.1.0",
"description": "A web-flash Admin with Element UI & axios & iconfont & permission control & lint",
"author": "enilu<[email protected]>",
"license": "MIT",
"scripts": {
"dev": "vue-cli-service serve",
"build:prod": "vue-cli-service build",
"build:stage": "vue-cli-service build --mode staging",
"preview": "node build/index.js --preview",
"lint": "eslint --ext .js,.vue src",
"test:unit": "jest --clearCache && vue-cli-service test:unit",
"test:ci": "npm run lint && npm run test:unit",
"svgo": "svgo -f src/icons/svg --config=src/icons/svgo.yml"
},
"dependencies": {
"@riophae/vue-treeselect": "^0.4.0",
"axios": "0.18.0",
"element-resize-detector": "^1.2.1",
"element-ui": "2.11.0",
"fuse.js": "3.4.4",
"js-cookie": "2.2.0",
"normalize.css": "7.0.0",
"nprogress": "0.2.0",
"path-to-regexp": "2.4.0",
"vue": "2.6.10",
"vue-echarts": "^3.1.2",
"vue-i18n": "7.3.2",
"vue-router": "3.0.6",
"vuex": "3.1.1",
"webpack-dev-server": "3.3.1"
},
"devDependencies": {
"autoprefixer": "8.5.0",
"@babel/core": "7.0.0",
"@babel/register": "7.0.0",
"@vue/cli-plugin-babel": "3.6.0",
"@vue/cli-plugin-eslint": "3.6.0",
"@vue/cli-plugin-unit-jest": "3.6.3",
"@vue/cli-service": "3.6.0",
"@vue/test-utils": "1.0.0-beta.29",
"babel-core": "7.0.0-bridge.0",
"babel-eslint": "10.0.1",
"babel-jest": "23.6.0",
"chalk": "2.4.2",
"connect": "3.6.6",
"eslint": "5.15.3",
"eslint-plugin-vue": "5.2.2",
"html-webpack-plugin": "3.2.0",
"mockjs": "1.0.1-beta3",
"node-sass": "^4.9.0",
"runjs": "^4.3.2",
"sass-loader": "^7.1.0",
"script-ext-html-webpack-plugin": "2.1.3",
"script-loader": "0.7.2",
"serve-static": "^1.13.2",
"svg-sprite-loader": "4.1.3",
"svgo": "1.2.2",
"vue-template-compiler": "2.6.10"
},
"engines": {
"node": ">=8.9",
"npm": ">= 3.0.0"
},
"browserslist": [
"> 1%",
"last 2 versions",
"not ie <= 8"
]
}
| {
"pile_set_name": "Github"
} |
#include "DrawPerturb.h"
#include "render/DrawUtil.h"
#include "sim/SimObj.h"
// Scale factors mapping force/torque magnitudes to drawable sizes.
const double cDrawPerturb::gForceScale = 0.005;
const double cDrawPerturb::gTorqueScale = 0.00075;// * 0.25;
void cDrawPerturb::DrawForce(const tVector& pos, const tVector& force)
{
const double len_scale = gForceScale;
const double arrow_size = 0.1;
tVector pos1 = pos + force * len_scale;
cDrawUtil::SetColor(tVector(1, 0, 0, 0.5));
cDrawUtil::DrawArrow3D(pos, pos1, arrow_size);
}
void cDrawPerturb::DrawTorque(const tVector& pos, const tVector& torque)
{
const double torque_scale = gTorqueScale;
const tVector color0 = tVector(1, 0, 0, 0.25);
const tVector color1 = tVector(0, 1, 1, 0.25);
tVector col;
if (torque[2] < 0)
{
col = color0;
}
else
{
col = color1;
}
double mag = torque.norm();
double r = mag * torque_scale;
cDrawUtil::SetColor(tVector(col[0], col[1], col[2], col[3]));
cDrawUtil::PushMatrixView();
cDrawUtil::Translate(pos);
cDrawUtil::DrawDisk(r);
cDrawUtil::PopMatrixView();
}
// Dispatch on the perturbation type and draw it with the matching helper.
void cDrawPerturb::Draw(const tPerturb& perturb)
{
	const tPerturb::ePerturb type = perturb.mType;
	if (type == tPerturb::ePerturbForce)
	{
		DrawForce(perturb);
	}
	else if (type == tPerturb::ePerturbTorque)
	{
		DrawTorque(perturb);
	}
	// Other perturbation types are not drawn.
}
void cDrawPerturb::DrawForce(const tPerturb& perturb)
{
tVector pos = perturb.mObj->LocalToWorldPos(perturb.mLocalPos);
const tVector& force = perturb.mPerturb;
DrawForce(pos, force);
}
void cDrawPerturb::DrawTorque(const tPerturb& perturb)
{
tVector pos = perturb.mObj->LocalToWorldPos(perturb.mLocalPos);
const tVector& torque = perturb.mPerturb;
DrawTorque(pos, torque);
} | {
"pile_set_name": "Github"
} |
{
"type": "minecraft:block",
"pools": [
{
"rolls": 1,
"entries": [
{
"type": "minecraft:item",
"name": "minecraft:flower_pot"
}
],
"conditions": [
{
"condition": "minecraft:survives_explosion"
}
]
},
{
"rolls": 1,
"entries": [
{
"type": "minecraft:item",
"name": "biomesoplenty:wildflower"
}
],
"conditions": [
{
"condition": "minecraft:survives_explosion"
}
]
}
]
} | {
"pile_set_name": "Github"
} |
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package filedesc
import (
"fmt"
"sync"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
// Enums is a list of Enum descriptors with a lazily constructed index by name.
//
// NOTE(review): this file is generated (see header); lasting changes belong
// in the generate-types generator.
type Enums struct {
	List   []Enum
	once   sync.Once
	byName map[protoreflect.Name]*Enum // protected by once
}

func (p *Enums) Len() int {
	return len(p.List)
}
func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
	return &p.List[i]
}

// ByName returns the descriptor with the given name, or nil if absent.
func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}
func (p *Enums) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}
func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; for duplicate names the
// first occurrence wins.
func (p *Enums) lazyInit() *Enums {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
// EnumValues is a list of EnumValue descriptors with lazily constructed
// indexes by name and by enum number.
type EnumValues struct {
	List   []EnumValue
	once   sync.Once
	byName map[protoreflect.Name]*EnumValue       // protected by once
	byNum  map[protoreflect.EnumNumber]*EnumValue // protected by once
}

func (p *EnumValues) Len() int {
	return len(p.List)
}
func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
	return &p.List[i]
}

// ByName returns the descriptor with the given name, or nil if absent.
func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}

// ByNumber returns the descriptor with the given number, or nil if absent.
func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
	if d := p.lazyInit().byNum[n]; d != nil {
		return d
	}
	return nil
}
func (p *EnumValues) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}
func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds both indexes exactly once; for duplicate keys the first
// occurrence wins.
func (p *EnumValues) lazyInit() *EnumValues {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
			p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
				if _, ok := p.byNum[d.Number()]; !ok {
					p.byNum[d.Number()] = d
				}
			}
		}
	})
	return p
}
// Messages is a list of Message descriptors with a lazily constructed index
// by name.
type Messages struct {
	List   []Message
	once   sync.Once
	byName map[protoreflect.Name]*Message // protected by once
}

func (p *Messages) Len() int {
	return len(p.List)
}
func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
	return &p.List[i]
}

// ByName returns the descriptor with the given name, or nil if absent.
func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}
func (p *Messages) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}
func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; first occurrence wins.
func (p *Messages) lazyInit() *Messages {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Message, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
// Fields is a list of Field descriptors with lazily constructed indexes by
// name, by JSON name, and by field number.
type Fields struct {
	List   []Field
	once   sync.Once
	byName map[protoreflect.Name]*Field        // protected by once
	byJSON map[string]*Field                   // protected by once
	byNum  map[protoreflect.FieldNumber]*Field // protected by once
}

func (p *Fields) Len() int {
	return len(p.List)
}
func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
	return &p.List[i]
}

// ByName returns the descriptor with the given name, or nil if absent.
func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}

// ByJSONName returns the descriptor with the given JSON name, or nil if absent.
func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
	if d := p.lazyInit().byJSON[s]; d != nil {
		return d
	}
	return nil
}

// ByNumber returns the descriptor with the given field number, or nil if absent.
func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
	if d := p.lazyInit().byNum[n]; d != nil {
		return d
	}
	return nil
}
func (p *Fields) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}
func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds all three indexes exactly once; for duplicate keys the
// first occurrence wins.
func (p *Fields) lazyInit() *Fields {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Field, len(p.List))
			p.byJSON = make(map[string]*Field, len(p.List))
			p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
				if _, ok := p.byJSON[d.JSONName()]; !ok {
					p.byJSON[d.JSONName()] = d
				}
				if _, ok := p.byNum[d.Number()]; !ok {
					p.byNum[d.Number()] = d
				}
			}
		}
	})
	return p
}
// Oneofs is a list of Oneof descriptors with a lazily constructed index by name.
type Oneofs struct {
	List   []Oneof
	once   sync.Once
	byName map[protoreflect.Name]*Oneof // protected by once
}

func (p *Oneofs) Len() int {
	return len(p.List)
}
func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
	return &p.List[i]
}

// ByName returns the descriptor with the given name, or nil if absent.
func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}
func (p *Oneofs) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}
func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; first occurrence wins.
func (p *Oneofs) lazyInit() *Oneofs {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
// Extensions is an ordered list of extension descriptors with a lazily
// built name index.
type Extensions struct {
	List   []Extension
	once   sync.Once
	byName map[protoreflect.Name]*Extension // protected by once
}

// Len returns the number of extension descriptors.
func (p *Extensions) Len() int {
	return len(p.List)
}

// Get returns the i-th extension descriptor.
func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
	return &p.List[i]
}

// ByName returns the extension whose short name is s, or nil when absent.
func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}

// Format implements fmt.Formatter via the shared descriptor list formatter.
func (p *Extensions) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}

// ProtoInternal marks Extensions as internal to the protobuf module.
func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; first occurrence wins on
// duplicate names.
func (p *Extensions) lazyInit() *Extensions {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
// Services is an ordered list of service descriptors with a lazily built
// name index.
type Services struct {
	List   []Service
	once   sync.Once
	byName map[protoreflect.Name]*Service // protected by once
}

// Len returns the number of service descriptors.
func (p *Services) Len() int {
	return len(p.List)
}

// Get returns the i-th service descriptor.
func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
	return &p.List[i]
}

// ByName returns the service whose short name is s, or nil when absent.
func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}

// Format implements fmt.Formatter via the shared descriptor list formatter.
func (p *Services) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}

// ProtoInternal marks Services as internal to the protobuf module.
func (p *Services) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; first occurrence wins on
// duplicate names.
func (p *Services) lazyInit() *Services {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Service, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
// Methods is an ordered list of method descriptors with a lazily built
// name index.
type Methods struct {
	List   []Method
	once   sync.Once
	byName map[protoreflect.Name]*Method // protected by once
}

// Len returns the number of method descriptors.
func (p *Methods) Len() int {
	return len(p.List)
}

// Get returns the i-th method descriptor.
func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
	return &p.List[i]
}

// ByName returns the method whose short name is s, or nil when absent.
func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
	if d := p.lazyInit().byName[s]; d != nil {
		return d
	}
	return nil
}

// Format implements fmt.Formatter via the shared descriptor list formatter.
func (p *Methods) Format(s fmt.State, r rune) {
	descfmt.FormatList(s, r, p)
}

// ProtoInternal marks Methods as internal to the protobuf module.
func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}

// lazyInit builds the byName index exactly once; first occurrence wins on
// duplicate names.
func (p *Methods) lazyInit() *Methods {
	p.once.Do(func() {
		if len(p.List) > 0 {
			p.byName = make(map[protoreflect.Name]*Method, len(p.List))
			for i := range p.List {
				d := &p.List[i]
				if _, ok := p.byName[d.Name()]; !ok {
					p.byName[d.Name()] = d
				}
			}
		}
	})
	return p
}
| {
"pile_set_name": "Github"
} |
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!! This file is auto-generated by Reflang. !!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#ifndef REFLANG_METADATA_RETURN_NOARGS_GEN_HPP
#define REFLANG_METADATA_RETURN_NOARGS_GEN_HPP
#include <string>
#include "lib/reflang.hpp"
#include "return-noargs.src.hpp"
namespace reflang
{
// Generated reflection metadata: one Function<> specialization per reflected
// free function, each implementing the IFunction introspection interface.
// NOTE(review): the overrides below are declared without an access specifier
// and are therefore private; they appear to be intended for invocation only
// through an IFunction base pointer -- confirm against the Reflang runtime.

// Reflection wrapper for ns::NamespacedFunction: const char* ns::NamespacedFunction().
template <>
class Function<const char *(*)(), ns::NamespacedFunction> : public IFunction
{
	int GetParameterCount() const override;
	Parameter GetReturnType() const override;
	Parameter GetParameter(int i) const override;
	const std::string& GetName() const override;
	Object Invoke(const std::vector<Object>& args) override;
};

// Reflection wrapper for ::GlobalFunction: const char* GlobalFunction().
template <>
class Function<const char *(*)(), GlobalFunction> : public IFunction
{
	int GetParameterCount() const override;
	Parameter GetReturnType() const override;
	Parameter GetParameter(int i) const override;
	const std::string& GetName() const override;
	Object Invoke(const std::vector<Object>& args) override;
};

// Reflection wrapper for ReturnByValue: DummyClass ReturnByValue().
template <>
class Function<DummyClass(*)(), ReturnByValue> : public IFunction
{
	int GetParameterCount() const override;
	Parameter GetReturnType() const override;
	Parameter GetParameter(int i) const override;
	const std::string& GetName() const override;
	Object Invoke(const std::vector<Object>& args) override;
};

// Reflection wrapper for ReturnByReference: const DummyClass& ReturnByReference().
template <>
class Function<const DummyClass &(*)(), ReturnByReference> : public IFunction
{
	int GetParameterCount() const override;
	Parameter GetReturnType() const override;
	Parameter GetParameter(int i) const override;
	const std::string& GetName() const override;
	Object Invoke(const std::vector<Object>& args) override;
};
}  // namespace reflang
#endif //REFLANG_METADATA_RETURN_NOARGS_GEN_HPP
| {
"pile_set_name": "Github"
} |
/* @flow */
import he from 'he'
import { parseHTML } from './html-parser'
import { parseText } from './text-parser'
import { parseFilters } from './filter-parser'
import { genAssignmentCode } from '../directives/model'
import { extend, cached, no, camelize } from 'shared/util'
import { isIE, isEdge, isServerRendering } from 'core/util/env'
import {
addProp,
addAttr,
baseWarn,
addHandler,
addDirective,
getBindingAttr,
getAndRemoveAttr,
pluckModuleFunction
} from '../helpers'
// Matches the v-on event-binding syntax: `@event` or `v-on:event`.
export const onRE = /^@|^v-on:/
// Matches any directive attribute: `v-*`, `@event`, or `:prop`.
export const dirRE = /^v-|^@|^:/
// Splits a v-for expression into alias part and source expression
// (`item in items` / `item of items`).
export const forAliasRE = /([^]*?)\s+(?:in|of)\s+([^]*)/
// Captures the optional iterator names in `(value, key, index)` aliases.
export const forIteratorRE = /,([^,\}\]]*)(?:,([^,\}\]]*))?$/
// Strips surrounding parentheses from a v-for alias list.
const stripParensRE = /^\(|\)$/g
// Captures a directive argument, e.g. `foo` in `v-my-dir:foo`.
const argRE = /:(.*)$/
// Matches the v-bind prop-binding syntax: `:prop` or `v-bind:prop`.
export const bindRE = /^:|^v-bind:/
// Matches directive modifiers such as `.stop` or `.prevent`.
const modifierRE = /\.[^.]+/g

// Memoized HTML entity decoder (decoding is comparatively expensive).
const decodeHTMLCached = cached(he.decode)

// configurable state -- (re)initialized from options on every parse() call
export let warn: any
let delimiters
let transforms
let preTransforms
let postTransforms
let platformIsPreTag
let platformMustUseProp
let platformGetTagNamespace

type Attr = { name: string; value: string };
export function createASTElement (
tag: string,
attrs: Array<Attr>,
parent: ASTElement | void
): ASTElement {
return {
type: 1,
tag,
attrsList: attrs,
attrsMap: makeAttrsMap(attrs),
parent,
children: []
}
}
/**
 * Convert an HTML template string to an AST.
 *
 * Drives the streaming HTML parser (`parseHTML`) with start/end/chars/comment
 * hooks that build ASTElement nodes, maintain the open-element stack, enforce
 * single-root constraints, and apply the platform's pre-/post-transform
 * modules. Returns the root ASTElement, or undefined for an empty template.
 */
export function parse (
  template: string,
  options: CompilerOptions
): ASTElement | void {
  // Capture platform hooks into module-level state used by the helpers below.
  warn = options.warn || baseWarn

  platformIsPreTag = options.isPreTag || no
  platformMustUseProp = options.mustUseProp || no
  platformGetTagNamespace = options.getTagNamespace || no

  transforms = pluckModuleFunction(options.modules, 'transformNode')
  preTransforms = pluckModuleFunction(options.modules, 'preTransformNode')
  postTransforms = pluckModuleFunction(options.modules, 'postTransformNode')

  delimiters = options.delimiters

  const stack = []                 // elements opened but not yet closed
  const preserveWhitespace = options.preserveWhitespace !== false
  let root                         // AST root to be returned
  let currentParent                // element currently receiving children
  let inVPre = false               // inside a v-pre subtree
  let inPre = false                // inside a <pre> element
  let warned = false

  // Emit at most one warning per parse() invocation.
  function warnOnce (msg) {
    if (!warned) {
      warned = true
      warn(msg)
    }
  }

  // Common teardown when an element is closed (explicitly or because it is
  // unary): reset pre flags and run post-transform modules.
  function closeElement (element) {
    // check pre state
    if (element.pre) {
      inVPre = false
    }
    if (platformIsPreTag(element.tag)) {
      inPre = false
    }
    // apply post-transforms
    for (let i = 0; i < postTransforms.length; i++) {
      postTransforms[i](element, options)
    }
  }

  parseHTML(template, {
    warn,
    expectHTML: options.expectHTML,
    isUnaryTag: options.isUnaryTag,
    canBeLeftOpenTag: options.canBeLeftOpenTag,
    shouldDecodeNewlines: options.shouldDecodeNewlines,
    shouldDecodeNewlinesForHref: options.shouldDecodeNewlinesForHref,
    shouldKeepComment: options.comments,
    // Called for every opening tag.
    start (tag, attrs, unary) {
      // check namespace.
      // inherit parent ns if there is one
      const ns = (currentParent && currentParent.ns) || platformGetTagNamespace(tag)

      // handle IE svg bug
      /* istanbul ignore if */
      if (isIE && ns === 'svg') {
        attrs = guardIESVGBug(attrs)
      }

      let element: ASTElement = createASTElement(tag, attrs, currentParent)
      if (ns) {
        element.ns = ns
      }

      if (isForbiddenTag(element) && !isServerRendering()) {
        element.forbidden = true
        process.env.NODE_ENV !== 'production' && warn(
          'Templates should only be responsible for mapping the state to the ' +
          'UI. Avoid placing tags with side-effects in your templates, such as ' +
          `<${tag}>` + ', as they will not be parsed.'
        )
      }

      // apply pre-transforms
      for (let i = 0; i < preTransforms.length; i++) {
        element = preTransforms[i](element, options) || element
      }

      if (!inVPre) {
        processPre(element)
        if (element.pre) {
          inVPre = true
        }
      }
      if (platformIsPreTag(element.tag)) {
        inPre = true
      }
      if (inVPre) {
        // inside v-pre: attributes are copied verbatim, no directive parsing
        processRawAttrs(element)
      } else if (!element.processed) {
        // structural directives
        processFor(element)
        processIf(element)
        processOnce(element)
        // element-scope stuff
        processElement(element, options)
      }

      // The root must be a single, non-repeating element.
      function checkRootConstraints (el) {
        if (process.env.NODE_ENV !== 'production') {
          if (el.tag === 'slot' || el.tag === 'template') {
            warnOnce(
              `Cannot use <${el.tag}> as component root element because it may ` +
              'contain multiple nodes.'
            )
          }
          if (el.attrsMap.hasOwnProperty('v-for')) {
            warnOnce(
              'Cannot use v-for on stateful component root element because ' +
              'it renders multiple elements.'
            )
          }
        }
      }

      // tree management
      if (!root) {
        root = element
        checkRootConstraints(root)
      } else if (!stack.length) {
        // allow root elements with v-if, v-else-if and v-else
        if (root.if && (element.elseif || element.else)) {
          checkRootConstraints(element)
          addIfCondition(root, {
            exp: element.elseif,
            block: element
          })
        } else if (process.env.NODE_ENV !== 'production') {
          warnOnce(
            `Component template should contain exactly one root element. ` +
            `If you are using v-if on multiple elements, ` +
            `use v-else-if to chain them instead.`
          )
        }
      }
      if (currentParent && !element.forbidden) {
        if (element.elseif || element.else) {
          // v-else(-if) blocks attach to the preceding v-if, not the tree
          processIfConditions(element, currentParent)
        } else if (element.slotScope) { // scoped slot
          currentParent.plain = false
          const name = element.slotTarget || '"default"'
          ;(currentParent.scopedSlots || (currentParent.scopedSlots = {}))[name] = element
        } else {
          currentParent.children.push(element)
          element.parent = currentParent
        }
      }
      if (!unary) {
        currentParent = element
        stack.push(element)
      } else {
        closeElement(element)
      }
    },
    // Called for every closing tag.
    end () {
      // remove trailing whitespace
      const element = stack[stack.length - 1]
      const lastNode = element.children[element.children.length - 1]
      if (lastNode && lastNode.type === 3 && lastNode.text === ' ' && !inPre) {
        element.children.pop()
      }
      // pop stack
      stack.length -= 1
      currentParent = stack[stack.length - 1]
      closeElement(element)
    },
    // Called for every text chunk.
    chars (text: string) {
      if (!currentParent) {
        if (process.env.NODE_ENV !== 'production') {
          if (text === template) {
            warnOnce(
              'Component template requires a root element, rather than just text.'
            )
          } else if ((text = text.trim())) {
            warnOnce(
              `text "${text}" outside root element will be ignored.`
            )
          }
        }
        return
      }
      // IE textarea placeholder bug
      /* istanbul ignore if */
      if (isIE &&
        currentParent.tag === 'textarea' &&
        currentParent.attrsMap.placeholder === text
      ) {
        return
      }
      const children = currentParent.children
      text = inPre || text.trim()
        ? isTextTag(currentParent) ? text : decodeHTMLCached(text)
        // only preserve whitespace if its not right after a starting tag
        : preserveWhitespace && children.length ? ' ' : ''
      if (text) {
        let res
        if (!inVPre && text !== ' ' && (res = parseText(text, delimiters))) {
          // interpolation -> expression node (type 2)
          children.push({
            type: 2,
            expression: res.expression,
            tokens: res.tokens,
            text
          })
        } else if (text !== ' ' || !children.length || children[children.length - 1].text !== ' ') {
          // plain text node (type 3); collapse consecutive whitespace nodes
          children.push({
            type: 3,
            text
          })
        }
      }
    },
    // Called for every HTML comment (when shouldKeepComment is on).
    comment (text: string) {
      currentParent.children.push({
        type: 3,
        text,
        isComment: true
      })
    }
  })
  return root
}
// Flag the element when a `v-pre` attribute is present; its subtree is then
// compiled verbatim (see processRawAttrs).
function processPre (el) {
  if (getAndRemoveAttr(el, 'v-pre') != null) {
    el.pre = true
  }
}
/**
 * Inside a v-pre subtree attributes are emitted verbatim: copy every
 * attribute into el.attrs with a JSON-quoted literal value. An
 * attribute-less node that is not itself the v-pre root is flagged plain.
 */
function processRawAttrs (el) {
  if (el.attrsList.length) {
    el.attrs = el.attrsList.map(function (attr) {
      return {
        name: attr.name,
        value: JSON.stringify(attr.value)
      }
    })
  } else if (!el.pre) {
    // non root node in pre blocks with no attributes
    el.plain = true
  }
}
// Run all element-scope processing: key, ref, slot, dynamic component,
// module transforms, and finally the remaining attributes/directives.
export function processElement (element: ASTElement, options: CompilerOptions) {
  processKey(element)

  // determine whether this is a plain element after
  // removing structural attributes
  element.plain = !element.key && !element.attrsList.length

  processRef(element)
  processSlot(element)
  processComponent(element)
  // platform transform modules (e.g. class/style) may replace the element
  for (let i = 0; i < transforms.length; i++) {
    element = transforms[i](element, options) || element
  }
  processAttrs(element)
}
// Extract a bound `key` attribute into el.key; <template> cannot be keyed
// because it does not render a real element.
function processKey (el) {
  const exp = getBindingAttr(el, 'key')
  if (exp) {
    if (process.env.NODE_ENV !== 'production' && el.tag === 'template') {
      warn(`<template> cannot be keyed. Place the key on real elements instead.`)
    }
    el.key = exp
  }
}
// Extract a `ref` attribute; refInFor records whether the ref sits inside a
// v-for (in which case it resolves to an array at runtime).
function processRef (el) {
  const ref = getBindingAttr(el, 'ref')
  if (ref) {
    el.ref = ref
    el.refInFor = checkInFor(el)
  }
}
// Parse a `v-for` attribute and merge the parse result (for/alias/iterators)
// onto the element; warns on malformed expressions in dev builds.
export function processFor (el: ASTElement) {
  let exp
  if ((exp = getAndRemoveAttr(el, 'v-for'))) {
    const res = parseFor(exp)
    if (res) {
      extend(el, res)
    } else if (process.env.NODE_ENV !== 'production') {
      warn(
        `Invalid v-for expression: ${exp}`
      )
    }
  }
}
type ForParseResult = {
for: string;
alias: string;
iterator1?: string;
iterator2?: string;
};
export function parseFor (exp: string): ?ForParseResult {
const inMatch = exp.match(forAliasRE)
if (!inMatch) return
const res = {}
res.for = inMatch[2].trim()
const alias = inMatch[1].trim().replace(stripParensRE, '')
const iteratorMatch = alias.match(forIteratorRE)
if (iteratorMatch) {
res.alias = alias.replace(forIteratorRE, '')
res.iterator1 = iteratorMatch[1].trim()
if (iteratorMatch[2]) {
res.iterator2 = iteratorMatch[2].trim()
}
} else {
res.alias = alias
}
return res
}
// Extract v-if / v-else / v-else-if. A v-if element seeds its own condition
// chain; v-else(-if) elements are attached to the chain later by
// processIfConditions.
function processIf (el) {
  const exp = getAndRemoveAttr(el, 'v-if')
  if (exp) {
    el.if = exp
    addIfCondition(el, {
      exp: exp,
      block: el
    })
  } else {
    if (getAndRemoveAttr(el, 'v-else') != null) {
      el.else = true
    }
    const elseif = getAndRemoveAttr(el, 'v-else-if')
    if (elseif) {
      el.elseif = elseif
    }
  }
}
// Attach a v-else(-if) element to the condition chain of the nearest
// preceding v-if sibling; warns when no such sibling exists.
function processIfConditions (el, parent) {
  const prev = findPrevElement(parent.children)
  if (prev && prev.if) {
    addIfCondition(prev, {
      exp: el.elseif,
      block: el
    })
  } else if (process.env.NODE_ENV !== 'production') {
    warn(
      `v-${el.elseif ? ('else-if="' + el.elseif + '"') : 'else'} ` +
      `used on element <${el.tag}> without corresponding v-if.`
    )
  }
}
// Find the last element (type 1) child, dropping any trailing text nodes in
// the process -- text between v-if and v-else(-if) branches is ignored.
function findPrevElement (children: Array<any>): ASTElement | void {
  let i = children.length
  while (i--) {
    if (children[i].type === 1) {
      return children[i]
    } else {
      if (process.env.NODE_ENV !== 'production' && children[i].text !== ' ') {
        warn(
          `text "${children[i].text.trim()}" between v-if and v-else(-if) ` +
          `will be ignored.`
        )
      }
      // discard the intervening text node
      children.pop()
    }
  }
}
export function addIfCondition (el: ASTElement, condition: ASTIfCondition) {
if (!el.ifConditions) {
el.ifConditions = []
}
el.ifConditions.push(condition)
}
// Flag the element when a `v-once` attribute is present (render once, then
// treat as static content).
function processOnce (el) {
  const once = getAndRemoveAttr(el, 'v-once')
  if (once != null) {
    el.once = true
  }
}
/**
 * Process slot-related attributes:
 *  - on <slot> outlets: record the slot name (and reject `key`);
 *  - on other elements: pick up the deprecated `scope` / the `slot-scope`
 *    attribute (scoped-slot definitions) and the `slot` target attribute.
 */
function processSlot (el) {
  if (el.tag === 'slot') {
    el.slotName = getBindingAttr(el, 'name')
    if (process.env.NODE_ENV !== 'production' && el.key) {
      warn(
        `\`key\` does not work on <slot> because slots are abstract outlets ` +
        `and can possibly expand into multiple elements. ` +
        `Use the key on a wrapping element instead.`
      )
    }
  } else {
    let slotScope
    if (el.tag === 'template') {
      // legacy (pre-2.5) `scope` attribute on <template>
      slotScope = getAndRemoveAttr(el, 'scope')
      /* istanbul ignore if */
      if (process.env.NODE_ENV !== 'production' && slotScope) {
        warn(
          `the "scope" attribute for scoped slots have been deprecated and ` +
          `replaced by "slot-scope" since 2.5. The new "slot-scope" attribute ` +
          `can also be used on plain elements in addition to <template> to ` +
          `denote scoped slots.`,
          true
        )
      }
      el.slotScope = slotScope || getAndRemoveAttr(el, 'slot-scope')
    } else if ((slotScope = getAndRemoveAttr(el, 'slot-scope'))) {
      /* istanbul ignore if */
      if (process.env.NODE_ENV !== 'production' && el.attrsMap['v-for']) {
        warn(
          `Ambiguous combined usage of slot-scope and v-for on <${el.tag}> ` +
          `(v-for takes higher priority). Use a wrapper <template> for the ` +
          `scoped slot to make it clearer.`,
          true
        )
      }
      el.slotScope = slotScope
    }
    const slotTarget = getBindingAttr(el, 'slot')
    if (slotTarget) {
      el.slotTarget = slotTarget === '""' ? '"default"' : slotTarget
      // preserve slot as an attribute for native shadow DOM compat
      // only for non-scoped slots.
      if (el.tag !== 'template' && !el.slotScope) {
        addAttr(el, 'slot', slotTarget)
      }
    }
  }
}
// Handle dynamic components (`is` binding) and the `inline-template` flag.
function processComponent (el) {
  let binding
  if ((binding = getBindingAttr(el, 'is'))) {
    el.component = binding
  }
  if (getAndRemoveAttr(el, 'inline-template') != null) {
    el.inlineTemplate = true
  }
}
/**
 * Process all remaining attributes: v-bind (with .prop/.camel/.sync
 * modifiers), v-on handlers, generic directives, and literal attributes.
 * Order matters -- bindRE/onRE must be tested before the generic dirRE
 * fallback strips the directive prefix.
 */
function processAttrs (el) {
  const list = el.attrsList
  let i, l, name, rawName, value, modifiers, isProp
  for (i = 0, l = list.length; i < l; i++) {
    name = rawName = list[i].name
    value = list[i].value
    if (dirRE.test(name)) {
      // mark element as dynamic
      el.hasBindings = true
      // modifiers
      modifiers = parseModifiers(name)
      if (modifiers) {
        name = name.replace(modifierRE, '')
      }
      if (bindRE.test(name)) { // v-bind
        name = name.replace(bindRE, '')
        value = parseFilters(value)
        isProp = false
        if (modifiers) {
          if (modifiers.prop) {
            // .prop: bind as a DOM property rather than an attribute
            isProp = true
            name = camelize(name)
            if (name === 'innerHtml') name = 'innerHTML'
          }
          if (modifiers.camel) {
            name = camelize(name)
          }
          if (modifiers.sync) {
            // .sync: expands to an update:<prop> handler
            addHandler(
              el,
              `update:${camelize(name)}`,
              genAssignmentCode(value, `$event`)
            )
          }
        }
        if (isProp || (
          !el.component && platformMustUseProp(el.tag, el.attrsMap.type, name)
        )) {
          addProp(el, name, value)
        } else {
          addAttr(el, name, value)
        }
      } else if (onRE.test(name)) { // v-on
        name = name.replace(onRE, '')
        addHandler(el, name, value, modifiers, false, warn)
      } else { // normal directives
        name = name.replace(dirRE, '')
        // parse arg
        const argMatch = name.match(argRE)
        const arg = argMatch && argMatch[1]
        if (arg) {
          name = name.slice(0, -(arg.length + 1))
        }
        addDirective(el, name, rawName, value, arg, modifiers)
        if (process.env.NODE_ENV !== 'production' && name === 'model') {
          checkForAliasModel(el, value)
        }
      }
    } else {
      // literal attribute
      if (process.env.NODE_ENV !== 'production') {
        const res = parseText(value, delimiters)
        if (res) {
          warn(
            `${name}="${value}": ` +
            'Interpolation inside attributes has been removed. ' +
            'Use v-bind or the colon shorthand instead. For example, ' +
            'instead of <div id="{{ val }}">, use <div :id="val">.'
          )
        }
      }
      addAttr(el, name, JSON.stringify(value))
      // #6887 firefox doesn't update muted state if set via attribute
      // even immediately after element creation
      if (!el.component &&
          name === 'muted' &&
          platformMustUseProp(el.tag, el.attrsMap.type, name)) {
        addProp(el, name, 'true')
      }
    }
  }
}
function checkInFor (el: ASTElement): boolean {
let parent = el
while (parent) {
if (parent.for !== undefined) {
return true
}
parent = parent.parent
}
return false
}
function parseModifiers (name: string): Object | void {
const match = name.match(modifierRE)
if (match) {
const ret = {}
match.forEach(m => { ret[m.slice(1)] = true })
return ret
}
}
// Build a name->value map from the attribute list; warns on duplicate
// attribute names in dev (except on IE/Edge, which report duplicates
// themselves). Later duplicates overwrite earlier values.
function makeAttrsMap (attrs: Array<Object>): Object {
  const map = {}
  for (let i = 0, l = attrs.length; i < l; i++) {
    if (
      process.env.NODE_ENV !== 'production' &&
      map[attrs[i].name] && !isIE && !isEdge
    ) {
      warn('duplicate attribute: ' + attrs[i].name)
    }
    map[attrs[i].name] = attrs[i].value
  }
  return map
}
// for script (e.g. type="x/template") or style, do not decode content
function isTextTag (el): boolean {
return el.tag === 'script' || el.tag === 'style'
}
function isForbiddenTag (el): boolean {
return (
el.tag === 'style' ||
(el.tag === 'script' && (
!el.attrsMap.type ||
el.attrsMap.type === 'text/javascript'
))
)
}
// Bogus namespace declarations IE injects into inline SVG...
const ieNSBug = /^xmlns:NS\d+/
// ...and the matching prefix it prepends to the remaining attributes.
const ieNSPrefix = /^NS\d+:/

/* istanbul ignore next */
/**
 * Work around an IE inline-SVG bug: drop the injected `xmlns:NS<n>`
 * attributes and strip the `NS<n>:` prefix from the rest.
 */
function guardIESVGBug (attrs) {
  const sanitized = []
  for (let i = 0; i < attrs.length; i++) {
    const attr = attrs[i]
    if (ieNSBug.test(attr.name)) {
      continue
    }
    attr.name = attr.name.replace(ieNSPrefix, '')
    sanitized.push(attr)
  }
  return sanitized
}
/**
 * Walk up the ancestor chain and warn when v-model is bound directly to a
 * v-for iteration alias -- assigning to the alias cannot mutate the source
 * array.
 */
function checkForAliasModel (el, value) {
  for (let node = el; node; node = node.parent) {
    if (node.for && node.alias === value) {
      warn(
        `<${el.tag} v-model="${value}">: ` +
        `You are binding v-model directly to a v-for iteration alias. ` +
        `This will not be able to modify the v-for source array because ` +
        `writing to the alias is like modifying a function local variable. ` +
        `Consider using an array of objects and use v-model on an object property instead.`
      )
    }
  }
}
| {
"pile_set_name": "Github"
} |
# Tests for tr, but the test file is deliberately not utf8 -- the literal
# bytes in the tr/// below are part of the regression input.
BEGIN {
    chdir 't' if -d 't';
    require './test.pl';
    set_up_inc('../lib');
}

plan tests => 1;

{ # This test is malloc sensitive. Right now on some platforms anyway, space
  # for the final \xff needs to be mallocd, and that's what caused the
  # problem, because the '-' had already been parsed and was later added
  # without making space for it
    fresh_perl_is('print "\x8c" =~ y o\x{100}ÄŒÿÿ€€-ÿoo', "1", { },
                  'RT #134067 heap-buffer-overflow in S_scan_const');
}

1;
| {
"pile_set_name": "Github"
} |
ограничения по брой и продължителност, които да се определи ще има
Свърлик достигаща до села Дражевци, на десет версти зад Ниш.
антибактериалното средство, които потискат чувствителните бактерии. Тя
В запазения архив на П. Шафарик се откриват записани сведения отнасящи
бактерии. Тяхното терапевтично повлияване често е невъзможно с
посвещава параграф 12, също разглеждащ въпросът за географското му
екосистемите с участието на патогенни и непатогенни бактерии, което
11. Условие за участие. Формулират се в зависимост от взетите
или в града, от където са мнозинството участници. Избраната база се
в Прага биват отпечатани първо и второ издание на "Славянски
той не само усвоява до съвършенство сръбския език, събира и обработва
"заразяват" чувствителните бактериални клетки и да ги превръщат в
на което поп Рашко отговаря, че няма да се противопоставя на
вместо език е употребил думата "реч", и то при всички славянски
гентамицина са резистентни 30-35% от
Физическия институт при БАН, където работи до пенсионирането
чудак, такъв остава Рашко Зайков в спомените на близките си;
главна, ако не единствена, причина за резистентността към модерните
глас се оказва решаващ. Акад. Бонев казва: "На аерогарата в
563-602.
където географските названия - едни турцизирани, други турски, трети
протеини (ПСП).
е хромозомна, а когато принадлежат на
професор - едно закъсняло, но заслужено признание.
на място се отчита възможността да се осигури подходяща база по вид,
със
563-602.
Анализът на механизмите на резистентност през последното десетилетие
инфекциозни болести, което мотивира тяхното изключително широко
подражават и импровизират или да се доверяват на недобросъвестни или
5. Цветообрание на старославянската книжнина в България. Събрано и на
своят крупен научен труд "История на славянския език и литература по
старославянската книжнина. На този въпрос той е посветил цял един
на баща си и хуква към някаква пропаст. Едва успяват да го
решения в т. 1., 3., 4., 5.
увеличаване относителния дял на резистентните към него бактериални
молекулите на аминогликозидите ги инактивират, и т.н. (2) Използване
време на Освободителната война с руските войски. Като
Бесарабия, 50,000 в Унгария по точно в Банат, така че около 3,500,000
резервира предварително.
език през 1849 г. тази забележителна студия бива преведена на
В посочените граници на българската реч, П. Шафарик се е опитал да
Генетични и биохимични механизми на резистентността. Инфекциозна
563-602.
Следване и работа в Германия (1922-1928)
съставна част на клетъчните им стени. Процесът са катализира от
"признаци", а писмен език е означил като "наречи", в руския превод
Разпространение на резистентните бактерии. Всяко антимикробно вещество
антибактериалното средство. Така, при откриването на пеницина 8% от
отделни страни, региони, болници и пр., което се обяснява преди всички
на място се отчита възможността да се осигури подходяща база по вид,
с различията в степента и начините на използване на антимикробните
да ни чакат да умрем, та да могат да постъпят на местата
във втория етап се поставят допълнителни изисквания (преработка на
Разцвет
се касае за ограничаване достъпа на антимикробното вещество до мястото
Зайков, един много способен български физик, с който си
Много важно е още в първия стадий на организирането на срещата да се
Интересен и неизвестен факт от биографията на Рашко Зайков е
неопитни фирми.
привлечени дарители.
като физик-теоретик за него не се намира, вероятно поради
Западните Родопи, Доспат и Неврокопска, както и мохамеданите от
Между чудачествата е и невероятната работоспособност. Можел
една жива легенда.
След напускането на гр. Иена, за кратко време Шаферик се отбива в
1843.
руския превод "обем" (9, с.31). По отношение на езиковите
култура. Българко историческо дружество. Секция "Етнография". С.,1978,
спрат. "Попречихте ми да изчисля за колко секунди ще стигна
със
пептидогликана, полимер с жизнено важно значение за бактериите като
на което поп Рашко отговаря, че няма да се противопоставя на
главна, ако не единствена, причина за резистентността към модерните
с когото съвместно обсъждат теоретични проблеми. Но работа
финансови средства понякога се издават само кратки или
(Павел Йозеф Шафарик, Р.С.), който доказа на света какъв е бил
Пенка ЛАЗАРОВА
своя син - единствен от класа. Но на свободолюбивия Рашко не
страни, където контролирано се използват антимикробните лекарства,
ни".
съставна част на клетъчните им стени. Процесът са катализира от
За научните приноси на проф. Рашко Зайков можете да
славянски езици са естествена предпоставка още през 1826 г. да напише
Павел Йозеф Шафарик, починал на 26 май 1861 г. в Прага. В некролога за
Македония и България, П. Шафарик започнал да подготвя още в Нови Сад
направи реалистична план-сметка като се преценява какви такси ще са
дискусии, постъри, "кръгли маси", демонстрации и т.н.) и съответните
механизмите на тяхната устойчивост. Автор е на много публикации по
професор в сръбската православна гимназия от 1819 до 1833 г. Престоят
| {
"pile_set_name": "Github"
} |
/******************************************************************************
* $Id: rasdamandataset.cpp 33717 2016-03-14 06:29:14Z goatbar $
* Project: rasdaman Driver
* Purpose: Implement Rasdaman GDAL driver
* Author: Constantin Jucovschi, [email protected]
*
******************************************************************************
* Copyright (c) 2010, Constantin Jucovschi
* Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
******************************************************************************/
#include "rasdamandataset.h"
#include "cpl_string.h"
#include "gdal_pam.h"
#include "gdal_frmts.h"
#include "regex.h"
#include <string>
#include <memory>
#include <map>
#include "gdal.h"
void CPL_DLL CPL_STDCALL GDALRegister_RASDAMAN();
CPL_CVSID("$Id: rasdamandataset.cpp 33717 2016-03-14 06:29:14Z goatbar $");
/**
 * Axis-aligned 2D integer window of a raster, used as the key of the
 * tile cache (ArrayCache, a std::map).
 */
class Subset
{
public:
  Subset(int x_loIn, int x_hiIn, int y_loIn, int y_hiIn)
    : m_x_lo(x_loIn), m_x_hi(x_hiIn), m_y_lo(y_loIn), m_y_hi(y_hiIn)
  {}

  // Strict weak ordering so Subset is a valid std::map key.
  //
  // BUGFIX: the previous implementation returned true whenever *any*
  // coordinate compared smaller, so both (a < b) and (b < a) could hold for
  // the same pair -- that violates the strict-weak-ordering requirement of
  // ordered containers and is undefined behavior. Compare lexicographically
  // instead.
  bool operator < (const Subset& rhs) const {
    if (m_x_lo != rhs.m_x_lo) return m_x_lo < rhs.m_x_lo;
    if (m_x_hi != rhs.m_x_hi) return m_x_hi < rhs.m_x_hi;
    if (m_y_lo != rhs.m_y_lo) return m_y_lo < rhs.m_y_lo;
    return m_y_hi < rhs.m_y_hi;
  }

  // True when this window fully covers `other` (inclusive bounds).
  bool contains(const Subset& other) const {
    return m_x_lo <= other.m_x_lo && m_x_hi >= other.m_x_hi
        && m_y_lo <= other.m_y_lo && m_y_hi >= other.m_y_hi;
  }

  // True when this window lies fully inside `other`.
  bool within(const Subset& other) const {
    return other.contains(*this);
  }

  void operator = (const Subset& rhs) {
    m_x_lo = rhs.m_x_lo;
    m_x_hi = rhs.m_x_hi;
    m_y_lo = rhs.m_y_lo;
    m_y_hi = rhs.m_y_hi;
  }

  int x_lo() const { return m_x_lo; }
  int x_hi() const { return m_x_hi; }
  int y_lo() const { return m_y_lo; }
  int y_hi() const { return m_y_hi; }

private:
  int m_x_lo;  // left bound
  int m_x_hi;  // right bound
  int m_y_lo;  // lower bound
  int m_y_hi;  // upper bound
};
/************************************************************************/
/* ==================================================================== */
/* RasdamanDataset */
/* ==================================================================== */
/************************************************************************/
// Cache of previously fetched tiles, keyed by the requested subset window.
typedef std::map<Subset, r_Ref<r_GMarray> > ArrayCache;

class RasdamanRasterBand;

// Expands the user-supplied rasql query template, substituting the textual
// subset bounds for its placeholders.
static CPLString getQuery(const char *templateString, const char* x_lo, const char* x_hi, const char* y_lo, const char* y_hi);
/**
 * GDAL dataset backed by a rasdaman array database. Raster data is fetched
 * through rasql queries and cached per requested subset for the duration of
 * one IRasterIO call.
 */
class RasdamanDataset : public GDALPamDataset
{
  friend class RasdamanRasterBand;

public:
  RasdamanDataset(const char*, int, const char*, const char*, const char*);
  ~RasdamanDataset();

  static GDALDataset *Open( GDALOpenInfo * );

protected:
  virtual CPLErr IRasterIO( GDALRWFlag, int, int, int, int,
                            void *, int, int, GDALDataType,
                            int, int *,
                            GSpacing nPixelSpace, GSpacing nLineSpace,
                            GSpacing nBandSpace,
                            GDALRasterIOExtraArg* psExtraArg);

private:
  // Tiles fetched within the current read; cleared after each IRasterIO.
  ArrayCache m_array_cache;

  // Fetch (or reuse from cache) the array covering the given window; the
  // offsets report where the window starts inside the returned array.
  r_Ref<r_GMarray>& request_array(int x_lo, int x_hi, int y_lo, int y_hi, int& offsetX, int& offsetY);
  r_Ref<r_GMarray>& request_array(const Subset&, int& offsetX, int& offsetY);
  void clear_array_cache();

  // Run a rasql query and return its result set.
  r_Set<r_Ref_Any> execute(const char* string);

  void getTypes(const r_Base_Type* baseType, int &counter, int pos);
  void createBands(const char* queryString);

  r_Database database;        // open rasdaman connection
  r_Transaction transaction;  // read-only transaction wrapping each read

  CPLString queryParam;       // rasql query template with subset placeholders
  CPLString host;
  int port;
  CPLString username;
  CPLString userpassword;
  CPLString databasename;

  int xPos;                   // index of the x axis in the array's domain
  int yPos;                   // index of the y axis in the array's domain
  int tileXSize;
  int tileYSize;
};
/************************************************************************/
/* RasdamanDataset() */
/************************************************************************/
// Constructor: stores the connection parameters and immediately opens the
// rasdaman database connection.
// NOTE(review): r_Database::open can throw (r_Error); callers must be
// prepared for a throwing constructor -- confirm against the Open() path.
RasdamanDataset::RasdamanDataset(const char* _host, int _port, const char* _username,
                                 const char* _userpassword, const char* _databasename)
  : host(_host), port(_port), username(_username), userpassword(_userpassword),
    databasename(_databasename)
{
  database.set_servername(host, port);
  database.set_useridentification(username, userpassword);
  database.open(databasename);
}
/************************************************************************/
/* ~RasdamanDataset() */
/************************************************************************/
// Destructor: flush GDAL's cached blocks first, then wind down the
// rasdaman transaction and close the connection.
//
// BUGFIX: FlushCache() used to run *after* database.close(); any flush work
// would then happen against an already-closed connection. Flush while the
// dataset is still fully usable.
RasdamanDataset::~RasdamanDataset()
{
  FlushCache();
  if (transaction.get_status() == r_Transaction::active) {
    transaction.commit();
  }
  database.close();
}
// Read a raster window. Pre-fetches the whole requested region into the tile
// cache inside a single read-only transaction, delegates the per-band copy
// to the base implementation, then drops the cache. Write access is not
// supported.
CPLErr RasdamanDataset::IRasterIO( GDALRWFlag eRWFlag,
                                   int nXOff, int nYOff, int nXSize, int nYSize,
                                   void * pData, int nBufXSize, int nBufYSize,
                                   GDALDataType eBufType,
                                   int nBandCount, int *panBandMap,
                                   GSpacing nPixelSpace, GSpacing nLineSpace,
                                   GSpacing nBandSpace,
                                   GDALRasterIOExtraArg* psExtraArg)
{
  if (eRWFlag != GF_Read) {
    CPLError(CE_Failure, CPLE_NoWriteAccess, "Write support is not implemented.");
    return CE_Failure;
  }
  transaction.begin(r_Transaction::read_only);

  /* TODO: Setup database access/transaction */
  int dummyX, dummyY;
  /* Cache the whole image region */
  CPLDebug("rasdaman", "Pre-caching region (%d, %d, %d, %d).", nXOff, nXOff + nXSize, nYOff, nYOff + nYSize);
  request_array(nXOff, nXOff + nXSize, nYOff, nYOff + nYSize, dummyX, dummyY);

  // Base class copies band data out of the now-cached tiles.
  CPLErr ret = GDALDataset::IRasterIO(eRWFlag, nXOff, nYOff, nXSize, nYSize, pData,
                                      nBufXSize, nBufYSize, eBufType, nBandCount,
                                      panBandMap, nPixelSpace, nLineSpace, nBandSpace,
                                      psExtraArg);

  transaction.commit();

  /* Clear the cache */
  clear_array_cache();

  return ret;
}
// Convenience overload: wrap the raw window bounds into a Subset and
// delegate to the caching overload.
// (Also drops a stray semicolon after the function body, which is only an
// empty declaration in C++11 and a warning under -Wextra-semi.)
r_Ref<r_GMarray>& RasdamanDataset::request_array(int x_lo, int x_hi, int y_lo, int y_hi, int& offsetX, int& offsetY)
{
  return request_array(Subset(x_lo, x_hi, y_lo, y_hi), offsetX, offsetY);
}
/**
 * Fetch (and cache) the rasdaman array covering the given subset.
 *
 * offsetX/offsetY receive the position of the requested subset inside the
 * returned tile; they are non-zero when a larger cached tile contains the
 * request. The returned reference points into m_array_cache and stays valid
 * until clear_array_cache() is called.
 */
r_Ref<r_GMarray>& RasdamanDataset::request_array(const Subset& subset, int& offsetX, int& offsetY)
{
    // set the offsets to 0
    offsetX = 0; offsetY = 0;

    // check whether or not the subset was already requested
    ArrayCache::iterator it = m_array_cache.find(subset);
    if (it != m_array_cache.end()) {
        CPLDebug("rasdaman", "Fetching tile (%d, %d, %d, %d) from cache.",
                 subset.x_lo(), subset.x_hi(), subset.y_lo(), subset.y_hi());
        return it->second;
    }

    // check if any cached tile fully contains the requested one
    for (it = m_array_cache.begin(); it != m_array_cache.end(); ++it) {
        if (it->first.contains(subset)) {
            const Subset& existing = it->first;
            // TODO: check if offsets are correct
            offsetX = subset.x_lo() - existing.x_lo();
            offsetY = subset.y_lo() - existing.y_lo();
            CPLDebug("rasdaman", "Found matching tile (%d, %d, %d, %d) for requested tile (%d, %d, %d, %d). Offests are (%d, %d).",
                     existing.x_lo(), existing.x_hi(), existing.y_lo(), existing.y_hi(),
                     subset.x_lo(), subset.x_hi(), subset.y_lo(), subset.y_hi(),
                     offsetX, offsetY);
            return it->second;
        }
    }

    // A read-only transaction must be active while querying the server.
    if (transaction.get_status() != r_Transaction::active) {
        transaction.begin(r_Transaction::read_only);
    }

    CPLDebug("rasdaman", "Tile (%d, %d, %d, %d) not found in cache, requesting it.",
             subset.x_lo(), subset.x_hi(), subset.y_lo(), subset.y_hi());

    // 12 bytes: INT_MIN prints as "-2147483648" (11 chars) plus the
    // terminating NUL; the previous size of 11 truncated that value.
    char x_lo[12], x_hi[12], y_lo[12], y_hi[12];
    snprintf(x_lo, sizeof(x_lo), "%d", subset.x_lo());
    snprintf(x_hi, sizeof(x_hi), "%d", subset.x_hi());
    snprintf(y_lo, sizeof(y_lo), "%d", subset.y_lo());
    snprintf(y_hi, sizeof(y_hi), "%d", subset.y_hi());

    CPLString queryString = getQuery(queryParam, x_lo, x_hi, y_lo, y_hi);
    r_Set<r_Ref_Any> result_set = execute(queryString);

    // The query must yield exactly one MDD array. These error branches were
    // inverted before (== instead of !=) and are still unimplemented.
    if (result_set.get_element_type_schema()->type_id() != r_Type::MARRAYTYPE) {
        // TODO: throw exception -- result is not an MDD array
    }
    if (result_set.cardinality() != 1) {
        // TODO: throw exception -- expected exactly one result array
    }

    r_Ref<r_GMarray> result_array = r_Ref<r_GMarray>(*result_set.create_iterator());
    std::pair<ArrayCache::iterator, bool> inserted =
        m_array_cache.insert(ArrayCache::value_type(subset, result_array));
    return inserted.first->second;
}
/* Drop all cached tiles; references returned by request_array() become
   dangling after this call. */
void RasdamanDataset::clear_array_cache() {
m_array_cache.clear();
};
/************************************************************************/
/* ==================================================================== */
/* RasdamanRasterBand */
/* ==================================================================== */
/************************************************************************/
/* One band of a rasdaman dataset. A band maps onto one primitive component
   of a (possibly structured) rasdaman cell type. */
class RasdamanRasterBand : public GDALPamRasterBand
{
friend class RasdamanDataset;
// Size in bytes of one full block: nBlockXSize * nBlockYSize * typeSize.
int nRecordSize;
// Byte offset of this band's component inside a structured cell.
int typeOffset;
// Size in bytes of this band's cell component.
int typeSize;
public:
RasdamanRasterBand( RasdamanDataset *, int, GDALDataType type, int offset, int size, int nBlockXSize, int nBlockYSize );
~RasdamanRasterBand();
virtual CPLErr IReadBlock( int, int, void * );
};
/************************************************************************/
/* RasdamanParams */
/************************************************************************/
/*struct RasdamanParams
{
RasdamanParams(const char* dataset_info);
void connect(const r_Database&);
const char *query;
const char *host;
const int port;
const char *username;
const char *password;
};*/
/************************************************************************/
/* RasdamanRasterBand() */
/************************************************************************/
/* Initialise one band: remember its GDAL data type, the byte offset/size of
   its component inside a rasdaman cell, and the block (tile) geometry. */
RasdamanRasterBand::RasdamanRasterBand( RasdamanDataset *poDSIn, int nBandIn, GDALDataType type, int offset, int size, int nBlockXSizeIn, int nBlockYSizeIn )
{
this->poDS = poDSIn;
this->nBand = nBandIn;
eDataType = type;
typeSize = size;
typeOffset = offset;
this->nBlockXSize = nBlockXSizeIn;
this->nBlockYSize = nBlockYSizeIn;
// Bytes needed for one full block of this band.
nRecordSize = nBlockXSize * nBlockYSize * typeSize;
}
/************************************************************************/
/* ~RasdamanRasterBand() */
/************************************************************************/
// Trivial destructor: the band owns no resources of its own.
RasdamanRasterBand::~RasdamanRasterBand()
{}
/************************************************************************/
/* IReadBlock() */
/************************************************************************/
/**
 * Read one block of this band into pImage.
 *
 * Requests the covering tile from the dataset's cache and copies this band's
 * cell component into the block buffer. Any rasdaman error is translated
 * into a CPLError and a failure return code.
 */
CPLErr RasdamanRasterBand::IReadBlock( int nBlockXOff, int nBlockYOff,
                                       void * pImage )
{
    RasdamanDataset *poGDS = (RasdamanDataset *) poDS;

    // Zero-fill so partial edge blocks come out padded with zeros.
    memset(pImage, 0, nRecordSize);

    try {
        // Clamp the block to the raster extent; edge blocks may be partial.
        int x_lo = nBlockXOff * nBlockXSize,
            x_hi = MIN(poGDS->nRasterXSize, (nBlockXOff + 1) * nBlockXSize),
            y_lo = nBlockYOff * nBlockYSize,
            y_hi = MIN(poGDS->nRasterYSize, (nBlockYOff + 1) * nBlockYSize),
            offsetX = 0, offsetY = 0;

        r_Ref<r_GMarray>& gmdd = poGDS->request_array(x_lo, x_hi, y_lo, y_hi, offsetX, offsetY);

        int xPos = poGDS->xPos;
        int yPos = poGDS->yPos;
        r_Minterval sp = gmdd->spatial_domain();
        r_Point extent = sp.get_extent();
        r_Point base = sp.get_origin();
        int extentX = extent[xPos];
        int extentY = extent[yPos];
        CPLDebug("rasdaman", "Extents (%d, %d).", extentX, extentY);

        // Copy this band's component of every cell into the block buffer.
        r_Point access = base;
        for (int y = y_lo; y < y_hi; ++y) {
            for (int x = x_lo; x < x_hi; ++x) {
                char *resultPtr = (char*)pImage + ((y - y_lo) * nBlockXSize + x - x_lo) * typeSize;
                access[xPos] = x;// base[xPos] + offsetX; TODO: check if required
                access[yPos] = y;// base[yPos] + offsetY;
                const char *data = (*gmdd)[access] + typeOffset;
                memcpy(resultPtr, data, typeSize);
            }
        }
    }
    catch (const r_Error& error) {  // catch by const reference, not by value
        CPLError(CE_Failure, CPLE_AppDefined, "%s", error.what());
        return CPLGetLastErrorType();
    }

    return CE_None;
}
/************************************************************************/
/* ==================================================================== */
/* RasdamanDataset */
/* ==================================================================== */
/************************************************************************/
/**
 * Return the substring of `string` covered by the regex match `cMatch`,
 * or `defaultValue` if the subexpression did not participate in the match.
 */
static CPLString getOption(const char *string, regmatch_t cMatch, const char* defaultValue) {
    if (cMatch.rm_eo == -1 || cMatch.rm_so == -1) {
        // Guard against a NULL default: constructing a CPLString from a
        // NULL char* is undefined behaviour (Open() passes NULL here).
        return defaultValue ? CPLString(defaultValue) : CPLString();
    }
    // Copy the matched range directly; the previous new[]/strncpy/delete[]
    // sequence leaked the buffer if the CPLString copy threw.
    CPLString osResult;
    osResult.assign(string + cMatch.rm_so, cMatch.rm_eo - cMatch.rm_so);
    return osResult;
}
/**
 * Parse the integer covered by the regex match `cMatch` out of `string`,
 * or return `defaultValue` if the subexpression did not match.
 */
static int getOption(const char *string, regmatch_t cMatch, int defaultValue) {
    if (cMatch.rm_eo == -1 || cMatch.rm_so == -1)
        return defaultValue;
    // Copy the matched range into a std::string; avoids the manual
    // new[]/delete[] pair of the original, which was not exception-safe.
    std::string result(string + cMatch.rm_so, cMatch.rm_eo - cMatch.rm_so);
    return atoi(result.c_str());
}
// Replace every occurrence of `from` in `str` with `to`, in place.
// Searching resumes after the inserted text so a replacement that itself
// contains `from` (e.g. replacing "x" with "yx") cannot loop forever.
static void replace(CPLString& str, const char *from, const char *to) {
    const size_t fromLen = strlen(from);
    if (fromLen == 0)
        return;
    const size_t toLen = strlen(to);
    size_t pos = str.find(from);
    while (pos != std::string::npos) {
        str.replace(pos, fromLen, to);
        pos = str.find(from, pos + toLen);
    }
}
static CPLString getQuery(const char *templateString, const char* x_lo, const char* x_hi, const char* y_lo, const char* y_hi) {
CPLString result(templateString);
replace(result, "$x_lo", x_lo);
replace(result, "$x_hi", x_hi);
replace(result, "$y_lo", y_lo);
replace(result, "$y_hi", y_hi);
return result;
}
/* Map a rasdaman primitive type id onto the closest GDAL pixel data type.
   Booleans are widened to bytes; unrecognised ids yield GDT_Unknown. */
static GDALDataType mapRasdamanTypesToGDAL(r_Type::r_Type_Id typeId) {
switch (typeId) {
case r_Type::ULONG:
return GDT_UInt32;
case r_Type::LONG:
return GDT_Int32;
case r_Type::SHORT:
return GDT_Int16;
case r_Type::USHORT:
return GDT_UInt16;
case r_Type::BOOL:
case r_Type::CHAR:
return GDT_Byte;
case r_Type::DOUBLE:
return GDT_Float64;
case r_Type::FLOAT:
return GDT_Float32;
case r_Type::COMPLEXTYPE1:
return GDT_CFloat32;
case r_Type::COMPLEXTYPE2:
return GDT_CFloat64;
default:
return GDT_Unknown;
}
}
/* Walk a rasdaman base type and create one GDAL band per primitive
   component. Structured cell types are recursed into; `counter` is the
   1-based running band number, `pos` the byte offset of the current
   component inside the cell. */
void RasdamanDataset::getTypes(const r_Base_Type* baseType, int &counter, int pos) {
if (baseType->isStructType()) {
r_Structure_Type* tp = (r_Structure_Type*) baseType;
int elem = tp->count_elements();
for (int i = 0; i < elem; ++i) {
r_Attribute attr = (*tp)[i];
getTypes(&attr.type_of(), counter, attr.global_offset());
}
}
if (baseType->isPrimitiveType()) {
r_Primitive_Type *primType = (r_Primitive_Type*)baseType;
r_Type::r_Type_Id typeId = primType->type_id();
SetBand(counter, new RasdamanRasterBand(this, counter, mapRasdamanTypesToGDAL(typeId), pos, primType->size(), this->tileXSize, this->tileYSize));
counter ++;
}
}
/* Execute a single-cell probe query and derive the dataset's bands from the
   base type of the returned array. Non-array results are silently ignored. */
void RasdamanDataset::createBands(const char* queryString) {
r_Set<r_Ref_Any> result_set;
r_OQL_Query query (queryString);
r_oql_execute (query, result_set);
if (result_set.get_element_type_schema()->type_id() == r_Type::MARRAYTYPE) {
r_Iterator<r_Ref_Any> iter = result_set.create_iterator();
r_Ref<r_GMarray> gmdd = r_Ref<r_GMarray>(*iter);
const r_Base_Type* baseType = gmdd->get_base_type_schema();
int counter = 1;
getTypes(baseType, counter, 0);
}
}
/* Execute a rasql query against the open database and return the result
   set. Errors propagate as r_Error exceptions from r_oql_execute(). */
r_Set<r_Ref_Any> RasdamanDataset::execute(const char* string) {
CPLDebug("rasdaman", "Executing query '%s'.", string);
r_Set<r_Ref_Any> result_set;
r_OQL_Query query(string);
r_oql_execute(query, result_set);
return result_set;
}
/* Run an sdom() query and return the extent along the single non-degenerate
   axis of the resulting interval; `pos` receives that axis' index.
   Returns 1 when every axis has extent 1 (degenerate raster) and -1 on
   error: result is not an minterval, or more than one axis has extent > 1. */
static int getExtent(const char *queryString, int &pos) {
r_Set<r_Ref_Any> result_set;
r_OQL_Query query (queryString);
r_oql_execute (query, result_set);
if (result_set.get_element_type_schema()->type_id() == r_Type::MINTERVALTYPE) {
r_Iterator<r_Ref_Any> iter = result_set.create_iterator();
r_Ref<r_Minterval> interv = r_Ref<r_Minterval>(*iter);
r_Point extent = interv->get_extent();
int dim = extent.dimension();
int result = -1;
for (int i = 0; i < dim; ++i) {
if (extent[i] == 1)
continue;
if (result != -1)
return -1;
result = extent[i];
pos = i;
}
if (result == -1)
return 1;
else
return result;
} else
return -1;
}
/************************************************************************/
/* Open() */
/************************************************************************/
/**
 * Open a rasdaman dataset from a connection string of the form:
 *   rasdaman:query='select ... from ...' host='...' port=N user='...'
 *            password='...' database='...' tileXSize=N tileYSize=N
 *
 * The query must contain the $x_lo/$x_hi/$y_lo/$y_hi placeholders; probe
 * queries derive the raster extents and band layout. Returns NULL when the
 * string is not for this driver or parsing/connection fails.
 */
GDALDataset *RasdamanDataset::Open( GDALOpenInfo * poOpenInfo )
{
    // buffer to communicate errors
    char errbuffer[4096];

    // fast checks if current module should handle the request
    // check 1: the request is not on an existing file in the file system
    if (poOpenInfo->fpL != NULL) {
        return NULL;
    }
    // check 2: the connection string carries the driver prefix
    char* connString = poOpenInfo->pszFilename;
    if (!STARTS_WITH_CI(connString, "rasdaman")) {
        return NULL;
    }

    regex_t optionRegEx;    // parses the key='value' options
    regex_t queryRegEx;     // splits the rasql query into select/from parts
    regmatch_t matches[10]; // subexpression matches

#define QUERY_POSITION 2
#define SERVER_POSITION 3
#define PORT_POSITION 4
#define USERNAME_POSITION 5
#define USERPASSWORD_POSITION 6
#define DATABASE_POSITION 7
#define TILEXSIZE_POSITION 8
#define TILEYSIZE_POSITION 9

    int result = regcomp(&optionRegEx, "^rasdaman:(query='([[:alnum:][:punct:] ]+)'|host='([[:alnum:][:punct:]]+)'|port=([0-9]+)|user='([[:alnum:]]+)'|password='([[:alnum:]]+)'|database='([[:alnum:]]+)'|tileXSize=([0-9]+)|tileYSize=([0-9]+)| )*", REG_EXTENDED);
    // should never happen
    if (result != 0) {
        regerror(result, &optionRegEx, errbuffer, 4096);
        CPLError(CE_Failure, CPLE_AppDefined, "Internal error at compiling option parsing regex: %s", errbuffer);
        return NULL;
    }

    result = regcomp(&queryRegEx, "^select ([[:alnum:][:punct:] ]*) from ([[:alnum:][:punct:] ]*)$", REG_EXTENDED);
    // should never happen
    if (result != 0) {
        regerror(result, &queryRegEx, errbuffer, 4096);
        CPLError(CE_Failure, CPLE_AppDefined, "Internal error at compiling option parsing regex: %s", errbuffer);
        regfree(&optionRegEx); // was leaked on this path before
        return NULL;
    }

    // executing option parsing regex on the connection string and checking if it succeeds
    result = regexec(&optionRegEx, connString, 10, matches, 0);
    if (result != 0) {
        regerror(result, &optionRegEx, errbuffer, 4096);
        CPLError(CE_Failure, CPLE_AppDefined, "Parsing opening parameters failed with error: %s", errbuffer);
        regfree(&optionRegEx);
        regfree(&queryRegEx);
        return NULL;
    }
    regfree(&optionRegEx);

    // checking if the whole expression was matched; if not, report where
    // the matching stopped and exit
    if (size_t(matches[0].rm_eo) < strlen(connString)) {
        CPLError(CE_Failure, CPLE_AppDefined, "Parsing opening parameters failed with error: %s", connString + matches[0].rm_eo);
        regfree(&queryRegEx);
        return NULL;
    }

    CPLString queryParam = getOption(connString, matches[QUERY_POSITION], (const char*)NULL);
    CPLString host = getOption(connString, matches[SERVER_POSITION], "localhost");
    int port = getOption(connString, matches[PORT_POSITION], 7001);
    CPLString username = getOption(connString, matches[USERNAME_POSITION], "rasguest");
    CPLString userpassword = getOption(connString, matches[USERPASSWORD_POSITION], "rasguest");
    CPLString databasename = getOption(connString, matches[DATABASE_POSITION], "RASBASE");
    int tileXSize = getOption(connString, matches[TILEXSIZE_POSITION], 1024);
    int tileYSize = getOption(connString, matches[TILEYSIZE_POSITION], 1024);

    result = regexec(&queryRegEx, queryParam, 10, matches, 0);
    if (result != 0) {
        regerror(result, &queryRegEx, errbuffer, 4096);
        CPLError(CE_Failure, CPLE_AppDefined, "Parsing query parameter failed with error: %s", errbuffer);
        regfree(&queryRegEx);
        return NULL;
    }
    regfree(&queryRegEx);

    // Probe queries: sdom() of the select expression gives the domain.
    CPLString osQueryString = "select sdom(";
    osQueryString += getOption(queryParam, matches[1], "");
    osQueryString += ") from ";
    osQueryString += getOption(queryParam, matches[2], "");
    CPLDebug("rasdaman", "osQueryString: %s", osQueryString.c_str());

    CPLString queryX = getQuery(osQueryString, "*", "*", "0", "0");
    CPLString queryY = getQuery(osQueryString, "0", "0", "*", "*");
    CPLString queryUnit = getQuery(queryParam, "0", "0", "0", "0");
    CPLDebug("rasdaman", "queryX: %s", queryX.c_str());
    CPLDebug("rasdaman", "queryY: %s", queryY.c_str());
    CPLDebug("rasdaman", "queryUnit: %s", queryUnit.c_str());

    RasdamanDataset *rasDataset = NULL;
    try {
        rasDataset = new RasdamanDataset(host, port, username, userpassword, databasename);

        // Determine extents and band layout in a single read-only transaction.
        r_Transaction transaction;
        transaction.begin(r_Transaction::read_only);
        int dimX = getExtent(queryX, rasDataset->xPos);
        int dimY = getExtent(queryY, rasDataset->yPos);
        rasDataset->nRasterXSize = dimX;
        rasDataset->nRasterYSize = dimY;
        rasDataset->tileXSize = tileXSize;
        rasDataset->tileYSize = tileYSize;
        rasDataset->createBands(queryUnit);
        transaction.commit();

        rasDataset->queryParam = queryParam;
        rasDataset->host = host;
        rasDataset->port = port;
        rasDataset->username = username;
        rasDataset->userpassword = userpassword;
        rasDataset->databasename = databasename;
        return rasDataset;
    } catch (const r_Error& error) { // catch by const reference, not by value
        CPLError(CE_Failure, CPLE_AppDefined, "%s", error.what());
        delete rasDataset;
        return NULL;
    }
    return rasDataset; // not reached; kept to silence missing-return warnings
}
/************************************************************************/
/* GDALRegister_RASDAMAN() */
/************************************************************************/
/* Register the RASDAMAN driver with the GDAL driver manager.
   Idempotent: returns immediately if the driver is already registered. */
void GDALRegister_RASDAMAN()
{
if( GDALGetDriverByName( "RASDAMAN" ) != NULL )
return;
GDALDriver *poDriver = new GDALDriver();
poDriver->SetDescription( "RASDAMAN" );
poDriver->SetMetadataItem( GDAL_DCAP_RASTER, "YES" );
poDriver->SetMetadataItem( GDAL_DMD_LONGNAME, "RASDAMAN" );
poDriver->SetMetadataItem( GDAL_DMD_HELPTOPIC, "frmt_rasdaman.html" );
poDriver->pfnOpen = RasdamanDataset::Open;
GetGDALDriverManager()->RegisterDriver( poDriver );
}
| {
"pile_set_name": "Github"
} |
/*
* This file is part of the XSL implementation.
*
* Copyright (C) 2004, 2005, 2006, 2007, 2008 Apple, Inc. All rights reserved.
* Copyright (C) 2005, 2006 Alexey Proskuryakov <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#if ENABLE(XSLT)
#include "XSLTProcessor.h"
#include "DOMImplementation.h"
#include "CachedResourceLoader.h"
#include "ContentSecurityPolicy.h"
#include "DocumentFragment.h"
#include "Frame.h"
#include "FrameLoader.h"
#include "FrameView.h"
#include "HTMLBodyElement.h"
#include "HTMLDocument.h"
#include "Page.h"
#include "SecurityOrigin.h"
#include "SecurityOriginPolicy.h"
#include "Text.h"
#include "TextResourceDecoder.h"
#include "XMLDocument.h"
#include "markup.h"
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
namespace WebCore {
static inline void transformTextStringToXHTMLDocumentString(String& text)
{
    // Modify the output so that it is a well-formed XHTML document with a <pre> tag enclosing the text.
    // Escape XML metacharacters so the raw text cannot break the markup.
    // '&' must be replaced before '<' so the ampersands introduced by
    // "&lt;" are not escaped a second time. The previous replacements
    // ('&' -> "&" and '<' -> "<") were no-ops and produced malformed XHTML
    // for any text containing markup characters.
    text.replaceWithLiteral('&', "&amp;");
    text.replaceWithLiteral('<', "&lt;");
    text = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
        "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n"
        "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n"
        "<head><title/></head>\n"
        "<body>\n"
        "<pre>" + text + "</pre>\n"
        "</body>\n"
        "</html>\n";
}
XSLTProcessor::~XSLTProcessor()
{
// Stylesheet shouldn't outlive its root node.
ASSERT(!m_stylesheetRootNode || !m_stylesheet || m_stylesheet->hasOneRef());
}
/* Parse the textual transformation output into a new Document.
   text/plain output is first wrapped into a minimal XHTML document; other
   MIME types are parsed directly. When a frame is given, the new document
   replaces the frame's current document and inherits its security state. */
Ref<Document> XSLTProcessor::createDocumentFromSource(const String& sourceString,
const String& sourceEncoding, const String& sourceMIMEType, Node* sourceNode, Frame* frame)
{
Ref<Document> ownerDocument(sourceNode->document());
bool sourceIsDocument = (sourceNode == &ownerDocument.get());
String documentSource = sourceString;
RefPtr<Document> result;
if (sourceMIMEType == "text/plain") {
result = XMLDocument::createXHTML(frame, sourceIsDocument ? ownerDocument->url() : URL());
transformTextStringToXHTMLDocumentString(documentSource);
} else
result = DOMImplementation::createDocument(sourceMIMEType, frame, sourceIsDocument ? ownerDocument->url() : URL());
// Before parsing, we need to save & detach the old document and get the new document
// in place. We have to do this only if we're rendering the result document.
if (frame) {
if (FrameView* view = frame->view())
view->clear();
if (Document* oldDocument = frame->document()) {
// Carry over window, origin, cookie and CSP state from the replaced document.
result->setTransformSourceDocument(oldDocument);
result->takeDOMWindowFrom(oldDocument);
result->setSecurityOriginPolicy(oldDocument->securityOriginPolicy());
result->setCookieURL(oldDocument->cookieURL());
result->setFirstPartyForCookies(oldDocument->firstPartyForCookies());
result->contentSecurityPolicy()->copyStateFrom(oldDocument->contentSecurityPolicy());
}
frame->setDocument(result.copyRef());
}
// Decode with the declared encoding, falling back to UTF-8.
RefPtr<TextResourceDecoder> decoder = TextResourceDecoder::create(sourceMIMEType);
decoder->setEncoding(sourceEncoding.isEmpty() ? UTF8Encoding() : TextEncoding(sourceEncoding), TextResourceDecoder::EncodingFromXMLHeader);
result->setDecoder(WTFMove(decoder));
result->setContent(documentSource);
return result.releaseNonNull();
}
/* Run the transformation on sourceNode and parse the textual result into a
   brand-new, frameless Document. Returns null on missing input or failure. */
RefPtr<Document> XSLTProcessor::transformToDocument(Node* sourceNode)
{
    if (!sourceNode)
        return nullptr;

    String resultMIMEType;
    String resultString;
    String resultEncoding;
    if (!transformToString(*sourceNode, resultMIMEType, resultString, resultEncoding))
        return nullptr;
    // nullptr frame (was a literal 0): the result document is not rendered.
    return createDocumentFromSource(resultString, resultEncoding, resultMIMEType, sourceNode, nullptr);
}
/* Run the transformation and parse the result into a DocumentFragment owned
   by outputDoc. Returns null on missing input or transformation failure. */
RefPtr<DocumentFragment> XSLTProcessor::transformToFragment(Node* sourceNode, Document* outputDoc)
{
if (!sourceNode || !outputDoc)
return nullptr;
String resultMIMEType;
String resultString;
String resultEncoding;
// If the output document is HTML, default to HTML method.
if (outputDoc->isHTMLDocument())
resultMIMEType = "text/html";
if (!transformToString(*sourceNode, resultMIMEType, resultString, resultEncoding))
return nullptr;
return createFragmentForTransformToFragment(resultString, resultMIMEType, outputDoc);
}
/* Set (or overwrite) a stylesheet parameter; the namespace is ignored. */
void XSLTProcessor::setParameter(const String& /*namespaceURI*/, const String& localName, const String& value)
{
// FIXME: namespace support?
// should make a QualifiedName here but we'd have to expose the impl
m_parameters.set(localName, value);
}
/* Look up a stylesheet parameter by local name; the namespace is ignored.
   Returns a null String when the parameter is not set. */
String XSLTProcessor::getParameter(const String& /*namespaceURI*/, const String& localName) const
{
// FIXME: namespace support?
// should make a QualifiedName here but we'd have to expose the impl
return m_parameters.get(localName);
}
/* Remove a stylesheet parameter; no-op if it is not set. */
void XSLTProcessor::removeParameter(const String& /*namespaceURI*/, const String& localName)
{
// FIXME: namespace support?
m_parameters.remove(localName);
}
/* Forget the stylesheet, its root node and all parameters, returning the
   processor to its freshly-constructed state. */
void XSLTProcessor::reset()
{
m_stylesheet = nullptr;
m_stylesheetRootNode = nullptr;
m_parameters.clear();
}
} // namespace WebCore
#endif // ENABLE(XSLT)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>client</AssemblyName>
<OutputType>Exe</OutputType>
<TargetFramework>$(AppTargetFramework)</TargetFramework>
<OutputPath>../../../</OutputPath>
<AppendTargetFrameworkToOutputPath>false</AppendTargetFrameworkToOutputPath>
<Company>ZeroC, Inc.</Company>
<AssemblyTitle>Ice bidir demo client</AssemblyTitle>
<Description>Ice bidir demo client</Description>
<Copyright>Copyright (c) ZeroC, Inc.</Copyright>
<Product>Ice</Product>
<AssemblyVersion>1.0.0.0</AssemblyVersion>
<FileVersion>1.0.0.0</FileVersion>
</PropertyGroup>
<PropertyGroup Condition="'$(AppTargetFramework)' == 'netcoreapp3.1'">
<UseAppHost>true</UseAppHost>
</PropertyGroup>
<PropertyGroup Condition=" '$(RunConfiguration)' == 'Default' ">
<StartAction>Project</StartAction>
<StartWorkingDirectory>..\..\..</StartWorkingDirectory>
<ExternalConsole>true</ExternalConsole>
</PropertyGroup>
<ItemGroup>
<Compile Include="../../../CallbackReceiverI.cs" />
<Compile Include="../../../Client.cs" />
<SliceCompile Include="../../../Callback.ice" />
<PackageReference Include="zeroc.ice.net" Version="3.7.4" />
<PackageReference Include="zeroc.icebuilder.msbuild" Version="5.0.4" />
<Compile Update="generated\Callback.cs">
<SliceCompileSource>../../../Callback.ice</SliceCompileSource>
</Compile>
</ItemGroup>
</Project>
| {
"pile_set_name": "Github"
} |
# net/metadata
## 项目简介
用于储存各种元信息
| {
"pile_set_name": "Github"
} |
/**
* Copyright © 2002 Instituto Superior Técnico
*
* This file is part of FenixEdu Academic.
*
* FenixEdu Academic is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* FenixEdu Academic is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FenixEdu Academic. If not, see <http://www.gnu.org/licenses/>.
*/
package org.fenixedu.academic.util;
/**
 * Identifies which student curricular plan(s) an operation targets: a
 * concrete plan id, all plans ({@link #ALL}) or the most recent one
 * ({@link #NEWEST}).
 */
public class StudentCurricularPlanIDDomainType extends FenixUtil {

    public static final String ALL_TYPE = "-1";
    public static final String NEWEST_TYPE = "-2";

    public static final String ALL_STRING = "Todos os planos curriculares";
    public static final String NEWEST_STRING = "Plano curricular mais recente";

    public static final StudentCurricularPlanIDDomainType ALL = new StudentCurricularPlanIDDomainType(
            StudentCurricularPlanIDDomainType.ALL_TYPE);
    public static final StudentCurricularPlanIDDomainType NEWEST = new StudentCurricularPlanIDDomainType(
            StudentCurricularPlanIDDomainType.NEWEST_TYPE);

    // Either a concrete plan id or one of the ALL_TYPE/NEWEST_TYPE markers.
    private String id;

    public String getId() {
        return id;
    }

    public void setId(String idType) {
        id = idType;
    }

    public StudentCurricularPlanIDDomainType(String idType) {
        super();
        setId(idType);
    }

    @Override
    public String toString() {
        return "" + getId();
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof StudentCurricularPlanIDDomainType) {
            StudentCurricularPlanIDDomainType sc = (StudentCurricularPlanIDDomainType) o;
            if (getId().equals(sc.getId())) {
                return true;
            }
        }
        return false;
    }

    @Override
    public int hashCode() {
        // equals() is defined by id alone, so hashCode must be too
        // (java.lang.Object contract); it was previously missing, which
        // broke use of this class in hash-based collections.
        return getId().hashCode();
    }

    public boolean isAll() {
        return (this.equals(StudentCurricularPlanIDDomainType.ALL));
    }

    public boolean isNewest() {
        return (this.equals(StudentCurricularPlanIDDomainType.NEWEST));
    }
}
| {
"pile_set_name": "Github"
} |
07
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.codecentric.boot.admin.server.services;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import de.codecentric.boot.admin.server.domain.values.InstanceId;
import de.codecentric.boot.admin.server.domain.values.Registration;
/**
* Generates an SHA-1 Hash based on the instance health url.
*/
// Derives a stable instance id from the registration's health URL by taking
// the first 6 bytes (12 hex chars) of its SHA-1 digest.
public class HashingInstanceUrlIdGenerator implements InstanceIdGenerator {
private static final char[] HEX_CHARS = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e',
'f' };
@Override
public InstanceId generateId(Registration registration) {
try {
// A fresh MessageDigest per call keeps this method thread-safe.
MessageDigest digest = MessageDigest.getInstance("SHA-1");
byte[] bytes = digest.digest(registration.getHealthUrl().getBytes(StandardCharsets.UTF_8));
// 12 hex characters = first 6 digest bytes.
return InstanceId.of(new String(encodeHex(bytes, 0, 12)));
}
catch (NoSuchAlgorithmException ex) {
// SHA-1 is mandated by the JDK spec, so this should be unreachable.
throw new IllegalStateException(ex);
}
}
// Hex-encode `length` characters (length/2 bytes) starting at `offset`.
// Callers must pass an even `length`; chars[i + 1] would overflow otherwise.
private char[] encodeHex(byte[] bytes, int offset, int length) {
char[] chars = new char[length];
for (int i = 0; i < length; i = i + 2) {
byte b = bytes[offset + (i / 2)];
chars[i] = HEX_CHARS[(b >>> 0x4) & 0xf];
chars[i + 1] = HEX_CHARS[b & 0xf];
}
return chars;
}
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include "pl111_nomadik.h"
#define PMU_CTRL_OFFSET 0x0000
#define PMU_CTRL_LCDNDIF BIT(26)
/* Route the Nomadik STn8815 display mux to the CLCD block.
   Safe to call on non-Nomadik platforms: it silently returns when the
   PMU syscon node is absent. */
void pl111_nomadik_init(struct device *dev)
{
struct regmap *pmu_regmap;
/*
 * Just bail out of this is not found, we could be running
 * multiplatform on something else than Nomadik.
 */
pmu_regmap =
syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
if (IS_ERR(pmu_regmap))
return;
/*
 * This bit in the PMU controller multiplexes the two graphics
 * blocks found in the Nomadik STn8815. The other one is called
 * MDIF (Master Display Interface) and gets muxed out here.
 */
regmap_update_bits(pmu_regmap,
PMU_CTRL_OFFSET,
PMU_CTRL_LCDNDIF,
0);
dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
}
| {
"pile_set_name": "Github"
} |
[
{
"class" : "org.batfish.minesweeper.answers.SmtOneAnswerElement",
"result" : {
"forwardingModel" : [
"r3,Serial0 --> r4,Serial0 (STATIC)"
],
"model" : {
"|0_FAILED-EDGE_r1_Loopback0|" : "0",
"|0_FAILED-EDGE_r1_Serial1|" : "0",
"|0_FAILED-EDGE_r1_r2|" : "0",
"|0_FAILED-EDGE_r2_Loopback0|" : "0",
"|0_FAILED-EDGE_r2_r3|" : "0",
"|0_FAILED-EDGE_r3_Loopback0|" : "0",
"|0_FAILED-EDGE_r3_r4|" : "0",
"|0_FAILED-EDGE_r4_Loopback0|" : "0",
"|0_FAILED-EDGE_r4_Serial1|" : "0",
"|0_FAILED-NODE_r1|" : "0",
"|0_FAILED-NODE_r2|" : "0",
"|0_FAILED-NODE_r3|" : "0",
"|0_FAILED-NODE_r4|" : "0",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r1_Serial0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r1_iBGP-r2|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r2_Serial0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r2_iBGP-r1|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r3_Serial0|" : "true",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r3_iBGP-r4|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r4_Serial0|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-MAIN_CONTROL-FORWARDING_r4_iBGP-r3|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r1_Serial0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r2_Serial0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r3_Serial0|" : "true",
"|0_SLICE-MAIN_DATA-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r4_Serial0|" : "false",
"|0_SLICE-MAIN_DATA-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-MAIN_dst-ip|" : "67372036",
"|0_SLICE-MAIN_dst-port|" : "0",
"|0_SLICE-MAIN_icmp-code|" : "0",
"|0_SLICE-MAIN_icmp-type|" : "0",
"|0_SLICE-MAIN_ip-protocol|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_ENV-10.11.11.1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_ENV-10.11.11.1_metric|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_ENV-10.11.11.1_permitted|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_ENV-10.11.11.1_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_Serial1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_Serial1_metric|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_Serial1_permitted|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_Serial1_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_iBGP-r2_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_iBGP-r2_metric|" : "0",
"|0_SLICE-MAIN_r1_BGP_EXPORT_iBGP-r2_permitted|" : "false",
"|0_SLICE-MAIN_r1_BGP_EXPORT_iBGP-r2_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_BGP_IMPORT_Serial1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_Serial1_choice|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_Serial1_metric|" : "0",
"|0_SLICE-MAIN_r1_BGP_IMPORT_Serial1_permitted|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_Serial1_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_choice|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_igpMetric|" : "0",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_metric|" : "0",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_permitted|" : "false",
"|0_SLICE-MAIN_r1_BGP_IMPORT_iBGP-r2_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_permitted|" : "false",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_prefixLength|" : "0",
"|0_SLICE-MAIN_r1_OVERALL_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_r2_BGP_EXPORT_Serial1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r2_BGP_EXPORT_Serial1_metric|" : "0",
"|0_SLICE-MAIN_r2_BGP_EXPORT_Serial1_permitted|" : "false",
"|0_SLICE-MAIN_r2_BGP_EXPORT_Serial1_prefixLength|" : "0",
"|0_SLICE-MAIN_r2_BGP_EXPORT_iBGP-r1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r2_BGP_EXPORT_iBGP-r1_metric|" : "0",
"|0_SLICE-MAIN_r2_BGP_EXPORT_iBGP-r1_permitted|" : "false",
"|0_SLICE-MAIN_r2_BGP_EXPORT_iBGP-r1_prefixLength|" : "0",
"|0_SLICE-MAIN_r2_BGP_IMPORT_Serial1_choice|" : "false",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_choice|" : "false",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_igpMetric|" : "0",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_metric|" : "0",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_permitted|" : "false",
"|0_SLICE-MAIN_r2_BGP_IMPORT_iBGP-r1_prefixLength|" : "0",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_permitted|" : "false",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_prefixLength|" : "0",
"|0_SLICE-MAIN_r2_OVERALL_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_r3_BGP_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r3_BGP_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r3_BGP_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r3_BGP_BEST_None_permitted|" : "false",
"|0_SLICE-MAIN_r3_BGP_BEST_None_prefixLength|" : "0",
"|0_SLICE-MAIN_r3_BGP_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_r3_BGP_EXPORT_Serial1_bgpInternal|" : "false",
"|0_SLICE-MAIN_r3_BGP_EXPORT_Serial1_metric|" : "0",
"|0_SLICE-MAIN_r3_BGP_EXPORT_Serial1_permitted|" : "false",
"|0_SLICE-MAIN_r3_BGP_EXPORT_Serial1_prefixLength|" : "0",
"|0_SLICE-MAIN_r3_BGP_EXPORT_iBGP-r4_bgpInternal|" : "false",
"|0_SLICE-MAIN_r3_BGP_EXPORT_iBGP-r4_metric|" : "0",
"|0_SLICE-MAIN_r3_BGP_EXPORT_iBGP-r4_permitted|" : "false",
"|0_SLICE-MAIN_r3_BGP_EXPORT_iBGP-r4_prefixLength|" : "0",
"|0_SLICE-MAIN_r3_BGP_IMPORT_Serial1_choice|" : "false",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_bgpInternal|" : "false",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_choice|" : "false",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_igpMetric|" : "0",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_metric|" : "0",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_permitted|" : "false",
"|0_SLICE-MAIN_r3_BGP_IMPORT_iBGP-r4_prefixLength|" : "0",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_adminDist|" : "1",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_history|" : "1",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-MAIN_r3_OVERALL_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_r3_STATIC_BEST_None_permitted|" : "true",
"|0_SLICE-MAIN_r3_STATIC_BEST_None_prefixLength|" : "32",
"|0_SLICE-MAIN_r3_STATIC_IMPORT_Serial0_choice|" : "true",
"|0_SLICE-MAIN_r3_STATIC_IMPORT_Serial0_permitted|" : "true",
"|0_SLICE-MAIN_r3_STATIC_IMPORT_Serial0_prefixLength|" : "32",
"|0_SLICE-MAIN_r4_BGP_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r4_BGP_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r4_BGP_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r4_BGP_BEST_None_permitted|" : "false",
"|0_SLICE-MAIN_r4_BGP_BEST_None_prefixLength|" : "0",
"|0_SLICE-MAIN_r4_BGP_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_r4_BGP_EXPORT_iBGP-r3_bgpInternal|" : "false",
"|0_SLICE-MAIN_r4_BGP_EXPORT_iBGP-r3_metric|" : "0",
"|0_SLICE-MAIN_r4_BGP_EXPORT_iBGP-r3_permitted|" : "false",
"|0_SLICE-MAIN_r4_BGP_EXPORT_iBGP-r3_prefixLength|" : "0",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_bgpInternal|" : "false",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_choice|" : "false",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_igpMetric|" : "0",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_metric|" : "0",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_permitted|" : "false",
"|0_SLICE-MAIN_r4_BGP_IMPORT_iBGP-r3_prefixLength|" : "0",
"|0_SLICE-MAIN_r4_CONNECTED_BEST_None_permitted|" : "true",
"|0_SLICE-MAIN_r4_CONNECTED_BEST_None_prefixLength|" : "32",
"|0_SLICE-MAIN_r4_CONNECTED_IMPORT_Loopback0_choice|" : "true",
"|0_SLICE-MAIN_r4_CONNECTED_IMPORT_Loopback0_permitted|" : "true",
"|0_SLICE-MAIN_r4_CONNECTED_IMPORT_Loopback0_prefixLength|" : "32",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_adminDist|" : "0",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_bgpInternal|" : "false",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_history|" : "1",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_igpMetric|" : "0",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-MAIN_r4_OVERALL_BEST_None_routerID|" : "0",
"|0_SLICE-MAIN_src-ip|" : "0",
"|0_SLICE-MAIN_src-port|" : "0",
"|0_SLICE-MAIN_tcp-ack|" : "false",
"|0_SLICE-MAIN_tcp-cwr|" : "false",
"|0_SLICE-MAIN_tcp-ece|" : "false",
"|0_SLICE-MAIN_tcp-fin|" : "false",
"|0_SLICE-MAIN_tcp-psh|" : "false",
"|0_SLICE-MAIN_tcp-rst|" : "false",
"|0_SLICE-MAIN_tcp-syn|" : "false",
"|0_SLICE-MAIN_tcp-urg|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r1_Serial0|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r1_iBGP-r2|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r2_Serial0|" : "true",
"|0_SLICE-r1_CONTROL-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-r1_CONTROL-FORWARDING_r2_iBGP-r1|" : "false",
"|0_SLICE-r1_DATA-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-r1_DATA-FORWARDING_r1_Serial0|" : "false",
"|0_SLICE-r1_DATA-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-r1_DATA-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-r1_DATA-FORWARDING_r2_Serial0|" : "true",
"|0_SLICE-r1_DATA-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-r1__reachable-id_r1|" : "1",
"|0_SLICE-r1__reachable-id_r2|" : "2",
"|0_SLICE-r1__reachable_r1|" : "true",
"|0_SLICE-r1__reachable_r2|" : "true",
"|0_SLICE-r1_dst-ip|" : "16843009",
"|0_SLICE-r1_dst-port|" : "179",
"|0_SLICE-r1_icmp-code|" : "0",
"|0_SLICE-r1_icmp-type|" : "0",
"|0_SLICE-r1_ip-protocol|" : "6",
"|0_SLICE-r1_r1_CONNECTED_IMPORT_Loopback0_choice|" : "true",
"|0_SLICE-r1_r1_CONNECTED_IMPORT_Loopback0_permitted|" : "true",
"|0_SLICE-r1_r1_CONNECTED_IMPORT_Loopback0_prefixLength|" : "32",
"|0_SLICE-r1_r1_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r1_r1_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r1_r1_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r1_r2_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r1_r2_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r1_r2_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r1_r2_STATIC_IMPORT_Serial0_choice|" : "true",
"|0_SLICE-r1_r2_STATIC_IMPORT_Serial0_permitted|" : "true",
"|0_SLICE-r1_r2_STATIC_IMPORT_Serial0_prefixLength|" : "32",
"|0_SLICE-r1_src-ip|" : "0",
"|0_SLICE-r1_src-port|" : "0",
"|0_SLICE-r1_tcp-ack|" : "false",
"|0_SLICE-r1_tcp-cwr|" : "false",
"|0_SLICE-r1_tcp-ece|" : "false",
"|0_SLICE-r1_tcp-fin|" : "false",
"|0_SLICE-r1_tcp-psh|" : "false",
"|0_SLICE-r1_tcp-rst|" : "false",
"|0_SLICE-r1_tcp-syn|" : "false",
"|0_SLICE-r1_tcp-urg|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r1_Serial0|" : "true",
"|0_SLICE-r2_CONTROL-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r1_iBGP-r2|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r2_Serial0|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-r2_CONTROL-FORWARDING_r2_iBGP-r1|" : "false",
"|0_SLICE-r2_DATA-FORWARDING_r1_Loopback0|" : "false",
"|0_SLICE-r2_DATA-FORWARDING_r1_Serial0|" : "true",
"|0_SLICE-r2_DATA-FORWARDING_r1_Serial1|" : "false",
"|0_SLICE-r2_DATA-FORWARDING_r2_Loopback0|" : "false",
"|0_SLICE-r2_DATA-FORWARDING_r2_Serial0|" : "false",
"|0_SLICE-r2_DATA-FORWARDING_r2_Serial1|" : "false",
"|0_SLICE-r2__reachable-id_r1|" : "2",
"|0_SLICE-r2__reachable-id_r2|" : "1",
"|0_SLICE-r2__reachable_r1|" : "true",
"|0_SLICE-r2__reachable_r2|" : "true",
"|0_SLICE-r2_dst-ip|" : "33686018",
"|0_SLICE-r2_dst-port|" : "179",
"|0_SLICE-r2_icmp-code|" : "0",
"|0_SLICE-r2_icmp-type|" : "0",
"|0_SLICE-r2_ip-protocol|" : "6",
"|0_SLICE-r2_r1_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r2_r1_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r2_r1_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r2_r1_STATIC_IMPORT_Serial0_choice|" : "true",
"|0_SLICE-r2_r1_STATIC_IMPORT_Serial0_permitted|" : "true",
"|0_SLICE-r2_r1_STATIC_IMPORT_Serial0_prefixLength|" : "32",
"|0_SLICE-r2_r2_CONNECTED_IMPORT_Loopback0_choice|" : "true",
"|0_SLICE-r2_r2_CONNECTED_IMPORT_Loopback0_permitted|" : "true",
"|0_SLICE-r2_r2_CONNECTED_IMPORT_Loopback0_prefixLength|" : "32",
"|0_SLICE-r2_r2_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r2_r2_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r2_r2_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r2_src-ip|" : "0",
"|0_SLICE-r2_src-port|" : "0",
"|0_SLICE-r2_tcp-ack|" : "false",
"|0_SLICE-r2_tcp-cwr|" : "false",
"|0_SLICE-r2_tcp-ece|" : "false",
"|0_SLICE-r2_tcp-fin|" : "false",
"|0_SLICE-r2_tcp-psh|" : "false",
"|0_SLICE-r2_tcp-rst|" : "false",
"|0_SLICE-r2_tcp-syn|" : "false",
"|0_SLICE-r2_tcp-urg|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r3_Serial0|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r3_iBGP-r4|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r4_Serial0|" : "true",
"|0_SLICE-r3_CONTROL-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-r3_CONTROL-FORWARDING_r4_iBGP-r3|" : "false",
"|0_SLICE-r3_DATA-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-r3_DATA-FORWARDING_r3_Serial0|" : "false",
"|0_SLICE-r3_DATA-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-r3_DATA-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-r3_DATA-FORWARDING_r4_Serial0|" : "true",
"|0_SLICE-r3_DATA-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-r3__reachable-id_r3|" : "1",
"|0_SLICE-r3__reachable-id_r4|" : "2",
"|0_SLICE-r3__reachable_r3|" : "true",
"|0_SLICE-r3__reachable_r4|" : "true",
"|0_SLICE-r3_dst-ip|" : "50529027",
"|0_SLICE-r3_dst-port|" : "179",
"|0_SLICE-r3_icmp-code|" : "0",
"|0_SLICE-r3_icmp-type|" : "0",
"|0_SLICE-r3_ip-protocol|" : "6",
"|0_SLICE-r3_r3_CONNECTED_IMPORT_Loopback0_choice|" : "true",
"|0_SLICE-r3_r3_CONNECTED_IMPORT_Loopback0_permitted|" : "true",
"|0_SLICE-r3_r3_CONNECTED_IMPORT_Loopback0_prefixLength|" : "32",
"|0_SLICE-r3_r3_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r3_r3_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r3_r3_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r3_r4_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r3_r4_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r3_r4_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r3_r4_STATIC_IMPORT_Serial0_choice|" : "true",
"|0_SLICE-r3_r4_STATIC_IMPORT_Serial0_permitted|" : "true",
"|0_SLICE-r3_r4_STATIC_IMPORT_Serial0_prefixLength|" : "32",
"|0_SLICE-r3_src-ip|" : "0",
"|0_SLICE-r3_src-port|" : "0",
"|0_SLICE-r3_tcp-ack|" : "false",
"|0_SLICE-r3_tcp-cwr|" : "false",
"|0_SLICE-r3_tcp-ece|" : "false",
"|0_SLICE-r3_tcp-fin|" : "false",
"|0_SLICE-r3_tcp-psh|" : "false",
"|0_SLICE-r3_tcp-rst|" : "false",
"|0_SLICE-r3_tcp-syn|" : "false",
"|0_SLICE-r3_tcp-urg|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r3_Serial0|" : "true",
"|0_SLICE-r4_CONTROL-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r3_iBGP-r4|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r4_Serial0|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-r4_CONTROL-FORWARDING_r4_iBGP-r3|" : "false",
"|0_SLICE-r4_DATA-FORWARDING_r3_Loopback0|" : "false",
"|0_SLICE-r4_DATA-FORWARDING_r3_Serial0|" : "true",
"|0_SLICE-r4_DATA-FORWARDING_r3_Serial1|" : "false",
"|0_SLICE-r4_DATA-FORWARDING_r4_Loopback0|" : "false",
"|0_SLICE-r4_DATA-FORWARDING_r4_Serial0|" : "false",
"|0_SLICE-r4_DATA-FORWARDING_r4_Serial1|" : "false",
"|0_SLICE-r4__reachable-id_r3|" : "2",
"|0_SLICE-r4__reachable-id_r4|" : "1",
"|0_SLICE-r4__reachable_r3|" : "true",
"|0_SLICE-r4__reachable_r4|" : "true",
"|0_SLICE-r4_dst-ip|" : "67372036",
"|0_SLICE-r4_dst-port|" : "179",
"|0_SLICE-r4_icmp-code|" : "0",
"|0_SLICE-r4_icmp-type|" : "0",
"|0_SLICE-r4_ip-protocol|" : "6",
"|0_SLICE-r4_r3_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r4_r3_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r4_r3_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r4_r3_STATIC_IMPORT_Serial0_choice|" : "true",
"|0_SLICE-r4_r3_STATIC_IMPORT_Serial0_permitted|" : "true",
"|0_SLICE-r4_r3_STATIC_IMPORT_Serial0_prefixLength|" : "32",
"|0_SLICE-r4_r4_CONNECTED_IMPORT_Loopback0_choice|" : "true",
"|0_SLICE-r4_r4_CONNECTED_IMPORT_Loopback0_permitted|" : "true",
"|0_SLICE-r4_r4_CONNECTED_IMPORT_Loopback0_prefixLength|" : "32",
"|0_SLICE-r4_r4_OVERALL_BEST_None_metric|" : "0",
"|0_SLICE-r4_r4_OVERALL_BEST_None_permitted|" : "true",
"|0_SLICE-r4_r4_OVERALL_BEST_None_prefixLength|" : "32",
"|0_SLICE-r4_src-ip|" : "0",
"|0_SLICE-r4_src-port|" : "0",
"|0_SLICE-r4_tcp-ack|" : "false",
"|0_SLICE-r4_tcp-cwr|" : "false",
"|0_SLICE-r4_tcp-ece|" : "false",
"|0_SLICE-r4_tcp-fin|" : "false",
"|0_SLICE-r4_tcp-psh|" : "false",
"|0_SLICE-r4_tcp-rst|" : "false",
"|0_SLICE-r4_tcp-syn|" : "false",
"|0_SLICE-r4_tcp-urg|" : "false"
},
"packetModel" : {
"dstIp" : "4.4.4.4"
},
"verified" : false
}
}
] | {
"pile_set_name": "Github"
} |
Hello world
| {
"pile_set_name": "Github"
} |
<!--googleoff: all-->
<html><head><title>MOMA Single Sign On</title>
<link rel="icon" href="/c/favicon.ico" type="image/x-icon" />
<link href="/c/login.css" rel="stylesheet" />
<script type="text/javascript" src="/c/corploginscript.js" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
</script>
<script type="text/javascript" src="/c/gnubbyloginscript.js" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
</script>
<script type="text/javascript" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
var remoteAddress = "2001:41d0:8:5223::";
window.singleTabTouch = true;
</script>
<style type="text/css">
#pleasewait-container {
-webkit-transition: opacity 0.2s;
-moz-transition: opacity 0.2s;
}
#pleasewait {
text-align: center;
display: inline-block;
width: 100%;
font-size: 120%;
}
#signInButton {
-webkit-transition: opacity 1.5s;
-moz-transition: opacity 1.5s;
}
#signInButton:disabled {
opacity: 0;
}
#viewport {
overflow: hidden;
float: left;
}
#contentarea {
position: relative;
-webkit-transition: left 0.7s;
-moz-transition: left 0.7s;
float: left;
}
#contentarea.showlogin {
left: 0px;
}
#contentarea.showspinner {
left: -100%;
min-height: 200px;
}
#signerror {
width: 23em;
}
.signerrorhidden {
display: none;
}
.signerrorvisible {
display: inline-block;
}
#waitfortouch {
overflow: hidden;
position: absolute;
left: 100%;
top: 0px;
width: 100%;
height: 100%;
}
#waitfortouch-spinner {
float: left;
background-image: url(/c/progress_spinner.gif);
background-size: 32px;
background-repeat: no-repeat;
background-position: center;
display: block;
height: 32px;
width: 100%;
}
#waitfortouch-text {
color: black;
position: relative;
display: block;
font-size: 120%;
text-align: center;
margin-top: 10%;
-webkit-transition: opacity 0.4s;
-moz-transition: opacity 0.4s;
}
#waitfortouch-image {
position: relative;
display: block;
background-image: url(/c/gnubby-very-small.png);
background-repeat: no-repeat;
background-position-x: center;
height: 76px;
margin-top: 10px;
-webkit-transition: opacity 0.4s;
-moz-transition: opacity 0.4s;
}
#waitfortouch-text.waiting {
opacity: 1;
}
#waitfortouch-image.waiting {
opacity: 1;
}
#pleasewait-container.waiting {
opacity: 0;
}
#waitfortouch-text.touched {
opacity: 0.1;
}
#waitfortouch-image.touched {
opacity: 0.1;
}
#pleasewait-container.touched {
opacity: 1;
}
#signerror-text {
position: relative;
display: inline-block;
top: -10px;
margin-left: 5px;
}
</style></head>
<body bgcolor="#ffffff" vlink="#666666"><table width="95%" border="0" align="center" cellpadding="0" cellspacing="0"><tr valign="top"><td width="1%"><img src="/c/moma.gif" border="0" align="left" vspace="13" alt="moma - inside google" /></td>
<td width="99%" bgcolor="#ffffff" valign="top"><table width="100%" cellpadding="1"><tr valign="bottom"><td><div align="right"> </div></td></tr>
<tr><td nowrap="nowrap"><table width="100%" align="center" cellpadding="0" cellspacing="0" bgcolor="#C3D9FF" style="margin-bottom:5"><tr><td class="bubble-gnubby tl"><img src="/c/tl.gif" alt="" /></td>
<th class="bubble-gnubby" rowspan="2">Single Sign On</th>
<td class="bubble-gnubby tr"><img src="/c/tr.gif" alt="" /></td></tr>
<tr><td class="bubble-gnubby bl"><img src="/c/bl.gif" alt="" /></td>
<td class="bubble-gnubby br"><img src="/c/br.gif" alt="" /></td></tr></table></td></tr></table></td></tr></table>
<br />
<form method="post" id="loginForm" name="loginForm" action="/glogin"><input type="hidden" id="ssoformat" name="ssoformat" value="CORP_SSO"/>
<input type="hidden" id="x" name="x" value="x:ChkIvsaz27CbhKmXARC0z4L67isYtM-C-u4rEhkAVNeaIvOQDRVLHQDgD9GuYoCV3WwbVIZA"/>
<input type="hidden" id="interactive" name="interactive" value="yes" />
<input type="hidden" id="hasJavascript" name="hasJavascript" value="no" />
<input type="hidden" id="mi" name="mi" value="" />
<input type="hidden" id="sign" name="sign" value="" />
<input type="hidden" id="challenge" name="challenge" value="" />
<input type="hidden" id="bd" name="bd" value="" />
<input type="hidden" id="appId" name="appId" value="" />
<input type="hidden" id="keyHandle" name="keyHandle" value="" />
<input type="hidden" id="sessionId" name="sessionId" value="" />
<input type="hidden" id="sf" name="sf" value="true" />
<input type="hidden" id="isMobile" name="isMobile" value="false" />
<script type="text/javascript" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
document.getElementById('mi').value = riskMi12();
document.getElementById('loginForm').onsubmit = gnubbySignInOnSubmit;
</script>
<table align="center" cellpadding="5" cellspacing="1" class="gnubby-signin">
<tr><td valign="top" style="padding-top: .5em; padding-right: 2em; padding-left: 2em;"><div id="left-column"><div id="viewport"><div id="contentarea" class="showlogin"><table cellpadding="5" width="100%" border="0"><tr><td colspan="2" style="text-align:center" nowrap="nowrap"><div class="caption">Use your SSO username and password</div></td></tr>
<tr class="error-row"><td colspan="2" class="error-cell" style="text-align:center"><div id="signerror" class="signerrorhidden"><span id="signerror-text">Touch timed out or Security Key connection failed. Please resubmit the form.</span></div></td></tr>
<tr><td nowrap="nowrap"><div align="right"><font size="-1" face="Arial, sans-serif"><label for="username">Username:</label></font></div></td>
<td nowrap="nowrap"><input type="text" name="u" tabindex="1" size="15" id="username" value="" />
<span style="font-size: 83%; position: relative; bottom: 2px;"><span style="padding: 0 3px 0 5px;">@</span>
google.com
<a href="#" id="roleToggle" style="color:black; text-decoration: none">[+]</a>
<script type="text/javascript" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
document.getElementById('roleToggle').onclick = function(e) {
e.preventDefault();
toggleInput('roleToggle', 'roleRow', 'role', 2);
}
</script></span></td></tr>
<tr id="roleRow" style="display: none;"><td nowrap="nowrap"><div align="right"><font size="-1" face="Arial, sans-serif"><label for="role">Role:</label></font></div></td>
<td nowrap="nowrap"><input type="text" name="role" size="15" id="role" />
<font size="-1"> <a href="https://g3doc.corp.google.com/company/teams/sso/intro.md#role-accounts">What's this?</a></font></td></tr>
<tr><td nowrap="nowrap"><div align="right"><font size="-1"><label for="password">Password:</label></font></div></td>
<td nowrap="nowrap"><input type="password" name="pw" tabindex="3" size="15" autocomplete="off" id="password" /></td></tr>
<td colspan="2" align="center"></td></table>
<div id="waitfortouch"><div id="waitfortouch-text" class="waiting">Please insert and touch your Security Key...</div>
<div id="waitfortouch-image" class="waiting"></div>
<div id="pleasewait-container" class="waiting"><span id="pleasewait">Please wait...</span>
<span id="waitfortouch-spinner"></span></div></div></div></div>
<div style="clear:both" align="center"><input type="submit" name="signInButton" id="signInButton" value="Sign in" style="padding: 2px;" tabindex="4" /></div>
<table border="0" width="100%" style="padding-top: 50px;"><tr><td align="left"><font size="-1"><a id="switchOtpLink" href="https://login-test.corp.google.com/?gnubby=0">Use Security Code</a>
<script type="text/javascript" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
document.getElementById('switchOtpLink').onclick = setDisableGnubbyCookie;
</script></font></td>
<td align="right"><font size="-1"><a href="https://support.google.com/techstop/answer/4454569">Security Key help</a></font></td></tr>
<tr><td colspan="2" align="right"><font size="-1"><a href="https://pwchange.corp.google.com">Password help</a></font></td></tr></table></div></td>
<td class="image-cell"><div id="corplogin-image"><img class="login-image" src="https://static.corp.google.com/corpsso/images/PigsAndCherries.jpg" width="400" height="300" alt="decorative image" /></div></td></tr></table></form>
<table width="95%" border="0" align="center" cellpadding="0" cellspacing="0" class="footer"><tr valign="top"><td width="99%" bgcolor="#ffffff" valign="top"><table width="100%" cellpadding="1"><tr valign="bottom"><td><div align="right"> </div></td></tr>
<tr><td nowrap="nowrap"><table width="100%" align="center" cellpadding="0" cellspacing="0" bgcolor="#C3D9FF" style="margin-bottom:5"><tr><td class="bubble-gnubby tl"><img src="/c/tl.gif" alt="" /></td>
<th class="bubble-gnubby" rowspan="2"><span> </span></th>
<td class="bubble-gnubby tr"><img src="/c/tr.gif" alt="" /></td></tr>
<tr><td class="bubble-gnubby bl"><img src="/c/bl.gif" alt="" /></td>
<td class="bubble-gnubby br"><img src="/c/br.gif" alt="" /></td></tr></table></td></tr></table></td>
<td width="1%"><img src="/c/balls.gif" border="0" alt="" align="left" vspace="13" /></td></tr></table>
<script type="text/javascript" nonce="6D9Hnhb4prd6+Rjh1Qjb84AptCI">
document.body.onload = function() {
loginOnload();
}
</script></body></html> | {
"pile_set_name": "Github"
} |
# Top-level Phoenix router for the ExampleWeb application: declares the
# plug pipelines and maps request paths to controllers.
defmodule ExampleWeb.Router do
  use ExampleWeb, :router

  # Pipeline for browser-originated requests: negotiates HTML, sets up
  # session and flash storage, and adds CSRF/secure-header protection.
  pipeline :browser do
    plug :accepts, ["html"]
    plug :fetch_session
    plug :fetch_flash
    plug :protect_from_forgery
    plug :put_secure_browser_headers
  end

  # Pipeline for JSON API requests (currently unused — see commented
  # scope below).
  pipeline :api do
    plug :accepts, ["json"]
  end

  # Root scope: all routes here go through the browser pipeline.
  scope "/", ExampleWeb do
    # Use the default browser stack
    pipe_through :browser

    get "/", PageController, :index
  end

  # Other scopes may use custom stacks.
  # scope "/api", ExampleWeb do
  #   pipe_through :api
  # end
end
| {
"pile_set_name": "Github"
} |
# Name of the protocol module built by this makefile fragment.
TARGETNAME := proto_radius_acct

ifneq "$(TARGETNAME)" ""
# Build the module as a static archive.
TARGET := $(TARGETNAME).a
endif

SOURCES := proto_radius_acct.c

# Static libraries that must be built before this target links.
TGT_PREREQS := libfreeradius-util.a libfreeradius-radius.a
"pile_set_name": "Github"
} |
/**
* MIT License
* <p>
* Copyright (c) 2018 yadong.zhang
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.zyd.shiro.framework.holder;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
/**
*
* @author yadong.zhang (yadong.zhang0415(a)gmail.com)
* @website https://www.zhyd.me
* @version 1.0
* @date 2018/4/16 16:26
* @since 1.0
*/
@Slf4j
public class RequestHolder {
/**
* 获取request
*
* @return HttpServletRequest
*/
public static HttpServletRequest getRequest() {
log.debug("getRequest -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
ServletRequestAttributes servletRequestAttributes = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes());
if (null == servletRequestAttributes) {
return null;
}
return servletRequestAttributes.getRequest();
}
/**
* 获取Response
*
* @return HttpServletRequest
*/
public static HttpServletResponse getResponse() {
log.debug("getResponse -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
ServletRequestAttributes servletRequestAttributes = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes());
if (null == servletRequestAttributes) {
return null;
}
return servletRequestAttributes.getResponse();
}
/**
* 获取session
*
* @return HttpSession
*/
public static HttpSession getSession() {
log.debug("getSession -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
HttpServletRequest request = null;
if (null == (request = getRequest())) {
return null;
}
return request.getSession();
}
/**
* 获取session的Attribute
*
* @param name session的key
* @return Object
*/
public static Object getSession(String name) {
log.debug("getSession -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
ServletRequestAttributes servletRequestAttributes = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes());
if (null == servletRequestAttributes) {
return null;
}
return servletRequestAttributes.getAttribute(name, RequestAttributes.SCOPE_SESSION);
}
/**
* 添加session
*
* @param name
* @param value
*/
public static void setSession(String name, Object value) {
log.debug("setSession -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
ServletRequestAttributes servletRequestAttributes = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes());
if (null == servletRequestAttributes) {
return;
}
servletRequestAttributes.setAttribute(name, value, RequestAttributes.SCOPE_SESSION);
}
/**
 * Removes the named session-scoped attribute from the current request.
 * Silently does nothing when no request attributes are bound to the thread.
 *
 * @param name session attribute key to remove
 */
public static void removeSession(String name) {
    log.debug("removeSession -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
    ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
    if (attributes == null) {
        return;
    }
    attributes.removeAttribute(name, RequestAttributes.SCOPE_SESSION);
}
/**
 * Lists the attribute names stored in the current session scope.
 *
 * @return the session attribute names, or null when no request attributes
 *         are bound to the calling thread
 */
public static String[] getSessionKeys() {
    log.debug("getSessionKeys -- Thread id :{}, name : {}", Thread.currentThread().getId(), Thread.currentThread().getName());
    ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
    return attributes == null ? null : attributes.getAttributeNames(RequestAttributes.SCOPE_SESSION);
}
}
| {
"pile_set_name": "Github"
} |
//
// CATBudgetTitleView.h
// CatAccounting
//
// Created by ran on 2017/10/24.
// Copyright © 2017年 ran. All rights reserved.
//
#import <UIKit/UIKit.h>
// Fixed height of the budget title view, in points.
#define kBudgetTitleHeight 32
// Title bar view for the budget screen. Exposes its label so the owner can
// set the title text and styling directly.
@interface CATBudgetTitleView : UIView
// Label that renders the title; configured by the owning controller.
@property (nonatomic, strong) UILabel *titleLabel;
@end
| {
"pile_set_name": "Github"
} |
aaa => aaaa
bbb => bbbb1 bbbb2
ccc => cccc1,cccc2
a\=>a => b\=>b
a\,a => b\,b
fooaaa,baraaa,bazaaa
# Some synonym groups specific to this example
GB,gib,gigabyte,gigabytes
MB,mib,megabyte,megabytes
Television, Televisions, TV, TVs
#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
#after us won't split it into two words.
# Synonym mappings can be used for spelling correction too
pixima => pixma
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
// Generated (class-dump) declaration: an XML-backed font description.
// NOTE(review): method semantics below are inferred from names only —
// implementation is not visible here.
__attribute__((visibility("hidden")))
@interface WXFont : NSObject
{
}
// Presumably maps XML attribute strings to internal enum values — confirm
// against the implementation.
+ (id)fontPitchEnumMap;
+ (id)isoCharacterSetEnumMap;
+ (id)characterSetEnumMap;
+ (id)fontFamilyEnumMap;
// Reads data from the given libxml node into `arg2`, using `arg3` as parse state.
+ (void)readFrom:(struct _xmlNode *)arg1 to:(id)arg2 state:(id)arg3;
@end
| {
"pile_set_name": "Github"
} |
use crate::i18n::i18n;
use crate::appop::AppOp;
use crate::backend::BKCommand;
impl AppOp {
    /// Shows or hides the in-app "syncing" notification.
    pub fn initial_sync(&self, show: bool) {
        if show {
            self.inapp_notify(&i18n("Syncing, this could take a while"));
        } else {
            self.hide_inapp_notify();
        }
    }

    /// Sends a sync request to the backend unless one is already in flight
    /// or the user is not logged in. Sets `self.syncing` while in flight.
    pub fn sync(&mut self, initial: bool) {
        if !self.syncing && self.logged_in {
            self.syncing = true;
            // for the initial sync we set the since to None to avoid long syncing
            // the since can be a very old value and following the spec we should
            // do the initial sync without a since:
            // https://matrix.org/docs/spec/client_server/latest.html#syncing
            let since = if initial { None } else { self.since.clone() };
            self.backend.send(BKCommand::Sync(since, initial)).unwrap();
        }
    }

    /// Handles a completed sync: stores the new `since` token, immediately
    /// starts the next sync, and hides the "syncing" notification.
    pub fn synced(&mut self, since: Option<String>) {
        self.syncing = false;
        self.since = since;
        self.sync(false);
        self.initial_sync(false);
    }

    /// Handles a failed sync: clears the in-flight flag and retries.
    pub fn sync_error(&mut self) {
        self.syncing = false;
        self.sync(false);
    }
}
| {
"pile_set_name": "Github"
} |
package google
import (
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
containerBeta "google.golang.org/api/container/v1beta1"
)
// Matches gke-default scope from https://cloud.google.com/sdk/gcloud/reference/container/clusters/create
// Used by expandNodeConfig as the OAuth scopes when the config omits them.
var defaultOauthScopes = []string{
	"https://www.googleapis.com/auth/devstorage.read_only",
	"https://www.googleapis.com/auth/logging.write",
	"https://www.googleapis.com/auth/monitoring",
	"https://www.googleapis.com/auth/service.management.readonly",
	"https://www.googleapis.com/auth/servicecontrol",
	"https://www.googleapis.com/auth/trace.append",
}
// schemaNodeConfig returns the shared "node_config" schema used by the GKE
// cluster/node-pool resources. Every field is ForceNew: node configuration
// cannot be updated in place.
func schemaNodeConfig() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		Computed: true,
		ForceNew: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"disk_size_gb": {
					Type:         schema.TypeInt,
					Optional:     true,
					Computed:     true,
					ForceNew:     true,
					ValidateFunc: validation.IntAtLeast(10),
				},
				"disk_type": {
					Type:         schema.TypeString,
					Optional:     true,
					Computed:     true,
					ForceNew:     true,
					ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd"}, false),
				},
				"guest_accelerator": {
					Type:     schema.TypeList,
					Optional: true,
					Computed: true,
					ForceNew: true,
					// Legacy config mode allows removing GPU's from an existing resource
					// See https://www.terraform.io/docs/configuration/attr-as-blocks.html
					ConfigMode: schema.SchemaConfigModeAttr,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"count": {
								Type:     schema.TypeInt,
								Required: true,
								ForceNew: true,
							},
							"type": {
								Type:             schema.TypeString,
								Required:         true,
								ForceNew:         true,
								DiffSuppressFunc: compareSelfLinkOrResourceName,
							},
						},
					},
				},
				"image_type": {
					Type:     schema.TypeString,
					Optional: true,
					Computed: true,
				},
				"labels": {
					Type:     schema.TypeMap,
					Optional: true,
					// Computed=true because GKE Sandbox will automatically add labels to nodes that can/cannot run sandboxed pods.
					Computed: true,
					ForceNew: true,
					Elem:     &schema.Schema{Type: schema.TypeString},
				},
				"local_ssd_count": {
					Type:         schema.TypeInt,
					Optional:     true,
					Computed:     true,
					ForceNew:     true,
					ValidateFunc: validation.IntAtLeast(0),
				},
				"machine_type": {
					Type:     schema.TypeString,
					Optional: true,
					Computed: true,
					ForceNew: true,
				},
				"metadata": {
					Type:     schema.TypeMap,
					Optional: true,
					Computed: true,
					ForceNew: true,
					Elem:     &schema.Schema{Type: schema.TypeString},
				},
				"min_cpu_platform": {
					Type:     schema.TypeString,
					Optional: true,
					ForceNew: true,
				},
				"oauth_scopes": {
					Type:     schema.TypeSet,
					Optional: true,
					Computed: true,
					ForceNew: true,
					Elem: &schema.Schema{
						Type: schema.TypeString,
						// Normalize scope aliases to full URLs before storing in state.
						StateFunc: func(v interface{}) string {
							return canonicalizeServiceScope(v.(string))
						},
					},
					DiffSuppressFunc: containerClusterAddedScopesSuppress,
					Set:              stringScopeHashcode,
				},
				"preemptible": {
					Type:     schema.TypeBool,
					Optional: true,
					ForceNew: true,
					Default:  false,
				},
				"service_account": {
					Type:     schema.TypeString,
					Optional: true,
					Computed: true,
					ForceNew: true,
				},
				"tags": {
					Type:     schema.TypeList,
					Optional: true,
					ForceNew: true,
					Elem:     &schema.Schema{Type: schema.TypeString},
				},
				"shielded_instance_config": {
					Type:     schema.TypeList,
					Optional: true,
					Computed: true,
					ForceNew: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"enable_secure_boot": {
								Type:     schema.TypeBool,
								Optional: true,
								ForceNew: true,
								Default:  false,
							},
							"enable_integrity_monitoring": {
								Type:     schema.TypeBool,
								Optional: true,
								ForceNew: true,
								Default:  true,
							},
						},
					},
				},
				"taint": {
					Type:     schema.TypeList,
					Optional: true,
					// Computed=true because GKE Sandbox will automatically add taints to nodes that can/cannot run sandboxed pods.
					Computed: true,
					ForceNew: true,
					// Legacy config mode allows explicitly defining an empty taint.
					// See https://www.terraform.io/docs/configuration/attr-as-blocks.html
					ConfigMode: schema.SchemaConfigModeAttr,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"key": {
								Type:     schema.TypeString,
								Required: true,
								ForceNew: true,
							},
							"value": {
								Type:     schema.TypeString,
								Required: true,
								ForceNew: true,
							},
							"effect": {
								Type:         schema.TypeString,
								Required:     true,
								ForceNew:     true,
								ValidateFunc: validation.StringInSlice([]string{"NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"}, false),
							},
						},
					},
				},
				// Beta-only field: present in the schema so state stays readable,
				// but rejected in the GA provider.
				"workload_metadata_config": {
					Removed:  "This field is in beta. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/guides/provider_versions.html for more details.",
					Computed: true,
					Type:     schema.TypeList,
					Optional: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"node_metadata": {
								Type:         schema.TypeString,
								Required:     true,
								ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "SECURE", "EXPOSE", "GKE_METADATA_SERVER"}, false),
							},
						},
					},
				},
				// Beta-only field, same treatment as workload_metadata_config.
				"sandbox_config": {
					Removed:  "This field is in beta. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/guides/provider_versions.html for more details.",
					Computed: true,
					Type:     schema.TypeList,
					Optional: true,
					ForceNew: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"sandbox_type": {
								Type:         schema.TypeString,
								Required:     true,
								ValidateFunc: validation.StringInSlice([]string{"gvisor"}, false),
							},
						},
					},
				},
			},
		},
	}
}
// nodeConfigStringMap converts a schema map value (map[string]interface{}
// with string values) into the map[string]string the container API expects.
func nodeConfigStringMap(v interface{}) map[string]string {
	m := make(map[string]string)
	for k, val := range v.(map[string]interface{}) {
		m[k] = val.(string)
	}
	return m
}

// expandNodeConfig builds a containerBeta.NodeConfig from the Terraform
// representation of "node_config" (a zero- or one-element list of maps).
// An empty list yields a config carrying only the default OAuth scopes.
func expandNodeConfig(v interface{}) *containerBeta.NodeConfig {
	nodeConfigs := v.([]interface{})
	nc := &containerBeta.NodeConfig{
		// Defaults can't be set on a list/set in the schema, so set the default on create here.
		OauthScopes: defaultOauthScopes,
	}
	if len(nodeConfigs) == 0 {
		return nc
	}

	nodeConfig := nodeConfigs[0].(map[string]interface{})

	if v, ok := nodeConfig["machine_type"]; ok {
		nc.MachineType = v.(string)
	}

	if v, ok := nodeConfig["guest_accelerator"]; ok {
		accels := v.([]interface{})
		guestAccelerators := make([]*containerBeta.AcceleratorConfig, 0, len(accels))
		for _, raw := range accels {
			data := raw.(map[string]interface{})
			// A zero count means the accelerator entry is effectively absent.
			if data["count"].(int) == 0 {
				continue
			}
			guestAccelerators = append(guestAccelerators, &containerBeta.AcceleratorConfig{
				AcceleratorCount: int64(data["count"].(int)),
				AcceleratorType:  data["type"].(string),
			})
		}
		nc.Accelerators = guestAccelerators
	}

	if v, ok := nodeConfig["disk_size_gb"]; ok {
		nc.DiskSizeGb = int64(v.(int))
	}

	if v, ok := nodeConfig["disk_type"]; ok {
		nc.DiskType = v.(string)
	}

	if v, ok := nodeConfig["local_ssd_count"]; ok {
		nc.LocalSsdCount = int64(v.(int))
	}

	if scopes, ok := nodeConfig["oauth_scopes"]; ok {
		scopesSet := scopes.(*schema.Set)
		scopes := make([]string, scopesSet.Len())
		for i, scope := range scopesSet.List() {
			scopes[i] = canonicalizeServiceScope(scope.(string))
		}
		nc.OauthScopes = scopes
	}

	if v, ok := nodeConfig["service_account"]; ok {
		nc.ServiceAccount = v.(string)
	}

	if v, ok := nodeConfig["metadata"]; ok {
		nc.Metadata = nodeConfigStringMap(v)
	}

	if v, ok := nodeConfig["image_type"]; ok {
		nc.ImageType = v.(string)
	}

	if v, ok := nodeConfig["labels"]; ok {
		nc.Labels = nodeConfigStringMap(v)
	}

	if v, ok := nodeConfig["tags"]; ok {
		tagsList := v.([]interface{})
		tags := []string{}
		for _, v := range tagsList {
			// Skip nil entries rather than panicking on the string assertion.
			if v != nil {
				tags = append(tags, v.(string))
			}
		}
		nc.Tags = tags
	}

	if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
		conf := v.([]interface{})[0].(map[string]interface{})
		nc.ShieldedInstanceConfig = &containerBeta.ShieldedInstanceConfig{
			EnableSecureBoot:          conf["enable_secure_boot"].(bool),
			EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool),
		}
	}

	// Preemptible is Optional+Default, so it always has a value.
	nc.Preemptible = nodeConfig["preemptible"].(bool)

	if v, ok := nodeConfig["min_cpu_platform"]; ok {
		nc.MinCpuPlatform = v.(string)
	}

	if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 {
		taints := v.([]interface{})
		nodeTaints := make([]*containerBeta.NodeTaint, 0, len(taints))
		for _, raw := range taints {
			data := raw.(map[string]interface{})
			taint := &containerBeta.NodeTaint{
				Key:    data["key"].(string),
				Value:  data["value"].(string),
				Effect: data["effect"].(string),
			}
			nodeTaints = append(nodeTaints, taint)
		}
		nc.Taints = nodeTaints
	}

	return nc
}
// flattenNodeConfig converts a containerBeta.NodeConfig into the
// zero-or-one-element list form used by the Terraform schema.
func flattenNodeConfig(c *containerBeta.NodeConfig) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, 1)
	if c == nil {
		return result
	}

	entry := map[string]interface{}{
		"machine_type":             c.MachineType,
		"disk_size_gb":             c.DiskSizeGb,
		"disk_type":                c.DiskType,
		"guest_accelerator":        flattenContainerGuestAccelerators(c.Accelerators),
		"local_ssd_count":          c.LocalSsdCount,
		"service_account":          c.ServiceAccount,
		"metadata":                 c.Metadata,
		"image_type":               c.ImageType,
		"labels":                   c.Labels,
		"tags":                     c.Tags,
		"preemptible":              c.Preemptible,
		"min_cpu_platform":         c.MinCpuPlatform,
		"shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),
		"taint":                    flattenTaints(c.Taints),
	}

	// Only expose oauth_scopes when the API returned some.
	if len(c.OauthScopes) > 0 {
		entry["oauth_scopes"] = schema.NewSet(stringScopeHashcode, convertStringArrToInterface(c.OauthScopes))
	}

	return append(result, entry)
}
// flattenContainerGuestAccelerators converts accelerator configs into the
// list-of-maps form used by the Terraform schema.
func flattenContainerGuestAccelerators(c []*containerBeta.AcceleratorConfig) []map[string]interface{} {
	accelerators := make([]map[string]interface{}, 0, len(c))
	for _, config := range c {
		accelerators = append(accelerators, map[string]interface{}{
			"count": config.AcceleratorCount,
			"type":  config.AcceleratorType,
		})
	}
	return accelerators
}
// flattenShieldedInstanceConfig converts a shielded-instance config into the
// zero-or-one-element list form used by the Terraform schema.
func flattenShieldedInstanceConfig(c *containerBeta.ShieldedInstanceConfig) []map[string]interface{} {
	if c == nil {
		return []map[string]interface{}{}
	}
	return []map[string]interface{}{
		{
			"enable_secure_boot":          c.EnableSecureBoot,
			"enable_integrity_monitoring": c.EnableIntegrityMonitoring,
		},
	}
}
// flattenTaints converts node taints into the list-of-maps form used by the
// Terraform schema.
func flattenTaints(c []*containerBeta.NodeTaint) []map[string]interface{} {
	taints := make([]map[string]interface{}, 0, len(c))
	for _, t := range c {
		taints = append(taints, map[string]interface{}{
			"key":    t.Key,
			"value":  t.Value,
			"effect": t.Effect,
		})
	}
	return taints
}
| {
"pile_set_name": "Github"
} |
- fix issues when using `ImageManipulator` and `ImagePicker` on SDKs lower than 32
- fix `Location.watchPositionAsync` never resolving on SDK 32
| {
"pile_set_name": "Github"
} |
{
"_args": [
[
"depd@~1.1.2",
"/home/gelo/projects/dnsFookup/FE/my-app/node_modules/express"
]
],
"_from": "depd@>=1.1.2 <1.2.0",
"_id": "[email protected]",
"_inCache": true,
"_installable": true,
"_location": "/depd",
"_nodeVersion": "6.11.1",
"_npmOperationalInternal": {
"host": "s3://npm-registry-packages",
"tmp": "tmp/depd-1.1.2.tgz_1515736023686_0.5012104702182114"
},
"_npmUser": {
"email": "[email protected]",
"name": "dougwilson"
},
"_npmVersion": "3.10.10",
"_phantomChildren": {},
"_requested": {
"name": "depd",
"raw": "depd@~1.1.2",
"rawSpec": "~1.1.2",
"scope": null,
"spec": ">=1.1.2 <1.2.0",
"type": "range"
},
"_requiredBy": [
"/body-parser",
"/express",
"/http-errors",
"/send",
"/serve-index/http-errors"
],
"_resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
"_shasum": "9bcd52e14c097763e749b274c4346ed2e560b5a9",
"_shrinkwrap": null,
"_spec": "depd@~1.1.2",
"_where": "/home/gelo/projects/dnsFookup/FE/my-app/node_modules/express",
"author": {
"email": "[email protected]",
"name": "Douglas Christopher Wilson"
},
"browser": "lib/browser/index.js",
"bugs": {
"url": "https://github.com/dougwilson/nodejs-depd/issues"
},
"dependencies": {},
"description": "Deprecate all the things",
"devDependencies": {
"beautify-benchmark": "0.2.4",
"benchmark": "2.1.4",
"eslint": "3.19.0",
"eslint-config-standard": "7.1.0",
"eslint-plugin-markdown": "1.0.0-beta.7",
"eslint-plugin-promise": "3.6.0",
"eslint-plugin-standard": "3.0.1",
"istanbul": "0.4.5",
"mocha": "~1.21.5"
},
"directories": {},
"dist": {
"shasum": "9bcd52e14c097763e749b274c4346ed2e560b5a9",
"tarball": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz"
},
"engines": {
"node": ">= 0.6"
},
"files": [
"History.md",
"LICENSE",
"Readme.md",
"index.js",
"lib/"
],
"gitHead": "9a789740084d4f07a3a611432435ae4671f722ff",
"homepage": "https://github.com/dougwilson/nodejs-depd#readme",
"keywords": [
"deprecate",
"deprecated"
],
"license": "MIT",
"maintainers": [
{
"name": "dougwilson",
"email": "[email protected]"
}
],
"name": "depd",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "git+https://github.com/dougwilson/nodejs-depd.git"
},
"scripts": {
"bench": "node benchmark/index.js",
"lint": "eslint --plugin markdown --ext js,md .",
"test": "mocha --reporter spec --bail test/",
"test-ci": "istanbul cover node_modules/mocha/bin/_mocha --report lcovonly -- --reporter spec --no-exit test/",
"test-cov": "istanbul cover node_modules/mocha/bin/_mocha -- --reporter dot test/"
},
"version": "1.1.2"
}
| {
"pile_set_name": "Github"
} |
import { ObjectId } from 'mongodb';
import { Entity, ManyToOne, OneToOne, PrimaryKey, Property, SerializedPrimaryKey } from '@mikro-orm/core';
import FooBar from './FooBar';
import { Book } from './Book';
// MongoDB-backed MikroORM entity. Holds the inverse side of a 1:1 with
// FooBar and an eagerly-loaded many-to-one reference to Book.
@Entity()
export class FooBaz {

  // Native MongoDB ObjectId primary key.
  @PrimaryKey()
  _id!: ObjectId;

  // String form of `_id`, exposed when the entity is serialized.
  @SerializedPrimaryKey()
  id!: string;

  @Property()
  name!: string;

  // Inverse side of the 1:1 — the owning side is FooBar.baz. Eagerly loaded.
  @OneToOne(() => FooBar, bar => bar.baz, { eager: true })
  bar!: FooBar;

  // Eagerly loaded many-to-one reference.
  @ManyToOne(() => Book, { eager: true })
  book!: Book;
}
| {
"pile_set_name": "Github"
} |
import sys
import unittest
from test import test_support
from UserList import UserList
# We do a bit of trickery here to be able to test both the C implementation
# and the Python implementation of the module.
# Make it impossible to import the C implementation anymore.
sys.modules['_bisect'] = 0
# We must also handle the case that bisect was imported before.
if 'bisect' in sys.modules:
del sys.modules['bisect']
# Now we can import the module and get the pure Python implementation.
import bisect as py_bisect
# Restore everything to normal.
del sys.modules['_bisect']
del sys.modules['bisect']
# This is now the module with the C implementation.
import bisect as c_bisect
class TestBisect(unittest.TestCase):
    """Shared bisect_left/bisect_right tests.

    `module` is set by the concrete subclasses below to either the pure
    Python implementation (py_bisect) or the C implementation (c_bisect).
    """

    module = None

    def setUp(self):
        # Each entry is (function, sorted data, needle, expected insertion point).
        self.precomputedCases = [
            (self.module.bisect_right, [], 1, 0),
            (self.module.bisect_right, [1], 0, 0),
            (self.module.bisect_right, [1], 1, 1),
            (self.module.bisect_right, [1], 2, 1),
            (self.module.bisect_right, [1, 1], 0, 0),
            (self.module.bisect_right, [1, 1], 1, 2),
            (self.module.bisect_right, [1, 1], 2, 2),
            (self.module.bisect_right, [1, 1, 1], 0, 0),
            (self.module.bisect_right, [1, 1, 1], 1, 3),
            (self.module.bisect_right, [1, 1, 1], 2, 3),
            (self.module.bisect_right, [1, 1, 1, 1], 0, 0),
            (self.module.bisect_right, [1, 1, 1, 1], 1, 4),
            (self.module.bisect_right, [1, 1, 1, 1], 2, 4),
            (self.module.bisect_right, [1, 2], 0, 0),
            (self.module.bisect_right, [1, 2], 1, 1),
            (self.module.bisect_right, [1, 2], 1.5, 1),
            (self.module.bisect_right, [1, 2], 2, 2),
            (self.module.bisect_right, [1, 2], 3, 2),
            (self.module.bisect_right, [1, 1, 2, 2], 0, 0),
            (self.module.bisect_right, [1, 1, 2, 2], 1, 2),
            (self.module.bisect_right, [1, 1, 2, 2], 1.5, 2),
            (self.module.bisect_right, [1, 1, 2, 2], 2, 4),
            (self.module.bisect_right, [1, 1, 2, 2], 3, 4),
            (self.module.bisect_right, [1, 2, 3], 0, 0),
            (self.module.bisect_right, [1, 2, 3], 1, 1),
            (self.module.bisect_right, [1, 2, 3], 1.5, 1),
            (self.module.bisect_right, [1, 2, 3], 2, 2),
            (self.module.bisect_right, [1, 2, 3], 2.5, 2),
            (self.module.bisect_right, [1, 2, 3], 3, 3),
            (self.module.bisect_right, [1, 2, 3], 4, 3),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 1),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 3),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 6),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 10),
            (self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10),
            (self.module.bisect_left, [], 1, 0),
            (self.module.bisect_left, [1], 0, 0),
            (self.module.bisect_left, [1], 1, 0),
            (self.module.bisect_left, [1], 2, 1),
            (self.module.bisect_left, [1, 1], 0, 0),
            (self.module.bisect_left, [1, 1], 1, 0),
            (self.module.bisect_left, [1, 1], 2, 2),
            (self.module.bisect_left, [1, 1, 1], 0, 0),
            (self.module.bisect_left, [1, 1, 1], 1, 0),
            (self.module.bisect_left, [1, 1, 1], 2, 3),
            (self.module.bisect_left, [1, 1, 1, 1], 0, 0),
            (self.module.bisect_left, [1, 1, 1, 1], 1, 0),
            (self.module.bisect_left, [1, 1, 1, 1], 2, 4),
            (self.module.bisect_left, [1, 2], 0, 0),
            (self.module.bisect_left, [1, 2], 1, 0),
            (self.module.bisect_left, [1, 2], 1.5, 1),
            (self.module.bisect_left, [1, 2], 2, 1),
            (self.module.bisect_left, [1, 2], 3, 2),
            (self.module.bisect_left, [1, 1, 2, 2], 0, 0),
            (self.module.bisect_left, [1, 1, 2, 2], 1, 0),
            (self.module.bisect_left, [1, 1, 2, 2], 1.5, 2),
            (self.module.bisect_left, [1, 1, 2, 2], 2, 2),
            (self.module.bisect_left, [1, 1, 2, 2], 3, 4),
            (self.module.bisect_left, [1, 2, 3], 0, 0),
            (self.module.bisect_left, [1, 2, 3], 1, 0),
            (self.module.bisect_left, [1, 2, 3], 1.5, 1),
            (self.module.bisect_left, [1, 2, 3], 2, 1),
            (self.module.bisect_left, [1, 2, 3], 2.5, 2),
            (self.module.bisect_left, [1, 2, 3], 3, 2),
            (self.module.bisect_left, [1, 2, 3], 4, 3),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 0),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 1),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 3),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 6),
            (self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
        ]

    def test_precomputed(self):
        # Exercise both plain lists and UserList to cover sequence subclasses.
        for func, data, elem, expected in self.precomputedCases:
            self.assertEqual(func(data, elem), expected)
            self.assertEqual(func(UserList(data), elem), expected)

    def test_negative_lo(self):
        # Issue 3301: a negative lo must raise ValueError, not wrap around.
        mod = self.module
        self.assertRaises(ValueError, mod.bisect_left, [1, 2, 3], 5, -1, 3),
        self.assertRaises(ValueError, mod.bisect_right, [1, 2, 3], 5, -1, 3),
        self.assertRaises(ValueError, mod.insort_left, [1, 2, 3], 5, -1, 3),
        self.assertRaises(ValueError, mod.insort_right, [1, 2, 3], 5, -1, 3),

    def test_random(self, n=25):
        """Cross-check both bisect variants against the ordering invariants
        on random sorted data (duplicates are likely due to step 2)."""
        from random import randrange
        for i in xrange(n):
            data = [randrange(0, n, 2) for j in xrange(i)]
            data.sort()
            elem = randrange(-1, n+1)
            ip = self.module.bisect_left(data, elem)
            if ip < len(data):
                self.failUnless(elem <= data[ip])
            if ip > 0:
                self.failUnless(data[ip-1] < elem)
            ip = self.module.bisect_right(data, elem)
            if ip < len(data):
                self.failUnless(elem < data[ip])
            if ip > 0:
                self.failUnless(data[ip-1] <= elem)

    def test_optionalSlicing(self):
        # The result with lo/hi bounds must be the unbounded result clamped
        # into [lo, hi].
        for func, data, elem, expected in self.precomputedCases:
            for lo in xrange(4):
                lo = min(len(data), lo)
                for hi in xrange(3,8):
                    hi = min(len(data), hi)
                    ip = func(data, elem, lo, hi)
                    self.failUnless(lo <= ip <= hi)
                    if func is self.module.bisect_left and ip < hi:
                        self.failUnless(elem <= data[ip])
                    if func is self.module.bisect_left and ip > lo:
                        self.failUnless(data[ip-1] < elem)
                    if func is self.module.bisect_right and ip < hi:
                        self.failUnless(elem < data[ip])
                    if func is self.module.bisect_right and ip > lo:
                        self.failUnless(data[ip-1] <= elem)
                    self.assertEqual(ip, max(lo, min(hi, expected)))

    def test_backcompatibility(self):
        # bisect must remain an alias of bisect_right.
        self.assertEqual(self.module.bisect, self.module.bisect_right)

    def test_keyword_args(self):
        # All four entry points accept a, x, lo, hi as keywords.
        data = [10, 20, 30, 40, 50]
        self.assertEqual(self.module.bisect_left(a=data, x=25, lo=1, hi=3), 2)
        self.assertEqual(self.module.bisect_right(a=data, x=25, lo=1, hi=3), 2)
        self.assertEqual(self.module.bisect(a=data, x=25, lo=1, hi=3), 2)
        self.module.insort_left(a=data, x=25, lo=1, hi=3)
        self.module.insort_right(a=data, x=25, lo=1, hi=3)
        self.module.insort(a=data, x=25, lo=1, hi=3)
        self.assertEqual(data, [10, 20, 25, 25, 25, 30, 40, 50])
class TestBisectPython(TestBisect):
    # Run the shared tests against the pure Python implementation.
    module = py_bisect

class TestBisectC(TestBisect):
    # Run the shared tests against the C implementation.
    module = c_bisect
#==============================================================================
class TestInsort(unittest.TestCase):
    """Shared insort_left/insort_right tests; `module` is set by subclasses."""

    module = None

    def test_vsBuiltinSort(self, n=500):
        # Inserting n random digits one at a time must yield a sorted list.
        from random import choice
        for insorted in (list(), UserList()):
            for i in xrange(n):
                digit = choice("0123456789")
                # Alternate by digit parity so both insort variants are
                # exercised in a single run.
                if digit in "02468":
                    f = self.module.insort_left
                else:
                    f = self.module.insort_right
                f(insorted, digit)
            self.assertEqual(sorted(insorted), insorted)

    def test_backcompatibility(self):
        # insort must remain an alias of insort_right.
        self.assertEqual(self.module.insort, self.module.insort_right)

    def test_listDerived(self):
        # insort must call the (possibly overridden) insert() of list
        # subclasses rather than inserting directly.
        class List(list):
            data = []
            def insert(self, index, item):
                self.data.insert(index, item)

        lst = List()
        self.module.insort_left(lst, 10)
        self.module.insort_right(lst, 5)
        self.assertEqual([5, 10], lst.data)
class TestInsortPython(TestInsort):
    # Run the shared tests against the pure Python implementation.
    module = py_bisect

class TestInsortC(TestInsort):
    # Run the shared tests against the C implementation.
    module = c_bisect
#==============================================================================
class LenOnly:
    "Dummy sequence class defining __len__ but not __getitem__."
    def __len__(self):
        return 10

class GetOnly:
    "Dummy sequence class defining __getitem__ but not __len__."
    def __getitem__(self, ndx):
        return 10

class CmpErr:
    "Dummy element that always raises an error during comparison"
    # __cmp__ is the Python 2 comparison hook; bisect must propagate this
    # ZeroDivisionError rather than swallow it.
    def __cmp__(self, other):
        raise ZeroDivisionError
class TestErrorHandling(unittest.TestCase):
    """Error-path tests shared by both implementations via `module`."""

    module = None

    def test_non_sequence(self):
        # Non-sequence first argument must raise TypeError.
        for f in (self.module.bisect_left, self.module.bisect_right,
                  self.module.insort_left, self.module.insort_right):
            self.assertRaises(TypeError, f, 10, 10)

    def test_len_only(self):
        # A sequence without __getitem__ cannot be bisected.
        for f in (self.module.bisect_left, self.module.bisect_right,
                  self.module.insort_left, self.module.insort_right):
            self.assertRaises(AttributeError, f, LenOnly(), 10)

    def test_get_only(self):
        # A sequence without __len__ cannot be bisected.
        for f in (self.module.bisect_left, self.module.bisect_right,
                  self.module.insort_left, self.module.insort_right):
            self.assertRaises(AttributeError, f, GetOnly(), 10)

    def test_cmp_err(self):
        # Errors raised during element comparison must propagate unchanged.
        seq = [CmpErr(), CmpErr(), CmpErr()]
        for f in (self.module.bisect_left, self.module.bisect_right,
                  self.module.insort_left, self.module.insort_right):
            self.assertRaises(ZeroDivisionError, f, seq, 10)

    def test_arg_parsing(self):
        # Calling with a single positional argument must raise TypeError.
        for f in (self.module.bisect_left, self.module.bisect_right,
                  self.module.insort_left, self.module.insort_right):
            self.assertRaises(TypeError, f, 10)
class TestErrorHandlingPython(TestErrorHandling):
    # Run the shared tests against the pure Python implementation.
    module = py_bisect

class TestErrorHandlingC(TestErrorHandling):
    # Run the shared tests against the C implementation.
    module = c_bisect
#==============================================================================
libreftest = """
Example from the Library Reference: Doc/library/bisect.rst
The bisect() function is generally useful for categorizing numeric data.
This example uses bisect() to look up a letter grade for an exam total
(say) based on a set of ordered numeric breakpoints: 85 and up is an `A',
75..84 is a `B', etc.
>>> grades = "FEDCBA"
>>> breakpoints = [30, 44, 66, 75, 85]
>>> from bisect import bisect
>>> def grade(total):
... return grades[bisect(breakpoints, total)]
...
>>> grade(66)
'C'
>>> map(grade, [33, 99, 77, 44, 12, 88])
['E', 'A', 'B', 'D', 'F', 'A']
"""
#------------------------------------------------------------------------------
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
    """Run the unit tests and doctests for both bisect implementations."""
    from test import test_bisect
    test_classes = [TestBisectPython, TestBisectC,
                    TestInsortPython, TestInsortC,
                    TestErrorHandlingPython, TestErrorHandlingC]

    test_support.run_unittest(*test_classes)
    test_support.run_doctest(test_bisect, verbose)

    # verify reference counting
    # (only meaningful on debug builds, which provide sys.gettotalrefcount)
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
if __name__ == "__main__":
test_main(verbose=True)
| {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////////////////////////
// OpenGL Mathematics Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net)
///////////////////////////////////////////////////////////////////////////////////////////////////
// Created : 2011-01-15
// Updated : 2011-09-13
// Licence : This source is under MIT licence
// File : test/core/func_common.cpp
///////////////////////////////////////////////////////////////////////////////////////////////////
//#include <boost/array.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>
//#include <boost/thread/thread.hpp>
#include <glm/gtc/constants.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtx/vec1.hpp>
#include <cmath>
#include <cstdio>
#include <cstring>
// Checks glm::modf: splits a value into integral part (out parameter) and
// fractional part (return value), for scalars and vectors, float and double.
// Returns the number of failed checks (0 = success).
int test_modf()
{
	int Error(0);

	{
		float X(1.5f);
		float I(0.0f);
		float A = glm::modf(X, I);

		// Exact float comparison is intentional: 1.5 and 0.5 are exactly
		// representable in binary floating point.
		Error += I == 1.0f ? 0 : 1;
		Error += A == 0.5f ? 0 : 1;
	}

	{
		glm::vec4 X(1.1f, 1.2f, 1.5f, 1.7f);
		glm::vec4 I(0.0f);
		glm::vec4 A = glm::modf(X, I);

		Error += I == glm::vec4(1.0f) ? 0 : 1;
		// Fractions like 0.1 are not exactly representable, so compare with
		// an epsilon here.
		Error += glm::all(glm::epsilonEqual(A, glm::vec4(0.1f, 0.2f, 0.5f, 0.7f), 0.00001f)) ? 0 : 1;
	}

	{
		glm::dvec4 X(1.1, 1.2, 1.5, 1.7);
		glm::dvec4 I(0.0);
		glm::dvec4 A = glm::modf(X, I);

		Error += I == glm::dvec4(1.0) ? 0 : 1;
		Error += glm::all(glm::epsilonEqual(A, glm::dvec4(0.1, 0.2, 0.5, 0.7), 0.000000001)) ? 0 : 1;
	}

	{
		double X(1.5);
		double I(0.0);
		double A = glm::modf(X, I);

		Error += I == 1.0 ? 0 : 1;
		Error += A == 0.5 ? 0 : 1;
	}

	return Error;
}
int test_floatBitsToInt()
{
int Error = 0;
{
float A = 1.0f;
int B = glm::floatBitsToInt(A);
float C = glm::intBitsToFloat(B);
int D = *(int*)&A;
Error += B == D ? 0 : 1;
Error += A == C ? 0 : 1;
}
{
glm::vec2 A(1.0f, 2.0f);
glm::ivec2 B = glm::floatBitsToInt(A);
glm::vec2 C = glm::intBitsToFloat(B);
Error += B.x == *(int*)&(A.x) ? 0 : 1;
Error += B.y == *(int*)&(A.y) ? 0 : 1;
Error += A == C? 0 : 1;
}
{
glm::vec3 A(1.0f, 2.0f, 3.0f);
glm::ivec3 B = glm::floatBitsToInt(A);
glm::vec3 C = glm::intBitsToFloat(B);
Error += B.x == *(int*)&(A.x) ? 0 : 1;
Error += B.y == *(int*)&(A.y) ? 0 : 1;
Error += B.z == *(int*)&(A.z) ? 0 : 1;
Error += A == C? 0 : 1;
}
{
glm::vec4 A(1.0f, 2.0f, 3.0f, 4.0f);
glm::ivec4 B = glm::floatBitsToInt(A);
glm::vec4 C = glm::intBitsToFloat(B);
Error += B.x == *(int*)&(A.x) ? 0 : 1;
Error += B.y == *(int*)&(A.y) ? 0 : 1;
Error += B.z == *(int*)&(A.z) ? 0 : 1;
Error += B.w == *(int*)&(A.w) ? 0 : 1;
Error += A == C? 0 : 1;
}
return Error;
}
int test_floatBitsToUint()
{
int Error = 0;
{
float A = 1.0f;
glm::uint B = glm::floatBitsToUint(A);
float C = glm::intBitsToFloat(B);
Error += B == *(glm::uint*)&A ? 0 : 1;
Error += A == C? 0 : 1;
}
{
glm::vec2 A(1.0f, 2.0f);
glm::uvec2 B = glm::floatBitsToUint(A);
glm::vec2 C = glm::uintBitsToFloat(B);
Error += B.x == *(glm::uint*)&(A.x) ? 0 : 1;
Error += B.y == *(glm::uint*)&(A.y) ? 0 : 1;
Error += A == C ? 0 : 1;
}
{
glm::vec3 A(1.0f, 2.0f, 3.0f);
glm::uvec3 B = glm::floatBitsToUint(A);
glm::vec3 C = glm::uintBitsToFloat(B);
Error += B.x == *(glm::uint*)&(A.x) ? 0 : 1;
Error += B.y == *(glm::uint*)&(A.y) ? 0 : 1;
Error += B.z == *(glm::uint*)&(A.z) ? 0 : 1;
Error += A == C? 0 : 1;
}
{
glm::vec4 A(1.0f, 2.0f, 3.0f, 4.0f);
glm::uvec4 B = glm::floatBitsToUint(A);
glm::vec4 C = glm::uintBitsToFloat(B);
Error += B.x == *(glm::uint*)&(A.x) ? 0 : 1;
Error += B.y == *(glm::uint*)&(A.y) ? 0 : 1;
Error += B.z == *(glm::uint*)&(A.z) ? 0 : 1;
Error += B.w == *(glm::uint*)&(A.w) ? 0 : 1;
Error += A == C? 0 : 1;
}
return Error;
}
// Checks that glm::min with a scalar second argument matches the
// vector/vector overload. Returns the number of failed checks (0 = success).
int test_min()
{
	int Error = 0;

	// A0 only checks that the vec1 overload compiles; its value is unused.
	glm::vec1 A0 = glm::min(glm::vec1(1), glm::vec1(1));

	glm::vec2 B0 = glm::min(glm::vec2(1), glm::vec2(1));
	glm::vec2 B1 = glm::min(glm::vec2(1), 1.0f);
	bool B2 = glm::all(glm::equal(B0, B1));
	Error += B2 ? 0 : 1;

	glm::vec3 C0 = glm::min(glm::vec3(1), glm::vec3(1));
	glm::vec3 C1 = glm::min(glm::vec3(1), 1.0f);
	bool C2 = glm::all(glm::equal(C0, C1));
	Error += C2 ? 0 : 1;

	glm::vec4 D0 = glm::min(glm::vec4(1), glm::vec4(1));
	glm::vec4 D1 = glm::min(glm::vec4(1), 1.0f);
	bool D2 = glm::all(glm::equal(D0, D1));
	Error += D2 ? 0 : 1;

	return Error;
}
// Checks that glm::max with a scalar second argument matches the
// vector/vector overload. Returns the number of failed checks (0 = success).
int test_max()
{
	int Error = 0;

	// A0 only checks that the vec1 overload compiles; its value is unused.
	glm::vec1 A0 = glm::max(glm::vec1(1), glm::vec1(1));

	glm::vec2 B0 = glm::max(glm::vec2(1), glm::vec2(1));
	glm::vec2 B1 = glm::max(glm::vec2(1), 1.0f);
	bool B2 = glm::all(glm::equal(B0, B1));
	Error += B2 ? 0 : 1;

	glm::vec3 C0 = glm::max(glm::vec3(1), glm::vec3(1));
	glm::vec3 C1 = glm::max(glm::vec3(1), 1.0f);
	bool C2 = glm::all(glm::equal(C0, C1));
	Error += C2 ? 0 : 1;

	glm::vec4 D0 = glm::max(glm::vec4(1), glm::vec4(1));
	glm::vec4 D1 = glm::max(glm::vec4(1), 1.0f);
	bool D2 = glm::all(glm::equal(D0, D1));
	Error += D2 ? 0 : 1;

	return Error;
}
// Placeholder for glm::clamp tests: no cases are implemented yet, so this
// always reports success (0).
int test_clamp()
{
	int Error = 0;

	return Error;
}
// Table-driven tests for glm::mix with scalar and vector inputs, using
// boolean, floating-point and boolean-vector interpolation factors.
namespace test_mix
{
	// One mix() case: glm::mix(x, y, a) is expected to equal Result.
	template <typename T, typename B>
	struct test
	{
		T x;      // first input value
		T y;      // second input value
		B a;      // interpolation factor / selector
		T Result; // expected output of glm::mix(x, y, a)
	};

	// float mixed by bool: false selects x, true selects y.
	test<float, bool> TestBool[] =
	{
		{0.0f, 1.0f, false, 0.0f},
		{0.0f, 1.0f, true, 1.0f},
		{-1.0f, 1.0f, false, -1.0f},
		{-1.0f, 1.0f, true, 1.0f}
	};

	// float mixed by float: linear interpolation between x and y.
	test<float, float> TestFloat[] =
	{
		{0.0f, 1.0f, 0.0f, 0.0f},
		{0.0f, 1.0f, 1.0f, 1.0f},
		{-1.0f, 1.0f, 0.0f, -1.0f},
		{-1.0f, 1.0f, 1.0f, 1.0f}
	};

	// vec2 mixed by a single bool applied to both components.
	test<glm::vec2, bool> TestVec2Bool[] =
	{
		{glm::vec2(0.0f), glm::vec2(1.0f), false, glm::vec2(0.0f)},
		{glm::vec2(0.0f), glm::vec2(1.0f), true, glm::vec2(1.0f)},
		{glm::vec2(-1.0f), glm::vec2(1.0f), false, glm::vec2(-1.0f)},
		{glm::vec2(-1.0f), glm::vec2(1.0f), true, glm::vec2(1.0f)}
	};

	// vec2 mixed component-wise by a bvec2 selector.
	test<glm::vec2, glm::bvec2> TestBVec2[] =
	{
		{glm::vec2(0.0f), glm::vec2(1.0f), glm::bvec2(false), glm::vec2(0.0f)},
		{glm::vec2(0.0f), glm::vec2(1.0f), glm::bvec2(true), glm::vec2(1.0f)},
		{glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(false), glm::vec2(-1.0f)},
		{glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(true), glm::vec2(1.0f)},
		{glm::vec2(-1.0f), glm::vec2(1.0f), glm::bvec2(true, false), glm::vec2(1.0f, -1.0f)}
	};

	// vec3 mixed by a single bool applied to all components.
	test<glm::vec3, bool> TestVec3Bool[] =
	{
		{glm::vec3(0.0f), glm::vec3(1.0f), false, glm::vec3(0.0f)},
		{glm::vec3(0.0f), glm::vec3(1.0f), true, glm::vec3(1.0f)},
		{glm::vec3(-1.0f), glm::vec3(1.0f), false, glm::vec3(-1.0f)},
		{glm::vec3(-1.0f), glm::vec3(1.0f), true, glm::vec3(1.0f)}
	};

	// vec3 mixed component-wise by a bvec3 selector.
	test<glm::vec3, glm::bvec3> TestBVec3[] =
	{
		{glm::vec3(0.0f), glm::vec3(1.0f), glm::bvec3(false), glm::vec3(0.0f)},
		{glm::vec3(0.0f), glm::vec3(1.0f), glm::bvec3(true), glm::vec3(1.0f)},
		{glm::vec3(-1.0f), glm::vec3(1.0f), glm::bvec3(false), glm::vec3(-1.0f)},
		{glm::vec3(-1.0f), glm::vec3(1.0f), glm::bvec3(true), glm::vec3(1.0f)},
		{glm::vec3(1.0f, 2.0f, 3.0f), glm::vec3(4.0f, 5.0f, 6.0f), glm::bvec3(true, false, true), glm::vec3(4.0f, 2.0f, 6.0f)}
	};

	// vec4 mixed by a single bool applied to all components.
	test<glm::vec4, bool> TestVec4Bool[] =
	{
		{glm::vec4(0.0f), glm::vec4(1.0f), false, glm::vec4(0.0f)},
		{glm::vec4(0.0f), glm::vec4(1.0f), true, glm::vec4(1.0f)},
		{glm::vec4(-1.0f), glm::vec4(1.0f), false, glm::vec4(-1.0f)},
		{glm::vec4(-1.0f), glm::vec4(1.0f), true, glm::vec4(1.0f)}
	};

	// vec4 mixed component-wise by a bvec4 selector.
	test<glm::vec4, glm::bvec4> TestBVec4[] =
	{
		{glm::vec4(0.0f), glm::vec4(1.0f), glm::bvec4(false), glm::vec4(0.0f)},
		{glm::vec4(0.0f), glm::vec4(1.0f), glm::bvec4(true), glm::vec4(1.0f)},
		{glm::vec4(-1.0f), glm::vec4(1.0f), glm::bvec4(false), glm::vec4(-1.0f)},
		{glm::vec4(-1.0f), glm::vec4(1.0f), glm::bvec4(true), glm::vec4(1.0f)},
		{glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(5.0f, 6.0f, 7.0f, 8.0f), glm::bvec4(true, false, true, false), glm::vec4(5.0f, 2.0f, 7.0f, 4.0f)}
	};

	// Runs every table above through glm::mix and counts mismatches.
	// Returns the number of failed checks (0 on success).
	int run()
	{
		int Error = 0;

		// Float with bool
		{
			for(std::size_t i = 0; i < sizeof(TestBool) / sizeof(test<float, bool>); ++i)
			{
				float Result = glm::mix(TestBool[i].x, TestBool[i].y, TestBool[i].a);
				Error += glm::epsilonEqual(Result, TestBool[i].Result, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// Float with float
		{
			for(std::size_t i = 0; i < sizeof(TestFloat) / sizeof(test<float, float>); ++i)
			{
				float Result = glm::mix(TestFloat[i].x, TestFloat[i].y, TestFloat[i].a);
				Error += glm::epsilonEqual(Result, TestFloat[i].Result, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec2 with bool
		{
			for(std::size_t i = 0; i < sizeof(TestVec2Bool) / sizeof(test<glm::vec2, bool>); ++i)
			{
				glm::vec2 Result = glm::mix(TestVec2Bool[i].x, TestVec2Bool[i].y, TestVec2Bool[i].a);
				Error += glm::epsilonEqual(Result.x, TestVec2Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestVec2Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec2 with bvec2
		{
			for(std::size_t i = 0; i < sizeof(TestBVec2) / sizeof(test<glm::vec2, glm::bvec2>); ++i)
			{
				glm::vec2 Result = glm::mix(TestBVec2[i].x, TestBVec2[i].y, TestBVec2[i].a);
				Error += glm::epsilonEqual(Result.x, TestBVec2[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestBVec2[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec3 with bool
		{
			for(std::size_t i = 0; i < sizeof(TestVec3Bool) / sizeof(test<glm::vec3, bool>); ++i)
			{
				glm::vec3 Result = glm::mix(TestVec3Bool[i].x, TestVec3Bool[i].y, TestVec3Bool[i].a);
				Error += glm::epsilonEqual(Result.x, TestVec3Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestVec3Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.z, TestVec3Bool[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec3 with bvec3
		{
			for(std::size_t i = 0; i < sizeof(TestBVec3) / sizeof(test<glm::vec3, glm::bvec3>); ++i)
			{
				glm::vec3 Result = glm::mix(TestBVec3[i].x, TestBVec3[i].y, TestBVec3[i].a);
				Error += glm::epsilonEqual(Result.x, TestBVec3[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestBVec3[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.z, TestBVec3[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec4 with bool
		{
			for(std::size_t i = 0; i < sizeof(TestVec4Bool) / sizeof(test<glm::vec4, bool>); ++i)
			{
				glm::vec4 Result = glm::mix(TestVec4Bool[i].x, TestVec4Bool[i].y, TestVec4Bool[i].a);
				Error += glm::epsilonEqual(Result.x, TestVec4Bool[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestVec4Bool[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.z, TestVec4Bool[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.w, TestVec4Bool[i].Result.w, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		// vec4 with bvec4
		{
			for(std::size_t i = 0; i < sizeof(TestBVec4) / sizeof(test<glm::vec4, glm::bvec4>); ++i)
			{
				glm::vec4 Result = glm::mix(TestBVec4[i].x, TestBVec4[i].y, TestBVec4[i].a);
				Error += glm::epsilonEqual(Result.x, TestBVec4[i].Result.x, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.y, TestBVec4[i].Result.y, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.z, TestBVec4[i].Result.z, glm::epsilon<float>()) ? 0 : 1;
				Error += glm::epsilonEqual(Result.w, TestBVec4[i].Result.w, glm::epsilon<float>()) ? 0 : 1;
			}
		}

		return Error;
	}
}//namespace test_mix
// Table-driven tests for glm::step with scalar and vector edges.
namespace test_step
{
	// One step() case: glm::step(edge, x) is expected to equal result.
	template <typename EDGE, typename VEC>
	struct test
	{
		EDGE edge;   // step threshold (scalar or per-component vector)
		VEC x;       // input value
		VEC result;  // expected output: 0.0 where x < edge, 1.0 otherwise
	};

	// vec4 input stepped against a single scalar edge.
	test<float, glm::vec4> TestVec4Scalar [] =
	{
		{ 0.0f, glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
		{ 1.0f, glm::vec4(1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
		{ 0.0f, glm::vec4(-1.0f, -2.0f, -3.0f, -4.0f), glm::vec4(0.0f) }
	};

	// vec4 input stepped component-wise against a vec4 edge.
	test<glm::vec4, glm::vec4> TestVec4Vector [] =
	{
		{ glm::vec4(-1.0f, -2.0f, -3.0f, -4.0f), glm::vec4(-2.0f, -3.0f, -4.0f, -5.0f), glm::vec4(0.0f) },
		{ glm::vec4( 0.0f, 1.0f, 2.0f, 3.0f), glm::vec4( 1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(1.0f) },
		{ glm::vec4( 2.0f, 3.0f, 4.0f, 5.0f), glm::vec4( 1.0f, 2.0f, 3.0f, 4.0f), glm::vec4(0.0f) },
		{ glm::vec4( 0.0f, 1.0f, 2.0f, 3.0f), glm::vec4(-1.0f,-2.0f,-3.0f,-4.0f), glm::vec4(0.0f) }
	};

	// Runs both tables through glm::step and counts mismatches.
	// Returns the number of failed checks (0 on success).
	int run()
	{
		int Error = 0;

		// vec4 and float
		{
			for (std::size_t i = 0; i < sizeof(TestVec4Scalar) / sizeof(test<float, glm::vec4>); ++i)
			{
				glm::vec4 Result = glm::step(TestVec4Scalar[i].edge, TestVec4Scalar[i].x);
				Error += glm::all(glm::epsilonEqual(Result, TestVec4Scalar[i].result, glm::epsilon<float>())) ? 0 : 1;
			}
		}

		// vec4 and vec4
		{
			for (std::size_t i = 0; i < sizeof(TestVec4Vector) / sizeof(test<glm::vec4, glm::vec4>); ++i)
			{
				glm::vec4 Result = glm::step(TestVec4Vector[i].edge, TestVec4Vector[i].x);
				Error += glm::all(glm::epsilonEqual(Result, TestVec4Vector[i].result, glm::epsilon<float>())) ? 0 : 1;
			}
		}

		return Error;
	}
}//namespace test_step
// Verifies glm::round for positive and negative scalar inputs; the
// expected values show half-way cases rounding away from zero
// (0.5 -> 1, -0.5 -> -1). Returns the number of failed checks.
int test_round()
{
	int Error = 0;

	// Positive inputs
	{
		float A = glm::round(0.0f);
		Error += A == 0.0f ? 0 : 1;
		float B = glm::round(0.5f);
		Error += B == 1.0f ? 0 : 1;
		float C = glm::round(1.0f);
		Error += C == 1.0f ? 0 : 1;
		float D = glm::round(0.1f);
		Error += D == 0.0f ? 0 : 1;
		float E = glm::round(0.9f);
		Error += E == 1.0f ? 0 : 1;
		float F = glm::round(1.5f);
		Error += F == 2.0f ? 0 : 1;
		float G = glm::round(1.9f);
		Error += G == 2.0f ? 0 : 1;

#if GLM_LANG >= GLM_LANG_CXX11
		// Re-evaluate the same inputs: round() must be deterministic.
		float A1 = glm::round(0.0f);
		Error += A1 == A ? 0 : 1;
		float B1 = glm::round(0.5f);
		Error += B1 == B ? 0 : 1;
		float C1 = glm::round(1.0f);
		Error += C1 == C ? 0 : 1;
		float D1 = glm::round(0.1f);
		Error += D1 == D ? 0 : 1;
		float E1 = glm::round(0.9f);
		Error += E1 == E ? 0 : 1;
		float F1 = glm::round(1.5f);
		Error += F1 == F ? 0 : 1; // bug fix: was "F == F", a tautology that could never fail
		float G1 = glm::round(1.9f);
		Error += G1 == G ? 0 : 1;
#endif // GLM_LANG >= GLM_LANG_CXX11
	}

	// Negative inputs
	{
		float A = glm::round(-0.0f);
		Error += A == 0.0f ? 0 : 1;
		float B = glm::round(-0.5f);
		Error += B == -1.0f ? 0 : 1;
		float C = glm::round(-1.0f);
		Error += C == -1.0f ? 0 : 1;
		float D = glm::round(-0.1f);
		Error += D == 0.0f ? 0 : 1;
		float E = glm::round(-0.9f);
		Error += E == -1.0f ? 0 : 1;
		float F = glm::round(-1.5f);
		Error += F == -2.0f ? 0 : 1;
		float G = glm::round(-1.9f);
		Error += G == -2.0f ? 0 : 1;
	}

	return Error;
}
int test_roundEven()
{
int Error = 0;
{
float A = glm::roundEven(-1.5f);
Error += glm::epsilonEqual(A, -2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(1.5f);
Error += glm::epsilonEqual(A, 2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(-3.5f);
Error += glm::epsilonEqual(A, -4.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(3.5f);
Error += glm::epsilonEqual(A, 4.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(-2.5f);
Error += glm::epsilonEqual(A, -2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(2.5f);
Error += glm::epsilonEqual(A, 2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(-2.4f);
Error += glm::epsilonEqual(A, -2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(2.4f);
Error += glm::epsilonEqual(A, 2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(-2.6f);
Error += glm::epsilonEqual(A, -3.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(2.6f);
Error += glm::epsilonEqual(A, 3.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(-2.0f);
Error += glm::epsilonEqual(A, -2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(2.0f);
Error += glm::epsilonEqual(A, 2.0f, 0.0001f) ? 0 : 1;
Error += 0;
}
{
float A = glm::roundEven(0.0f);
Error += A == 0.0f ? 0 : 1;
float B = glm::roundEven(0.5f);
Error += B == 0.0f ? 0 : 1;
float C = glm::roundEven(1.0f);
Error += C == 1.0f ? 0 : 1;
float D = glm::roundEven(0.1f);
Error += D == 0.0f ? 0 : 1;
float E = glm::roundEven(0.9f);
Error += E == 1.0f ? 0 : 1;
float F = glm::roundEven(1.5f);
Error += F == 2.0f ? 0 : 1;
float G = glm::roundEven(1.9f);
Error += G == 2.0f ? 0 : 1;
}
{
float A = glm::roundEven(-0.0f);
Error += A == 0.0f ? 0 : 1;
float B = glm::roundEven(-0.5f);
Error += B == -0.0f ? 0 : 1;
float C = glm::roundEven(-1.0f);
Error += C == -1.0f ? 0 : 1;
float D = glm::roundEven(-0.1f);
Error += D == 0.0f ? 0 : 1;
float E = glm::roundEven(-0.9f);
Error += E == -1.0f ? 0 : 1;
float F = glm::roundEven(-1.5f);
Error += F == -2.0f ? 0 : 1;
float G = glm::roundEven(-1.9f);
Error += G == -2.0f ? 0 : 1;
}
{
float A = glm::roundEven(1.5f);
Error += A == 2.0f ? 0 : 1;
float B = glm::roundEven(2.5f);
Error += B == 2.0f ? 0 : 1;
float C = glm::roundEven(3.5f);
Error += C == 4.0f ? 0 : 1;
float D = glm::roundEven(4.5f);
Error += D == 4.0f ? 0 : 1;
float E = glm::roundEven(5.5f);
Error += E == 6.0f ? 0 : 1;
float F = glm::roundEven(6.5f);
Error += F == 6.0f ? 0 : 1;
float G = glm::roundEven(7.5f);
Error += G == 8.0f ? 0 : 1;
}
{
float A = glm::roundEven(-1.5f);
Error += A == -2.0f ? 0 : 1;
float B = glm::roundEven(-2.5f);
Error += B == -2.0f ? 0 : 1;
float C = glm::roundEven(-3.5f);
Error += C == -4.0f ? 0 : 1;
float D = glm::roundEven(-4.5f);
Error += D == -4.0f ? 0 : 1;
float E = glm::roundEven(-5.5f);
Error += E == -6.0f ? 0 : 1;
float F = glm::roundEven(-6.5f);
Error += F == -6.0f ? 0 : 1;
float G = glm::roundEven(-7.5f);
Error += G == -8.0f ? 0 : 1;
}
return Error;
}
int test_isnan()
{
int Error = 0;
float Zero_f = 0.0;
double Zero_d = 0.0;
{
Error += true == glm::isnan(0.0/Zero_d) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::dvec2(0.0 / Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::dvec3(0.0 / Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::dvec4(0.0 / Zero_d))) ? 0 : 1;
}
{
Error += true == glm::isnan(0.0f/Zero_f) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::vec2(0.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::vec3(0.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isnan(glm::vec4(0.0f/Zero_f))) ? 0 : 1;
}
return Error;
}
int test_isinf()
{
int Error = 0;
float Zero_f = 0.0;
double Zero_d = 0.0;
{
Error += true == glm::isinf( 1.0/Zero_d) ? 0 : 1;
Error += true == glm::isinf(-1.0/Zero_d) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec2( 1.0/Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec2(-1.0/Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec3( 1.0/Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec3(-1.0/Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec4( 1.0/Zero_d))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::dvec4(-1.0/Zero_d))) ? 0 : 1;
}
{
Error += true == glm::isinf( 1.0f/Zero_f) ? 0 : 1;
Error += true == glm::isinf(-1.0f/Zero_f) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec2( 1.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec2(-1.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec3( 1.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec3(-1.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec4( 1.0f/Zero_f))) ? 0 : 1;
Error += true == glm::any(glm::isinf(glm::vec4(-1.0f/Zero_f))) ? 0 : 1;
}
return Error;
}
int main()
{
int Error(0);
Error += test_modf();
Error += test_floatBitsToInt();
Error += test_floatBitsToUint();
Error += test_step::run();
Error += test_max();
Error += test_min();
Error += test_mix::run();
Error += test_round();
Error += test_roundEven();
Error += test_isnan();
//Error += test_isinf();
return Error;
}
| {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
module Rails
  module Auth
    module X509
      # X.509 client certificates obtained from HTTP requests.
      # Wraps an OpenSSL::X509::Certificate and exposes convenient,
      # frozen accessors for subject components and subjectAltName
      # entries.
      class Certificate
        attr_reader :certificate

        # @param certificate [OpenSSL::X509::Certificate] cert to wrap
        # @raise [TypeError] if anything else is passed in
        def initialize(certificate)
          unless certificate.is_a?(OpenSSL::X509::Certificate)
            raise TypeError, "expecting OpenSSL::X509::Certificate, got #{certificate.class}"
          end

          @certificate = certificate.freeze

          # Index the subject components (e.g. "CN", "OU") by name
          @subject = certificate.subject.to_a.each_with_object({}) do |(name, data, _type), result|
            result[name.freeze] = data.freeze
          end.freeze

          @subject_alt_names = SubjectAltNameExtension.new(certificate).freeze
        end

        # Fetch a subject component by name (case-insensitive)
        def [](component)
          @subject[component.to_s.upcase]
        end

        def cn
          @subject["CN"]
        end
        alias common_name cn

        def dns_names
          @subject_alt_names.dns_names
        end

        def ips
          @subject_alt_names.ips
        end

        def ou
          @subject["OU"]
        end
        alias organizational_unit ou

        def uris
          @subject_alt_names.uris
        end

        # According to the SPIFFE standard only one SPIFFE ID can exist in the URI
        # SAN:
        # (https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md#2-spiffe-id)
        #
        # @return [String, nil] string containing SPIFFE ID if one is present
        #   in the certificate
        def spiffe_id
          uris.find { |uri| uri.start_with?("spiffe://") }
        end

        # Generates inspectable attributes for debugging
        #
        # @return [Hash] parts of the certificate subject (cn, ou) and the
        #   subject alternative name extension (uris, dns_names, ips), plus
        #   the SPIFFE ID (spiffe_id) as a convenience — it is already
        #   included in the uris. Blank/missing values are omitted.
        def attributes
          candidates = {
            cn: cn,
            dns_names: dns_names,
            ips: ips,
            ou: ou,
            spiffe_id: spiffe_id,
            uris: uris
          }

          candidates.reject { |_key, value| value.nil? || value.empty? }
        end

        # Two wrapped certificates are equal when the other object is of
        # the same class and carries a byte-identical certificate (DER)
        def ==(other)
          other.is_a?(self.class) && other.certificate.to_der == certificate.to_der
        end
        alias eql? ==
      end
    end
  end
end
| {
"pile_set_name": "Github"
} |
StartChar: X
Encoding: 88 88 51
GlifName: X_
Width: 1024
VWidth: 0
Flags: W
HStem: 0 21G<89 247.987 904.013 1063> 1388 20G<121 278.377 873.623 1031>
LayerCount: 5
Back
Fore
SplineSet
571 824 m 1
581 824 l 1
884 1408 l 1
1031 1408 l 1
660 721 l 1
660 711 l 1
1063 0 l 1
915 0 l 1
581 608 l 1
571 608 l 1
237 0 l 1
89 0 l 1
492 711 l 1
492 721 l 1
121 1408 l 1
268 1408 l 1
571 824 l 1
EndSplineSet
Validated: 1
Layer: 2
Layer: 3
Layer: 4
Position2: "Single Positioning lookup 22 subtable" dx=128 dy=0 dh=0 dv=0
Position2: "Single Positioning lookup 20 subtable" dx=-128 dy=0 dh=0 dv=0
Position2: "Single Positioning lookup 19 subtable" dx=0 dy=0 dh=0 dv=0
Position2: "Single Positioning lookup 14 subtable" dx=0 dy=0 dh=0 dv=0
EndChar
| {
"pile_set_name": "Github"
} |
;
; Licensed to the Apache Software Foundation (ASF) under one or more
; contributor license agreements. See the NOTICE file distributed with
; this work for additional information regarding copyright ownership.
; The ASF licenses this file to You under the Apache License, Version 2.0
; (the "License"); you may not use this file except in compliance with
; the License. You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; START SNIPPET: doxia
log4php.appender.default = LoggerAppenderSocket
log4php.appender.default.layout = LoggerLayoutSimple
log4php.appender.default.remoteHost = localhost
log4php.appender.default.port = 4242
log4php.appender.default.useXml = true
log4php.appender.default.locationInfo = false
log4php.rootLogger = DEBUG, default
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2013 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.example.android.basicsyncadapter"
android:versionCode="1"
android:versionName="1.0">
<!-- SyncAdapters are available in API 5 and above. We use API 7 as a baseline for samples. -->
<!-- Min/target SDK versions (<uses-sdk>) managed by build.gradle -->
<!-- Required for fetching feed data. -->
<uses-permission android:name="android.permission.INTERNET"/>
<!-- Required to register a SyncStatusObserver to display a "syncing..." progress indicator. -->
<uses-permission android:name="android.permission.READ_SYNC_STATS"/>
<!-- Required to enable our SyncAdapter after it's created. -->
<uses-permission android:name="android.permission.WRITE_SYNC_SETTINGS"/>
<!-- Required because we're manually creating a new account. -->
<uses-permission android:name="android.permission.AUTHENTICATE_ACCOUNTS"/>
<application
android:allowBackup="true"
android:icon="@drawable/ic_launcher"
android:label="@string/app_name"
android:theme="@style/AppTheme" >
<!-- Main activity, responsible for showing a list of feed entries. -->
<activity
android:name=".EntryListActivity"
android:label="@string/app_name" >
<!-- This intent filter places this activity in the system's app launcher. -->
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
<!-- ContentProvider to store feed data.
The "authorities" here are defined as part of a ContentProvider interface. It's used here
as an attachment point for the SyncAdapter. See res/xml/syncadapter.xml and
SyncService.java.
Since this ContentProvider is not exported, it will not be accessible outside of this app's
package. -->
<provider
android:name=".provider.FeedProvider"
android:authorities="com.example.android.basicsyncadapter"
android:exported="false" />
<!-- This service implements our SyncAdapter. It needs to be exported, so that the system
sync framework can access it. -->
<service android:name=".SyncService"
android:exported="true">
<!-- This intent filter is required. It allows the system to launch our sync service
as needed. -->
<intent-filter>
<action android:name="android.content.SyncAdapter" />
</intent-filter>
<!-- This points to a required XML file which describes our SyncAdapter. -->
<meta-data android:name="android.content.SyncAdapter"
android:resource="@xml/syncadapter" />
</service>
<!-- This implements the account we'll use as an attachment point for our SyncAdapter. Since
our SyncAdapter doesn't need to authenticate the current user (it just fetches a public RSS
feed), this account's implementation is largely empty.
It's also possible to attach a SyncAdapter to an existing account provided by another
package. In that case, this element could be omitted here. -->
<service android:name="com.example.android.common.accounts.GenericAccountService">
<!-- Required filter used by the system to launch our account service. -->
<intent-filter>
<action android:name="android.accounts.AccountAuthenticator" />
</intent-filter>
<!-- This points to an XML file which describes our account service. -->
<meta-data android:name="android.accounts.AccountAuthenticator"
android:resource="@xml/authenticator" />
</service>
</application>
</manifest>
| {
"pile_set_name": "Github"
} |
// lodash/fp module for `head`: wraps the base implementation with the
// fp `convert` helper, passing the options defined in `_falseOptions`.
var convert = require('./convert'),
    func = convert('head', require('../head'), require('./_falseOptions'));

// Expose the fp placeholder so partial application can use it.
func.placeholder = require('./placeholder');
module.exports = func;
| {
"pile_set_name": "Github"
} |
(module TSSOP-8_4.4x3mm_P0.65mm (layer F.Cu) (tedit 5E476F32)
(descr "TSSOP, 8 Pin (JEDEC MO-153 Var AA https://www.jedec.org/document_search?search_api_views_fulltext=MO-153), generated with kicad-footprint-generator ipc_gullwing_generator.py")
(tags "TSSOP SO")
(attr smd)
(fp_text reference REF** (at 0 -2.45) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value TSSOP-8_4.4x3mm_P0.65mm (at 0 2.45) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_line (start 0 1.61) (end 2.2 1.61) (layer F.SilkS) (width 0.12))
(fp_line (start 0 1.61) (end -2.2 1.61) (layer F.SilkS) (width 0.12))
(fp_line (start 0 -1.61) (end 2.2 -1.61) (layer F.SilkS) (width 0.12))
(fp_line (start 0 -1.61) (end -3.6 -1.61) (layer F.SilkS) (width 0.12))
(fp_line (start -1.45 -1.5) (end 2.2 -1.5) (layer F.Fab) (width 0.1))
(fp_line (start 2.2 -1.5) (end 2.2 1.5) (layer F.Fab) (width 0.1))
(fp_line (start 2.2 1.5) (end -2.2 1.5) (layer F.Fab) (width 0.1))
(fp_line (start -2.2 1.5) (end -2.2 -0.75) (layer F.Fab) (width 0.1))
(fp_line (start -2.2 -0.75) (end -1.45 -1.5) (layer F.Fab) (width 0.1))
(fp_line (start -3.85 -1.75) (end -3.85 1.75) (layer F.CrtYd) (width 0.05))
(fp_line (start -3.85 1.75) (end 3.85 1.75) (layer F.CrtYd) (width 0.05))
(fp_line (start 3.85 1.75) (end 3.85 -1.75) (layer F.CrtYd) (width 0.05))
(fp_line (start 3.85 -1.75) (end -3.85 -1.75) (layer F.CrtYd) (width 0.05))
(pad 1 smd roundrect (at -2.8625 -0.975) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 2 smd roundrect (at -2.8625 -0.325) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 3 smd roundrect (at -2.8625 0.325) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 4 smd roundrect (at -2.8625 0.975) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 5 smd roundrect (at 2.8625 0.975) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 6 smd roundrect (at 2.8625 0.325) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 7 smd roundrect (at 2.8625 -0.325) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(pad 8 smd roundrect (at 2.8625 -0.975) (size 1.475 0.4) (layers F.Cu F.Mask F.Paste) (roundrect_rratio 0.25))
(fp_text user %R (at 0 0) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
(model ${KISYS3DMOD}/Package_SO.3dshapes/TSSOP-8_4.4x3mm_P0.65mm.wrl
(at (xyz 0 0 0))
(scale (xyz 1 1 1))
(rotate (xyz 0 0 0))
)
) | {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.