keystore(
name = 'debug',
store = 'debug.keystore',
properties = 'debug.keystore.properties',
visibility = [
'PUBLIC',
],
)
| {
"pile_set_name": "Github"
} |
<template>
<svg xmlns="http://www.w3.org/2000/svg"
:width="width"
:height="height"
viewBox="0 0 18 18"
:aria-labelledby="iconName"
role="presentation"
>
<title :id="iconName" lang="en">{{iconName}} icon</title>
<g :fill="iconColor">
<slot />
</g>
</svg>
</template>
<script>
export default {
props: {
iconName: {
type: String,
default: 'box'
},
width: {
type: [Number, String],
default: 18
},
height: {
type: [Number, String],
default: 18
},
iconColor: {
type: String,
default: 'currentColor'
}
}
}
</script>
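<!--
  Usage sketch (hypothetical parent template; assumes this component is
  registered as <icon-base> and that an SVG <path> is passed in through
  the default slot):

  <icon-base icon-name="write" :width="24" :height="24" icon-color="#7fba00">
    <path d="...icon path data..." />
  </icon-base>
-->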
<style scoped>
svg {
display: inline-block;
vertical-align: baseline;
margin-bottom: -2px; /* yes, I'm that particular about formatting */
}
</style> | {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using Microsoft.Build.Shared;
namespace Microsoft.Build.Utilities
{
/// <summary>
/// Structure to represent an extension sdk
/// </summary>
internal class ExtensionSDK
{
/// <summary>
/// Path to the platform sdk may be null if not a platform sdk.
/// </summary>
private readonly string _path;
/// <summary>
/// Extension SDK moniker
/// </summary>
private readonly string _sdkMoniker;
/// <summary>
/// SDK version
/// </summary>
private Version _sdkVersion;
/// <summary>
/// SDK identifier
/// </summary>
private string _sdkIdentifier;
/// <summary>
/// Object containing the properties in the SDK manifest
/// </summary>
private SDKManifest _manifest;
/// <summary>
/// Caches minimum Visual Studio version from the manifest
/// </summary>
private Version _minVSVersion;
/// <summary>
/// Caches max platform version from the manifest
/// </summary>
private Version _maxPlatformVersion;
/// <summary>
/// Constructor
/// </summary>
public ExtensionSDK(string extensionSdkMoniker, string extensionSdkPath)
{
_sdkMoniker = extensionSdkMoniker;
_path = extensionSdkPath;
}
/// <summary>
/// SDK version from the moniker
/// </summary>
public Version Version
{
get
{
if (_sdkVersion == null)
{
ParseMoniker(_sdkMoniker);
}
return _sdkVersion;
}
}
/// <summary>
/// SDK identifier from the moniker
/// </summary>
public string Identifier
{
get
{
if (_sdkIdentifier == null)
{
ParseMoniker(_sdkMoniker);
}
return _sdkIdentifier;
}
}
/// <summary>
/// The type of the SDK.
/// </summary>
public SDKType SDKType => Manifest.SDKType;
/// <summary>
/// Minimum Visual Studio version from SDKManifest.xml
/// </summary>
public Version MinVSVersion
{
get
{
if (_minVSVersion == null && Manifest.MinVSVersion != null)
{
if (!Version.TryParse(Manifest.MinVSVersion, out _minVSVersion))
{
_minVSVersion = null;
}
}
return _minVSVersion;
}
}
/// <summary>
/// Maximum platform version from SDKManifest.xml
/// </summary>
public Version MaxPlatformVersion
{
get
{
if (_maxPlatformVersion == null && Manifest.MaxPlatformVersion != null)
{
if (!Version.TryParse(Manifest.MaxPlatformVersion, out _maxPlatformVersion))
{
_maxPlatformVersion = null;
}
}
return _maxPlatformVersion;
}
}
/// <summary>
/// Api contracts from the SDKManifest, if any
/// </summary>
public ICollection<ApiContract> ApiContracts => Manifest.ApiContracts;
/// <summary>
/// Reference to the manifest object
/// Makes sure manifest is instantiated only once
/// </summary>
/// <remarks>Load manifest from disk the first time it is needed</remarks>
private SDKManifest Manifest => _manifest ?? (_manifest = new SDKManifest(_path));
/// <summary>
/// Parse SDK moniker
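/// (e.g. "MyExtensionSDK, Version=8.1": a comma-separated identifier plus a
/// Version property, as implied by the parsing logic below)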
/// </summary>
private void ParseMoniker(string moniker)
{
string[] properties = moniker.Split(MSBuildConstants.CommaChar);
foreach (string property in properties)
{
string[] words = property.Split(MSBuildConstants.EqualsChar);
if (words[0].Trim().StartsWith("Version", StringComparison.OrdinalIgnoreCase))
{
if (words.Length > 1 && Version.TryParse(words[1], out Version ver))
{
_sdkVersion = ver;
}
}
else
{
_sdkIdentifier = words[0];
}
}
}
}
}
| {
"pile_set_name": "Github"
} |
---
title: HDF5 Output Layer
---
# HDF5 Output Layer
* Layer type: `HDF5Output`
* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1HDF5OutputLayer.html)
* Header: [`./include/caffe/layers/hdf5_output_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/hdf5_output_layer.hpp)
* CPU implementation: [`./src/caffe/layers/hdf5_output_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_output_layer.cpp)
* CUDA GPU implementation: [`./src/caffe/layers/hdf5_output_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_output_layer.cu)
The HDF5 output layer performs the opposite function of the other layers in this section: it writes its input blobs to disk.
## Parameters
* Parameters (`HDF5OutputParameter hdf5_output_param`)
- Required
- `file_name`: name of file to write to
* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto):
{% highlight Protobuf %}
{% include proto/HDF5OutputParameter.txt %}
{% endhighlight %}
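## Example

A minimal sketch of a net definition using this layer (the blob names and
output path here are hypothetical):

{% highlight Protobuf %}
layer {
  name: "write_to_hdf5"
  type: "HDF5Output"
  bottom: "data"
  bottom: "label"
  hdf5_output_param {
    file_name: "output.h5"
  }
}
{% endhighlight %}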
| {
"pile_set_name": "Github"
} |
-- wal3.test
--
-- execsql { SELECT * FROM t1 }
SELECT * FROM t1 | {
"pile_set_name": "Github"
} |
import * as d3 from "d3";
import {MetaDataType} from './meta_data';
import {Node} from './node';
import {classify} from './util';
export class Tooltip {
private offsetX: number;
private visibility: string;
constructor(private node: Node, private eventType: string) {
this.offsetX = 30;
this.visibility = 'hidden';
}
tspanOffsetY(isHeader: boolean): string {
return isHeader ? '2em' : '1.1em';
}
transform(): string {
return `translate(${this.node.x}, ${this.node.y})`;
}
class(): string {
return `tooltip ${this.nodeId()}`;
}
nodeId(): string {
return classify((<Node>this.node).name);
}
// This doesn't actually toggle visibility, but returns string for toggled visibility
toggleVisibility(): string {
if (this.visibility === 'hidden') {
this.visibility = 'visible';
return 'visible';
} else {
this.visibility = 'hidden';
return 'hidden';
}
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
toggleVisibilityCallback(element: SVGGElement): any {
return () => {
// Do nothing for dragging
if ((<Event>d3.event).defaultPrevented) {
return;
}
d3.select(element).attr('visibility', this.toggleVisibility());
};
}
configureNodeClickCallback(element: SVGGElement): void {
d3.select(`#${this.nodeId()}`).on('click', this.toggleVisibilityCallback(element));
}
configureNodeHoverCallback(element: SVGGElement): void {
d3.select(`#${this.nodeId()}`).on('mouseenter', this.toggleVisibilityCallback(element));
d3.select(`#${this.nodeId()}`).on('mouseleave', this.toggleVisibilityCallback(element));
}
disableZoom(element: SVGAElement): void {
d3.select(element).on('mousedown', () => {
(<MouseEvent>d3.event).stopPropagation();
})
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
static render(layer: d3.Selection<any>, tooltips: Tooltip[]): d3.Selection<Tooltip> {
const tooltip = layer.selectAll('.tooltip')
.data(tooltips)
.enter()
.append('g')
.attr('visibility', (d) => d.visibility)
.attr('class', (d) => d.class())
.attr('transform', (d) => d.transform());
tooltip.each(function (d) {
Tooltip.appendText(this)
if (d.eventType === 'hover') {
d.configureNodeHoverCallback(this);
} else {
d.configureNodeClickCallback(this);
}
d.disableZoom(this);
})
return tooltip;
}
static fill(element: SVGPathElement): string {
// If no "fill" style is defined
if (getComputedStyle(element).fill.match(/\(0,\s*0,\s*0\)/)) {
return '#f8f1e9';
}
}
static pathD(x: number, y: number, width: number, height: number): string {
const round = 8;
return `M ${x},${y} L ${x + 20},${y - 10} ${x + 20},${y - 20}` +
`Q ${x + 20},${y - 20 - round} ${x + 20 + round},${y - 20 - round}` +
`L ${x + 20 + width - round},${y - 20 - round}` +
`Q ${x + 20 + width},${y - 20 - round} ${x + 20 + width},${y - 20}` +
`L ${x + 20 + width},${y - 20 + height}` +
`Q ${x + 20 + width},${y - 20 + height + round} ${x + 20 + width - round},${y - 20 + height + round}` +
`L ${x + 20 + round},${y - 20 + height + round}` +
`Q ${x + 20},${y - 20 + height + round} ${x + 20},${y - 20 + height}` +
`L ${x + 20},${y + 10} Z`;
}
static appendText(container: SVGGElement): void {
const path = d3.select(container).append('path');
const text = d3.select(container).append('text')
text.append('tspan')
.attr('x', (d: Tooltip) => d.offsetX + 40)
.attr('class', 'name')
.text('node:');
text.append('tspan')
.attr('dx', 10)
.attr('class', 'value')
.text((d: Tooltip) => d.node.name);
text.each(function (d: Tooltip) {
Tooltip.appendTspans(text, d.node.meta)
// Add "d" after bbox calculation
const bbox = this.getBBox();
path.attr('d', Tooltip.pathD(30, 0, bbox.width + 40, bbox.height + 20))
.style('fill', function () {
return Tooltip.fill(this)
});
})
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
static appendTspans(container: d3.Selection<any>, meta: MetaDataType[]): void {
meta.forEach((m, i) => {
container.append('tspan')
.attr('x', (d: Tooltip) => d.offsetX + 40)
.attr('dy', (d: Tooltip) => d.tspanOffsetY(i === 0))
.attr('class', 'name')
.text(`${m.class}:`);
container.append('tspan')
.attr('dx', 10)
.attr('class', 'value')
.text(m.value);
});
}
static followNode(tooltip: d3.Selection<Tooltip>): void {
tooltip.attr('transform', (d) => d.transform());
}
}
| {
"pile_set_name": "Github"
} |
{
"parent": "immersiveengineering:block/stairs_steel_scaffolding_wooden_top"
} | {
"pile_set_name": "Github"
} |
---
nodes:
- name: start
- name: bridge1
- name: generate_signature
- name: add_signatures
- name: bridge2
- name: submit_sample
- name: quit
edges:
- start: generate_signature
- bridge1: generate_signature
weight: 0.95
- bridge1: add_signatures
- generate_signature: bridge1
- add_signatures: submit_sample
- bridge2: submit_sample
weight: 0.95
- bridge2: quit
- submit_sample: bridge2
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2009-2018, Acciente LLC
*
* Acciente LLC licenses this file to you under the
* Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in
* writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.acciente.oacc;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class TestAccessControl_unimpersonate extends TestAccessControlBase {
@Test
public void unimpersonate_valid_asSystemResource() {
authenticateSystemResource();
final Resource impersonatedResource = generateAuthenticatableResource(generateUniquePassword());
accessControlContext.impersonate(impersonatedResource);
// verify
accessControlContext.unimpersonate();
assertThat(accessControlContext.getAuthenticatedResource(), is(SYS_RESOURCE));
assertThat(accessControlContext.getSessionResource(), is(SYS_RESOURCE));
}
@Test
public void unimpersonate_unimpersonated_succeedsAsSystemResource() {
authenticateSystemResource();
generateAuthenticatableResource(generateUniquePassword());
// verify
accessControlContext.unimpersonate();
assertThat(accessControlContext.getAuthenticatedResource(), is(SYS_RESOURCE));
assertThat(accessControlContext.getSessionResource(), is(SYS_RESOURCE));
}
@Test
public void unimpersonate_valid_asAuthenticatedResource() {
authenticateSystemResource();
final char[] password = generateUniquePassword();
final Resource accessorResource = generateAuthenticatableResource(password);
final Resource impersonatedResource = generateAuthenticatableResource(generateUniquePassword());
// set up accessor --IMPERSONATE-> impersonatedResource
accessControlContext.setResourcePermissions(accessorResource,
impersonatedResource,
setOf(ResourcePermissions.getInstance(ResourcePermissions.IMPERSONATE)));
// authenticate & impersonate
accessControlContext.authenticate(accessorResource, PasswordCredentials.newInstance(password));
accessControlContext.impersonate(impersonatedResource);
// verify
accessControlContext.unimpersonate();
assertThat(accessControlContext.getAuthenticatedResource(), is(accessorResource));
assertThat(accessControlContext.getSessionResource(), is(accessorResource));
}
@Test
public void unimpersonate_unimpersonated_succeedsAsAuthenticatedResource() {
authenticateSystemResource();
final char[] password = generateUniquePassword();
final Resource accessorResource = generateAuthenticatableResource(password);
final Resource impersonatedResource = generateAuthenticatableResource(generateUniquePassword());
// set up accessor --IMPERSONATE-> impersonatedResource
accessControlContext.setResourcePermissions(accessorResource,
impersonatedResource,
setOf(ResourcePermissions.getInstance(ResourcePermissions.IMPERSONATE)));
// authenticate
accessControlContext.authenticate(accessorResource, PasswordCredentials.newInstance(password));
// verify
accessControlContext.unimpersonate();
assertThat(accessControlContext.getAuthenticatedResource(), is(accessorResource));
assertThat(accessControlContext.getSessionResource(), is(accessorResource));
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
#define HSA_RADEON_CIK_INT_H_INCLUDED
#include <linux/types.h>
struct cik_ih_ring_entry {
uint32_t source_id;
uint32_t data;
uint32_t ring_id;
uint32_t reserved;
};
#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
#endif
| {
"pile_set_name": "Github"
} |
import UIKit
class SceneDelegate: UIResponder, UIWindowSceneDelegate {
var window: UIWindow?
}
| {
"pile_set_name": "Github"
} |
package com.github.javaparser.utils;
/**
* Simply a pair of objects.
* @param <A> type of object a.
* @param <B> type of object b.
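* <p>Usage sketch: {@code new Pair<>("count", 3)} yields {@code a == "count"} and {@code b == 3}.</p>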
*/
public class Pair<A, B> {
public final A a;
public final B b;
public Pair(A a, B b) {
this.a = a;
this.b = b;
}
}
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x",
"filename" : "login-create-account.png"
},
{
"idiom" : "universal",
"scale" : "2x",
"filename" : "[email protected]"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
<?php
/**
*
* ThinkUp/webapp/plugins/insightsgenerator/tests/TestOfLoveWinsInsight.php
*
* Copyright (c) 2015 Gina Trapani
*
* LICENSE:
*
* This file is part of ThinkUp (http://thinkup.com).
*
* ThinkUp is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any
* later version.
*
* ThinkUp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with ThinkUp. If not, see
* <http://www.gnu.org/licenses/>.
*
* Test for the LoveWinsInsight class.
*
* Copyright (c) 2015 Gina Trapani
*
* @author Gina Trapani <ginatrapani[at]gmail[dot]com>
* @license http://www.gnu.org/licenses/gpl.html
* @copyright 2015 Gina Trapani
*/
require_once dirname(__FILE__) . '/../../../../tests/init.tests.php';
require_once THINKUP_WEBAPP_PATH.'_lib/extlib/simpletest/autorun.php';
require_once THINKUP_WEBAPP_PATH.'_lib/extlib/simpletest/web_tester.php';
require_once THINKUP_ROOT_PATH. 'webapp/plugins/insightsgenerator/model/class.InsightPluginParent.php';
require_once THINKUP_ROOT_PATH. 'webapp/plugins/insightsgenerator/insights/lovewins.php';
class TestOfLoveWinsInsight extends ThinkUpInsightUnitTestCase {
public function setUp(){
parent::setUp();
}
public function tearDown() {
parent::tearDown();
}
public function testNoLoveWins() {
$insight_plugin = new LoveWinsInsight();
$insight_dao = new InsightMySQLDAO();
$insight_plugin->generateInsight($this->instance, new User(), array(), 3);
$result = $insight_dao->getInsight($insight_plugin->slug, $this->instance->id, date('Y-m-d'));
$this->assertNull($result);
}
public function testYesLoveWinsTwitter() {
$instance = new Instance();
$instance->id = 10;
$instance->network_username = 'Luke';
$instance->network_user_id = '18';
$instance->network = 'twitter';
$this->instance = $instance;
$post_objects = array();
$builders = array();
$post_array = array(
'pub_date' => '2015-05-04', 'post_id' => 1, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "I can't believe SCOTUS ruled!",
);
$builders[] = FixtureBuilder::build('posts', $post_array);
$posts[] = new Post($post_array);
$post_array = array(
'pub_date' => '2015-01-07', 'post_id' => 2, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "omg #lovewins the day!",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$post_array = array(
'pub_date' => '2015-03-07', 'post_id' => 3, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "What's up with gay marriage these days?",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array );
$post_array = array(
'pub_date' => '2014-05-04', 'post_id' => 4, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "Happy #Pride yo",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$post_array = array(
'pub_date' => '2014-12-05', 'post_id' => 5, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "Today's a good day for marriage equality",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$insight_plugin = new LoveWinsInsight();
$insight_plugin->generateInsight($this->instance, new User(), $posts, 3);
$insight_dao = new InsightMySQLDAO();
$result = $insight_dao->getInsight($insight_plugin->slug, $this->instance->id, date("Y-m-d"));
$this->assertNotNull($result);
$this->assertEqual($result->headline, "@Luke joined the #LoveWins celebration");
$this->assertEqual($result->text,
'@Luke was all about <a href="https://twitter.com/hashtag/LoveWins">marriage equality</a> this week.');
$this->dumpRenderedInsight($result, $this->instance, "Love Wins on Twitter");
}
public function testYesLoveWinsFacebook() {
$instance = new Instance();
$instance->id = 10;
$instance->network_username = 'Leia';
$instance->network_user_id = '18';
$instance->network = 'facebook';
$this->instance = $instance;
$post_objects = array();
$builders = array();
$post_array = array(
'pub_date' => '2015-05-04', 'post_id' => 1, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "I can't believe SCOTUS ruled!",
);
$builders[] = FixtureBuilder::build('posts', $post_array);
$posts[] = new Post($post_array);
$post_array = array(
'pub_date' => '2015-01-07', 'post_id' => 2, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "omg #lovewins the day!",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$post_array = array(
'pub_date' => '2015-03-07', 'post_id' => 3, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "What's up with gay marriage these days?",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array );
$post_array = array(
'pub_date' => '2014-05-04', 'post_id' => 4, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "Happy #Pride yo",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$post_array = array(
'pub_date' => '2014-12-05', 'post_id' => 5, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => "Today's a good day for marriage equality",
);
$posts[] = new Post($post_array);
$builders[] = FixtureBuilder::build('posts', $post_array);
$insight_plugin = new LoveWinsInsight();
$insight_plugin->generateInsight($this->instance, new User(), $posts, 3);
$insight_dao = new InsightMySQLDAO();
$result = $insight_dao->getInsight($insight_plugin->slug, $this->instance->id, date("Y-m-d"));
$this->assertNotNull($result);
$this->assertEqual($result->headline, "Leia had enough pride for all 50 states");
$this->assertEqual($result->text,
'Leia joined the <a href="https://facebook.com/celebratepride">marriage equality celebration</a> '
.'this week!');
$this->dumpRenderedInsight($result, $this->instance, "Love Wins on Facebook");
}
public function testWordInWord() {
$year = date('Y');
$builders = array();
$post_array = array(
'pub_date' => $year.'-02-26', 'post_id' => 1, 'author_username' => $this->instance->network_username,
'author_user_id' => $this->instance->network_user_id, 'network' => $this->instance->network,
'post_text' => 'Radarscotusthoughts is not a real word.',
);
$builders[] = FixtureBuilder::build('posts', $post_array);
$post = new Post($post_array);
$insight_plugin = new LoveWinsInsight();
$insight_dao = new InsightMySQLDAO();
$insight_plugin->generateInsight($this->instance, new User(), array($post), 3);
$result = $insight_dao->getInsight($insight_plugin->slug, $this->instance->id, date('Y-m-d'));
$this->assertNull($result);
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>
#include "esp_gatt_common_api.h"
#include "esp_bt_main.h"
#include "esp_gatt_defs.h"
#include "btc_gatt_common.h"
/**
* @brief This function is called to set local MTU,
* the function is called before BLE connection.
*
* @param[in] mtu: the size of MTU.
*
* @return
* - ESP_OK: success
* - other: failed
*
*/
esp_err_t esp_ble_gatt_set_local_mtu (uint16_t mtu)
{
btc_msg_t msg;
btc_ble_gatt_com_args_t arg;
ESP_BLUEDROID_STATUS_CHECK(ESP_BLUEDROID_STATUS_ENABLED);
if ((mtu < ESP_GATT_DEF_BLE_MTU_SIZE) || (mtu > ESP_GATT_MAX_MTU_SIZE)) {
return ESP_ERR_INVALID_SIZE;
}
msg.sig = BTC_SIG_API_CALL;
msg.pid = BTC_PID_GATT_COMMON;
msg.act = BTC_GATT_ACT_SET_LOCAL_MTU;
arg.set_mtu.mtu = mtu;
return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_gatt_com_args_t), NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL);
}
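/*
 * Usage sketch (illustrative, not part of the original file): call after
 * bluedroid has been enabled and before any BLE connection is established.
 * TAG is a hypothetical log tag supplied by the caller.
 *
 *     esp_err_t err = esp_ble_gatt_set_local_mtu(500);
 *     if (err != ESP_OK) {
 *         ESP_LOGE(TAG, "set local MTU failed: %s", esp_err_to_name(err));
 *     }
 */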
#if (BLE_INCLUDED == TRUE)
extern UINT16 L2CA_GetFreePktBufferNum_LE(void);
/**
* @brief This function is called to get currently sendable packets number on controller,
* the function is called only in BLE running core and single connection now.
*
* @return
* sendable packets number on controller
*
*/
uint16_t esp_ble_get_sendable_packets_num (void)
{
return L2CA_GetFreePktBufferNum_LE();
}
/**
* @brief This function is used to query the number of available buffers for the current connection.
* When you need to query the current available buffer number, it is recommended to use this API.
* @param[in] conn_id: current connection id.
*
* @return
* Number of available buffers for the current connection
*
*/
extern UINT16 L2CA_GetCurFreePktBufferNum_LE(UINT16 conn_id);
uint16_t esp_ble_get_cur_sendable_packets_num (uint16_t connid)
{
return L2CA_GetCurFreePktBufferNum_LE(connid);
}
#endif | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_types.h"
#include "webrtc/voice_engine/test/auto_test/fixtures/before_initialization_fixture.h"
using namespace webrtc;
class HardwareBeforeInitializingTest : public BeforeInitializationFixture {
};
TEST_F(HardwareBeforeInitializingTest,
SetAudioDeviceLayerAcceptsPlatformDefaultBeforeInitializing) {
AudioLayers wanted_layer = kAudioPlatformDefault;
AudioLayers given_layer;
EXPECT_EQ(0, voe_hardware_->SetAudioDeviceLayer(wanted_layer));
EXPECT_EQ(0, voe_hardware_->GetAudioDeviceLayer(given_layer));
EXPECT_EQ(wanted_layer, given_layer) <<
"These should be the same before initializing.";
}
| {
"pile_set_name": "Github"
} |
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RIPEMD-160 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
package ripemd160
import (
"math/bits"
)
// work buffer indices and roll amounts for one line
var _n = [80]uint{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
}
var _r = [80]uint{
11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
}
// same for the other parallel one
var n_ = [80]uint{
5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
}
var r_ = [80]uint{
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
}
func _Block(md *digest, p []byte) int {
n := 0
var x [16]uint32
var alpha, beta uint32
for len(p) >= BlockSize {
a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
aa, bb, cc, dd, ee := a, b, c, d, e
j := 0
for i := 0; i < 16; i++ {
x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
j += 4
}
// round 1
i := 0
for i < 16 {
alpha = a + (b ^ c ^ d) + x[_n[i]]
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 2
for i < 32 {
alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 3
for i < 48 {
alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 4
for i < 64 {
alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 5
for i < 80 {
alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// combine results
dd += c + md.s[1]
md.s[1] = md.s[2] + d + ee
md.s[2] = md.s[3] + e + aa
md.s[3] = md.s[4] + a + bb
md.s[4] = md.s[0] + b + cc
md.s[0] = dd
p = p[BlockSize:]
n += BlockSize
}
return n
}
| {
"pile_set_name": "Github"
} |
/**
* TLS-Attacker - A Modular Penetration Testing Framework for TLS
*
* Copyright 2014-2020 Ruhr University Bochum, Paderborn University,
* and Hackmanit GmbH
*
* Licensed under Apache License 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*/
package de.rub.nds.tlsattacker.core.protocol.serializer;
import de.rub.nds.modifiablevariable.util.ArrayConverter;
import de.rub.nds.tlsattacker.core.constants.HandshakeByteLength;
import de.rub.nds.tlsattacker.core.constants.ProtocolVersion;
import de.rub.nds.tlsattacker.core.protocol.message.CertificateRequestMessage;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class CertificateRequestSerializer extends HandshakeMessageSerializer<CertificateRequestMessage> {
private static final Logger LOGGER = LogManager.getLogger();
private final CertificateRequestMessage msg;
/**
* Constructor for the CertificateRequestSerializer
*
* @param message
* Message that should be serialized
* @param version
* Version of the Protocol
*/
public CertificateRequestSerializer(CertificateRequestMessage message, ProtocolVersion version) {
super(message, version);
this.msg = message;
}
@Override
public byte[] serializeHandshakeMessageContent() {
LOGGER.debug("Serializing CertificateRequestMessage");
if (version.isTLS13()) {
writeCertificateRequestContextLength(msg);
writeCertificateRequestContext(msg);
writeExtensionLength();
writeExtensionBytes();
} else {
writeClientCertificateTypesCount(msg);
writeClientCertificateTypes(msg);
if (version == ProtocolVersion.TLS12 || version == ProtocolVersion.DTLS12) {
writeSignatureHandshakeAlgorithmsLength(msg);
writeSignatureHandshakeAlgorithms(msg);
}
writeDistinguishedNamesLength(msg);
if (hasDistinguishedNames(msg)) {
writeDistinguishedNames(msg);
}
}
return getAlreadySerialized();
}
/**
* Writes the ClientCertificateTypeCount of the CertificateRequestMessage
* into the final byte[]
*/
private void writeClientCertificateTypesCount(CertificateRequestMessage msg) {
appendInt(msg.getClientCertificateTypesCount().getValue(), HandshakeByteLength.CERTIFICATES_TYPES_COUNT);
LOGGER.debug("ClientCertificateTypesCount: " + msg.getClientCertificateTypesCount().getValue());
}
/**
* Writes the ClientCertificateType of the CertificateRequestMessage into
* the final byte[]
*/
private void writeClientCertificateTypes(CertificateRequestMessage msg) {
appendBytes(msg.getClientCertificateTypes().getValue());
LOGGER.debug("ClientCertificateTypes: "
+ ArrayConverter.bytesToHexString(msg.getClientCertificateTypes().getValue()));
}
/**
* Writes the SignatureHandshakeAlgorithmsLength of the
* CertificateRequestMessage into the final byte[]
*/
private void writeSignatureHandshakeAlgorithmsLength(CertificateRequestMessage msg) {
appendInt(msg.getSignatureHashAlgorithmsLength().getValue(),
HandshakeByteLength.SIGNATURE_HASH_ALGORITHMS_LENGTH);
LOGGER.debug("SignatureHashAlgorithmsLength: " + msg.getSignatureHashAlgorithmsLength().getValue());
}
/**
* Writes the SignatureHandshakeAlgorithms of the CertificateRequestMessage
* into the final byte[]
*/
private void writeSignatureHandshakeAlgorithms(CertificateRequestMessage msg) {
appendBytes(msg.getSignatureHashAlgorithms().getValue());
LOGGER.debug("SignatureHashAlgorithms: "
+ ArrayConverter.bytesToHexString(msg.getSignatureHashAlgorithms().getValue()));
}
/**
* Writes the DistinguishedNamesLength of the CertificateRequestMessage
* into the final byte[]
*/
private void writeDistinguishedNamesLength(CertificateRequestMessage msg) {
appendInt(msg.getDistinguishedNamesLength().getValue(), HandshakeByteLength.DISTINGUISHED_NAMES_LENGTH);
LOGGER.debug("DistinguishedNamesLength: " + msg.getDistinguishedNamesLength().getValue());
}
private boolean hasDistinguishedNames(CertificateRequestMessage msg) {
return msg.getDistinguishedNamesLength().getValue() != 0;
}
/**
* Writes the DistinguishedNames of the CertificateRequestMessage into the
* final byte[]
*/
private void writeDistinguishedNames(CertificateRequestMessage msg) {
appendBytes(msg.getDistinguishedNames().getValue());
LOGGER.debug("DistinguishedNames: " + ArrayConverter.bytesToHexString(msg.getDistinguishedNames().getValue()));
}
private void writeCertificateRequestContext(CertificateRequestMessage msg) {
appendBytes(msg.getCertificateRequestContext().getValue());
LOGGER.debug("CertificateRequestContext: "
+ ArrayConverter.bytesToHexString(msg.getCertificateRequestContext().getValue()));
}
private void writeCertificateRequestContextLength(CertificateRequestMessage msg) {
appendInt(msg.getCertificateRequestContextLength().getValue(),
HandshakeByteLength.CERTIFICATE_REQUEST_CONTEXT_LENGTH);
LOGGER.debug("CertificateRequestContextLength: " + msg.getCertificateRequestContextLength().getValue());
}
}
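// Usage sketch (illustrative; assumes the serializer base class exposes a
// serialize() method that invokes serializeHandshakeMessageContent()):
//
//   CertificateRequestMessage message = new CertificateRequestMessage();
//   byte[] bytes = new CertificateRequestSerializer(message, ProtocolVersion.TLS12).serialize();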
| {
"pile_set_name": "Github"
} |
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.3/maven-wrapper-0.5.3.jar
| {
"pile_set_name": "Github"
} |
// Learn more about F# at http://fsharp.org
open Terminal.Gui
open System
open System.Collections.Generic
open System.Diagnostics
open System.Globalization
open System.Reflection
open NStack
type Demo() = class end
let ustr (x:string) = ustring.Make(x)
let mutable ml2 = Unchecked.defaultof<Label>
let mutable ml = Unchecked.defaultof<Label>
let mutable menu = Unchecked.defaultof<MenuBar>
let mutable menuKeysStyle = Unchecked.defaultof<CheckBox>
let mutable menuAutoMouseNav = Unchecked.defaultof<CheckBox>
type Box10x(x : int, y : int) =
inherit View(new Rect(x, y, 20, 10))
let w = 40
let h = 50
member val WantCursorPosition = Unchecked.defaultof<System.Boolean> with get, set
new() as _this =
(Box10x())
then
()
member this.GetContentSize() =
new Size(w, h)
member this.SetCursorPosition(pos : Point) =
raise (new NotImplementedException())
override this.Redraw(region : Rect) =
Application.Driver.SetAttribute (Application.Current.ColorScheme.Focus)
do
let mutable (y : int) = 0
while (y < h) do
this.Move (0, y)
Application.Driver.AddStr (ustr (y.ToString()))
do
let mutable (x : int) = 0
while (x < w - (y.ToString ()).Length) do
if (y.ToString ()).Length < w
then Application.Driver.AddStr (ustr " ")
x <- x + 1
x
y <- y + 1
y
()
type Filler(rect : Rect) =
inherit View(rect)
new() as _this =
(Filler ())
then
()
override this.Redraw(region : Rect) =
Application.Driver.SetAttribute (Application.Current.ColorScheme.Focus)
let mutable f = this.Frame
do
let mutable (y : int) = 0
while (y < f.Width) do
this.Move (0, y)
do
let mutable (x : int) = 0
while (x < f.Height) do
let mutable (r : Rune) = Unchecked.defaultof<Rune>
match (x % 3) with
| 0 ->
Application.Driver.AddRune ((Rune ((y.ToString ()).ToCharArray (0, 1)).[0]))
if y > 9
then Application.Driver.AddRune ((Rune ((y.ToString ()).ToCharArray (1, 1)).[0]))
r <- (Rune '.')
| 1 ->
r <- (Rune 'o')
| _ ->
r <- (Rune 'O')
Application.Driver.AddRune (r)
x <- x + 1
x
y <- y + 1
y
()
let ShowTextAlignments() =
let mutable container = new Dialog(
ustr "Text Alignments", 50, 20,
new Button (ustr "Ok", true, Clicked = Action(Application.RequestStop)),
new Button (ustr "Cancel", true, Clicked = Action(Application.RequestStop))
)
let mutable (i : int) = 0
let mutable (txt : string) = "Hello world, how are you doing today"
container.Add (
new Label (new Rect (0, 1, 40, 3), ustr ((sprintf "%O-%O" (i + 1)) txt), TextAlignment = TextAlignment.Left),
new Label (new Rect (0, 3, 40, 3), ustr ((sprintf "%O-%O" (i + 2)) txt), TextAlignment = TextAlignment.Right),
new Label (new Rect (0, 5, 40, 3), ustr ((sprintf "%O-%O" (i + 3)) txt), TextAlignment = TextAlignment.Centered),
new Label (new Rect (0, 7, 40, 3), ustr ((sprintf "%O-%O" (i + 4)) txt), TextAlignment = TextAlignment.Justified)
)
Application.Run (container)
let ShowEntries(container : View) =
let mutable scrollView = new ScrollView (new Rect (50, 10, 20, 8),
ContentSize = new Size (20, 50),
ShowVerticalScrollIndicator = true,
ShowHorizontalScrollIndicator = true
)
scrollView.Add (new Filler(new Rect(0, 0, 40, 40)))
let mutable scrollView2 = new ScrollView (new Rect (72, 10, 3, 3),
ContentSize = new Size (100, 100),
ShowVerticalScrollIndicator = true,
ShowHorizontalScrollIndicator = true
)
scrollView2.Add (new Box10x(0, 0))
let mutable progress = new ProgressBar(new Rect(68, 1, 10, 1))
let timer = Func<MainLoop, bool> (fun (caller) ->
progress.Pulse ();
true)
Application.MainLoop.AddTimeout (TimeSpan.FromMilliseconds (300.0), timer) |> ignore
let mutable login = Label (ustr "Login: ",
X = Pos.At(3),
Y = Pos.At(6)
)
let mutable password = new Label (ustr "Password: ",
X = Pos.Left (login),
Y = Pos.Bottom (login) + Pos.At(1)
)
let mutable loginText = new TextField (ustr "",
X = Pos.Right (password),
Y = Pos.Top (login),
Width = Dim.op_Implicit(40)
)
let mutable passText = new TextField (ustr "",
Secret = true,
X = Pos.Left (loginText),
Y = Pos.Top (password),
Width = Dim.Width (loginText)
)
let mutable tf = new Button(3, 19, ustr "Ok")
container.Add (login, loginText, password, passText,
new FrameView (new Rect (3, 10, 25, 6), ustr "Options",
[|new CheckBox (1, 0, ustr "Remember me");
new RadioGroup (1, 2, [|ustr "_Personal"; ustr "_Company"|])|]
),
new ListView (new Rect(59, 6, 16, 4),
[|"First row";
"<>";
"This is a very long row that should overflow what is shown";
"4th";
"There is an empty slot on the second row";
"Whoa";
"This is so cool"|]
),
scrollView, scrollView2, tf,
new Button(10, 19, ustr "Cancel"),
new TimeField(3, 20, DateTime.Now.TimeOfDay),
new TimeField(23, 20, DateTime.Now.TimeOfDay, true),
new DateField(3, 22, DateTime.Now),
new DateField(23, 22, DateTime.Now, true),
progress,
new Label(3, 24, ustr "Press F9 (on Unix, ESC+9 is an alias) to activate the menubar"),
menuKeysStyle,
menuAutoMouseNav
)
container.SendSubviewToBack (tf)
()
let NewFile() =
let mutable d = new Dialog (ustr "New File", 50, 20,
new Button (ustr "Ok", true, Clicked = Action(Application.RequestStop)),
new Button (ustr "Cancel", true, Clicked = Action(Application.RequestStop))
)
ml2 <- new Label(1, 1, ustr "Mouse Debug Line")
d.Add (ml2)
Application.Run (d)
let Editor(top : Toplevel) =
let mutable tframe = top.Frame
let mutable ntop = new Toplevel(tframe)
let mutable menu = new MenuBar([|new MenuBarItem(ustr "_File",
[|new MenuItem(ustr "_Close", "", (fun () -> Application.RequestStop ()))|]);
new MenuBarItem(ustr "_Edit", [|new MenuItem(ustr "_Copy", "", Unchecked.defaultof<_>);
new MenuItem(ustr "C_ut", "", Unchecked.defaultof<_>);
new MenuItem(ustr "_Paste", "", Unchecked.defaultof<_>)|])|]
)
ntop.Add (menu)
let mutable (fname : string) = Unchecked.defaultof<_>
for s in [|"/etc/passwd"; "c:\\windows\\win.ini"|] do
if System.IO.File.Exists (s)
then
fname <- s
let mutable win = new Window (ustr(if fname <> null then fname else "Untitled"),
X = Pos.At(0),
Y = Pos.At(1),
Width = Dim.Fill (),
Height = Dim.Fill ()
)
ntop.Add (win)
let mutable text = new TextView(new Rect(0, 0, (tframe.Width - 2), (tframe.Height - 3)))
if fname <> Unchecked.defaultof<_>
then text.Text <- ustr (System.IO.File.ReadAllText (fname))
win.Add (text)
Application.Run (ntop)
let Quit() =
let mutable n = MessageBox.Query (50, 7, ustr "Quit Demo", ustr "Are you sure you want to quit this demo?", ustr "Yes", ustr "No")
n = 0
let Close() =
MessageBox.ErrorQuery (50, 7, ustr "Error", ustr "There is nothing to close", ustr "Ok")
|> ignore
let Open() =
let mutable d = new OpenDialog (ustr "Open", ustr "Open a file", AllowsMultipleSelection = true)
Application.Run (d)
if not d.Canceled
then MessageBox.Query (50, 7, ustr "Selected File", ustr (String.Join (", ", d.FilePaths)), ustr "Ok") |> ignore
let ShowHex(top : Toplevel) =
let mutable tframe = top.Frame
let mutable ntop = new Toplevel(tframe)
let mutable menu = new MenuBar([|new MenuBarItem(ustr "_File",
[|new MenuItem(ustr "_Close", "", (fun () -> Application.RequestStop ()))|])|])
ntop.Add (menu)
let mutable win = new Window (ustr "/etc/passwd",
X = Pos.At(0),
Y = Pos.At(1),
Width = Dim.Fill (),
Height = Dim.Fill ()
)
ntop.Add (win)
let mutable source = System.IO.File.OpenRead ("/etc/passwd")
let mutable hex = new HexView (source,
X = Pos.At(0),
Y = Pos.At(0),
Width = Dim.Fill (),
Height = Dim.Fill ()
)
win.Add (hex)
Application.Run (ntop)
type MenuItemDetails() =
inherit MenuItem()
new(title : ustring, help : string, action : Action) as this =
(MenuItemDetails ())
then
this.Title <- title
this.Help <- ustr help
this.Action <- action
static member Instance(mi : MenuItem) =
(mi.GetMenuItem ()) :?> MenuItemDetails
type MenuItemDelegate = delegate of MenuItemDetails -> MenuItem
let ShowMenuItem(mi : MenuItemDetails) =
let mutable (flags : BindingFlags) = BindingFlags.Public ||| BindingFlags.Static
let mutable (minfo : MethodInfo) = typeof<MenuItemDetails>.GetMethod ("Instance", flags)
let mutable (mid : Delegate) = Delegate.CreateDelegate (typeof<MenuItemDelegate>, minfo)
MessageBox.Query (70, 7, ustr (mi.Title.ToString ()),
ustr ((sprintf "%O selected. Is from submenu: %O" (mi.Title.ToString ())) (mi.GetMenuBarItem ())), ustr "Ok")
|> ignore
let MenuKeysStyle_Toggled(e : bool) =
menu.UseKeysUpDownAsKeysLeftRight <- menuKeysStyle.Checked
let MenuAutoMouseNav_Toggled(e : bool) =
menu.WantMousePositionReports <- menuAutoMouseNav.Checked
let Copy() =
let mutable (textField : TextField) = menu.LastFocused :?> TextField
if textField <> Unchecked.defaultof<_> && textField.SelectedLength <> 0
then textField.Copy ()
()
let Cut() =
let mutable (textField : TextField) = menu.LastFocused :?> TextField
if textField <> Unchecked.defaultof<_> && textField.SelectedLength <> 0
then textField.Cut ()
()
let Paste() =
let mutable (textField : TextField) = menu.LastFocused :?> TextField
if textField <> Unchecked.defaultof<_>
then textField.Paste ()
()
let Help() =
MessageBox.Query (50, 7, ustr "Help", ustr "This is a small help\nBe kind.", ustr "Ok")
|> ignore
let Load () =
MessageBox.Query (50, 7, ustr "Load", ustr "This is a small load\nBe kind.", ustr "Ok")
|> ignore
let Save () =
MessageBox.Query (50, 7, ustr "Save ", ustr "This is a small save\nBe kind.", ustr "Ok")
|> ignore
let ListSelectionDemo(multiple : System.Boolean) =
let mutable d = new Dialog (ustr "Selection Demo", 60, 20,
new Button (ustr "Ok", true, Clicked = fun () -> Application.RequestStop ()),
new Button (ustr "Cancel", Clicked = fun () -> Application.RequestStop ())
)
let mutable animals = new List<string> ()
animals.AddRange([|"Alpaca"; "Llama"; "Lion"; "Shark"; "Goat"|])
let mutable msg = new Label (ustr "Use space bar or control-t to toggle selection",
X = Pos.At(1),
Y = Pos.At(1),
Width = Dim.Fill () - Dim.op_Implicit(1),
Height = Dim.op_Implicit(1)
)
let mutable list = new ListView (animals,
X = Pos.At(1),
Y = Pos.At(3),
Width = Dim.Fill () - Dim.op_Implicit(4),
Height = Dim.Fill () - Dim.op_Implicit(4),
AllowsMarking = true,
AllowsMultipleSelection = multiple
)
d.Add (msg, list)
Application.Run (d)
let mutable result = ""
do
let mutable (i : int) = 0
while (i < animals.Count) do
if list.Source.IsMarked (i)
then result <- result + animals.[i] + " "
i <- i + 1
i
()
MessageBox.Query (60, 10, ustr "Selected Animals", ustr (if result = "" then "No animals selected" else result), ustr "Ok") |> ignore
let OnKeyDownPressUpDemo() =
let mutable container = new Dialog (ustr "KeyDown & KeyPress & KeyUp demo", 80, 20,
new Button (ustr "Close", Clicked = fun () -> Application.RequestStop ()),
Width = Dim.Fill (),
Height = Dim.Fill ()
)
let mutable list = new List<string> ()
let mutable listView = new ListView (list,
X = Pos.At(0),
Y = Pos.At(0),
Width = Dim.Fill () - Dim.op_Implicit(1),
Height = Dim.Fill () - Dim.op_Implicit(2),
ColorScheme = Colors.TopLevel
)
container.Add (listView)
let KeyDownPressUp(keyEvent : KeyEvent, updown : string) =
let ident : int = -5
match updown with
| "Down"
| "Up"
| "Press" ->
list.Add (keyEvent.ToString ())
listView.MoveDown ();
container.KeyDown <- Action<View.KeyEventEventArgs> (fun (e : View.KeyEventEventArgs) -> KeyDownPressUp (e.KeyEvent, "Down") |> ignore)
container.KeyPress <- Action<View.KeyEventEventArgs> (fun (e : View.KeyEventEventArgs) -> KeyDownPressUp (e.KeyEvent, "Press") |> ignore)
container.KeyUp <- Action<View.KeyEventEventArgs> (fun (e : View.KeyEventEventArgs) -> KeyDownPressUp (e.KeyEvent, "Up") |> ignore)
Application.Run (container)
let Main() =
if Debugger.IsAttached
then CultureInfo.DefaultThreadCurrentUICulture <- CultureInfo.GetCultureInfo ("en-US")
Application.Init ()
let mutable top = Application.Top
let mutable (margin : int) = 3
let mutable win = new Window (ustr "Hello",
X = Pos.At(1),
Y = Pos.At(1),
Width = Dim.Fill () - Dim.op_Implicit(margin),
Height = Dim.Fill () - Dim.op_Implicit(margin)
)
let mutable (menuItems : MenuItemDetails[]) = [|new MenuItemDetails(ustr "F_ind", "", Unchecked.defaultof<_>);
new MenuItemDetails(ustr "_Replace", "", Unchecked.defaultof<_>);
new MenuItemDetails(ustr "_Item1", "", Unchecked.defaultof<_>);
new MenuItemDetails(ustr "_Not From Sub Menu", "", Unchecked.defaultof<_>)|]
menuItems.[0].Action <- fun () -> ShowMenuItem (menuItems.[0])
menuItems.[1].Action <- fun () -> ShowMenuItem (menuItems.[1])
menuItems.[2].Action <- fun () -> ShowMenuItem (menuItems.[2])
menuItems.[3].Action <- fun () -> ShowMenuItem (menuItems.[3])
menu <-
new MenuBar ([|new MenuBarItem(ustr "_File",
[|new MenuItem (ustr "Text _Editor Demo", "", (fun () -> Editor (top)));
new MenuItem (ustr "_New", "Creates new file", fun () -> NewFile());
new MenuItem (ustr "_Open", "", fun () -> Open());
new MenuItem (ustr "_Hex", "", (fun () -> ShowHex (top)));
new MenuItem (ustr "_Close", "", (fun () -> Close()));
new MenuItem (ustr "_Disabled", "", (fun () -> ()), (fun () -> false));
Unchecked.defaultof<_>;
new MenuItem (ustr "_Quit", "", (fun () -> if Quit() then top.Running <- false))|]);
new MenuBarItem (ustr "_Edit", [|new MenuItem(ustr "_Copy", "", fun () -> Copy());
new MenuItem(ustr "C_ut", "", fun () -> Cut()); new MenuItem(ustr "_Paste", "", fun () -> Paste());
new MenuItem(ustr "_Find and Replace", new MenuBarItem([|(menuItems.[0]);
(menuItems.[1])|])); (menuItems.[3])|]);
new MenuBarItem(ustr "_List Demos", [|new MenuItem(ustr "Select _Multiple Items", "", (fun () -> ListSelectionDemo (true)));
new MenuItem(ustr "Select _Single Item", "", (fun () -> ListSelectionDemo (false)))|]);
new MenuBarItem(ustr "A_ssorted", [|new MenuItem(ustr "_Show text alignments", "", (fun () -> ShowTextAlignments ()));
new MenuItem(ustr "_OnKeyDown/Press/Up", "", (fun () -> OnKeyDownPressUpDemo ()))|]);
new MenuBarItem(ustr "_Test Menu and SubMenus",
[|new MenuItem(ustr "SubMenu1Item_1", new MenuBarItem([|new MenuItem(ustr "SubMenu2Item_1",
new MenuBarItem([|new MenuItem(ustr "SubMenu3Item_1", new MenuBarItem([|(menuItems.[2])|]))|]))|]))|]);
new MenuBarItem(ustr "_About...", "Demonstrates top-level menu item",
(fun () -> MessageBox.ErrorQuery (50, 7, ustr "About Demo", ustr "This is a demo app for gui.cs", ustr "Ok") |> ignore))|])
menuKeysStyle <- new CheckBox(3, 25, ustr "UseKeysUpDownAsKeysLeftRight", true)
menuKeysStyle.Toggled <- Action<bool> (MenuKeysStyle_Toggled)
menuAutoMouseNav <- new CheckBox(40, 25, ustr "UseMenuAutoNavigation", true)
menuAutoMouseNav.Toggled <- Action<bool> (MenuAutoMouseNav_Toggled)
ShowEntries (win)
let mutable (count : int) = 0
ml <- new Label(new Rect(3, 17, 47, 1), ustr "Mouse: ")
Application.RootMouseEvent <- Action<MouseEvent> (
fun (me : MouseEvent) ->
ml.TextColor <- Colors.TopLevel.Normal
ml.Text <- ustr (
(((sprintf "Mouse: (%O,%O) - %O %O" me.X) me.Y) me.Flags) (
count <- count + 1
count))
)
let mutable test = new Label(3, 18, ustr "Se iniciará el análisis")
win.Add (test)
win.Add (ml)
let mutable drag = new Label (ustr "Drag: ", X = Pos.At(70), Y = Pos.At(24))
let mutable dragText = new TextField (ustr "",
X = Pos.Right (drag),
Y = Pos.Top (drag),
Width = Dim.op_Implicit(40)
)
let mutable statusBar = new StatusBar ([|
new StatusItem(Key.F1, ustr "~F1~ Help", Action(Help));
new StatusItem(Key.F2, ustr "~F2~ Load", Action(Load));
new StatusItem(Key.F3, ustr "~F3~ Save", Action(Save));
new StatusItem(Key.ControlX, ustr "~^X~ Quit", fun () -> if (Quit ()) then top.Running <- false)
|]
)
win.Add (drag, dragText)
let mutable bottom = new Label(ustr "This should go on the bottom of the same top-level!")
win.Add (bottom)
let mutable bottom2 = new Label(ustr "This should go on the bottom of another top-level!")
top.Add (bottom2)
Application.Loaded <- Action<Application.ResizedEventArgs> (
fun (_) ->
bottom.X <- win.X
bottom.Y <- Pos.Bottom (win) - Pos.Top (win) - Pos.At(margin)
bottom2.X <- Pos.Left (win)
bottom2.Y <- Pos.Bottom (win)
)
top.Add (win)
top.Add (menu, statusBar)
Application.Run ()
module Demo__run =
[<EntryPoint>]
let main argv =
Main ()
0 | {
"pile_set_name": "Github"
} |
'use strict';
// Load modules
const Crypto = require('crypto');
const Boom = require('boom');
// Declare internals
const internals = {};
// Generate a cryptographically strong pseudo-random data
exports.randomString = function (size) {
const buffer = exports.randomBits((size + 1) * 6);
if (buffer instanceof Error) {
return buffer;
}
const string = buffer.toString('base64').replace(/\+/g, '-').replace(/\//g, '_').replace(/\=/g, '');
return string.slice(0, size);
};
// Return a random string of digits
exports.randomDigits = function (size) {
const buffer = exports.randomBits(size * 8);
if (buffer instanceof Error) {
return buffer;
}
const digits = [];
for (let i = 0; i < buffer.length; ++i) {
digits.push(Math.floor(buffer[i] / 25.6));
}
return digits.join('');
};
// Generate a buffer of random bits
exports.randomBits = function (bits) {
if (!bits ||
bits < 0) {
return Boom.internal('Invalid random bits count');
}
const bytes = Math.ceil(bits / 8);
try {
return Crypto.randomBytes(bytes);
}
catch (err) {
return Boom.internal('Failed generating random bits: ' + err.message);
}
};
// Compare two strings using fixed time algorithm (to prevent time-based analysis of MAC digest match)
exports.fixedTimeComparison = function (a, b) {
if (typeof a !== 'string' ||
typeof b !== 'string') {
return false;
}
let mismatch = (a.length === b.length ? 0 : 1);
if (mismatch) {
b = a;
}
for (let i = 0; i < a.length; ++i) {
const ac = a.charCodeAt(i);
const bc = b.charCodeAt(i);
mismatch |= (ac ^ bc);
}
return (mismatch === 0);
};
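// Usage sketch (illustrative; assumes this module is saved as ./cryptiles.js):
//
//   const Cryptiles = require('./cryptiles');
//   const token = Cryptiles.randomString(24);                  // URL-safe random string
//   const equal = Cryptiles.fixedTimeComparison(token, token); // -> true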
| {
"pile_set_name": "Github"
} |
'use strict';
const electron = require('electron');
const fs = require('fs');
const app = electron.app;
// adds debug features like hotkeys for triggering dev tools and reload
// require('electron-debug')();
// prevent window being garbage collected
let mainWindow;
function onClosed() {
// dereference the window
// for multiple windows store them in an array
mainWindow = null;
}
function createMainWindow() {
const win = new electron.BrowserWindow({
width: 1200,
height: 800
});
win.loadURL(`file://${__dirname}/index.html`);
win.on('closed', onClosed);
return win;
}
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit();
}
});
app.on('activate', () => {
if (!mainWindow) {
mainWindow = createMainWindow();
}
});
app.on('ready', () => {
mainWindow = createMainWindow();
});
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<HTML><HEAD><TITLE>Man page of PLA</TITLE>
</HEAD><BODY>
<H1>PLA</H1>
Section: Misc. Reference Manual Pages (5OCTTOOLS)<BR>Updated: 8/23/81<BR><A HREF="#index">Index</A>
<A HREF="/cgi-bin/man/man2html">Return to Main Contents</A><HR>
<A NAME="lbAB"> </A>
<H2>NAME</H2>
pla - Format for physical description of Programmable Logic Arrays.
<A NAME="lbAC"> </A>
<H2>SYNOPSIS</H2>
<B>pla</B>
<A NAME="lbAD"> </A>
<H2>DESCRIPTION</H2>
This format is used by programs which manipulate
plas to describe the physical implementation.
Lines beginning with a `<B>#</B>' are comments and are ignored.
Lines beginning with a `<B>.</B>' contain control
information about the pla.
Currently, the control information is given in the following order:
<PRE>
<B>.i</B> <number of inputs>
<B>.o</B> <number of outputs>
<B>.p</B> <number of product terms (pterms)>
and optionally,
<B>.na</B><name> (the name to be used for the pla)
</PRE>
<P>
What follows then is a description of the AND and OR planes
of the pla with one line per product term.
Connections in the AND plane are represented with a `<B>1</B>' for
connection to the non-inverted input line and a <B>`0</B>' for
connection to the inverted input line.
No connection to an input line is indicated
with '<B>x</B>', '<B>X</B>', or '<B>-</B>' with '<B>-</B>' being preferred.
Connections in the OR plane are indicated by a '<B>1</B>' with no
connection being indicated
with '<B>x</B>', '<B>X</B>', '<B>0</B>', or '<B>-</B>' with '<B>-</B>' being
preferred. Spaces or tabs may be used freely and are ignored.
<P>
The end of the pla description is indicated with:
<BR>
<TT> </TT><B>.e</B><BR>
<P>
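For example, a complete description of a hypothetical 2-input,
1-output PLA realizing the AND function would be:
<PRE>
# 2-input, 1-output AND
.i 2
.o 1
.p 1
11 1
.e
</PRE>
<P>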
Programs capable of handling split and folded arrays
employ the following format:
<DL COMPACT><DT><DD>
<PRE>
<B>AND PLANE</B>
<DL COMPACT><DT><DD>
Column (1) Contact to input (2) No contact to input
(1) (2)
1 - Normal contacts, no splits or folds
! _ Split below
; , Fold to right
: . Split below and fold to right
</DL>
<B>OR PLANE</B>
<DL COMPACT><DT><DD>
Column (1) Contact to output (2) No contact to output
(1) (2)
I ~ Normal contacts, no splits or folds
i = Split below
| ' Fold to right
j " Split below and fold to right
</DL>
<B>ADDITIONAL ELEMENTS</B>
<DL COMPACT><DT><DD>
   *   Input buffer
   +   Output buffer
   D   Depletion load associated with product term
   N   No depletion load associated with product term
</DL>
</PRE>
</DL>
<P>
Note that
the decoding function of the AND plane is separated
from the specification of its connectivity.
This makes the AND and OR plane specifications identical.
<P>
These programs handle the following more general set of .parameters:
<PRE>
<B>.il</B> <number of left-AND plane inputs>
<B>.ir</B> <number of right-AND plane inputs>
<B>.ol</B> <number of left-OR plane inputs>
<B>.or</B> <number of right-OR plane inputs>
<B>.p</B> <number of product terms>
<B>.ilt</B> <labels left-top-AND plane>
<B>.ilb</B> <labels left-bottom-AND plane>
<B>.irt</B> <labels right-top-AND plane>
<B>.irb</B> <labels right-bottom-AND plane>
<B>.olb</B> <labels left-bottom-OR plane>
<B>.olt</B> <labels left-top-OR plane>
<B>.orb</B> <labels right-bottom-OR plane>
<B>.ort</B> <labels right-top-OR plane>
<B>.pl</B> <labels left product terms>
<B>.pr</B> <labels right product terms>
</PRE>
The first group of parameters must precede the second group.
If there is only one AND or OR plane it is assumed
to be the left one and the companion .parameters may be shortened by dropping their (left,right)
designation character.
<P>
In order to better deal with folded and split PLAs, the
following .parameters are proposed:
<PRE>
<B>.ig</B> <input group>
<B>.og</B> <output group>
<B>.ins</B> <inputs excluded from splitting>
<B>.inf</B> <inputs excluded from folding>
<B>.ons</B> <outputs excluded from splitting>
<B>.onf</B> <outputs excluded from folding>
</PRE>
<P>
In order to build finite state machines, the following .parameters are proposed:
<PRE>
<B>.iltf</B> <left-top-AND feedback terms>
<B>.ilbf</B> <left-bottom-AND feedback terms>
<B>.irtf</B> <right-top-AND feedback terms>
<B>.irbf</B> <right-bottom-AND feedback terms>
<B>.oltf</B> <left-top-OR feedback terms>
<B>.olbf</B> <left-bottom-OR feedback terms>
<B>.ortf</B> <right-top-OR feedback terms>
<B>.orbf</B> <right-bottom-OR feedback terms>
<B>.ilr</B> <left re-ordered inputs>
<B>.irr</B> <right re-ordered inputs>
<B>.olrf</B> <left re-ordered outputs>
<B>.orrf</B> <right re-ordered outputs>
</PRE>
The .XXXf parameters must occur in pairs, with
the .oXXf line first.
Input and output terms must occur on the same side (top, bottom)
of the PLA.
Feedback terms must be given in ascending order.
The re-order .parameters simplify feedback routing.
<P>
<A NAME="lbAE"> </A>
<H2>SEE ALSO</H2>
espresso(1OCTTOOLS), espresso(5OCTTOOLS), misII(1OCTTOOLS)
<P>
<HR>
<A NAME="index"> </A><H2>Index</H2>
<DL>
<DT><A HREF="#lbAB">NAME</A><DD>
<DT><A HREF="#lbAC">SYNOPSIS</A><DD>
<DT><A HREF="#lbAD">DESCRIPTION</A><DD>
<DT><A HREF="#lbAE">SEE ALSO</A><DD>
</DL>
<HR>
This document was created by
<A HREF="/cgi-bin/man/man2html">man2html</A>,
using the manual pages.<BR>
Time: 17:31:43 GMT, December 19, 2013
</BODY>
</HTML>
| {
"pile_set_name": "Github"
} |
541560fe389993cf6861d1dabba93438
| {
"pile_set_name": "Github"
} |
/*
* LED Class Core
*
* Copyright (C) 2005 John Lenz <[email protected]>
* Copyright (C) 2005-2007 Richard Purdie <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <uapi/linux/uleds.h>
#include "leds.h"
static struct class *leds_class;
static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
/* no lock needed for this */
led_update_brightness(led_cdev);
return sprintf(buf, "%u\n", led_cdev->brightness);
}
static ssize_t brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
unsigned long state;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER))
led_trigger_remove(led_cdev);
led_set_brightness(led_cdev, state);
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static DEVICE_ATTR_RW(brightness);
static ssize_t max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", led_cdev->max_brightness);
}
static DEVICE_ATTR_RO(max_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
static struct attribute *led_trigger_attrs[] = {
&dev_attr_trigger.attr,
NULL,
};
static const struct attribute_group led_trigger_group = {
.attrs = led_trigger_attrs,
};
#endif
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
NULL,
};
static const struct attribute_group led_group = {
.attrs = led_class_attrs,
};
static const struct attribute_group *led_groups[] = {
&led_group,
#ifdef CONFIG_LEDS_TRIGGERS
&led_trigger_group,
#endif
NULL,
};
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
static ssize_t brightness_hw_changed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->brightness_hw_changed == -1)
return -ENODATA;
return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
}
static DEVICE_ATTR_RO(brightness_hw_changed);
static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
{
struct device *dev = led_cdev->dev;
int ret;
ret = device_create_file(dev, &dev_attr_brightness_hw_changed);
if (ret) {
dev_err(dev, "Error creating brightness_hw_changed\n");
return ret;
}
led_cdev->brightness_hw_changed_kn =
sysfs_get_dirent(dev->kobj.sd, "brightness_hw_changed");
if (!led_cdev->brightness_hw_changed_kn) {
dev_err(dev, "Error getting brightness_hw_changed kn\n");
device_remove_file(dev, &dev_attr_brightness_hw_changed);
return -ENXIO;
}
return 0;
}
static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
{
sysfs_put(led_cdev->brightness_hw_changed_kn);
device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
}
void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
return;
led_cdev->brightness_hw_changed = brightness;
sysfs_notify_dirent(led_cdev->brightness_hw_changed_kn);
}
EXPORT_SYMBOL_GPL(led_classdev_notify_brightness_hw_changed);
#else
static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
{
return 0;
}
static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
{
}
#endif
/**
* led_classdev_suspend - suspend an led_classdev.
* @led_cdev: the led_classdev to suspend.
*/
void led_classdev_suspend(struct led_classdev *led_cdev)
{
led_cdev->flags |= LED_SUSPENDED;
led_set_brightness_nopm(led_cdev, 0);
flush_work(&led_cdev->set_brightness_work);
}
EXPORT_SYMBOL_GPL(led_classdev_suspend);
/**
* led_classdev_resume - resume an led_classdev.
* @led_cdev: the led_classdev to resume.
*/
void led_classdev_resume(struct led_classdev *led_cdev)
{
led_set_brightness_nopm(led_cdev, led_cdev->brightness);
if (led_cdev->flash_resume)
led_cdev->flash_resume(led_cdev);
led_cdev->flags &= ~LED_SUSPENDED;
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
#ifdef CONFIG_PM_SLEEP
static int led_suspend(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
led_classdev_suspend(led_cdev);
return 0;
}
static int led_resume(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
led_classdev_resume(led_cdev);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
static int match_name(struct device *dev, const void *data)
{
if (!dev_name(dev))
return 0;
return !strcmp(dev_name(dev), (char *)data);
}
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
unsigned int i = 0;
int ret = 0;
struct device *dev;
strlcpy(name, init_name, len);
while ((ret < len) &&
(dev = class_find_device(leds_class, NULL, name, match_name))) {
put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
}
if (ret >= len)
return -ENOMEM;
return i;
}
/**
* of_led_classdev_register - register a new object of led_classdev class.
*
* @parent: parent of LED device
* @led_cdev: the led_classdev structure for this device.
* @np: DT node describing this LED
*/
int of_led_classdev_register(struct device *parent, struct device_node *np,
struct led_classdev *led_cdev)
{
char name[LED_MAX_NAME_SIZE];
int ret;
ret = led_classdev_next_name(led_cdev->name, name, sizeof(name));
if (ret < 0)
return ret;
led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
led_cdev, led_cdev->groups, "%s", name);
if (IS_ERR(led_cdev->dev))
return PTR_ERR(led_cdev->dev);
led_cdev->dev->of_node = np;
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
led_cdev->name, dev_name(led_cdev->dev));
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
ret = led_add_brightness_hw_changed(led_cdev);
if (ret) {
device_unregister(led_cdev->dev);
return ret;
}
}
led_cdev->work_flags = 0;
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
led_cdev->brightness_hw_changed = -1;
#endif
mutex_init(&led_cdev->led_access);
/* add to the list of leds */
down_write(&leds_list_lock);
list_add_tail(&led_cdev->node, &leds_list);
up_write(&leds_list_lock);
if (!led_cdev->max_brightness)
led_cdev->max_brightness = LED_FULL;
led_update_brightness(led_cdev);
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
#endif
dev_dbg(parent, "Registered led device: %s\n",
led_cdev->name);
return 0;
}
EXPORT_SYMBOL_GPL(of_led_classdev_register);
/**
* led_classdev_unregister - unregisters an object of led_classdev class.
* @led_cdev: the led device to unregister
*
* Unregisters an object previously registered via led_classdev_register().
*/
void led_classdev_unregister(struct led_classdev *led_cdev)
{
#ifdef CONFIG_LEDS_TRIGGERS
down_write(&led_cdev->trigger_lock);
if (led_cdev->trigger)
led_trigger_set(led_cdev, NULL);
up_write(&led_cdev->trigger_lock);
#endif
led_cdev->flags |= LED_UNREGISTERING;
/* Stop blinking */
led_stop_software_blink(led_cdev);
led_set_brightness(led_cdev, LED_OFF);
flush_work(&led_cdev->set_brightness_work);
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED)
led_remove_brightness_hw_changed(led_cdev);
device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
mutex_destroy(&led_cdev->led_access);
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
static void devm_led_classdev_release(struct device *dev, void *res)
{
led_classdev_unregister(*(struct led_classdev **)res);
}
/**
* devm_of_led_classdev_register - resource managed led_classdev_register()
*
* @parent: parent of LED device
* @np: DT node describing this LED
* @led_cdev: the led_classdev structure for this device.
*/
int devm_of_led_classdev_register(struct device *parent,
struct device_node *np,
struct led_classdev *led_cdev)
{
struct led_classdev **dr;
int rc;
dr = devres_alloc(devm_led_classdev_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = of_led_classdev_register(parent, np, led_cdev);
if (rc) {
devres_free(dr);
return rc;
}
*dr = led_cdev;
devres_add(parent, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_of_led_classdev_register);
static int devm_led_classdev_match(struct device *dev, void *res, void *data)
{
struct led_classdev **p = res;
if (WARN_ON(!p || !*p))
return 0;
return *p == data;
}
/**
* devm_led_classdev_unregister() - resource managed led_classdev_unregister()
* @dev: the device the LED was registered with.
* @led_cdev: the led_classdev structure for this device.
*/
void devm_led_classdev_unregister(struct device *dev,
struct led_classdev *led_cdev)
{
WARN_ON(devres_release(dev,
devm_led_classdev_release,
devm_led_classdev_match, led_cdev));
}
EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
static int __init leds_init(void)
{
leds_class = class_create(THIS_MODULE, "leds");
if (IS_ERR(leds_class))
return PTR_ERR(leds_class);
leds_class->pm = &leds_class_dev_pm_ops;
leds_class->dev_groups = led_groups;
return 0;
}
static void __exit leds_exit(void)
{
class_destroy(leds_class);
}
subsys_initcall(leds_init);
module_exit(leds_exit);
MODULE_AUTHOR("John Lenz, Richard Purdie");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LED Class Interface");
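/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * driver registering one LED through this class.  It assumes the
 * standard <linux/leds.h> API; the demo_* names are hypothetical.
 *
 *	static void demo_set(struct led_classdev *cdev,
 *			     enum led_brightness value)
 *	{
 *		... write `value` to the hardware ...
 *	}
 *
 *	static struct led_classdev demo_led = {
 *		.name		= "demo:green:status",
 *		.brightness_set	= demo_set,
 *		.flags		= LED_CORE_SUSPENDRESUME,
 *	};
 *
 *	In probe(): ret = devm_led_classdev_register(&pdev->dev, &demo_led);
 */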
| {
"pile_set_name": "Github"
} |
(ns clograms.models
(:require [clograms.db :as db]
[datascript.core :as d]))
(defmulti build-node (fn [entity-type id] entity-type))
(defmethod build-node :project
[_ proj-id]
{:entity {:entity/type :project
:project/id proj-id}
:diagram.node/type :clograms/project-node})
(defmethod build-node :namespace
[_ ns-id]
{:entity {:entity/type :namespace
:namespace/id ns-id}
:diagram.node/type :clograms/namespace-node})
(defmethod build-node :function
[_ var-id]
{:entity {:entity/type :function
:var/id var-id}
:diagram.node/type :clograms/function-node})
(defmethod build-node :multimethod
[_ var-id]
{:entity {:entity/type :multimethod
:var/id var-id}
:diagram.node/type :clograms/multimethod-node})
(defmethod build-node :var
[_ var-id]
{:entity {:entity/type :var
:var/id var-id}
:diagram.node/type :clograms/var-node})
(defmethod build-node :re-frame-subs
[_ id]
{:entity {:entity/type :re-frame-subs
:id id}
:diagram.node/type :clograms/re-frame-subs-node})
(defmethod build-node :re-frame-event
[_ id]
{:entity {:entity/type :re-frame-event
:id id}
:diagram.node/type :clograms/re-frame-event-node})
(defmethod build-node :re-frame-fx
[_ id]
{:entity {:entity/type :re-frame-fx
:id id}
:diagram.node/type :clograms/re-frame-fx-node})
(defmethod build-node :re-frame-cofx
[_ id]
{:entity {:entity/type :re-frame-cofx
:id id}
:diagram.node/type :clograms/re-frame-cofx-node})
(defmethod build-node :spec
[_ spec-id]
{:entity {:entity/type :spec
:spec/id spec-id}
:diagram.node/type :clograms/spec-node})
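;; Usage sketch (REPL, hypothetical id):
;;   (build-node :function 42)
;;   ;; => {:entity {:entity/type :function :var/id 42}
;;   ;;     :diagram.node/type :clograms/function-node}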
| {
"pile_set_name": "Github"
} |
--------------------------------------------------------
-- This file was automatically generated by Ocarina --
-- Do NOT hand-modify this file, as your --
-- changes will be lost when you re-run Ocarina --
--------------------------------------------------------
pragma Style_Checks
("NM32766");
with PolyORB_HI.Utils;
with System;
with PolyORB_HI_Generated.Deployment;
package PolyORB_HI_Generated.Naming is
-- Naming Table for bus the_bus
Naming_Table : constant PolyORB_HI.Utils.Naming_Table_Type :=
(PolyORB_HI_Generated.Deployment.pr_A_K =>
(PolyORB_HI.Utils.To_Hi_String
("127.0.0.1"),
4001,
System.null_Address),
PolyORB_HI_Generated.Deployment.pr_B_K =>
(PolyORB_HI.Utils.To_Hi_String
("127.0.0.1"),
4002,
System.null_Address),
others =>
(PolyORB_HI.Utils.To_Hi_String
(""),
0,
System.null_Address));
end PolyORB_HI_Generated.Naming;
| {
"pile_set_name": "Github"
} |
process.env.NODE_ENV = 'test';
var mongoose = require('mongoose'),
root = __dirname + '/../../../',
utils = require(root + 'lib/utils'),
should = require('should'),
moment = require('moment'),
_ = require('underscore'),
cleanDb = utils.cleanDb,
ENV, clientsBulk;
ENV = process.env.NODE_ENV;
describe('Models::Client', function() {
var config, Client;
before(function(done) {
utils.loadConfig(root + 'config', function(conf) {
config = conf;
mongoose = utils.connectToDatabase(mongoose, config.db[ENV].main, function (err) {
if (err) { throw err; }
Client = require(root + 'app/models/client')(mongoose);
done();
});
});
});
after(function(done) {
cleanDb(Client, function() {
mongoose.disconnect();
setTimeout(done, 1000);
});
});
describe('#New client', function() {
beforeEach(function(done) {
cleanDb(Client, function() {
setTimeout(done, 200);
});
});
it('should not have a photo by default', function() {
var newClient = new Client();
newClient.photo.should.be.false;
});
it('should have the fields name, email, company, born required', function() {
var newClient = new Client();
newClient.save(function(err) {
err.errors.should.have.property('name');
err.errors.should.have.property('email');
err.errors.should.have.property('company');
err.errors.should.have.property('born');
});
});
// 2 <= name.length <= 100
it('should have a valid name', function(done) {
var newClient = new Client(),
anotherClient = new Client(),
lastClient = new Client(),
left = 3,
temp = '',
i;
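// `left` counts the pending async saves below; done() fires only after
// all three save callbacks have run (a pre-Promise Mocha pattern).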
newClient.name = "A";
newClient.email = "[email protected]";
newClient.company = "Peach Corp";
newClient.born = moment().year(1987).toDate();
newClient.save(function(err) {
err.errors.should.have.property('name');
if (!--left) { done(); }
});
for (i = 1; i <= 101; i++) {
temp += 'a';
}
anotherClient.name = temp;
anotherClient.email = "[email protected]";
anotherClient.company = "Peach Corp2";
anotherClient.born = moment().year(1980).toDate();
anotherClient.save(function(err) {
err.errors.should.have.property('name');
if (!--left) { done(); }
});
lastClient.name = "Andrew";
lastClient.email = "[email protected]";
lastClient.company = "IT Corp";
lastClient.born = moment().year(1987).toDate();
lastClient.save(function(err) {
should.not.exist(err);
if (!--left) { done(); }
});
});
it('should have a valid email', function(done) {
var newClient = new Client(),
anotherClient = new Client(),
lastClient = new Client(),
left = 3;
newClient.name = "Andrew";
newClient.email = "[email protected]";
newClient.company = "Peach Corp";
newClient.born = moment().year(1987).toDate();
newClient.save(function(err) {
err.errors.should.have.property('email');
if (!--left) { done(); }
});
anotherClient.name = "John";
anotherClient.email = "example2@example";
anotherClient.company = "Peach Corp2";
anotherClient.born = moment().year(1980).toDate();
anotherClient.save(function(err) {
err.errors.should.have.property('email');
if (!--left) { done(); }
});
lastClient.name = "Andrew";
lastClient.email = "[email protected]";
lastClient.company = "IT Corp";
lastClient.born = moment().year(1987).toDate();
lastClient.save(function(err) {
should.not.exist(err);
if (!--left) { done(); }
});
});
it('should have a valid birth date', function(done) {
var newClient = new Client(),
anotherClient = new Client(),
lastClient = new Client(),
left = 3;
newClient.name = "Andrew";
newClient.email = "[email protected]";
newClient.company = "Peach Corp";
newClient.born = "abc";
newClient.save(function(err) {
err.name.should.equal('CastError');
if (!--left) { done(); }
});
anotherClient.name = "John";
anotherClient.email = "[email protected]";
anotherClient.company = "Peach Corp2";
anotherClient.born = moment().subtract('years', 17).toDate();
anotherClient.save(function(err) {
err.errors.should.have.property('born');
if (!--left) { done(); }
});
lastClient.name = "Andrew";
lastClient.email = "[email protected]";
lastClient.company = "IT Corp";
lastClient.born = moment().year(1987).toDate();
lastClient.save(function(err) {
should.not.exist(err);
if (!--left) { done(); }
});
});
});
describe('#Static methods', function() {
before(function(done) {
cleanDb(Client, function() {
utils.loadFixtures(function(err, clients) {
if (err) { throw err; }
clientsBulk = clients;
utils.bulkInsert(Client, clients, done);
});
});
});
it('should search client by name', function(done) {
var searchTerm = 'Fiona';
Client.search({ name: searchTerm }, function(err, docs) {
var expectedClients;
expectedClients = _.filter(clientsBulk, function(doc) {
return doc.name.indexOf(searchTerm) !== -1;
});
// "sanitize" docs just in case, since Mongoose has strange getters and setters
docs = JSON.parse(JSON.stringify(docs));
docs.length.should.equal(2);
_.isEqual(docs, expectedClients).should.be.true;
done();
});
});
it('should search client by email', function(done) {
var searchTerm = '[email protected]';
Client.search({ email: searchTerm }, function(err, docs) {
var expectedClient;
expectedClient = _.find(clientsBulk, function(doc) {
return doc.email.indexOf(searchTerm) !== -1;
});
docs = JSON.parse(JSON.stringify(docs));
docs.length.should.equal(1);
_.isEqual(docs[0], expectedClient).should.be.true;
done();
});
});
it('should search client by company', function(done) {
var searchTerm = 'Emmerich, Schuppe';
Client.search({ company: searchTerm }, function(err, docs) {
var expectedClients;
expectedClients = _.filter(clientsBulk, function(doc) {
return doc.company.indexOf(searchTerm) !== -1;
});
docs = JSON.parse(JSON.stringify(docs));
docs.length.should.equal(2);
_.isEqual(docs, expectedClients).should.be.true;
done();
});
});
});
});
| {
"pile_set_name": "Github"
} |
msgid ""
msgstr ""
"Project-Id-Version: \n"
"POT-Creation-Date: \n"
"PO-Revision-Date: \n"
"Last-Translator: Joris MASSON <[email protected]>\n"
"Language-Team: \n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Generator: Poedit 2.2.4\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
msgid ""
"An error occured while retrieving needed information to create a pull "
"request."
msgstr ""
"Une erreur est survenue lors de la récupération d'informations nécessaires "
"pour la création de pull request."
msgid "Cancel"
msgstr "Annuler"
msgid "Close"
msgstr "Fermer"
msgid "Create a pull request"
msgstr "Créer une pull request"
msgid "Create pull request"
msgstr "Créer une pull request"
msgid "Create the pull request"
msgstr "Créer la pull request"
msgid "Destination branch"
msgstr "Branche destination"
msgid "No pull request can currently be created"
msgstr "Aucune pull request ne peut actuellement être créée"
msgid "Oops"
msgstr "Oups"
msgid ""
"Refresh the page and try again; if the error persists, contact your\n"
" administrator."
msgstr ""
"Rafraîchissez la page et essayez à nouveau ; si l'erreur persiste, alors "
"contactez votre administrateur."
msgid "Source branch"
msgstr "Branche source"
| {
"pile_set_name": "Github"
} |
<%@page import="java.net.URLDecoder"%>
<%@page import="java.net.URLEncoder"%>
<%@page import="javax.crypto.spec.SecretKeySpec"%>
<%@page import="org.keycloak.common.util.Base64"%>
<%@page import="org.keycloak.jose.jws.JWSBuilder"%>
<%@page import="org.keycloak.representations.JsonWebToken"%>
<%
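// Test page flow: copy every non-underscore request parameter into a JWT,
// sign it with the shared HMAC-SHA256 secret below, then redirect to
// _tokenUrl with {APP_TOKEN} replaced by the encoded token.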
String secret = "aSqzP4reFgWR4j94BDT1r+81QYp/NYbY9SBwXtqV1ko=";
JsonWebToken tokenSentBack = new JsonWebToken();
SecretKeySpec hmacSecretKeySpec = new SecretKeySpec(Base64.decode(secret), "HmacSHA256");
for (java.util.Map.Entry<String, String[]> me : request.getParameterMap().entrySet()) {
String name = me.getKey();
if (! name.startsWith("_")) {
String decodedValue = URLDecoder.decode(me.getValue()[0], "UTF-8");
tokenSentBack.setOtherClaims(name, decodedValue);
}
}
String appToken = new JWSBuilder().jsonContent(tokenSentBack).hmac256(hmacSecretKeySpec);
String encodedToken = URLEncoder.encode(appToken, "UTF-8");
String decodedUrl = URLDecoder.decode(request.getParameter("_tokenUrl"), "UTF-8");
response.sendRedirect(decodedUrl.replace("{APP_TOKEN}", encodedToken));
%>
| {
"pile_set_name": "Github"
} |
using UnityEngine;
using System.Collections;
using UnityEngine.Profiling;
using System;
using System.Collections.Generic;
public class GameStatistics
{
public int rtt;
private readonly int _no_frames = 128;
public GameStatistics()
{
m_FrequencyMS = System.Diagnostics.Stopwatch.Frequency / 1000;
m_StopWatch = new System.Diagnostics.Stopwatch();
m_StopWatch.Start();
m_LastFrameTicks = m_StopWatch.ElapsedTicks;
m_FrameTimes = new float[_no_frames];
m_TicksPerFrame = new float[2][] { new float[_no_frames], new float[_no_frames] };
m_GraphicsDeviceName = SystemInfo.graphicsDeviceName;
for (int i = 0; i < recordersList.Length; i++)
{
var sampler = Sampler.Get(recordersList[i].name);
if (sampler != null)
{
recordersList[i].recorder = sampler.GetRecorder();
}
}
Console.AddCommand("show.profilers", CmdShowProfilers, "Show available profilers.");
}
void CmdShowProfilers(string[] args)
{
var names = new List<string>();
Sampler.GetNames(names);
string search = args.Length > 0 ? args[0].ToLower() : null;
for(var i = 0; i < names.Count; i++)
{
if(search == null || names[i].ToLower().Contains(search))
Console.Write(names[i]);
}
}
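// Console usage (example): `show.profilers render` lists every registered
// sampler whose name contains "render", case-insensitively.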
int m_LastWorldTick;
void SnapTime()
{
long now = m_StopWatch.ElapsedTicks;
long duration = now - m_LastFrameTicks;
m_LastFrameTicks = now;
float d = (float)duration / m_FrequencyMS;
m_FrameDurationMS = m_FrameDurationMS * 0.9f + 0.1f * d;
m_FrameTimes[Time.frameCount % m_FrameTimes.Length] = d;
}
void RecordTimers()
{
int ticks = 0;
if (GameWorld.s_Worlds.Count > 0)
{
var world = GameWorld.s_Worlds[0];
// Number of ticks in world since last frame.
ticks = world.worldTime.tick - m_LastWorldTick;
int l = Time.frameCount % m_TicksPerFrame[0].Length;
m_TicksPerFrame[0][l] = 1000.0f * world.worldTime.tickInterval * ticks;
m_LastWorldTick = world.worldTime.tick;
double lastTickTime = world.nextTickTime - world.worldTime.tickInterval;
m_TicksPerFrame[1][l] = (float)(1000.0 * (Game.frameTime - lastTickTime));
}
// get timing & update average accumulators
for (int i = 0; i < recordersList.Length; i++)
{
recordersList[i].time = recordersList[i].recorder.elapsedNanoseconds / 1000000.0f;
recordersList[i].count = recordersList[i].recorder.sampleBlockCount;
recordersList[i].accTime += recordersList[i].time;
recordersList[i].accCount += recordersList[i].count;
}
frameCount++;
// time to time, update average values & reset accumulators
if (frameCount >= kAverageFrameCount)
{
for (int i = 0; i < recordersList.Length; i++)
{
recordersList[i].avgTime = recordersList[i].accTime * (1.0f / kAverageFrameCount);
recordersList[i].avgCount = recordersList[i].accCount * (1.0f / kAverageFrameCount);
recordersList[i].accTime = 0.0f;
recordersList[i].accCount = 0;
}
frameCount = 0;
}
}
public void TickLateUpdate()
{
SnapTime();
if(showCompactStats.IntValue > 0)
{
DrawCompactStats();
}
if (showFPS.IntValue > 0)
{
RecordTimers();
DrawFPS();
}
}
private int frameCount = 0;
private const int kAverageFrameCount = 64;
internal class RecorderEntry
{
public string name;
public float time;
public int count;
public float avgTime;
public float avgCount;
public float accTime;
public int accCount;
public Recorder recorder;
};
RecorderEntry[] recordersList =
{
new RecorderEntry() { name="RenderLoop.Draw" },
new RecorderEntry() { name="Shadows.Draw" },
new RecorderEntry() { name="RenderLoopNewBatcher.Draw" },
new RecorderEntry() { name="ShadowLoopNewBatcher.Draw" },
new RecorderEntry() { name="RenderLoopDevice.Idle" },
new RecorderEntry() { name="StaticBatchDraw.Count" },
};
char[] buf = new char[256];
void DrawCompactStats()
{
DebugOverlay.AddQuadAbsolute(0, 0, 60, 14, '\0', new Vector4(1.0f, 1.0f, 1.0f, 0.2f));
var c = StringFormatter.Write(ref buf, 0, "FPS:{0}", Mathf.RoundToInt(1000.0f / m_FrameDurationMS));
DebugOverlay.WriteAbsolute(2, 2, 8.0f, buf, c);
DebugOverlay.AddQuadAbsolute(62, 0, 60, 14, '\0', new Vector4(1.0f, 1.0f, 0.0f, 0.2f));
if (rtt > 0)
c = StringFormatter.Write(ref buf, 0, "RTT:{0}", rtt);
else
c = StringFormatter.Write(ref buf, 0, "RTT:---");
DebugOverlay.WriteAbsolute(64, 2, 8.0f, buf, c);
}
void DrawFPS()
{
DebugOverlay.Write(0, 1, "{0} FPS ({1:##.##} ms)", Mathf.RoundToInt(1000.0f / m_FrameDurationMS), m_FrameDurationMS);
float minDuration = float.MaxValue;
float maxDuration = float.MinValue;
float sum = 0;
for (var i = 0; i < _no_frames; i++)
{
var frametime = m_FrameTimes[i];
sum += frametime;
if (frametime < minDuration) minDuration = frametime;
if (frametime > maxDuration) maxDuration = frametime;
}
DebugOverlay.Write(Color.green, 0, 2, "{0:##.##}", minDuration);
DebugOverlay.Write(Color.grey, 6, 2, "{0:##.##}", sum / _no_frames);
DebugOverlay.Write(Color.red, 12, 2, "{0:##.##}", maxDuration);
DebugOverlay.Write(0, 3, "Frame #: {0}", Time.frameCount);
DebugOverlay.Write(0, 4, m_GraphicsDeviceName);
int y = 6;
for (int i = 0; i < recordersList.Length; i++)
DebugOverlay.Write(0, y++, "{0:##.##}ms (*{1:##}) ({2:##.##}ms *{3:##}) {4}", recordersList[i].avgTime, recordersList[i].avgCount, recordersList[i].time, recordersList[i].count, recordersList[i].name);
if (showFPS.IntValue < 3)
return;
y++;
// Start at framecount+1 so the one we have just recorded will be the last
DebugOverlay.DrawHist(0, y, 20, 2, m_FrameTimes, Time.frameCount + 1, fpsColor, 20.0f);
DebugOverlay.DrawHist(0, y + 2, 20, 2, m_TicksPerFrame, Time.frameCount + 1, histColor, 3.0f * 16.0f);
DebugOverlay.DrawGraph(0, y + 6, 40, 2, m_FrameTimes, Time.frameCount + 1, fpsColor, 20.0f);
if (GameWorld.s_Worlds.Count > 0)
{
var world = GameWorld.s_Worlds[0];
DebugOverlay.Write(0, y + 8, "Tick: {0:##.#}", 1000.0f * world.worldTime.tickInterval);
}
}
Color fpsColor = new Color(0.5f, 0.0f, 0.2f);
Color[] histColor = new Color[] { Color.green, Color.grey };
System.Diagnostics.Stopwatch m_StopWatch;
long m_LastFrameTicks; // Ticks at start of last frame
float m_FrameDurationMS;
float[] m_FrameTimes;
float[][] m_TicksPerFrame;
long m_FrequencyMS;
string m_GraphicsDeviceName;
[ConfigVar(Name = "show.fps", DefaultValue = "0", Description = "Set to value > 0 to see fps stats.")]
public static ConfigVar showFPS;
[ConfigVar(Name = "show.compactstats", DefaultValue = "1", Description = "Set to value > 0 to see compact stats.")]
public static ConfigVar showCompactStats;
}
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading.Tasks;
namespace UWPClient.Oidc
{
/// <summary>
/// <see cref="http://oidc-server.test/.well-known/openid-configuration"/>
/// </summary>
public class OidcClient
{
private static readonly HttpClient Http = new HttpClient();
public static readonly OidcOptions Options = new OidcOptions();
public string BuildAuthorizeUrl(string idp = null)
{
var authorizeUrl = Options.AuthorizeEndpoint
+ $"?client_id={Options.ClientId}"
+ "&scope=openid"
+ "&response_type=code"
+ "&response_mode=query"
+ $"&redirect_uri={Options.RedirectUri}"
+ $"&state={Guid.NewGuid()}"
+ $"&nonce={Guid.NewGuid()}";
if (idp != null)
{
authorizeUrl += $"&acr_values=idp:{idp}";
}
return authorizeUrl;
}
public async Task<string> GetTokenAsync(string code)
{
var tokenParams = new FormUrlEncodedContent(new Dictionary<string, string>
{
["client_id"] = Options.ClientId,
["client_secret"] = Options.ClientSecret,
["grant_type"] = "authorization_code",
["code"] = code,
["redirect_uri"] = Options.RedirectUri
});
var tokenResponse = await Http.PostAsync(Options.TokenEndpoint, tokenParams);
return await tokenResponse.Content.ReadAsStringAsync();
}
public static string GetCode(string querySring)
{
querySring = querySring.TrimStart('?');
var namevalues = querySring.Split('&');
foreach (var nameValue in namevalues)
{
if (nameValue.StartsWith("code="))
{
return nameValue.Substring(5);
}
}
return string.Empty;
}
}
}
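// Usage sketch (hypothetical flow; endpoints and ids come from OidcOptions):
//   var oidc = new OidcClient();
//   var authorizeUrl = oidc.BuildAuthorizeUrl();        // navigate a WebView here
//   var code = OidcClient.GetCode(callbackQueryString); // after the redirect
//   var tokenJson = await oidc.GetTokenAsync(code);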
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// The test fails due to the missing is_trivially_constructible intrinsic.
// XFAIL: gcc-4.9
// <utility>
// template <class T1, class T2> struct pair
// Test that we properly provide the trivial copy operations by default.
// FreeBSD provides the old ABI. This test checks the new ABI so we need
// to manually turn it on.
#if defined(__FreeBSD__)
#define _LIBCPP_ABI_UNSTABLE
#endif
#include <utility>
#include <type_traits>
#include <cstdlib>
#include <cstddef>
#include <cassert>
#include "test_macros.h"
#if defined(_LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR)
#error Non-trivial ctor ABI macro defined
#endif
template <class T>
struct HasTrivialABI : std::integral_constant<bool,
std::is_trivially_destructible<T>::value
&& (!std::is_copy_constructible<T>::value || std::is_trivially_copy_constructible<T>::value)
#if TEST_STD_VER >= 11
&& (!std::is_move_constructible<T>::value || std::is_trivially_move_constructible<T>::value)
#endif
> {};
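// "Trivial ABI" here means: trivially destructible, and every constructor
// the type provides (copy, plus move in C++11) is trivial -- such pairs can
// typically be passed and returned in registers.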
#if TEST_STD_VER >= 11
struct NonTrivialDtor {
NonTrivialDtor(NonTrivialDtor const&) = default;
~NonTrivialDtor();
};
NonTrivialDtor::~NonTrivialDtor() {}
static_assert(!HasTrivialABI<NonTrivialDtor>::value, "");
struct NonTrivialCopy {
NonTrivialCopy(NonTrivialCopy const&);
};
NonTrivialCopy::NonTrivialCopy(NonTrivialCopy const&) {}
static_assert(!HasTrivialABI<NonTrivialCopy>::value, "");
struct NonTrivialMove {
NonTrivialMove(NonTrivialMove const&) = default;
NonTrivialMove(NonTrivialMove&&);
};
NonTrivialMove::NonTrivialMove(NonTrivialMove&&) {}
static_assert(!HasTrivialABI<NonTrivialMove>::value, "");
struct DeletedCopy {
DeletedCopy(DeletedCopy const&) = delete;
DeletedCopy(DeletedCopy&&) = default;
};
static_assert(HasTrivialABI<DeletedCopy>::value, "");
struct TrivialMove {
TrivialMove(TrivialMove &&) = default;
};
static_assert(HasTrivialABI<TrivialMove>::value, "");
struct Trivial {
Trivial(Trivial const&) = default;
};
static_assert(HasTrivialABI<Trivial>::value, "");
#endif
void test_trivial()
{
{
typedef std::pair<int, short> P;
static_assert(std::is_copy_constructible<P>::value, "");
static_assert(HasTrivialABI<P>::value, "");
}
#if TEST_STD_VER >= 11
{
typedef std::pair<int, short> P;
static_assert(std::is_move_constructible<P>::value, "");
static_assert(HasTrivialABI<P>::value, "");
}
{
using P = std::pair<NonTrivialDtor, int>;
static_assert(!std::is_trivially_destructible<P>::value, "");
static_assert(std::is_copy_constructible<P>::value, "");
static_assert(!std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(!std::is_trivially_move_constructible<P>::value, "");
static_assert(!HasTrivialABI<P>::value, "");
}
{
using P = std::pair<NonTrivialCopy, int>;
static_assert(std::is_copy_constructible<P>::value, "");
static_assert(!std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(!std::is_trivially_move_constructible<P>::value, "");
static_assert(!HasTrivialABI<P>::value, "");
}
{
using P = std::pair<NonTrivialMove, int>;
static_assert(std::is_copy_constructible<P>::value, "");
static_assert(std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(!std::is_trivially_move_constructible<P>::value, "");
static_assert(!HasTrivialABI<P>::value, "");
}
{
using P = std::pair<DeletedCopy, int>;
static_assert(!std::is_copy_constructible<P>::value, "");
static_assert(!std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(std::is_trivially_move_constructible<P>::value, "");
static_assert(HasTrivialABI<P>::value, "");
}
{
using P = std::pair<Trivial, int>;
static_assert(std::is_copy_constructible<P>::value, "");
static_assert(std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(std::is_trivially_move_constructible<P>::value, "");
static_assert(HasTrivialABI<P>::value, "");
}
{
using P = std::pair<TrivialMove, int>;
static_assert(!std::is_copy_constructible<P>::value, "");
static_assert(!std::is_trivially_copy_constructible<P>::value, "");
static_assert(std::is_move_constructible<P>::value, "");
static_assert(std::is_trivially_move_constructible<P>::value, "");
static_assert(HasTrivialABI<P>::value, "");
}
#endif
}
void test_layout() {
typedef std::pair<std::pair<char, char>, char> PairT;
static_assert(sizeof(PairT) == 3, "");
static_assert(TEST_ALIGNOF(PairT) == TEST_ALIGNOF(char), "");
static_assert(offsetof(PairT, first) == 0, "");
}
int main(int, char**) {
test_trivial();
test_layout();
return 0;
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_79) on Thu Nov 01 21:10:26 PDT 2018 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>com.ctc.wstx.cfg Class Hierarchy (Woodstox 5.2.0 API)</title>
<meta name="date" content="2018-11-01">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="com.ctc.wstx.cfg Class Hierarchy (Woodstox 5.2.0 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li class="navBarCell1Rev">Tree</li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../com/ctc/wstx/api/package-tree.html">Prev</a></li>
<li><a href="../../../../com/ctc/wstx/compat/package-tree.html">Next</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?com/ctc/wstx/cfg/package-tree.html" target="_top">Frames</a></li>
<li><a href="package-tree.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 class="title">Hierarchy For Package com.ctc.wstx.cfg</h1>
<span class="strong">Package Hierarchies:</span>
<ul class="horizontal">
<li><a href="../../../../overview-tree.html">All Packages</a></li>
</ul>
</div>
<div class="contentContainer">
<h2 title="Class Hierarchy">Class Hierarchy</h2>
<ul>
<li type="circle">java.lang.<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang"><span class="strong">Object</span></a>
<ul>
<li type="circle">com.ctc.wstx.cfg.<a href="../../../../com/ctc/wstx/cfg/ErrorConsts.html" title="class in com.ctc.wstx.cfg"><span class="strong">ErrorConsts</span></a> (implements javax.xml.stream.<a href="http://docs.oracle.com/javase/6/docs/api/javax/xml/stream/XMLStreamConstants.html?is-external=true" title="class or interface in javax.xml.stream">XMLStreamConstants</a>)</li>
</ul>
</li>
</ul>
<h2 title="Interface Hierarchy">Interface Hierarchy</h2>
<ul>
<li type="circle">com.ctc.wstx.cfg.<a href="../../../../com/ctc/wstx/cfg/InputConfigFlags.html" title="interface in com.ctc.wstx.cfg"><span class="strong">InputConfigFlags</span></a></li>
<li type="circle">com.ctc.wstx.cfg.<a href="../../../../com/ctc/wstx/cfg/OutputConfigFlags.html" title="interface in com.ctc.wstx.cfg"><span class="strong">OutputConfigFlags</span></a></li>
<li type="circle">com.ctc.wstx.cfg.<a href="../../../../com/ctc/wstx/cfg/ParsingErrorMsgs.html" title="interface in com.ctc.wstx.cfg"><span class="strong">ParsingErrorMsgs</span></a></li>
<li type="circle">com.ctc.wstx.cfg.<a href="../../../../com/ctc/wstx/cfg/XmlConsts.html" title="interface in com.ctc.wstx.cfg"><span class="strong">XmlConsts</span></a></li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li class="navBarCell1Rev">Tree</li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../com/ctc/wstx/api/package-tree.html">Prev</a></li>
<li><a href="../../../../com/ctc/wstx/compat/package-tree.html">Next</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?com/ctc/wstx/cfg/package-tree.html" target="_top">Frames</a></li>
<li><a href="package-tree.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2018 <a href="http://fasterxml.com">FasterXML</a>. All rights reserved.</small></p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/**
* @license
* Copyright (c) 2015 The Polymer Project Authors. All rights reserved.
* This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
* The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
* The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
* Code distributed by Google as part of the polymer project is also
* subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
*/
'use strict';
import * as jsdoc from './jsdoc'
import * as dom5 from 'dom5'
import {
FeatureDescriptor, FunctionDescriptor, PropertyDescriptor, Descriptor,
ElementDescriptor, BehaviorsByName, EventDescriptor, BehaviorDescriptor
} from './descriptors'
/** Properties on element prototypes that are purely configuration. */
const ELEMENT_CONFIGURATION = [
'attached',
'attributeChanged',
'beforeRegister',
'configure',
'constructor',
'created',
'detached',
'enableCustomStyleProperties',
'extends',
'hostAttributes',
'is',
'listeners',
'mixins',
'properties',
'ready',
'registered'
];
/** Tags understood by the annotation process, to be removed during `clean`. */
const HANDLED_TAGS = [
'param',
'return',
'type',
];
/**
* Annotates Hydrolysis descriptors, processing any `desc` properties as JSDoc.
*
* You probably want to use a more specialized version of this, such as
* `annotateElement`.
*
* Processed JSDoc values will be made available via the `jsdoc` property on a
* descriptor node.
*
* @param {Object} descriptor The descriptor node to process.
* @return {Object} The descriptor that was given.
*/
export function annotate(descriptor: Descriptor): Descriptor {
if (!descriptor || descriptor.jsdoc) return descriptor;
if (typeof descriptor.desc === 'string') {
descriptor.jsdoc = jsdoc.parseJsdoc(descriptor.desc);
// We want to present the normalized form of a descriptor.
descriptor.jsdoc.orig = descriptor.desc;
descriptor.desc = descriptor.jsdoc.description;
}
return descriptor;
}
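// Example (hypothetical input): annotate({desc: 'Fires on tap.\n@param e'})
// parses the text as JSDoc, stashes the result on `jsdoc`, and rewrites
// `desc` to the normalized description.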
/**
* Annotates @event, @hero, & @demo tags
*/
export function annotateElementHeader(descriptor: ElementDescriptor) {
if (descriptor.events) {
descriptor.events.forEach(function(event) {
_annotateEvent(event);
});
descriptor.events.sort( function(a,b) {
return a.name.localeCompare(b.name);
});
}
descriptor.demos = [];
if (descriptor.jsdoc && descriptor.jsdoc.tags) {
descriptor.jsdoc.tags.forEach( function(tag) {
switch(tag.tag) {
case 'hero':
descriptor.hero = tag.name || 'hero.png';
break;
case 'demo':
descriptor.demos.push({
desc: tag.description || 'demo',
path: tag.name || 'demo/index.html'
});
break;
}
});
}
}
function copyProperties(
from:ElementDescriptor, to:ElementDescriptor,
behaviorsByName:BehaviorsByName) {
if (from.properties) {
from.properties.forEach(function(fromProp){
for (var toProp:PropertyDescriptor, i = 0;
i < to.properties.length; i++) {
toProp = to.properties[i];
if (fromProp.name === toProp.name) {
return;
}
}
if (fromProp.__fromBehavior) {
return;
}
var newProp = {__fromBehavior: from.is};
Object.keys(fromProp).forEach(function(propertyField){
newProp[propertyField] = fromProp[propertyField];
});
to.properties.push(<any>newProp);
});
from.events.forEach(function(fromEvent){
for (var toEvent:EventDescriptor, i = 0; i < to.events.length; i++) {
toEvent = to.events[i];
if (fromEvent.name === toEvent.name) {
return;
}
}
if (fromEvent.__fromBehavior) {
return;
}
var newEvent = {__fromBehavior: from.is};
Object.keys(fromEvent).forEach(function(eventField){
newEvent[eventField] = fromEvent[eventField];
});
to.events.push(newEvent);
});
}
if (!from.behaviors) {
return;
}
for (let i = from.behaviors.length - 1; i >= 0; i--) {
// TODO: what's up with behaviors sometimes being a literal, and sometimes
// being a descriptor object?
const localBehavior: any = from.behaviors[i];
var definedBehavior =
behaviorsByName[localBehavior] || behaviorsByName[localBehavior.symbol];
if (!definedBehavior) {
console.warn("Behavior " + localBehavior + " not found when mixing " +
"properties into " + to.is + "!");
return;
}
copyProperties(definedBehavior, to, behaviorsByName);
}
}
function mixinBehaviors(
descriptor:ElementDescriptor, behaviorsByName: BehaviorsByName) {
if (descriptor.behaviors) {
for (let i = descriptor.behaviors.length - 1; i >= 0; i--) {
const behavior = <string>descriptor.behaviors[i];
if (!behaviorsByName[behavior]) {
console.warn("Behavior " + behavior + " not found when mixing " +
"properties into " + descriptor.is + "!");
break;
}
var definedBehavior = behaviorsByName[<string>behavior];
copyProperties(definedBehavior, descriptor, behaviorsByName);
}
}
}
/**
* Annotates documentation found within a Hydrolysis element descriptor. Also
* supports behaviors.
*
* If the element was processed via `hydrolize`, the element's documentation
* will also be extracted via its <dom-module>.
*
* @param {Object} descriptor The element descriptor.
* @return {Object} The descriptor that was given.
*/
export function annotateElement(
descriptor: ElementDescriptor,
behaviorsByName: BehaviorsByName): ElementDescriptor {
if (!descriptor.desc && descriptor.type === 'element') {
descriptor.desc = _findElementDocs(descriptor.is,
descriptor.domModule,
descriptor.scriptElement);
}
annotate(descriptor);
// The `<dom-module>` is too low level for most needs, and it is _not_
// serializable. So we drop it now that we've extracted all the useful bits
// from it.
// TODO: Don't worry about serializability here, provide an API to get JSON.
delete descriptor.domModule;
mixinBehaviors(descriptor, behaviorsByName);
// Descriptors that should have their `desc` properties parsed as JSDoc.
descriptor.properties.forEach(function(property) {
// Feature properties are special, configuration is really just a matter of
// inheritance...
annotateProperty(property, descriptor.abstract);
});
// It may seem like overkill to always sort, but we have an assumption that
// these properties are typically being consumed by user-visible tooling.
// As such, it's good to have consistent output/ordering to aid the user.
descriptor.properties.sort(function(a, b) {
// Private properties are always last.
if (a.private && !b.private) {
return 1;
} else if (!a.private && b.private) {
return -1;
// Otherwise, we're just sorting alphabetically.
} else {
return a.name.localeCompare(b.name);
}
});
annotateElementHeader(descriptor);
return descriptor;
}
/**
* Annotates behavior descriptor.
* @param {Object} descriptor behavior descriptor
* @return {Object} descriptor passed in as param
*/
export function annotateBehavior(
descriptor:BehaviorDescriptor): BehaviorDescriptor {
annotate(descriptor);
annotateElementHeader(descriptor);
return descriptor;
}
/**
* Annotates event documentation
*/
function _annotateEvent(descriptor:EventDescriptor): EventDescriptor {
annotate(descriptor);
// process @event
var eventTag = jsdoc.getTag(descriptor.jsdoc, 'event');
descriptor.name = eventTag ? eventTag.description : "N/A";
// process @params
descriptor.params = (descriptor.jsdoc.tags || [])
.filter(function(tag) {
return tag.tag === 'param';
})
.map(function(tag) {
return {
type: tag.type || "N/A",
desc: tag.description,
name: tag.name || "N/A"
};
});
return descriptor;
}
/**
* Annotates documentation found about a Hydrolysis property descriptor.
*
* @param {Object} descriptor The property descriptor.
* @param {boolean} ignoreConfiguration If true, `configuration` is not set.
* @return {Object} The descriptior that was given.
*/
function annotateProperty(
descriptor:PropertyDescriptor,
ignoreConfiguration:boolean): PropertyDescriptor {
annotate(descriptor);
if (descriptor.name[0] === '_' || jsdoc.hasTag(descriptor.jsdoc, 'private')) {
descriptor.private = true;
}
if (!ignoreConfiguration &&
ELEMENT_CONFIGURATION.indexOf(descriptor.name) !== -1) {
descriptor.private = true;
descriptor.configuration = true;
}
// @type JSDoc wins
descriptor.type =
jsdoc.getTag(descriptor.jsdoc, 'type', 'type') || descriptor.type;
if (descriptor.type.match(/^function/i)) {
_annotateFunctionProperty(<FunctionDescriptor>descriptor);
}
// @default JSDoc wins
var defaultTag = jsdoc.getTag(descriptor.jsdoc, 'default');
if (defaultTag !== null) {
var newDefault = (defaultTag.name || '') + (defaultTag.description || '');
if (newDefault !== '') {
descriptor.default = newDefault;
}
}
return descriptor;
}
function _annotateFunctionProperty(descriptor:FunctionDescriptor) {
descriptor.function = true;
var returnTag = jsdoc.getTag(descriptor.jsdoc, 'return');
if (returnTag) {
descriptor.return = {
type: returnTag.type,
desc: returnTag.description,
};
}
var paramsByName = {};
(descriptor.params || []).forEach(function(param) {
paramsByName[param.name] = param;
});
(descriptor.jsdoc && descriptor.jsdoc.tags || []).forEach(function(tag) {
if (tag.tag !== 'param') return;
var param = paramsByName[tag.name];
if (!param) {
return;
}
param.type = tag.type || param.type;
param.desc = tag.description;
});
}
/**
* Converts raw features into an abstract `Polymer.Base` element.
*
* Note that docs on this element _are not processed_. You must call
* `annotateElement` on it yourself if you wish that.
*
* @param {Array<FeatureDescriptor>} features
* @return {ElementDescriptor}
*/
export function featureElement(
features:FeatureDescriptor[]): ElementDescriptor {
var properties = features.reduce<PropertyDescriptor[]>((result, feature) => {
return result.concat(feature.properties);
}, []);
return {
type: 'element',
is: 'Polymer.Base',
abstract: true,
properties: properties,
desc: '`Polymer.Base` acts as a base prototype for all Polymer ' +
'elements. It is composed via various calls to ' +
'`Polymer.Base._addFeature()`.\n' +
'\n' +
'The properties reflected here are the combined view of all ' +
'features found in this library. There may be more properties ' +
'added via other libraries, as well.',
};
}
/**
* Cleans redundant properties from a descriptor, assuming that you have already
* called `annotate`.
*
* @param {Object} descriptor
*/
export function clean(descriptor:Descriptor) {
if (!descriptor.jsdoc) return;
// The doctext was written to `descriptor.desc`
delete descriptor.jsdoc.description;
delete descriptor.jsdoc.orig;
var cleanTags:jsdoc.Tag[] = [];
(descriptor.jsdoc.tags || []).forEach(function(tag) {
// Drop any tags we've consumed.
if (HANDLED_TAGS.indexOf(tag.tag) !== -1) return;
cleanTags.push(tag);
});
if (cleanTags.length === 0) {
// No tags? no docs left!
delete descriptor.jsdoc;
} else {
descriptor.jsdoc.tags = cleanTags;
}
}
/**
* Cleans redundant properties from an element, assuming that you have already
* called `annotateElement`.
*
* @param {ElementDescriptor|BehaviorDescriptor} element
*/
export function cleanElement(element:ElementDescriptor) {
clean(element);
element.properties.forEach(cleanProperty);
}
/**
* Cleans redundant properties from a property, assuming that you have already
* called `annotateProperty`.
*
* @param {PropertyDescriptor} property
*/
function cleanProperty(property:PropertyDescriptor) {
clean(property);
}
/**
* Parse elements defined only in comments.
* @param {comments} Array<string> A list of comments to parse.
* @return {ElementDescriptor} A list of pseudo-elements.
*/
export function parsePseudoElements(comments: string[]):ElementDescriptor[] {
var elements: ElementDescriptor[] = [];
comments.forEach(function(comment) {
var parsedJsdoc = jsdoc.parseJsdoc(comment);
var pseudoTag = jsdoc.getTag(parsedJsdoc, 'pseudoElement', 'name');
if (pseudoTag) {
let element: ElementDescriptor = {
is: pseudoTag,
type: 'element',
jsdoc: {description: parsedJsdoc.description, tags: parsedJsdoc.tags},
properties: [],
desc: parsedJsdoc.description,
}
annotateElementHeader(element);
elements.push(element);
}
});
return elements;
}
/**
* @param {string} elementId
* @param {DocumentAST} domModule
* @param {DocumentAST} scriptElement The script that the element was
* defined in.
*/
function _findElementDocs(
elementId:string, domModule:dom5.Node, scriptElement:dom5.Node) {
// Note that we concatenate docs from all sources if we find them.
// element can be defined in:
// html comment right before dom-module
// html comment right before the script defining the module,
// if dom-module is empty
var found:string[] = [];
// Do we have a HTML comment on the `<dom-module>` or `<script>`?
//
// Confusingly, with our current style, the comment will be attached to
// `<head>`, rather than being a sibling to the `<dom-module>`
var searchRoot = domModule || scriptElement;
var parents = dom5.nodeWalkAllPrior(searchRoot, dom5.isCommentNode);
var comment = parents.length > 0 ? parents[0] : null;
if (comment && comment.data) {
found.push(comment.data);
}
if (found.length === 0) return null;
return found
.filter(function(comment) {
// skip @license comments
if (comment && comment.indexOf('@license') === -1) {
return true;
}
else {
return false;
}
})
.map(jsdoc.unindent).join('\n');
}
function _findLastChildNamed(name:string, parent:dom5.Node) {
var children = parent.childNodes;
for (var i = children.length - 1; i >= 0; i--) {
let child = children[i];
if (child.nodeName === name) return child;
}
return null;
}
// TODO(nevir): parse5-utils!
function _getNodeAttribute(node:dom5.Node, name:string) {
for (var i = 0; i < node.attrs.length; i++) {
let attr = node.attrs[i];
if (attr.name === name) {
return attr.value;
}
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.metricsproxy.service;
import com.google.inject.Inject;
import com.yahoo.component.AbstractComponent;
import java.util.logging.Level;
import com.yahoo.jrt.ErrorCode;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.Spec;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Target;
import com.yahoo.jrt.Transport;
import java.io.BufferedReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
/**
* Connects to the config sentinel and gets information like pid for the services on the node
*/
public class ConfigSentinelClient extends AbstractComponent {
private final static Logger log = Logger.getLogger(ConfigSentinelClient.class.getName());
private final Supervisor supervisor = new Supervisor(new Transport("sentinel-client"));
@Inject
public ConfigSentinelClient() {
}
@Override
public void deconstruct() {
supervisor.transport().shutdown().join();
super.deconstruct();
}
/**
* Update all services reading from config sentinel
*
* @param services The list of services
*/
synchronized void updateServiceStatuses(List<VespaService> services) {
try {
setStatus(services);
} catch (Exception e) {
log.log(Level.SEVERE, "Unable to update service pids from sentinel", e);
}
}
/**
* Update status
*
* @param s The service to update the status for
*/
synchronized void ping(VespaService s) {
List<VespaService> services = new ArrayList<>();
services.add(s);
log.log(Level.FINE, "Ping for service " + s);
try {
setStatus(services);
} catch (Exception e) {
log.log(Level.SEVERE, "Unable to update service pids from sentinel", e);
}
}
/**
* Update the status (pid check etc)
*
* @param services list of services
* @throws Exception if something went wrong
*/
protected synchronized void setStatus(List<VespaService> services) throws Exception {
String in = sentinelLs();
BufferedReader reader = new BufferedReader(new StringReader(in));
String line;
List<VespaService> updatedServices = new ArrayList<>();
while ((line = reader.readLine()) != null) {
if (line.equals("")) {
break;
}
VespaService s = parseServiceString(line, services);
if (s != null) {
updatedServices.add(s);
}
}
// Check for services that were not found in the sentinel output
for (VespaService s : services) {
if ((!s.getServiceName().equals("configserver")) && !updatedServices.contains(s)) {
log.log(Level.FINE,"Service " + s + " is no longer found with sentinel - setting alive = false");
s.setAlive(false);
}
}
// Close the reader
reader.close();
}
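/**
 * Parses one line of sentinel output. Each line is expected to start with
 * the service name followed by space-separated key=value pairs, for
 * example "searchnode state=RUNNING pid=1234" (format inferred from the
 * parsing below).
 *
 * @return the matching VespaService, or null if the name is unknown
 */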
static VespaService parseServiceString(String line, List<VespaService> services) {
String[] parts = line.split(" ");
if (parts.length < 3)
return null;
String name = parts[0];
int pid = -1;
String state = null;
VespaService service = null;
for (VespaService s : services) {
if (s.getInstanceName().compareToIgnoreCase(name) == 0) {
service = s;
break;
}
}
// Could not find this service; nothing wrong with that,
// as the check is invoked per line of sentinel output
if (service == null) {
return service;
}
for (int i = 1; i < parts.length; i++) {
String[] keyValue = parts[i].split("=");
if (keyValue.length < 2) {
continue; // Skip malformed tokens without a '=' separator
}
String key = keyValue[0];
String value = keyValue[1];
if (key.equals("state")) {
state = value;
} else if (key.equals("pid")) {
pid = Integer.parseInt(value);
}
}
if (state != null) {
service.setState(state);
if (pid >= 0 && "RUNNING".equals(state)) {
service.setAlive(true);
service.setPid(pid);
} else {
service.setAlive(false);
}
} else {
service.setAlive(false);
}
return service;
}
String sentinelLs() {
String servicelist = "";
int rpcPort = 19097;
Spec spec = new Spec("localhost", rpcPort);
Target connection = supervisor.connect(spec);
try {
if (connection.isValid()) {
Request req = new Request("sentinel.ls");
connection.invokeSync(req, 5.0);
if (req.errorCode() == ErrorCode.NONE &&
req.checkReturnTypes("s"))
{
servicelist = req.returnValues().get(0).asString();
} else {
log.log(Level.WARNING, "Bad answer to RPC request: " + req.errorMessage());
}
} else {
log.log(Level.WARNING, "Could not connect to sentinel at: "+spec);
}
return servicelist;
} finally {
connection.close();
}
}
}
| {
"pile_set_name": "Github"
} |
import pandas as pd
import featuretools as ft
from featuretools.entityset.deserialize import description_to_entityset
from featuretools.feature_base.features_serializer import FeaturesSerializer
SCHEMA_VERSION = "6.0.0"
def test_single_feature(es):
feature = ft.IdentityFeature(es['log']['value'])
serializer = FeaturesSerializer([feature])
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [feature.unique_name()],
'feature_definitions': {
feature.unique_name(): feature.to_dictionary()
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_base_features_in_list(es):
value = ft.IdentityFeature(es['log']['value'])
max_feature = ft.AggregationFeature(value, es['sessions'], ft.primitives.Max)
features = [max_feature, value]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [max_feature.unique_name(), value.unique_name()],
'feature_definitions': {
max_feature.unique_name(): max_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_multi_output_features(es):
value = ft.IdentityFeature(es['log']['product_id'])
threecommon = ft.primitives.NMostCommon()
tc = ft.Feature(es['log']['product_id'], parent_entity=es["sessions"], primitive=threecommon)
features = [tc, value]
for i in range(3):
features.append(ft.Feature(tc[i],
parent_entity=es['customers'],
primitive=ft.primitives.NumUnique))
features.append(tc[i])
serializer = FeaturesSerializer(features)
flist = [feat.unique_name() for feat in features]
fd = [feat.to_dictionary() for feat in features]
fdict = dict(zip(flist, fd))
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': flist,
'feature_definitions': fdict
}
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_base_features_not_in_list(es):
value = ft.IdentityFeature(es['log']['value'])
value_x2 = ft.TransformFeature(value,
ft.primitives.MultiplyNumericScalar(value=2))
max_feature = ft.AggregationFeature(value_x2, es['sessions'], ft.primitives.Max)
features = [max_feature]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [max_feature.unique_name()],
'feature_definitions': {
max_feature.unique_name(): max_feature.to_dictionary(),
value_x2.unique_name(): value_x2.to_dictionary(),
value.unique_name(): value.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_where_feature_dependency(es):
value = ft.IdentityFeature(es['log']['value'])
is_purchased = ft.IdentityFeature(es['log']['purchased'])
max_feature = ft.AggregationFeature(value, es['sessions'], ft.primitives.Max,
where=is_purchased)
features = [max_feature]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [max_feature.unique_name()],
'feature_definitions': {
max_feature.unique_name(): max_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
is_purchased.unique_name(): is_purchased.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_feature_use_previous_pd_timedelta(es):
value = ft.IdentityFeature(es['log']['id'])
td = pd.Timedelta(12, "W")
count_feature = ft.AggregationFeature(value, es['customers'], ft.primitives.Count, use_previous=td)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [count_feature.unique_name(), value.unique_name()],
'feature_definitions': {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_feature_use_previous_pd_dateoffset(es):
value = ft.IdentityFeature(es['log']['id'])
do = pd.DateOffset(months=3)
count_feature = ft.AggregationFeature(value, es['customers'], ft.primitives.Count, use_previous=do)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [count_feature.unique_name(), value.unique_name()],
'feature_definitions': {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
value = ft.IdentityFeature(es['log']['id'])
do = pd.DateOffset(months=3, days=2, minutes=30)
count_feature = ft.AggregationFeature(value, es['customers'], ft.primitives.Count, use_previous=do)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
'ft_version': ft.__version__,
'schema_version': SCHEMA_VERSION,
'entityset': es.to_dictionary(),
'feature_list': [count_feature.unique_name(), value.unique_name()],
'feature_definitions': {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
}
}
_compare_feature_dicts(expected, serializer.to_dict())
def _compare_feature_dicts(a_dict, b_dict):
# We can't compare entityset dictionaries because variable lists are not
# guaranteed to be in the same order.
es_a = description_to_entityset(a_dict.pop('entityset'))
es_b = description_to_entityset(b_dict.pop('entityset'))
assert es_a == es_b
assert a_dict == b_dict
| {
"pile_set_name": "Github"
} |
// -*- C++ -*-
// Copyright (C) 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
// Copyright (C) 2004 Ami Tavory and Vladimir Dreizin, IBM-HRL.
// Permission to use, copy, modify, sell, and distribute this software
// is hereby granted without fee, provided that the above copyright
// notice appears in all copies, and that both that copyright notice
// and this permission notice appear in supporting documentation. None
// of the above authors, nor IBM Haifa Research Laboratories, make any
// representation about the suitability of this software for any
// purpose. It is provided "as is" without express or implied
// warranty.
/**
* @file erase_no_store_hash_fn_imps.hpp
* Contains implementations of cc_ht_map_'s erase related functions,
* when the hash value is not stored.
*/
PB_DS_CLASS_T_DEC
inline bool
PB_DS_CLASS_C_DEC::
erase(const_key_reference r_key)
{
_GLIBCXX_DEBUG_ONLY(assert_valid();)
return erase_in_pos_imp(r_key, ranged_hash_fn_base::operator()(r_key));
}
PB_DS_CLASS_T_DEC
inline bool
PB_DS_CLASS_C_DEC::
erase_in_pos_imp(const_key_reference r_key, size_type pos)
{
_GLIBCXX_DEBUG_ONLY(assert_valid();)
entry_pointer p_e = m_entries[pos];
resize_base::notify_erase_search_start();
if (p_e == NULL)
{
resize_base::notify_erase_search_end();
_GLIBCXX_DEBUG_ONLY(debug_base::check_key_does_not_exist(r_key);)
_GLIBCXX_DEBUG_ONLY(assert_valid();)
return false;
}
if (hash_eq_fn_base::operator()(PB_DS_V2F(p_e->m_value), r_key))
{
resize_base::notify_erase_search_end();
_GLIBCXX_DEBUG_ONLY(debug_base::check_key_exists(r_key);)
erase_entry_pointer(m_entries[pos]);
do_resize_if_needed_no_throw();
_GLIBCXX_DEBUG_ONLY(assert_valid();)
return true;
}
while (true)
{
entry_pointer p_next_e = p_e->m_p_next;
if (p_next_e == NULL)
{
resize_base::notify_erase_search_end();
_GLIBCXX_DEBUG_ONLY(debug_base::check_key_does_not_exist(r_key);)
_GLIBCXX_DEBUG_ONLY(assert_valid();)
return false;
}
if (hash_eq_fn_base::operator()(PB_DS_V2F(p_next_e->m_value), r_key))
{
resize_base::notify_erase_search_end();
_GLIBCXX_DEBUG_ONLY(debug_base::check_key_exists(r_key);)
erase_entry_pointer(p_e->m_p_next);
do_resize_if_needed_no_throw();
_GLIBCXX_DEBUG_ONLY(assert_valid();)
return true;
}
resize_base::notify_erase_search_collision();
p_e = p_next_e;
}
}
| {
"pile_set_name": "Github"
} |
# jwt-go
[](https://travis-ci.org/dgrijalva/jwt-go)
[](https://godoc.org/github.com/dgrijalva/jwt-go)
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0, which will include breaking changes, will follow shortly. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
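For instance, a key function can refuse any token whose signing method is not the one you expect before handing back the key. A minimal sketch (the `verifyHMACToken` name and its callers are illustrative, not part of the library):

```go
package tokens

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// verifyHMACToken parses tokenString and rejects any token whose signing
// method is not in the HMAC family, no matter what its header claims.
func verifyHMACToken(tokenString string, hmacKey []byte) (*jwt.Token, error) {
	return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return hmacKey, nil
	})
}
```

Returning the key only after the method check means a forged `alg` header never reaches signature verification with your key.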
## What the heck is a JWT?
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
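You can inspect this structure with nothing but the Go standard library. The sketch below splits a sample token and decodes its header and claims; the token here is illustrative and its signature segment is a placeholder:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Header and claims are base64url-encoded JSON; the third part is
	// the signature over the first two.
	token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." +
		"eyJzdWIiOiIxMjM0NTY3ODkwIn0." +
		"signature-goes-here"
	for _, part := range strings.Split(token, ".")[:2] {
		decoded, err := base64.RawURLEncoding.DecodeString(part)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(decoded)) // {"alg":"HS256","typ":"JWT"}, then {"sub":"1234567890"}
	}
}
```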
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which algorithm was used for signing and which key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
## Examples
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
## Extensions
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
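As a rough sketch of the shape involved (the `NOOP` method below is purely hypothetical, performs no cryptography, and exists only to show the interface):

```go
package tokens

import jwt "github.com/dgrijalva/jwt-go"

// SigningMethodNoop is a hypothetical do-nothing method; it must never be
// used to protect real data.
type SigningMethodNoop struct{}

func (m *SigningMethodNoop) Alg() string { return "NOOP" }

// Sign would return the encoded signature for signingString in a real method.
func (m *SigningMethodNoop) Sign(signingString string, key interface{}) (string, error) {
	return "", nil
}

// Verify would return an error on signature mismatch in a real method.
func (m *SigningMethodNoop) Verify(signingString, signature string, key interface{}) error {
	return nil
}

func init() {
	// Register a factory so jwt.GetSigningMethod("NOOP") can find it.
	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod {
		return &SigningMethodNoop{}
	})
}
```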
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
## Compliance
This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning.
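For example, the version-pinned import path looks like this:

```go
import jwt "gopkg.in/dgrijalva/jwt-go.v3"
```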
**BREAKING CHANGES:**
* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
## Usage Tips
### Signing vs Encryption
A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
* The author of the token was in possession of the signing secret
* The data has not been modified since it was signed
It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
### Choosing a Signing Method
There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
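A minimal signing sketch with a shared secret (the secret literal is a placeholder; real code should load it from configuration):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("my-shared-secret") // placeholder; any []byte works
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub": "1234567890",
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```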
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key while allowing any consumer to verify them with the public key alone.
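A sketch of the asymmetric flow, assuming `privatePEM` holds a PEM-encoded RSA private key supplied by the caller:

```go
package tokens

import jwt "github.com/dgrijalva/jwt-go"

// signRS256 signs an otherwise-empty token with an RSA private key;
// consumers verify it using only the matching public key.
func signRS256(privatePEM []byte) (string, error) {
	privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(privatePEM)
	if err != nil {
		return "", err
	}
	token := jwt.New(jwt.SigningMethodRS256)
	return token.SignedString(privateKey)
}
```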
### Signing Methods and Key Types
Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
### JWT and OAuth
It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
| {
"pile_set_name": "Github"
} |
.. _ex_hello_world:
Example: Hello World
====================
The simplest possible spreadsheet. This is a good place to start to see if
the ``xlsxwriter`` module is installed correctly.
.. image:: _images/hello01.png
.. literalinclude:: ../../../examples/hello_world.lua
:language: lua
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python3 -m pytest -s
import snowy
import numpy as np
import os
import pytest
import tempfile
from snowy.io import show_filename
from snowy.io import show_array
def path(filename: str):
scriptdir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(scriptdir, filename)
def test_io():
# Ensure that to_planar and from_planar do the right thing to shape.
a = np.array([[[1,2,3,4],[5,6,7,8]]], dtype='f8')
assert a.shape == (1,2,4)
b = snowy.to_planar(a)
assert b.shape == (4,1,2)
c = snowy.from_planar(b)
assert np.array_equal(a, c)
# Ensure that to_planar creates a copy, not a view.
b[0,0,0] = 100
assert np.amax(a) == 8
# Ensure that from_planar creates a copy, not a view.
c[0,0,0] = 200
assert np.amax(b) == 100
# Ensure that extract_rgb does the right thing with shape and makes
# a copy rather than a view.
color = snowy.extract_rgb(a)
assert color.shape == (1, 2, 3)
color[0,0,0] = 100
assert np.amax(a) == 8
# Ensure that extract_alpha does the right thing with shape and
# makes a copy rather than a view.
alpha = snowy.extract_alpha(a)
assert alpha.shape == (1, 2, 1)
alpha[0,0,0] = 100
assert np.amax(a) == 8
# This next snippet doesn't test Snowy but shows how to make a view
# of the alpha plane.
alpha_view = a[:,:,3]
assert alpha_view[0,0] == 4
assert alpha_view[0,1] == 8
alpha_view[0,0] = 100
assert np.amax(a) == 100
def test_range():
source = path('../docs/ground.jpg')
ground = snowy.load(source)
assert np.amin(ground) >= 0 and np.amax(ground) <= 1
with tempfile.NamedTemporaryFile() as fp:
target = fp.name + '.png'
snowy.export(ground, target)
show_filename(target)
show_filename(source)
show_array(ground, True)
blurred = snowy.blur(ground, radius=10)
snowy.show(blurred)
def test_solid():
gray = np.ones([100, 100, 4]) / 2
snowy.show(gray)
def test_gamma():
source = path('gamma_dalai_lama_gray.jpg')
dalai_lama = snowy.load(source)
snowy.show(dalai_lama)
small = snowy.resize(dalai_lama, height=32)
snowy.export(small, path('small_dalai_lama.png'))
snowy.show(small)
| {
"pile_set_name": "Github"
} |
// +build appengine
package logrus
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
return true
}
| {
"pile_set_name": "Github"
} |
// TestAdapter.cs - testing the adapter
namespace Structural.Adapter {
using tap;
class TestAdapter {
public static void Main() {
Tapper tap = new Tapper();
TeaCup teaCup = new TeaCup();
TeaBag teaBag = new TeaBag();
teaCup.steepTeaBag(teaBag);
tap.test("Steeping tea bag ", teaBag.teaBagIsSteeped, true );
LooseLeafTea looseLeafTea = new LooseLeafTea();
TeaBall teaBall = new TeaBall(looseLeafTea);
teaCup.steepTeaBag(teaBall);
tap.test("Steeping loose leaf tea", teaBag.teaBagIsSteeped, true);
tap.done();
}
}
}
| {
"pile_set_name": "Github"
} |
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06.c
Label Definition File: CWE127_Buffer_Underread__malloc.label.xml
Template File: sources-sink-06.tmpl.c
*/
/*
* @description
* CWE: 127 Buffer Under-read
* BadSource: Set data pointer to before the allocated memory buffer
* GoodSource: Set data pointer to the allocated memory buffer
* Sink: ncpy
* BadSink : Copy data to string using wcsncpy
* Flow Variant: 06 Control flow: if(STATIC_CONST_FIVE==5) and if(STATIC_CONST_FIVE!=5)
*
* */
#include "std_testcase.h"
#include <wchar.h>
/* The variable below is declared "const", so a tool should be able
* to identify that reads of this will always give its initialized value. */
static const int STATIC_CONST_FIVE = 5;
#ifndef OMITBAD
void CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_bad()
{
wchar_t * data;
data = NULL;
if(STATIC_CONST_FIVE==5)
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FLAW: Set data pointer to before the allocated memory buffer */
data = dataBuffer - 8;
}
}
{
wchar_t dest[100];
wmemset(dest, L'C', 100-1); /* fill with 'C's */
dest[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
wcsncpy(dest, data, wcslen(dest));
/* Ensure null termination */
dest[100-1] = L'\0';
printWLine(dest);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B1() - use goodsource and badsink by changing the STATIC_CONST_FIVE==5 to STATIC_CONST_FIVE!=5 */
static void goodG2B1()
{
wchar_t * data;
data = NULL;
if(STATIC_CONST_FIVE!=5)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
printLine("Benign, fixed string");
}
else
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FIX: Set data pointer to the allocated memory buffer */
data = dataBuffer;
}
}
{
wchar_t dest[100];
wmemset(dest, L'C', 100-1); /* fill with 'C's */
dest[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
wcsncpy(dest, data, wcslen(dest));
/* Ensure null termination */
dest[100-1] = L'\0';
printWLine(dest);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */
static void goodG2B2()
{
wchar_t * data;
data = NULL;
if(STATIC_CONST_FIVE==5)
{
{
wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
wmemset(dataBuffer, L'A', 100-1);
dataBuffer[100-1] = L'\0';
/* FIX: Set data pointer to the allocated memory buffer */
data = dataBuffer;
}
}
{
wchar_t dest[100];
wmemset(dest, L'C', 100-1); /* fill with 'C's */
dest[100-1] = L'\0'; /* null terminate */
/* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
wcsncpy(dest, data, wcslen(dest));
/* Ensure null termination */
dest[100-1] = L'\0';
printWLine(dest);
/* INCIDENTAL CWE-401: Memory Leak - data may not point to location
* returned by malloc() so can't safely call free() on it */
}
}
void CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_good()
{
goodG2B1();
goodG2B2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
| {
"pile_set_name": "Github"
} |
{
"extName": {
"message": "uBlock Origin",
"description": "extension name."
},
"extShortDesc": {
"message": "Endelig en effektiv blokkeringsutvidelse. Lavt CPU- og minnebruk.",
"description": "this will be in the Chrome web store: must be 132 characters or less"
},
"dashboardName": {
"message": "uBlock₀ — Dashbord",
"description": "English: uBlock₀ — Dashboard"
},
"dashboardUnsavedWarning": {
"message": "Advarsel! Du har ulagrede endringer",
"description": "A warning in the dashboard when navigating away from unsaved changes"
},
"dashboardUnsavedWarningStay": {
"message": "Bli",
"description": "Label for button to prevent navigating away from unsaved changes"
},
"dashboardUnsavedWarningIgnore": {
"message": "Ignorer",
"description": "Label for button to ignore unsaved changes"
},
"settingsPageName": {
"message": "Innstillinger",
"description": "appears as tab name in dashboard"
},
"3pPageName": {
"message": "Filterlister",
"description": "appears as tab name in dashboard"
},
"1pPageName": {
"message": "Mine filtre",
"description": "appears as tab name in dashboard"
},
"rulesPageName": {
"message": "Mine regler",
"description": "appears as tab name in dashboard"
},
"whitelistPageName": {
"message": "Betrodde sider",
"description": "appears as tab name in dashboard"
},
"shortcutsPageName": {
"message": "Snarveier",
"description": "appears as tab name in dashboard"
},
"statsPageName": {
"message": "uBlock₀ — Logg",
"description": "Title for the logger window"
},
"aboutPageName": {
"message": "Om",
"description": "appears as tab name in dashboard"
},
"assetViewerPageName": {
"message": "uBlock₀ — Ressursviser",
"description": "Title for the asset viewer page"
},
"advancedSettingsPageName": {
"message": "Avanserte innstillinger",
"description": "Title for the advanced settings page"
},
"popupPowerSwitchInfo": {
"message": "Klikk: Deaktiver/aktiver uBlock₀ for dette nettstedet.\n\nCtrl+klikk: Deaktiver uBlock₀ bare på denne siden.",
"description": "English: Click: disable/enable uBlock₀ for this site.\n\nCtrl+click: disable uBlock₀ only on this page."
},
"popupPowerSwitchInfo1": {
"message": "Klikk for å deaktivere uBlock₀ for dette nettstedet.\n\nCtrl+klikk for å deaktivere uBlock₀ bare på denne siden.",
"description": "Message to be read by screen readers"
},
"popupPowerSwitchInfo2": {
"message": "Klikk for å aktivere uBlock₀ for dette nettstedet.",
"description": "Message to be read by screen readers"
},
"popupBlockedRequestPrompt": {
"message": "forespørsler blokkert",
"description": "English: requests blocked"
},
"popupBlockedOnThisPagePrompt": {
"message": "på denne siden",
"description": "English: on this page"
},
"popupBlockedStats": {
"message": "{{count}} ({{percent}}%)",
"description": "Example: 15 (13%)"
},
"popupBlockedSinceInstallPrompt": {
"message": "siden installering",
"description": "English: since install"
},
"popupOr": {
"message": "eller",
"description": "English: or"
},
"popupBlockedOnThisPage_v2": {
"message": "Blokkert på denne siden",
"description": "For the new mobile-friendly popup design"
},
"popupBlockedSinceInstall_v2": {
"message": "Blokkert siden installering",
"description": "For the new mobile-friendly popup design"
},
"popupDomainsConnected_v2": {
"message": "Domener tilkoblet",
"description": "For the new mobile-friendly popup design"
},
"popupTipDashboard": {
"message": "Åpne dashbordet",
"description": "English: Click to open the dashboard"
},
"popupTipZapper": {
"message": "Gå til elementfjerningsmodus",
"description": "Tooltip for the element-zapper icon in the popup panel"
},
"popupTipPicker": {
"message": "Gå til elementvelgermodus",
"description": "English: Enter element picker mode"
},
"popupTipLog": {
"message": "Åpne loggen",
"description": "Tooltip used for the logger icon in the panel"
},
"popupTipNoPopups": {
"message": "Slå av/på blokkering av alle oppspretts-vinduer for dette nettstedet",
"description": "Tooltip for the no-popups per-site switch"
},
"popupTipNoPopups1": {
"message": "Klikk for å blokkere alle oppspretts-vinduer på dette nettstedet",
"description": "Tooltip for the no-popups per-site switch"
},
"popupTipNoPopups2": {
"message": "Klikk for å ikke lenger blokkere alle oppspretts-vinduer på dette nettstedet",
"description": "Tooltip for the no-popups per-site switch"
},
"popupTipNoLargeMedia": {
"message": "Slå av/på blokkering av store mediaelementer for dette nettstedet",
"description": "Tooltip for the no-large-media per-site switch"
},
"popupTipNoLargeMedia1": {
"message": "Klikk for å blokkere store mediaelementer på dette nettstedet",
"description": "Tooltip for the no-large-media per-site switch"
},
"popupTipNoLargeMedia2": {
"message": "Klikk for å ikke lenger blokkere store mediaelementer på dette nettstedet",
"description": "Tooltip for the no-large-media per-site switch"
},
"popupTipNoCosmeticFiltering": {
"message": "Slå av/på kosmetisk filtrering for dette nettstedet",
"description": "Tooltip for the no-cosmetic-filtering per-site switch"
},
"popupTipNoCosmeticFiltering1": {
"message": "Klikk for å slå av kosmetisk filtrering på dette nettstedet",
"description": "Tooltip for the no-cosmetic-filtering per-site switch"
},
"popupTipNoCosmeticFiltering2": {
"message": "Klikk for å slå på kosmetisk filtrering på dette nettstedet",
"description": "Tooltip for the no-cosmetic-filtering per-site switch"
},
"popupTipNoRemoteFonts": {
"message": "Slå av/på blokkering av eksterne skrifttyper for dette nettstedet",
"description": "Tooltip for the no-remote-fonts per-site switch"
},
"popupTipNoRemoteFonts1": {
"message": "Klikk for å blokkere eksterne skrifttyper på dette nettstedet",
"description": "Tooltip for the no-remote-fonts per-site switch"
},
"popupTipNoRemoteFonts2": {
"message": "Klikk for å ikke lenger blokkere eksterne skrifttyper på dette nettstedet",
"description": "Tooltip for the no-remote-fonts per-site switch"
},
"popupTipNoScripting1": {
"message": "Klikk for å slå av JavaScript på dette nettstedet",
"description": "Tooltip for the no-scripting per-site switch"
},
"popupTipNoScripting2": {
"message": "Klikk for å ikke lenger slå av JavaScript på dette nettstedet",
"description": "Tooltip for the no-scripting per-site switch"
},
"popupNoPopups_v2": {
"message": "Oppspretts-vinduer",
"description": "Caption for the no-popups per-site switch"
},
"popupNoLargeMedia_v2": {
"message": "Store mediaelementer",
"description": "Caption for the no-large-media per-site switch"
},
"popupNoCosmeticFiltering_v2": {
"message": "Kosmetisk filtrering",
"description": "Caption for the no-cosmetic-filtering per-site switch"
},
"popupNoRemoteFonts_v2": {
"message": "Eksterne skrifttyper",
"description": "Caption for the no-remote-fonts per-site switch"
},
"popupNoScripting_v2": {
"message": "JavaScript",
"description": "Caption for the no-scripting per-site switch"
},
"popupMoreButton_v2": {
"message": "Mer",
"description": "Label to be used to show popup panel sections"
},
"popupLessButton_v2": {
"message": "Mindre",
"description": "Label to be used to hide popup panel sections"
},
"popupTipGlobalRules": {
"message": "Globale regler: Denne kolonnen er for regler som gjelder alle nettsteder.",
"description": "Tooltip when hovering the top-most cell of the global-rules column."
},
"popupTipLocalRules": {
"message": "Lokale regler: Denne kolonnen er for regler som bare gjelder for det gjeldende nettstedet.\nLokale regler har prioritet foran globale regler.",
"description": "Tooltip when hovering the top-most cell of the local-rules column."
},
"popupTipSaveRules": {
"message": "Klikk for å gjøre endringer permanente.",
"description": "Tooltip when hovering over the padlock in the dynamic filtering pane."
},
"popupTipRevertRules": {
"message": "Klikk for å tilbakestille endringer.",
"description": "Tooltip when hovering over the eraser in the dynamic filtering pane."
},
"popupAnyRulePrompt": {
"message": "alle",
"description": ""
},
"popupImageRulePrompt": {
"message": "bilder",
"description": ""
},
"popup3pAnyRulePrompt": {
"message": "tredjepart",
"description": ""
},
"popup3pPassiveRulePrompt": {
"message": "tredjeparts CSS/bilder",
"description": ""
},
"popupInlineScriptRulePrompt": {
"message": "integrerte skript",
"description": ""
},
"popup1pScriptRulePrompt": {
"message": "førsteparts skript",
"description": ""
},
"popup3pScriptRulePrompt": {
"message": "tredjeparts skript",
"description": ""
},
"popup3pFrameRulePrompt": {
"message": "tredjeparts rammer",
"description": ""
},
"popupHitDomainCountPrompt": {
"message": "domener tilkoblet",
"description": "appears in popup"
},
"popupHitDomainCount": {
"message": "{{count}} av {{total}}",
"description": "appears in popup"
},
"popupVersion": {
"message": "Versjon",
"description": "Example of use: Version 1.26.4"
},
"pickerCreate": {
"message": "Opprett",
"description": "English: Create"
},
"pickerPick": {
"message": "Velg",
"description": "English: Pick"
},
"pickerQuit": {
"message": "Avslutt",
"description": "English: Quit"
},
"pickerPreview": {
"message": "Forhåndsvis",
"description": "Element picker preview mode: will cause the elements matching the current filter to be removed from the page"
},
"pickerNetFilters": {
"message": "Nettfiltre",
"description": "English: header for a type of filter in the element picker dialog"
},
"pickerCosmeticFilters": {
"message": "Kosmetiske filtre",
"description": "English: Cosmetic filters"
},
"pickerCosmeticFiltersHint": {
"message": "Klikk, Ctrl-klikk",
"description": "English: Click, Ctrl-click"
},
"pickerContextMenuEntry": {
"message": "Blokker element...",
"description": "An entry in the browser's contextual menu"
},
"settingsCollapseBlockedPrompt": {
"message": "Skjul blokkerte elementers plassholdere",
"description": "English: Hide placeholders of blocked elements"
},
"settingsIconBadgePrompt": {
"message": "Vis antall blokkerte forespørsler på ikonet",
"description": "English: Show the number of blocked requests on the icon"
},
"settingsTooltipsPrompt": {
"message": "Deaktiver hjelpetekst (verktøytips)",
"description": "A checkbox in the Settings pane"
},
"settingsContextMenuPrompt": {
"message": "Gjør bruk av høyreklikkmeny der det er hensiktsmessig",
"description": "English: Make use of context menu where appropriate"
},
"settingsColorBlindPrompt": {
"message": "Fargeblind-vennlig",
"description": "English: Color-blind friendly"
},
"settingsCloudStorageEnabledPrompt": {
"message": "Aktiver støtte for nettlagring",
"description": ""
},
"settingsAdvancedUserPrompt": {
"message": "Jeg er en avansert bruker (<a href='https://github.com/gorhill/uBlock/wiki/Advanced-user-features'>Nødvendig lesning</a>)",
"description": ""
},
"settingsAdvancedUserSettings": {
"message": "Avanserte innstillinger",
"description": "For the tooltip of a link which gives access to advanced settings"
},
"settingsPrefetchingDisabledPrompt": {
"message": "Deaktiver forhåndshenting (for å hindre enhver tilkobling for blokkerte nettverksforespørsler)",
"description": "English: "
},
"settingsHyperlinkAuditingDisabledPrompt": {
"message": "Deaktiver lenkesporing",
"description": "English: "
},
"settingsWebRTCIPAddressHiddenPrompt": {
"message": "Hindre WebRTC i å lekke lokale IP-adresser",
"description": "English: "
},
"settingPerSiteSwitchGroup": {
"message": "Standard virkemåte",
"description": ""
},
"settingPerSiteSwitchGroupSynopsis": {
"message": "Disse standard virkemåtene kan overstyres for hvert enkelt nettsted",
"description": ""
},
"settingsNoCosmeticFilteringPrompt": {
"message": "Deaktiver kosmetisk filtrering",
"description": ""
},
"settingsNoLargeMediaPrompt": {
"message": "Blokker mediaelementer større enn {{input}} KB",
"description": ""
},
"settingsNoRemoteFontsPrompt": {
"message": "Blokker eksterne skrifttyper",
"description": ""
},
"settingsNoScriptingPrompt": {
"message": "Deaktiver JavaScript",
"description": "The default state for the per-site no-scripting switch"
},
"settingsNoCSPReportsPrompt": {
"message": "Blokker CSP-rapporter",
"description": "background information: https://github.com/gorhill/uBlock/issues/3150"
},
"settingsLastRestorePrompt": {
"message": "Siste gjenoppretting:",
"description": "English: Last restore:"
},
"settingsLastBackupPrompt": {
"message": "Siste sikkerhetskopi:",
"description": "English: Last backup:"
},
"3pListsOfBlockedHostsPrompt": {
"message": "{{netFilterCount}} nettverksfiltre + {{cosmeticFilterCount}} kosmetiske filtre fra:",
"description": "Appears at the top of the _3rd-party filters_ pane"
},
"3pListsOfBlockedHostsPerListStats": {
"message": "{{used}} brukt av {{total}}",
"description": "Appears aside each filter list in the _3rd-party filters_ pane"
},
"3pAutoUpdatePrompt1": {
"message": "Automatisk oppdatering av filterlister",
"description": "A checkbox in the _3rd-party filters_ pane"
},
"3pUpdateNow": {
"message": "Oppdater nå",
"description": "A button in the in the _3rd-party filters_ pane"
},
"3pPurgeAll": {
"message": "Tøm alle hurtigbuffere",
"description": "A button in the in the _3rd-party filters_ pane"
},
"3pParseAllABPHideFiltersPrompt1": {
"message": "Analyser og bruk kosmetiske filtre",
"description": "English: Parse and enforce Adblock+ element hiding filters."
},
"3pParseAllABPHideFiltersInfo": {
"message": "Kosmetiske filtre tjener den hensikt å skjule elementer i en nettside som anses for å være en visuell ulempe, og som ikke blokkeres av de nettverkforespørselsbaserte filtreringsmotorene.",
"description": "Describes the purpose of the 'Parse and enforce cosmetic filters' feature."
},
"3pIgnoreGenericCosmeticFilters": {
"message": "Ignorer generelle kosmetiske filtre",
"description": "This will cause uBO to ignore all generic cosmetic filters."
},
"3pIgnoreGenericCosmeticFiltersInfo": {
"message": "Generelle kosmetiske filtre er de kosmetiske filtrene som er ment å brukes på alle nettsteder. Aktivering av dette alternativet vil eliminere ekstra minne- og prosessorbruk på nettsider grunnet håndteringen av generelle kosmetiske filtre.\n\nDet anbefales å aktivere dette alternativet på mindre kraftige enheter.",
"description": "Describes the purpose of the 'Ignore generic cosmetic filters' feature."
},
"3pListsOfBlockedHostsHeader": {
"message": "Lists of blocked hosts",
"description": "English: Lists of blocked hosts"
},
"3pApplyChanges": {
"message": "Bruk endringer",
"description": "English: Apply changes"
},
"3pGroupDefault": {
"message": "Innebygd",
"description": "Header for the uBlock filters section in 'Filter lists pane'"
},
"3pGroupAds": {
"message": "Reklame",
"description": "English: Ads"
},
"3pGroupPrivacy": {
"message": "Personvern",
"description": "English: Privacy"
},
"3pGroupMalware": {
"message": "Domener med skadelig programvare",
"description": "English: Malware domains"
},
"3pGroupAnnoyances": {
"message": "Irritasjonsmomenter",
"description": "The header identifying the filter lists in the category 'annoyances'"
},
"3pGroupMultipurpose": {
"message": "Multi-formål",
"description": "English: Multipurpose"
},
"3pGroupRegions": {
"message": "Regioner, språk",
"description": "English: Regions, languages"
},
"3pGroupCustom": {
"message": "Egendefinert",
"description": "English: Custom"
},
"3pImport": {
"message": "Importer...",
"description": "The label for the checkbox used to import external filter lists"
},
"3pExternalListsHint": {
"message": "En nettadresse per linje. Ugyldige nettadresser ignoreres stille.",
"description": "Short information about how to use the textarea to import external filter lists by URL"
},
"3pExternalListObsolete": {
"message": "Utgått på dato",
"description": "used as a tooltip for the out-of-date icon beside a list"
},
"3pViewContent": {
"message": "Vis innhold",
"description": "used as a tooltip for eye icon beside a list"
},
"3pLastUpdate": {
"message": "Siste oppdatering: {{ago}}.\nKlikk for å framtvinge en oppdatering.",
"description": "used as a tooltip for the clock icon beside a list"
},
"3pUpdating": {
"message": "Oppdaterer...",
"description": "used as a tooltip for the spinner icon beside a list"
},
"3pNetworkError": {
"message": "En nettverksfeil forhindret ressursen i å bli oppdatert.",
"description": "used as a tooltip for error icon beside a list"
},
"1pFormatHint": {
"message": "Ett filter per linje. Et filter kan være et vanlig vertsnavn eller et EasyList-kompatibelt filter. Linjer med prefikset <code>!</code> blir ignorert.",
"description": "Short information about how to create custom filters"
},
"1pImport": {
"message": "Importer og legg til",
"description": "English: Import and append"
},
"1pExport": {
"message": "Eksporter",
"description": "English: Export"
},
"1pExportFilename": {
"message": "mine-ublock-statiske-filtre_{{datetime}}.txt",
"description": "English: my-ublock-static-filters_{{datetime}}.txt"
},
"1pApplyChanges": {
"message": "Bruk endringer",
"description": "English: Apply changes"
},
"rulesPermanentHeader": {
"message": "Permanente regler",
"description": "header"
},
"rulesTemporaryHeader": {
"message": "Midlertidige regler",
"description": "header"
},
"rulesRevert": {
"message": "Tilbakestill",
"description": "This will remove all temporary rules"
},
"rulesCommit": {
"message": "Send",
"description": "This will persist temporary rules"
},
"rulesEdit": {
"message": "Rediger",
"description": "Will enable manual-edit mode (textarea)"
},
"rulesEditSave": {
"message": "Lagre",
"description": "Will save manually-edited content and exit manual-edit mode"
},
"rulesEditDiscard": {
"message": "Forkast",
"description": "Will discard manually-edited content and exit manual-edit mode"
},
"rulesImport": {
"message": "Importer fra fil",
"description": ""
},
"rulesExport": {
"message": "Eksporter til fil",
"description": ""
},
"rulesDefaultFileName": {
"message": "mine-ublock-dynamiske-regler_{{datetime}}.txt",
"description": "default file name to use"
},
"rulesHint": {
"message": "Liste over dine dynamiske filtreringsregler.",
"description": "English: List of your dynamic filtering rules."
},
"rulesFormatHint": {
"message": "Regelsyntaks: <code>kilde destinasjon type handling</code> (<a href='https://github.com/gorhill/uBlock/wiki/Dynamic-filtering:-rule-syntax'>full dokumentasjon</a>).",
"description": "English: dynamic rule syntax and full documentation."
},
"rulesSort": {
"message": "Sorter:",
"description": "English: label for sort option."
},
"rulesSortByType": {
"message": "Regeltype",
"description": "English: a sort option for list of rules."
},
"rulesSortBySource": {
"message": "Kilde",
"description": "English: a sort option for list of rules."
},
"rulesSortByDestination": {
"message": "Destinasjon",
"description": "English: a sort option for list of rules."
},
"whitelistPrompt": {
"message": "Direktivene for betrodde sider bestemmer hvilke nettsider uBlock Origin ikke skal være aktiv på. Én oppføring per linje. Ugyldige direktiver blir stille ignorert og kommentert ut.",
"description": "The name of the trusted sites pane."
},
"whitelistImport": {
"message": "Importer og legg til",
"description": "English: Import and append"
},
"whitelistExport": {
"message": "Eksporter",
"description": "English: Export"
},
"whitelistExportFilename": {
"message": "mine-ublock-betrodde-sider_{{datetime}}.txt",
"description": "The default filename to use for import/export purpose"
},
"whitelistApply": {
"message": "Bruk endringer",
"description": "English: Apply changes"
},
"logRequestsHeaderType": {
"message": "Type",
"description": "English: Type"
},
"logRequestsHeaderDomain": {
"message": "Domene",
"description": "English: Domain"
},
"logRequestsHeaderURL": {
"message": "Nettadresse",
"description": "English: URL"
},
"logRequestsHeaderFilter": {
"message": "Filter",
"description": "English: Filter"
},
"logAll": {
"message": "Alle",
"description": "Appears in the logger's tab selector"
},
"logBehindTheScene": {
"message": "Faneløs",
"description": "Pretty name for behind-the-scene network requests"
},
"loggerCurrentTab": {
"message": "Gjeldende fane",
"description": "Appears in the logger's tab selector"
},
"loggerReloadTip": {
"message": "Last faneinnholdet på nytt",
"description": "Tooltip for the reload button in the logger page"
},
"loggerDomInspectorTip": {
"message": "Slå av/på DOM-inspektør",
"description": "Tooltip for the DOM inspector button in the logger page"
},
"loggerPopupPanelTip": {
"message": "Slå av/på oppsprettspanelet",
"description": "Tooltip for the popup panel button in the logger page"
},
"loggerInfoTip": {
"message": "uBlock Origin wiki: Loggen",
"description": "Tooltip for the top-right info label in the logger page"
},
"loggerClearTip": {
"message": "Tøm loggen",
"description": "Tooltip for the eraser in the logger page; used to blank the content of the logger"
},
"loggerPauseTip": {
"message": "Sett loggen i pausemodus (forkast alle innkommende data)",
"description": "Tooltip for the pause button in the logger page"
},
"loggerUnpauseTip": {
"message": "Gjenoppta logging",
"description": "Tooltip for the play button in the logger page"
},
"loggerRowFiltererButtonTip": {
"message": "Slå av/på loggfiltrering",
"description": "Tooltip for the row filterer button in the logger page"
},
"logFilterPrompt": {
"message": "filtrer logginnhold",
"description": "Placeholder string for logger output filtering input field"
},
"loggerRowFiltererBuiltinTip": {
"message": "Alternativer for loggfiltrering",
"description": "Tooltip for the button to bring up logger output filtering options"
},
"loggerRowFiltererBuiltinNot": {
"message": "Ikke",
"description": "A keyword in the built-in row filtering expression"
},
"loggerRowFiltererBuiltinEventful": {
"message": "hendelsesrik",
"description": "A keyword in the built-in row filtering expression: all items corresponding to uBO doing something (blocked, allowed, redirected, etc.)"
},
"loggerRowFiltererBuiltinBlocked": {
"message": "blokkert",
"description": "A keyword in the built-in row filtering expression"
},
"loggerRowFiltererBuiltinAllowed": {
"message": "tillatt",
"description": "A keyword in the built-in row filtering expression"
},
"loggerRowFiltererBuiltin1p": {
"message": "førstepart",
"description": "A keyword in the built-in row filtering expression"
},
"loggerRowFiltererBuiltin3p": {
"message": "tredjepart",
"description": "A keyword in the built-in row filtering expression"
},
"loggerEntryDetailsHeader": {
"message": "Detaljer",
"description": "Small header to identify the 'Details' pane for a specific logger entry"
},
"loggerEntryDetailsFilter": {
"message": "Filter",
"description": "Label to identify a filter field"
},
"loggerEntryDetailsFilterList": {
"message": "Filterliste",
"description": "Label to identify a filter list field"
},
"loggerEntryDetailsRule": {
"message": "Regel",
"description": "Label to identify a rule field"
},
"loggerEntryDetailsContext": {
"message": "Kontekst",
"description": "Label to identify a context field (typically a hostname)"
},
"loggerEntryDetailsRootContext": {
"message": "Rotkontekst",
"description": "Label to identify a root context field (typically a hostname)"
},
"loggerEntryDetailsPartyness": {
"message": "Partsrelasjon",
"description": "Label to identify a field providing partyness information"
},
"loggerEntryDetailsType": {
"message": "Type",
"description": "Label to identify the type of an entry"
},
"loggerEntryDetailsURL": {
"message": "Nettadresse",
"description": "Label to identify the URL of an entry"
},
"loggerURLFilteringHeader": {
"message": "Nettadresseregel",
"description": "Small header to identify the dynamic URL filtering section"
},
"loggerURLFilteringContextLabel": {
"message": "Kontekst:",
"description": "Label for the context selector"
},
"loggerURLFilteringTypeLabel": {
"message": "Type:",
"description": "Label for the type selector"
},
"loggerStaticFilteringHeader": {
"message": "Statisk filter",
"description": "Small header to identify the static filtering section"
},
"loggerStaticFilteringSentence": {
"message": "{{action}} nettverksforespørsler av {{type}} {{br}}som matcher nettadressen {{url}} {{br}}og som stammer {{origin}},{{br}}{{importance}} det er et matchende unntaksfilter.",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartBlock": {
"message": "Blokker",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartAllow": {
"message": "Tillat",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartType": {
"message": "type “{{type}}”",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartAnyType": {
"message": "enhver type",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartOrigin": {
"message": "fra “{{origin}}”",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartAnyOrigin": {
"message": "fra hvor som helst",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartNotImportant": {
"message": "unntatt når",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringSentencePartImportant": {
"message": "selv om",
"description": "Used in the static filtering wizard"
},
"loggerStaticFilteringFinderSentence1": {
"message": "Statisk filter <code>{{filter}}</code> funnet i:",
"description": "Below this sentence, the filter list(s) in which the filter was found"
},
"loggerStaticFilteringFinderSentence2": {
"message": "Statisk filter ble ikke funnet i noen av filterlistene som er aktiverte nå",
"description": "Message to show when a filter cannot be found in any filter lists"
},
"loggerSettingDiscardPrompt": {
"message": "Loggoppføringer som ikke oppfyller alle tre betingelser nedenfor blir automatisk forkastet:",
"description": "Logger setting: A sentence to describe the purpose of the settings below"
},
"loggerSettingPerEntryMaxAge": {
"message": "Bevar oppføringer fra de siste {{input}} minuttene",
"description": "A logger setting"
},
"loggerSettingPerTabMaxLoads": {
"message": "Bevar maksimalt {{input}} sideinnlastinger per fane",
"description": "A logger setting"
},
"loggerSettingPerTabMaxEntries": {
"message": "Bevar maksimalt {{input}} oppføringer per fane",
"description": "A logger setting"
},
"loggerSettingPerEntryLineCount": {
"message": "Bruk {{input}} linjer per oppføring i vertikalt utvidet modus",
"description": "A logger setting"
},
"loggerSettingHideColumnsPrompt": {
"message": "Skjul kolonner:",
"description": "Logger settings: a sentence to describe the purpose of the checkboxes below"
},
"loggerSettingHideColumnTime": {
"message": "{{input}} Tid",
"description": "A label for the time column"
},
"loggerSettingHideColumnFilter": {
"message": "{{input}} Filter/regel",
"description": "A label for the filter or rule column"
},
"loggerSettingHideColumnContext": {
"message": "{{input}} Kontekst",
"description": "A label for the context column"
},
"loggerSettingHideColumnPartyness": {
"message": "{{input}} Partsrelasjon",
"description": "A label for the partyness column"
},
"loggerExportFormatList": {
"message": "Liste",
"description": "Label for radio-button to pick export format"
},
"loggerExportFormatTable": {
"message": "Tabell",
"description": "Label for radio-button to pick export format"
},
"loggerExportEncodePlain": {
"message": "Uformatert",
"description": "Label for radio-button to pick export text format"
},
"loggerExportEncodeMarkdown": {
"message": "Markdown",
"description": "Label for radio-button to pick export text format"
},
"aboutChangelog": {
"message": "Endringslogg",
"description": ""
},
"aboutWiki": {
"message": "Wiki",
"description": "English: project' wiki on GitHub"
},
"aboutSupport": {
"message": "Brukerstøtte",
"description": "A link for where to get support"
},
"aboutIssues": {
"message": "Problemsporing",
"description": "Text for a link to official issue tracker"
},
"aboutCode": {
"message": "Kildekode (GPLv3)",
"description": "English: Source code (GPLv3)"
},
"aboutContributors": {
"message": "Bidragsytere",
"description": "English: Contributors"
},
"aboutSourceCode": {
"message": "Kildekode",
"description": "Link text to source code repo"
},
"aboutTranslations": {
"message": "Oversettelser",
"description": "Link text to translations repo"
},
"aboutFilterLists": {
"message": "Filterlister",
"description": "Link text to uBO's own filter lists repo"
},
"aboutDependencies": {
"message": "Eksterne avhengigheter (GPLv3-kompatible):",
"description": "Shown in the About pane"
},
"aboutBackupDataButton": {
"message": "Sikkerhetskopier til fil",
"description": "Text for button to create a backup of all settings"
},
"aboutBackupFilename": {
"message": "min-ublock-sikkerhetskopi_{{datetime}}.txt",
"description": "English: my-ublock-backup_{{datetime}}.txt"
},
"aboutRestoreDataButton": {
"message": "Gjenopprett fra fil",
"description": "English: Restore from file..."
},
"aboutResetDataButton": {
"message": "Tilbakestill til standardinnstillinger",
"description": "English: Reset to default settings..."
},
"aboutRestoreDataConfirm": {
"message": "Alle dine innstillinger vil bli overskrevet med data sikkerhetskopiert {{time}}, og uBlock₀ vil starte på nytt.\n\nOverskrive alle eksisterende innstillinger med sikkerhetskopierte data?",
"description": "Message asking user to confirm restore"
},
"aboutRestoreDataError": {
"message": "Dataene kunne ikke leses eller er ugyldige",
"description": "Message to display when an error occurred during restore"
},
"aboutResetDataConfirm": {
"message": "Alle dine innstillinger blir fjernet, og uBlock₀ vil starte på nytt.\n\nTilbakestille uBlock₀ til opprinnelige innstillinger?",
"description": "Message asking user to confirm reset"
},
"errorCantConnectTo": {
"message": "Nettverksfeil: {{msg}}",
"description": "English: Network error: {{msg}}"
},
"subscriberConfirm": {
"message": "Legge til følgende nettadresse i din egendefinerte filterliste?\n\nTittel: \"{{title}}\"\nNettadresse: {{url}}",
"description": "No longer used"
},
"subscribeButton": {
"message": "Abonner",
"description": "For the button used to subscribe to a filter list"
},
"elapsedOneMinuteAgo": {
"message": "et minutt siden",
"description": "English: a minute ago"
},
"elapsedManyMinutesAgo": {
"message": "{{value}} minutter siden",
"description": "English: {{value}} minutes ago"
},
"elapsedOneHourAgo": {
"message": "en time siden",
"description": "English: an hour ago"
},
"elapsedManyHoursAgo": {
"message": "{{value}} timer siden",
"description": "English: {{value}} hours ago"
},
"elapsedOneDayAgo": {
"message": "en dag siden",
"description": "English: a day ago"
},
"elapsedManyDaysAgo": {
"message": "{{value}} dager siden",
"description": "English: {{value}} days ago"
},
"showDashboardButton": {
"message": "Vis dashbord",
"description": "Firefox/Fennec-specific: Show Dashboard"
},
"showNetworkLogButton": {
"message": "Vis logg",
"description": "Firefox/Fennec-specific: Show Logger"
},
"fennecMenuItemBlockingOff": {
"message": "av",
"description": "Firefox-specific: appears as 'uBlock₀ (off)'"
},
"docblockedPrompt1": {
"message": "uBlock Origin har forhindret lasting av følgende side:",
"description": "Used in the strict-blocking page"
},
"docblockedPrompt2": {
"message": "På grunn av følgende filter:",
"description": "Used in the strict-blocking page"
},
"docblockedNoParamsPrompt": {
"message": "uten parametere",
"description": "label to be used for the parameter-less URL: https://cloud.githubusercontent.com/assets/585534/9832014/bfb1b8f0-593b-11e5-8a27-fba472a5529a.png"
},
"docblockedFoundIn": {
"message": "Funnet i:",
"description": "English: List of filter list names follows"
},
"docblockedBack": {
"message": "Gå tilbake",
"description": "English: Go back"
},
"docblockedClose": {
"message": "Lukk dette vinduet",
"description": "English: Close this window"
},
"docblockedProceed": {
"message": "Slå av streng (fullstendig) blokkering for {{hostname}}",
"description": "English: Disable strict blocking for {{hostname}} ..."
},
"docblockedDisableTemporary": {
"message": "Midlertidig",
"description": "English: Temporarily"
},
"docblockedDisablePermanent": {
"message": "Permanent",
"description": "English: Permanently"
},
"cloudPush": {
"message": "Eksporter til nettlagring",
"description": "tooltip"
},
"cloudPull": {
"message": "Importer fra nettlagring",
"description": "tooltip"
},
"cloudPullAndMerge": {
"message": "Importer fra nettlagring og slå sammen med gjeldende innstillinger",
"description": "tooltip"
},
"cloudNoData": {
"message": "...\n...",
"description": ""
},
"cloudDeviceNamePrompt": {
"message": "Denne enhets navn:",
"description": "used as a prompt for the user to provide a custom device name"
},
"advancedSettingsWarning": {
"message": "Advarsel! Endringer av de avanserte innstillingene skjer på eget ansvar.",
"description": "A warning to users at the top of 'Advanced settings' page"
},
"genericSubmit": {
"message": "Send",
"description": "for generic 'Submit' buttons"
},
"genericApplyChanges": {
"message": "Bruk endringer",
"description": "for generic 'Apply changes' buttons"
},
"genericRevert": {
"message": "Tilbakestill",
"description": "for generic 'Revert' buttons"
},
"genericBytes": {
"message": "bytes",
"description": ""
},
"contextMenuTemporarilyAllowLargeMediaElements": {
"message": "Tillat store mediaelementer midlertidig",
"description": "A context menu entry, present when large media elements have been blocked on the current site"
},
"shortcutCapturePlaceholder": {
"message": "Oppfør en snarvei",
"description": "Placeholder string for input field used to capture a keyboard shortcut"
},
"genericMergeViewScrollLock": {
"message": "Slå av/på låst rulling",
"description": "Tooltip for the button used to lock scrolling between the views in the 'My rules' pane"
},
"genericCopyToClipboard": {
"message": "Kopier til utklippstavle",
"description": "Label for buttons used to copy something to the clipboard"
},
"toggleBlockingProfile": {
"message": "Skift blokkeringsprofil",
"description": "Label for keyboard shortcut used to toggle blocking profile"
},
"relaxBlockingMode": {
"message": "Lemp på blokkeringsmodus",
"description": "Label for keyboard shortcut used to relax blocking mode (meant to replace 'Toggle blocking profile')"
},
"storageUsed": {
"message": "Brukt lagringsplass: {{value}} {{unit}}",
"description": " In Setting pane, renders as (example): Storage used: 13.2 MB"
},
"KB": {
"message": "KB",
"description": "short for 'kilobytes'"
},
"MB": {
"message": "MB",
"description": "short for 'megabytes'"
},
"GB": {
"message": "GB",
"description": "short for 'gigabytes'"
},
"dummy": {
"message": "This entry must be the last one",
"description": "so we dont need to deal with comma for last entry"
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE catmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<catmetadata>
<longdescription lang="en">
The app-dicts category contains dictionary and word-list packages.
</longdescription>
<longdescription lang="de">
	Die Kategorie app-dicts enthält Wörterbücher und Wortlisten.
</longdescription>
<longdescription lang="es">
La categoría app-dicts contiene paquetes de diccionarios y
para listar palabras.
</longdescription>
<longdescription lang="ja">
	app-dictsカテゴリには辞書とワードリストのパッケージが含まれます。
</longdescription>
<longdescription lang="nl">
	De app-dicts categorie bevat woordenboeken en andere woordlijsten.
</longdescription>
<longdescription lang="vi">
Nhóm app-dicts chứa các gói về từ điển.
</longdescription>
<longdescription lang="it">
La categoria app-dicts contiene pacchetti di dizionari ed elenchi di parole.
</longdescription>
<longdescription lang="pt">
A categoria app-dicts contém pacotes relacionados a dicionários e
listas de palavras.
</longdescription>
<longdescription lang="pl">
Kategoria app-dicts zawiera słowniki oraz listy wyrazów.
</longdescription>
</catmetadata>
| {
"pile_set_name": "Github"
} |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Api.Disks do
@moduledoc """
API calls for all endpoints tagged `Disks`.
"""
alias GoogleApi.Compute.V1.Connection
alias GoogleApi.Gax.{Request, Response}
@library_version Mix.Project.config() |> Keyword.get(:version, "")
@doc """
Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - The disk name for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:body` (*type:* `GoogleApi.Compute.V1.Model.DisksAddResourcePoliciesRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_add_resource_policies(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_add_resource_policies(
connection,
project,
zone,
disk,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{disk}/addResourcePolicies", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
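  # Usage sketch (all identifiers below are placeholders, not values shipped with
  # this library; assumes `conn` was built via GoogleApi.Compute.V1.Connection.new/1
  # with a valid OAuth2 token):
  #
  #     {:ok, %GoogleApi.Compute.V1.Model.Operation{}} =
  #       compute_disks_add_resource_policies(
  #         conn,
  #         "my-project",
  #         "us-central1-a",
  #         "my-disk",
  #         body: %GoogleApi.Compute.V1.Model.DisksAddResourcePoliciesRequest{
  #           resourcePolicies: ["regions/us-central1/resourcePolicies/daily-snapshots"]
  #         }
  #       )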
@doc """
Retrieves an aggregated list of persistent disks.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:filter` (*type:* `String.t`) - A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `>`, or `<`.
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.
You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ```
* `:includeAllScopes` (*type:* `boolean()`) - Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.
* `:maxResults` (*type:* `integer()`) - The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
* `:orderBy` (*type:* `String.t`) - Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.
You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.
Currently, only sorting by `name` or `creationTimestamp desc` is supported.
* `:pageToken` (*type:* `String.t`) - Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.DiskAggregatedList{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_aggregated_list(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.Compute.V1.Model.DiskAggregatedList.t()}
| {:ok, Tesla.Env.t()}
| {:error, any()}
def compute_disks_aggregated_list(connection, project, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:filter => :query,
:includeAllScopes => :query,
:maxResults => :query,
:orderBy => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/aggregated/disks", %{
"project" => URI.encode(project, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.DiskAggregatedList{}])
end
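  # Example of the filter and paging options documented above (the project id is
  # a placeholder):
  #
  #     {:ok, aggregated} =
  #       compute_disks_aggregated_list(conn, "my-project",
  #         filter: "name != example-disk",
  #         maxResults: 100
  #       )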
@doc """
Creates a snapshot of a specified persistent disk.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - Name of the persistent disk to snapshot.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:guestFlush` (*type:* `boolean()`) - [Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:body` (*type:* `GoogleApi.Compute.V1.Model.Snapshot.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_create_snapshot(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_create_snapshot(
connection,
project,
zone,
disk,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:guestFlush => :query,
:requestId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{disk}/createSnapshot", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - Name of the persistent disk to delete.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_delete(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_delete(connection, project, zone, disk, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query
}
request =
Request.new()
|> Request.method(:delete)
|> Request.url("/{project}/zones/{zone}/disks/{disk}", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &(URI.char_unreserved?(&1) || &1 == ?/))
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - Name of the persistent disk to return.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Disk{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_get(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) :: {:ok, GoogleApi.Compute.V1.Model.Disk.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_get(connection, project, zone, disk, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/zones/{zone}/disks/{disk}", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &(URI.char_unreserved?(&1) || &1 == ?/))
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Disk{}])
end
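  # Read-only lookup example (placeholder identifiers):
  #
  #     {:ok, %GoogleApi.Compute.V1.Model.Disk{} = disk} =
  #       compute_disks_get(conn, "my-project", "us-central1-a", "my-disk")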
@doc """
Gets the access control policy for a resource. May be empty if no such policy or resource exists.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `resource` (*type:* `String.t`) - Name or id of the resource for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:optionsRequestedPolicyVersion` (*type:* `integer()`) - Requested IAM Policy version.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Policy{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_get_iam_policy(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) :: {:ok, GoogleApi.Compute.V1.Model.Policy.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_get_iam_policy(
connection,
project,
zone,
resource,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:optionsRequestedPolicyVersion => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/zones/{zone}/disks/{resource}/getIamPolicy", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"resource" => URI.encode(resource, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Policy{}])
end
@doc """
Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:sourceImage` (*type:* `String.t`) - Optional. Source image to restore onto a disk.
* `:body` (*type:* `GoogleApi.Compute.V1.Model.Disk.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_insert(Tesla.Env.client(), String.t(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_insert(connection, project, zone, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query,
:sourceImage => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
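  # Disk-creation sketch. Per the doc above, omitting all source fields yields an
  # empty disk; `sizeGb` is given as a string here because the API encodes int64
  # values as strings in the generated models (names are placeholders):
  #
  #     {:ok, op} =
  #       compute_disks_insert(conn, "my-project", "us-central1-a",
  #         body: %GoogleApi.Compute.V1.Model.Disk{name: "my-disk", sizeGb: "100"}
  #       )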
@doc """
Retrieves a list of persistent disks contained within the specified zone.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:filter` (*type:* `String.t`) - A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `>`, or `<`.
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.
You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ```
* `:maxResults` (*type:* `integer()`) - The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
* `:orderBy` (*type:* `String.t`) - Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.
You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.
Currently, only sorting by `name` or `creationTimestamp desc` is supported.
* `:pageToken` (*type:* `String.t`) - Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.DiskList{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_list(Tesla.Env.client(), String.t(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.Compute.V1.Model.DiskList.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_list(connection, project, zone, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:filter => :query,
:maxResults => :query,
:orderBy => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/zones/{zone}/disks", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.DiskList{}])
end
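  # Paging sketch: feed `nextPageToken` from one response into the next request's
  # `:pageToken` until it comes back nil (placeholder identifiers):
  #
  #     {:ok, page1} = compute_disks_list(conn, "my-project", "us-central1-a", maxResults: 50)
  #     {:ok, page2} =
  #       compute_disks_list(conn, "my-project", "us-central1-a",
  #         pageToken: page1.nextPageToken
  #       )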
@doc """
Removes resource policies from a disk.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - The disk name for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:body` (*type:* `GoogleApi.Compute.V1.Model.DisksRemoveResourcePoliciesRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_remove_resource_policies(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_remove_resource_policies(
connection,
project,
zone,
disk,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Resizes the specified persistent disk. You can only increase the size of the disk.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `disk` (*type:* `String.t`) - The name of the persistent disk.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:body` (*type:* `GoogleApi.Compute.V1.Model.DisksResizeRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_resize(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_resize(connection, project, zone, disk, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{disk}/resize", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"disk" => URI.encode(disk, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Sets the access control policy on the specified resource. Replaces any existing policy.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `resource` (*type:* `String.t`) - Name or id of the resource for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:body` (*type:* `GoogleApi.Compute.V1.Model.ZoneSetPolicyRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Policy{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_set_iam_policy(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) :: {:ok, GoogleApi.Compute.V1.Model.Policy.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_set_iam_policy(
connection,
project,
zone,
resource,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{resource}/setIamPolicy", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"resource" => URI.encode(resource, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Policy{}])
end
@doc """
Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `resource` (*type:* `String.t`) - Name or id of the resource for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `:body` (*type:* `GoogleApi.Compute.V1.Model.ZoneSetLabelsRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_set_labels(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_disks_set_labels(
connection,
project,
zone,
resource,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{resource}/setLabels", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"resource" => URI.encode(resource, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Returns permissions that a caller has on the specified resource.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `zone` (*type:* `String.t`) - The name of the zone for this request.
* `resource` (*type:* `String.t`) - Name or id of the resource for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:body` (*type:* `GoogleApi.Compute.V1.Model.TestPermissionsRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.TestPermissionsResponse{}}` on success
* `{:error, info}` on failure
"""
@spec compute_disks_test_iam_permissions(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.TestPermissionsResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, any()}
def compute_disks_test_iam_permissions(
connection,
project,
zone,
resource,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/{project}/zones/{zone}/disks/{resource}/testIamPermissions", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"zone" => URI.encode(zone, &URI.char_unreserved?/1),
"resource" => URI.encode(resource, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.TestPermissionsResponse{}])
end
end
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.tests.integration.jms.consumer;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import javax.jms.Topic;
import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.activemq.artemis.api.core.RoutingType;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.server.impl.AddressInfo;
import org.apache.activemq.artemis.logs.AssertionLoggerHandler;
import org.apache.activemq.artemis.tests.util.CFUtil;
import org.apache.activemq.artemis.tests.util.JMSTestBase;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(value = Parameterized.class)
public class CreateSubscriptionTest extends JMSTestBase {
private final String protocol;
@Parameterized.Parameters(name = "persistenceEnabled = {0}")
public static Iterable<? extends Object> persistenceEnabled() {
return Arrays.asList(new Object[][]{{"AMQP"}, {"CORE"}});
}
public CreateSubscriptionTest(String protocol) {
this.protocol = protocol;
}
@Test
public void testSharedConsumer() throws Exception {
server.addAddressInfo(new AddressInfo(SimpleString.toSimpleString("myTopic")).addRoutingType(RoutingType.MULTICAST));
ConnectionFactory cf = CFUtil.createConnectionFactory(protocol, "tcp://localhost:61616");
Connection connection = cf.createConnection();
Session session = connection.createSession();
      Connection connection2 = cf.createConnection();
      Session session2 = connection2.createSession();
try {
Topic topic = session.createTopic("myTopic");
MessageConsumer messageConsumer = session.createSharedConsumer(topic, "consumer1");
MessageConsumer messageConsumer2 = session2.createSharedConsumer(topic, "consumer1");
      } finally {
         connection.close();
         connection2.close();
}
}
@Test
public void testSharedDurableConsumer() throws Exception {
server.addAddressInfo(new AddressInfo(SimpleString.toSimpleString("myTopic")).addRoutingType(RoutingType.MULTICAST));
ConnectionFactory cf = CFUtil.createConnectionFactory(protocol, "tcp://localhost:61616");
Connection connection = cf.createConnection();
Session session = connection.createSession();
      Connection connection2 = cf.createConnection();
      Session session2 = connection2.createSession();
try {
Topic topic = session.createTopic("myTopic");
MessageConsumer messageConsumer = session.createSharedDurableConsumer(topic, "consumer1");
MessageConsumer messageConsumer2 = session2.createSharedDurableConsumer(topic, "consumer1");
      } finally {
         connection.close();
         connection2.close();
}
}
@Test
public void testCreateManyConsumersDurable() throws Exception {
testCreateManyConsumers("createSharedDurableConsumer");
}
@Test
public void testCreateManyConsumersNonDurable() throws Exception {
testCreateManyConsumers("createSharedConsumer");
}
@Test
public void testDurableSubscriber() throws Exception {
testCreateManyConsumers("createDurableSubscriber");
}
@Test
public void testNonDurableSubscriber() throws Exception {
testCreateManyConsumers("createConsumer");
}
public void testCreateManyConsumers(String queueType) throws Exception {
AssertionLoggerHandler.startCapture();
try {
server.addAddressInfo(new AddressInfo(SimpleString.toSimpleString("myTopic")).addRoutingType(RoutingType.MULTICAST));
ConnectionFactory cf = CFUtil.createConnectionFactory(protocol, "tcp://localhost:61616");
AtomicInteger errors = new AtomicInteger(0);
Thread[] threads = new Thread[10];
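         // Two barriers line the threads up: startBarrier releases them to create their
         // consumers concurrently, and closeBarrier keeps every connection open until all
         // creations have finished, maximizing contention on subscription creation.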
CyclicBarrier startBarrier = new CyclicBarrier(threads.length);
CyclicBarrier closeBarrier = new CyclicBarrier(threads.length);
Runnable runnable = new Runnable() {
@Override
public void run() {
Connection connection = null;
try {
connection = cf.createConnection();
if (queueType.equals("createDurableSubscriber")) {
connection.setClientID(UUID.randomUUID().toString());
}
Session session = connection.createSession();
Topic topic = session.createTopic("myTopic");
startBarrier.await(10, TimeUnit.SECONDS);
if (queueType.equals("createSharedDurableConsumer")) {
MessageConsumer messageConsumer = session.createSharedDurableConsumer(topic, "consumer1");
} else if (queueType.equals("createSharedConsumer")) {
MessageConsumer messageConsumer = session.createSharedConsumer(topic, "consumer1");
} else if (queueType.equals("createDurableSubscriber")) {
session.createDurableSubscriber(topic, "name", null, false);
} else if (queueType.equals("createDurableSubscriber")) {
session.createConsumer(topic);
}
} catch (Exception e) {
e.printStackTrace();
errors.incrementAndGet();
} finally {
try {
closeBarrier.await(10, TimeUnit.SECONDS);
if (connection != null) {
connection.close();
}
} catch (Exception ignored) {
}
}
}
};
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(null, runnable, "test " + i);
threads[i].start();
}
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
Assert.assertEquals(0, errors.get());
Assert.assertFalse(AssertionLoggerHandler.findText("AMQ229018"));
} finally {
AssertionLoggerHandler.stopCapture();
}
}
}
| {
"pile_set_name": "Github"
} |
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.eShopOnContainers.WebMVC;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using Serilog;
using System;
using System.IO;
using System.IO.Compression;
using System.Linq;
namespace WebMVC.Infrastructure
{
public class WebContextSeed
{
public static void Seed(IApplicationBuilder applicationBuilder, IWebHostEnvironment env)
{
var log = Serilog.Log.Logger;
            var settings = applicationBuilder
                .ApplicationServices.GetRequiredService<IOptions<AppSettings>>().Value;
var useCustomizationData = settings.UseCustomizationData;
var contentRootPath = env.ContentRootPath;
var webroot = env.WebRootPath;
if (useCustomizationData)
{
GetPreconfiguredImages(contentRootPath, webroot, log);
GetPreconfiguredCSS(contentRootPath, webroot, log);
}
}
static void GetPreconfiguredCSS(string contentRootPath, string webroot, ILogger log)
{
try
{
string overrideCssFile = Path.Combine(contentRootPath, "Setup", "override.css");
if (!File.Exists(overrideCssFile))
{
log.Error("Override css file '{FileName}' does not exists.", overrideCssFile);
return;
}
string destinationFilename = Path.Combine(webroot, "css", "override.css");
File.Copy(overrideCssFile, destinationFilename, true);
}
catch (Exception ex)
{
log.Error(ex, "EXCEPTION ERROR: {Message}", ex.Message);
}
}
static void GetPreconfiguredImages(string contentRootPath, string webroot, ILogger log)
{
try
{
string imagesZipFile = Path.Combine(contentRootPath, "Setup", "images.zip");
if (!File.Exists(imagesZipFile))
{
log.Error("Zip file '{ZipFileName}' does not exists.", imagesZipFile);
return;
}
string imagePath = Path.Combine(webroot, "images");
string[] imageFiles = Directory.GetFiles(imagePath).Select(file => Path.GetFileName(file)).ToArray();
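                // Only overwrite images that already exist under wwwroot/images; any other zip entry is skipped with a warning.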
using (ZipArchive zip = ZipFile.Open(imagesZipFile, ZipArchiveMode.Read))
{
foreach (ZipArchiveEntry entry in zip.Entries)
{
if (imageFiles.Contains(entry.Name))
{
string destinationFilename = Path.Combine(imagePath, entry.Name);
if (File.Exists(destinationFilename))
{
File.Delete(destinationFilename);
}
entry.ExtractToFile(destinationFilename);
}
else
{
log.Warning("Skipped file '{FileName}' in zipfile '{ZipFileName}'", entry.Name, imagesZipFile);
}
}
}
}
catch (Exception ex)
{
log.Error(ex, "EXCEPTION ERROR: {Message}", ex.Message);
}
}
}
}
| {
"pile_set_name": "Github"
} |
package systemd
import (
"errors"
"net"
"strconv"
"github.com/coreos/go-systemd/activation"
)
// ListenFD returns the specified socket activated files as a slice of
// net.Listeners or all of the activated files if "*" is given.
func ListenFD(addr string) ([]net.Listener, error) {
// socket activation
listeners, err := activation.Listeners(false)
if err != nil {
return nil, err
}
if len(listeners) == 0 {
return nil, errors.New("No sockets found")
}
// default to all fds just like unix:// and tcp://
if addr == "" {
addr = "*"
}
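	// systemd passes activated sockets starting at file descriptor 3
	// (SD_LISTEN_FDS_START), so translate the requested fd number into
	// an index into the listeners slice.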
fdNum, _ := strconv.Atoi(addr)
fdOffset := fdNum - 3
if (addr != "*") && (len(listeners) < int(fdOffset)+1) {
return nil, errors.New("Too few socket activated files passed in")
}
if addr == "*" {
return listeners, nil
}
return []net.Listener{listeners[fdOffset]}, nil
}
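
// Example usage (an illustrative sketch only; serve is a hypothetical
// helper that accepts connections on one listener):
//
//	listeners, err := systemd.ListenFD("*") // or "3" for the first passed fd
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, l := range listeners {
//		go serve(l)
//	}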
| {
"pile_set_name": "Github"
} |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package ssh implements an SSH client and server.
SSH is a transport security protocol, an authentication protocol and a
family of application protocols. The most typical application level
protocol is a remote shell and this is specifically implemented. However,
the multiplexed nature of SSH is exposed to users who wish to support
others.
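
For example, a client connection might be set up as follows (an
illustrative sketch only; the exact identifier names vary between
versions of this package):

	config := &ClientConfig{
		User: "username",
		Auth: []AuthMethod{Password("secret")},
	}
	client, err := Dial("tcp", "host.example.com:22", config)
	if err != nil {
		panic(err)
	}
	session, err := client.NewSession()
	if err != nil {
		panic(err)
	}
	defer session.Close()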
References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
*/
package ssh
| {
"pile_set_name": "Github"
} |
{
"_from": "inflight@^1.0.4",
"_id": "[email protected]",
"_inBundle": false,
"_integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
"_location": "/cordova-android/inflight",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
"raw": "inflight@^1.0.4",
"name": "inflight",
"escapedName": "inflight",
"rawSpec": "^1.0.4",
"saveSpec": null,
"fetchSpec": "^1.0.4"
},
"_requiredBy": [
"/cordova-android/glob"
],
"_resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"_shasum": "49bd6331d7d02d0c09bc910a1075ba8165b56df9",
"_spec": "inflight@^1.0.4",
"_where": "/Volumes/hackintosh/devs/repos/siberian/platforms/Android/node_modules/glob",
"author": {
"name": "Isaac Z. Schlueter",
"email": "[email protected]",
"url": "http://blog.izs.me/"
},
"bugs": {
"url": "https://github.com/isaacs/inflight/issues"
},
"bundleDependencies": false,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
},
"deprecated": false,
"description": "Add callbacks to requests in flight to avoid async duplication",
"devDependencies": {
"tap": "^7.1.2"
},
"files": [
"inflight.js"
],
"homepage": "https://github.com/isaacs/inflight",
"license": "ISC",
"main": "inflight.js",
"name": "inflight",
"repository": {
"type": "git",
"url": "git+https://github.com/npm/inflight.git"
},
"scripts": {
"test": "tap test.js --100"
},
"version": "1.0.6"
}
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_TYPES_H_
#define _TOOLS_LINUX_TYPES_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <asm/types.h>
#include <asm/posix_types.h>
struct page;
struct kmem_cache;
typedef enum {
GFP_KERNEL,
GFP_ATOMIC,
__GFP_HIGHMEM,
__GFP_HIGH
} gfp_t;
/*
* We define u64 as uint64_t for every architecture
* so that we can print it with "%"PRIx64 without getting warnings.
*
* typedef __u64 u64;
* typedef __s64 s64;
*/
typedef uint64_t u64;
typedef int64_t s64;
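/*
 * For example (illustrative only; assumes <inttypes.h> for PRIx64):
 *
 *	u64 addr = 0xdeadbeefULL;
 *	printf("addr = %" PRIx64 "\n", addr);
 */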
typedef __u32 u32;
typedef __s32 s32;
typedef __u16 u16;
typedef __s16 s16;
typedef __u8 u8;
typedef __s8 s8;
#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#else
#define __bitwise__
#endif
#define __bitwise __bitwise__
#define __force
#define __user
#define __must_check
#define __cold
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __le32;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
typedef struct {
int counter;
} atomic_t;
#ifndef __aligned_u64
# define __aligned_u64 __u64 __attribute__((aligned(8)))
#endif
struct list_head {
struct list_head *next, *prev;
};
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
#endif /* _TOOLS_LINUX_TYPES_H_ */
| {
"pile_set_name": "Github"
} |
/**
* Copyright 2019 vip.com.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* </p>
*/
package com.vip.pallas.console.controller.index.template;
import static javax.servlet.http.HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import com.vip.pallas.entity.BusinessLevelExceptionCode;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.io.IOUtils;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.ObjectUtils;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import com.google.common.net.HostAndPort;
import com.vip.pallas.bean.PerformanceData;
import com.vip.pallas.bean.QueryParamSetting;
import com.vip.pallas.console.utils.AuditLogUtil;
import com.vip.pallas.console.utils.AuthorizeUtil;
import com.vip.pallas.exception.BusinessLevelException;
import com.vip.pallas.mybatis.entity.Cluster;
import com.vip.pallas.mybatis.entity.Index;
import com.vip.pallas.mybatis.entity.SearchTemplate;
import com.vip.pallas.utils.JsonUtil;
import com.vip.pallas.utils.ObjectMapTool;
@RestController
@RequestMapping("/index_template/performance_script")
public class PerformanceController extends PerformanceBaseController {
private static Logger logger = LoggerFactory.getLogger(PerformanceController.class);
protected static ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@RequestMapping(value = "/param.json")
public List<Map<String, String>> param(@RequestBody Map<String, Object> params, HttpServletRequest request) { // NOSONAR
SearchTemplate template = this.getSearchTemplateFromParams(params);
clearDataCache(template, request);
return performanceScriptService.getQueryParamNames(template);
}
/**
 * Generates the performance test script and packages it as a zip for download.
*/
@RequestMapping(value = "/gen.json")
public void gen(@RequestParam @NotEmpty(message = "params must not be empty") String params
, HttpServletRequest request, HttpServletResponse response) throws Exception { // NOSONAR
Map<String, Object> paramMap = getJsonObj(params);
SearchTemplate template = this.getSearchTemplateFromParams(paramMap);
List<HostAndPort> hps = getHostAndPorts(template);
JsonNode paramsNode = ObjectMapTool.getObject(paramMap, "params", JsonNode.class);
List<PerformanceData> pds = getPerformanceDataFromCache(request);
List<List<QueryParamSetting>> paramSettingsList = new ArrayList<List<QueryParamSetting>>();
if (paramsNode != null) {
List<QueryParamSetting> paramSettings = OBJECT_MAPPER.readValue(paramsNode,
new TypeReference<List<QueryParamSetting>>() {
});
paramSettingsList.add(paramSettings);
}
String jmxScript = performanceScriptService.genJmxScript(template, pds, paramSettingsList);
response.setStatus(HttpServletResponse.SC_OK);
response.setContentType("application/zip");
String testname = template.getIndexId() + "_" + template.getTemplateName();
response.setHeader("Content-Disposition", "attachment; filename=\"" + testname + ".zip\"");
try {
performanceScriptService.zipFiles(testname, jmxScript, hps, pds, response.getOutputStream());
} catch (Exception e) {
logger.error("生成压缩包的时候出错", e);
throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, e.getMessage());
}
}
/**
 * Uploads the data source file for the performance test script together with the corresponding query parameter names.
*/
@RequestMapping(value = "/upload.json")
public String upload(@RequestParam @NotNull(message = "file must not be null") MultipartFile file,
    @RequestParam @NotNull(message = "indexId must not be null") @Min(value = 1, message = "indexId must not be less than 1") Long indexId,
    @RequestParam @NotBlank(message = "paramNameDef must not be blank") String paramNameDef,
    @RequestParam @NotBlank(message = "templateName must not be blank") String templateName,
HttpServletRequest request) { // NOSONAR
Index index = indexService.findById(indexId);
if (index == null) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "index does not exist");
}
if (!AuthorizeUtil.authorizeIndexPrivilege(request, index.getId(), index.getIndexName())) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_FORBIDDEN, "no permission to perform this operation");
}
SearchTemplate template = templateService.findByNameAndIndexId(templateName, indexId);
if (template == null) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "template does not exist");
}
if (template.getType() != SearchTemplate.TYPE_TEMPLATE) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "invalid template type");
}
paramNameDef = paramNameDef.replace(';', ',');
String fileName = FilenameUtils.getName(file.getOriginalFilename());
PerformanceData performanceData = new PerformanceData(null, paramNameDef, fileName, null);
addDataCache(template, request, performanceData);
String realFileName = saveDataFile(template, file, fileName);
performanceData.setRealFileName(realFileName);
AuditLogUtil.log("upload perf test data: {0}", realFileName);
return fileName;
}
@RequestMapping(value = "/update.json")
public void update(@RequestBody Map<String, Object> params, HttpServletRequest request) { // NOSONAR
SearchTemplate template = this.getSearchTemplateFromParams(params);
Long indexId = ObjectMapTool.getLong(params, "indexId");
Index index = indexService.findById(indexId);
if (index == null) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "index does not exist");
}
if (!AuthorizeUtil.authorizeIndexPrivilege(request, index.getId(), index.getIndexName())) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_FORBIDDEN, "no permission to perform this operation");
}
String fileName = ObjectMapTool.getString(params, "fileName");
String paramNameDef = ObjectMapTool.getString(params, "paramNameDef");
if (ObjectUtils.isEmpty(paramNameDef)) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "paramNameDef must not be empty");
}
if (ObjectUtils.isEmpty(fileName)) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "fileName must not be empty");
}
paramNameDef = paramNameDef.replace(';', ',');
updatePerformanceData(template, request, paramNameDef, fileName);
}
/**
 * Helper for update.json.
*/
@SuppressWarnings("unchecked")
private void updatePerformanceData(SearchTemplate st, HttpServletRequest request, String paramNameDef, String fileName) {
Object dataObj = request.getSession().getAttribute(DATA_CACHE_KEY);
if (dataObj != null) {
String dataKey = getDataCacheKey(st, fileName);
Map<String, PerformanceData> dataCacheMap = (Map<String, PerformanceData>) dataObj;
PerformanceData pd = dataCacheMap.get(dataKey);
if (pd != null) {
checkReqParamName(dataCacheMap, paramNameDef, pd.getParamNameDef());
pd.setParamNameDef(paramNameDef);
AuditLogUtil.log("update perf test data: {0}", dataKey);
}
}
}
@RequestMapping(value = "/delete.json")
public void delete(@RequestBody Map<String, Object> params, HttpServletRequest request) { // NOSONAR
SearchTemplate template = this.getSearchTemplateFromParams(params);
Long indexId = ObjectMapTool.getLong(params, "indexId");
Index index = indexService.findById(indexId);
if (index == null) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "index does not exist");
}
if (!AuthorizeUtil.authorizeIndexPrivilege(request, index.getId(), index.getIndexName())) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_FORBIDDEN, "no permission to perform this operation");
}
String fileName = ObjectMapTool.getString(params, "fileName");
if (ObjectUtils.isEmpty(fileName)) {
    throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "fileName must not be empty");
}
deleteDataFile(template, request, fileName);
}
/**
 * Helper for delete.json.
*/
@SuppressWarnings("unchecked")
private void deleteDataFile(SearchTemplate st, HttpServletRequest request, String fileName) {
Object dataObj = request.getSession().getAttribute(DATA_CACHE_KEY);
if (dataObj != null) {
String dataKey = getDataCacheKey(st, fileName);
Map<String, PerformanceData> dataCacheMap = (Map<String, PerformanceData>) dataObj;
dataCacheMap.remove(dataKey);
File dir = this.getTemplateDataFileSaveDirFile(st);
File file = new File(dir, fileName);
FileUtils.deleteQuietly(file);
AuditLogUtil.log("delete perf test data: {0}", file.getAbsolutePath());
}
}
/**
 * Helper for upload.json.
*/
private String saveDataFile(SearchTemplate template, MultipartFile file, String fileName) {
File dir = getTemplateDataFileSaveDirFile(template);
File toSaveFile = new File(dir, fileName);
try (OutputStream fos = Files.newOutputStream(toSaveFile.toPath())) { //NOSONAR
IOUtils.copy(file.getInputStream(), fos);
} catch (IOException ioe) {
String errorMsg = "保存文件的时候出错,文件:" + fileName;
logger.error(errorMsg, ioe);
throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, errorMsg);
}
return toSaveFile.getAbsolutePath();
}
@SuppressWarnings("unchecked")
private void addDataCache(SearchTemplate st, HttpServletRequest request, PerformanceData performanceData) {
Object dataObj = request.getSession().getAttribute(DATA_CACHE_KEY);
Map<String, PerformanceData> dataCacheMap;
String dataKey = getDataCacheKey(st, performanceData.getFileName());
if (dataObj == null) {
dataCacheMap = new HashMap<>();
} else {
dataCacheMap = (Map<String, PerformanceData>) dataObj;
if (dataCacheMap.containsKey(dataKey)) {
throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, "A file with the same name already exists: " + performanceData.getFileName());
}
checkReqParamName(dataCacheMap, performanceData.getParamNameDef(), null);
}
dataCacheMap.put(dataKey, performanceData);
request.getSession().setAttribute(DATA_CACHE_KEY, dataCacheMap);
}
/**
 * Helper for gen.json.
*/
private List<HostAndPort> getHostAndPorts(SearchTemplate template) {
Index index = indexService.findById(template.getIndexId());
Cluster cluster = clusterService.findByName(index.getClusterName());
//Cluster cluster = clusterService.selectUsedPhysicalClustersByIndexId(index.getId());
String httpAddressStr = cluster.getHttpAddress();
String[] httpAddressList = httpAddressStr.split(",");
return Stream.of(httpAddressList).map(address -> HostAndPort.fromString(address)).collect(Collectors.toList());
}
protected Map<String, Object> getJsonObj(String jsonStr) throws Exception {
return JsonUtil.parseJsonObject(jsonStr);
}
@SuppressWarnings("unchecked")
private List<PerformanceData> getPerformanceDataFromCache(HttpServletRequest request) {
Object dataObj = request.getSession().getAttribute(DATA_CACHE_KEY);
Map<String, PerformanceData> dataCacheMap = null;
if (dataObj != null) {
dataCacheMap = (Map<String, PerformanceData>) dataObj;
return new ArrayList<>(dataCacheMap.values());
}
return new ArrayList<>();
}
/**
 * Helper for param.json.
*/
private void clearDataCache(SearchTemplate st, HttpServletRequest request) {
File dir = this.getTemplateDataFileSaveDirFile(st);
try {
FileUtils.cleanDirectory(dir);
} catch (IOException e) {
String errorMsg = "在初始化清空数据文件临时保存目录的时候报错," + dir.getAbsolutePath();
logger.error(errorMsg, e);
throw new BusinessLevelException(BusinessLevelExceptionCode.HTTP_INTERNAL_SERVER_ERROR, errorMsg);
}
request.getSession().removeAttribute(DATA_CACHE_KEY);
}
} | {
"pile_set_name": "Github"
} |
{
"pagination": {
"ListSignalingChannels": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "ChannelInfoList"
},
"ListStreams": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "StreamInfoList"
}
}
} | {
"pile_set_name": "Github"
} |
import ws2_32
'''
A small module for keeping a database of ordinal to symbol
mappings for DLLs which frequently get linked without symbolic
info.
'''
ords = {
'ws2_32.dll': ws2_32.ord_names,
'wsock32.dll': ws2_32.ord_names,
}
def ordLookup(libname, ord):
'''
Lookup a name for the given ordinal if it's in our
database.
'''
names = ords.get(libname.lower())
if names is None:
return 'ord%d' % ord
name = names.get(ord)
if name is None:
return 'ord%d' % ord
return name
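
# Example usage (illustrative only; ordinal 23 in the standard ws2_32.dll
# export table commonly maps to 'socket', and unknown libraries fall back
# to the 'ordN' form):
#
#   >>> ordLookup('ws2_32.dll', 23)
#   'socket'
#   >>> ordLookup('unknown.dll', 5)
#   'ord5'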
| {
"pile_set_name": "Github"
} |
version https://git-lfs.github.com/spec/v1
oid sha256:f6e913cc53b43f7ecac2313eb6f5c2b3c05a82b9dccec092f268c701b5c1ebe6
size 6888
| {
"pile_set_name": "Github"
} |
diff --git a/ext/openssl/extconf.rb b/ext/openssl/extconf.rb
index 8c04cb5..132d803 100644
--- a/ext/openssl/extconf.rb
+++ b/ext/openssl/extconf.rb
@@ -104,6 +104,9 @@
have_func("SSLv2_method")
have_func("SSLv2_server_method")
have_func("SSLv2_client_method")
+have_func("SSLv3_method")
+have_func("SSLv3_server_method")
+have_func("SSLv3_client_method")
have_func("TLSv1_1_method")
have_func("TLSv1_1_server_method")
have_func("TLSv1_1_client_method")
diff --git a/ext/openssl/ossl_ssl.c b/ext/openssl/ossl_ssl.c
index f7cb7f0..47111f6 100644
--- a/ext/openssl/ossl_ssl.c
+++ b/ext/openssl/ossl_ssl.c
@@ -109,9 +109,12 @@ static const struct {
OSSL_SSL_METHOD_ENTRY(SSLv2_server),
OSSL_SSL_METHOD_ENTRY(SSLv2_client),
#endif
+#if defined(HAVE_SSLV3_METHOD) && defined(HAVE_SSLV3_SERVER_METHOD) && \
+ defined(HAVE_SSLV3_CLIENT_METHOD)
OSSL_SSL_METHOD_ENTRY(SSLv3),
OSSL_SSL_METHOD_ENTRY(SSLv3_server),
OSSL_SSL_METHOD_ENTRY(SSLv3_client),
+#endif
OSSL_SSL_METHOD_ENTRY(SSLv23),
OSSL_SSL_METHOD_ENTRY(SSLv23_server),
OSSL_SSL_METHOD_ENTRY(SSLv23_client),
| {
"pile_set_name": "Github"
} |
// This file is part of CPAchecker,
// a tool for configurable software verification:
// https://cpachecker.sosy-lab.org
//
// SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
//
// SPDX-License-Identifier: Apache-2.0
struct module;
struct lock_class_key;
struct class_interface;
struct class;
extern struct class *__class_create(struct module *owner, const char *name, struct lock_class_key *key);
extern int class_interface_register(struct class_interface *);
extern void class_interface_unregister(struct class_interface *);
extern void class_destroy(struct class *cls);
const int ERR_PTR = 2012;
long is_err(const void *ptr)
{
return ((unsigned long)ptr > ERR_PTR);
}
void *err_ptr(long error)
{
return (void *)(ERR_PTR - error);
}
long ptr_err(const void *ptr)
{
return (long)(ERR_PTR - (unsigned long)ptr);
}
long is_err_or_null(const void *ptr)
{
return !ptr || is_err(ptr);
}
void ldv_check_final_state(void);
void main(void)
{
struct module *cur_module;
struct class *cur_class;
struct lock_class_key *key;
struct class_interface *interface;
int is_registered = 0;
// no create
class_destroy(cur_class);
ldv_check_final_state();
}
| {
"pile_set_name": "Github"
} |
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([["vendors~react-syntax-highlighter_languages_highlight_vim"],{
/***/ "./node_modules/highlight.js/lib/languages/vim.js":
/*!********************************************************!*\
!*** ./node_modules/highlight.js/lib/languages/vim.js ***!
\********************************************************/
/*! no static exports found */
/***/ (function(module, exports) {
/*
Language: Vim Script
Author: Jun Yang <[email protected]>
Description: full keyword and built-in from http://vimdoc.sourceforge.net/htmldoc/
Website: https://www.vim.org
Category: scripting
*/
function vim(hljs) {
return {
name: 'Vim Script',
keywords: {
$pattern: /[!#@\w]+/,
keyword:
// express version except: ! & * < = > !! # @ @@
'N|0 P|0 X|0 a|0 ab abc abo al am an|0 ar arga argd arge argdo argg argl argu as au aug aun b|0 bN ba bad bd be bel bf bl bm bn bo bp br brea breaka breakd breakl bro bufdo buffers bun bw c|0 cN cNf ca cabc caddb cad caddf cal cat cb cc ccl cd ce cex cf cfir cgetb cgete cg changes chd che checkt cl cla clo cm cmapc cme cn cnew cnf cno cnorea cnoreme co col colo com comc comp con conf cope '+
'cp cpf cq cr cs cst cu cuna cunme cw delm deb debugg delc delf dif diffg diffo diffp diffpu diffs diffthis dig di dl dell dj dli do doautoa dp dr ds dsp e|0 ea ec echoe echoh echom echon el elsei em en endfo endf endt endw ene ex exe exi exu f|0 files filet fin fina fini fir fix fo foldc foldd folddoc foldo for fu go gr grepa gu gv ha helpf helpg helpt hi hid his ia iabc if ij il im imapc '+
'ime ino inorea inoreme int is isp iu iuna iunme j|0 ju k|0 keepa kee keepj lN lNf l|0 lad laddb laddf la lan lat lb lc lch lcl lcs le lefta let lex lf lfir lgetb lgete lg lgr lgrepa lh ll lla lli lmak lm lmapc lne lnew lnf ln loadk lo loc lockv lol lope lp lpf lr ls lt lu lua luad luaf lv lvimgrepa lw m|0 ma mak map mapc marks mat me menut mes mk mks mksp mkv mkvie mod mz mzf nbc nb nbs new nm nmapc nme nn nnoreme noa no noh norea noreme norm nu nun nunme ol o|0 om omapc ome on ono onoreme opt ou ounme ow p|0 '+
'profd prof pro promptr pc ped pe perld po popu pp pre prev ps pt ptN ptf ptj ptl ptn ptp ptr pts pu pw py3 python3 py3d py3f py pyd pyf quita qa rec red redi redr redraws reg res ret retu rew ri rightb rub rubyd rubyf rund ru rv sN san sa sal sav sb sbN sba sbf sbl sbm sbn sbp sbr scrip scripte scs se setf setg setl sf sfir sh sim sig sil sl sla sm smap smapc sme sn sni sno snor snoreme sor '+
'so spelld spe spelli spellr spellu spellw sp spr sre st sta startg startr star stopi stj sts sun sunm sunme sus sv sw sy synti sync tN tabN tabc tabdo tabe tabf tabfir tabl tabm tabnew '+
'tabn tabo tabp tabr tabs tab ta tags tc tcld tclf te tf th tj tl tm tn to tp tr try ts tu u|0 undoj undol una unh unl unlo unm unme uns up ve verb vert vim vimgrepa vi viu vie vm vmapc vme vne vn vnoreme vs vu vunme windo w|0 wN wa wh wi winc winp wn wp wq wqa ws wu wv x|0 xa xmapc xm xme xn xnoreme xu xunme y|0 z|0 ~ '+
// full version
'Next Print append abbreviate abclear aboveleft all amenu anoremenu args argadd argdelete argedit argglobal arglocal argument ascii autocmd augroup aunmenu buffer bNext ball badd bdelete behave belowright bfirst blast bmodified bnext botright bprevious brewind break breakadd breakdel breaklist browse bunload '+
'bwipeout change cNext cNfile cabbrev cabclear caddbuffer caddexpr caddfile call catch cbuffer cclose center cexpr cfile cfirst cgetbuffer cgetexpr cgetfile chdir checkpath checktime clist clast close cmap cmapclear cmenu cnext cnewer cnfile cnoremap cnoreabbrev cnoremenu copy colder colorscheme command comclear compiler continue confirm copen cprevious cpfile cquit crewind cscope cstag cunmap '+
'cunabbrev cunmenu cwindow delete delmarks debug debuggreedy delcommand delfunction diffupdate diffget diffoff diffpatch diffput diffsplit digraphs display deletel djump dlist doautocmd doautoall deletep drop dsearch dsplit edit earlier echo echoerr echohl echomsg else elseif emenu endif endfor '+
'endfunction endtry endwhile enew execute exit exusage file filetype find finally finish first fixdel fold foldclose folddoopen folddoclosed foldopen function global goto grep grepadd gui gvim hardcopy help helpfind helpgrep helptags highlight hide history insert iabbrev iabclear ijump ilist imap '+
'imapclear imenu inoremap inoreabbrev inoremenu intro isearch isplit iunmap iunabbrev iunmenu join jumps keepalt keepmarks keepjumps lNext lNfile list laddexpr laddbuffer laddfile last language later lbuffer lcd lchdir lclose lcscope left leftabove lexpr lfile lfirst lgetbuffer lgetexpr lgetfile lgrep lgrepadd lhelpgrep llast llist lmake lmap lmapclear lnext lnewer lnfile lnoremap loadkeymap loadview '+
'lockmarks lockvar lolder lopen lprevious lpfile lrewind ltag lunmap luado luafile lvimgrep lvimgrepadd lwindow move mark make mapclear match menu menutranslate messages mkexrc mksession mkspell mkvimrc mkview mode mzscheme mzfile nbclose nbkey nbsart next nmap nmapclear nmenu nnoremap '+
'nnoremenu noautocmd noremap nohlsearch noreabbrev noremenu normal number nunmap nunmenu oldfiles open omap omapclear omenu only onoremap onoremenu options ounmap ounmenu ownsyntax print profdel profile promptfind promptrepl pclose pedit perl perldo pop popup ppop preserve previous psearch ptag ptNext '+
'ptfirst ptjump ptlast ptnext ptprevious ptrewind ptselect put pwd py3do py3file python pydo pyfile quit quitall qall read recover redo redir redraw redrawstatus registers resize retab return rewind right rightbelow ruby rubydo rubyfile rundo runtime rviminfo substitute sNext sandbox sargument sall saveas sbuffer sbNext sball sbfirst sblast sbmodified sbnext sbprevious sbrewind scriptnames scriptencoding '+
'scscope set setfiletype setglobal setlocal sfind sfirst shell simalt sign silent sleep slast smagic smapclear smenu snext sniff snomagic snoremap snoremenu sort source spelldump spellgood spellinfo spellrepall spellundo spellwrong split sprevious srewind stop stag startgreplace startreplace '+
'startinsert stopinsert stjump stselect sunhide sunmap sunmenu suspend sview swapname syntax syntime syncbind tNext tabNext tabclose tabedit tabfind tabfirst tablast tabmove tabnext tabonly tabprevious tabrewind tag tcl tcldo tclfile tearoff tfirst throw tjump tlast tmenu tnext topleft tprevious '+'trewind tselect tunmenu undo undojoin undolist unabbreviate unhide unlet unlockvar unmap unmenu unsilent update vglobal version verbose vertical vimgrep vimgrepadd visual viusage view vmap vmapclear vmenu vnew '+
'vnoremap vnoremenu vsplit vunmap vunmenu write wNext wall while winsize wincmd winpos wnext wprevious wqall wsverb wundo wviminfo xit xall xmapclear xmap xmenu xnoremap xnoremenu xunmap xunmenu yank',
built_in: //built in func
'synIDtrans atan2 range matcharg did_filetype asin feedkeys xor argv ' +
'complete_check add getwinposx getqflist getwinposy screencol ' +
'clearmatches empty extend getcmdpos mzeval garbagecollect setreg ' +
'ceil sqrt diff_hlID inputsecret get getfperm getpid filewritable ' +
'shiftwidth max sinh isdirectory synID system inputrestore winline ' +
'atan visualmode inputlist tabpagewinnr round getregtype mapcheck ' +
'hasmapto histdel argidx findfile sha256 exists toupper getcmdline ' +
'taglist string getmatches bufnr strftime winwidth bufexists ' +
'strtrans tabpagebuflist setcmdpos remote_read printf setloclist ' +
'getpos getline bufwinnr float2nr len getcmdtype diff_filler luaeval ' +
'resolve libcallnr foldclosedend reverse filter has_key bufname ' +
'str2float strlen setline getcharmod setbufvar index searchpos ' +
'shellescape undofile foldclosed setqflist buflisted strchars str2nr ' +
'virtcol floor remove undotree remote_expr winheight gettabwinvar ' +
'reltime cursor tabpagenr finddir localtime acos getloclist search ' +
'tanh matchend rename gettabvar strdisplaywidth type abs py3eval ' +
'setwinvar tolower wildmenumode log10 spellsuggest bufloaded ' +
'synconcealed nextnonblank server2client complete settabwinvar ' +
'executable input wincol setmatches getftype hlID inputsave ' +
'searchpair or screenrow line settabvar histadd deepcopy strpart ' +
'remote_peek and eval getftime submatch screenchar winsaveview ' +
'matchadd mkdir screenattr getfontname libcall reltimestr getfsize ' +
'winnr invert pow getbufline byte2line soundfold repeat fnameescape ' +
'tagfiles sin strwidth spellbadword trunc maparg log lispindent ' +
'hostname setpos globpath remote_foreground getchar synIDattr ' +
'fnamemodify cscope_connection stridx winbufnr indent min ' +
'complete_add nr2char searchpairpos inputdialog values matchlist ' +
'items hlexists strridx browsedir expand fmod pathshorten line2byte ' +
'argc count getwinvar glob foldtextresult getreg foreground cosh ' +
'matchdelete has char2nr simplify histget searchdecl iconv ' +
'winrestcmd pumvisible writefile foldlevel haslocaldir keys cos ' +
'matchstr foldtext histnr tan tempname getcwd byteidx getbufvar ' +
'islocked escape eventhandler remote_send serverlist winrestview ' +
'synstack pyeval prevnonblank readfile cindent filereadable changenr ' +
'exp'
},
illegal: /;/,
contains: [
hljs.NUMBER_MODE,
{
className: 'string',
begin: '\'', end: '\'',
illegal: '\\n'
},
/*
A double quote can start either a string or a line comment. Strings are
ended before the end of a line by another double quote and can contain
escaped double-quotes and post-escaped line breaks.
Also, any double quote at the beginning of a line starts a comment, but
we don't handle that properly at the moment: any double quote inside
will turn the rest into a string. Handling it properly will require a
smarter parser.
*/
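      // For instance (Vim script shown for context only):
      //   let s = "a \"quoted\" string"   " trailing comment
      //   " a full-line comment that is currently mis-scanned as a string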
{
className: 'string',
begin: /"(\\"|\n\\|[^"\n])*"/
},
hljs.COMMENT('"', '$'),
{
className: 'variable',
begin: /[bwtglsav]:[\w\d_]*/
},
{
className: 'function',
beginKeywords: 'function function!', end: '$',
relevance: 0,
contains: [
hljs.TITLE_MODE,
{
className: 'params',
begin: '\\(', end: '\\)'
}
]
},
{
className: 'symbol',
begin: /<[\w-]+>/
}
]
};
}
module.exports = vim;
/***/ })
}]);
//# sourceMappingURL=vendors~react-syntax-highlighter_languages_highlight_vim.js.map | {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import logging
# Django
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin
)
from django.http.response import HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.translation import (
ugettext as _,
ugettext_lazy
)
from django.views.generic import (
CreateView,
DeleteView,
ListView,
UpdateView
)
# wger
from wger.gym.models import (
ContractOption,
Gym
)
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin
)
logger = logging.getLogger(__name__)
class AddView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, CreateView):
"""
View to add a new contract option
"""
model = ContractOption
fields = ('name', 'description')
title = ugettext_lazy('Add option')
permission_required = 'gym.add_contractoption'
member = None
def get_success_url(self):
"""
Redirect back to overview page
"""
return reverse('gym:contract-option:list', kwargs={'gym_pk': self.object.gym_id})
def dispatch(self, request, *args, **kwargs):
"""
Can only add contract types in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
if request.user.userprofile.gym_id != int(self.kwargs['gym_pk']):
return HttpResponseForbidden()
return super(AddView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
"""
Set the foreign key to the gym object
"""
form.instance.gym_id = self.kwargs['gym_pk']
return super(AddView, self).form_valid(form)
class UpdateView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
"""
View to update an existing contract option
"""
model = ContractOption
fields = ('name', 'description')
permission_required = 'gym.change_contractoption'
def dispatch(self, request, *args, **kwargs):
"""
Can only add contract types in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
contract_type = self.get_object()
if request.user.userprofile.gym_id != contract_type.gym_id:
return HttpResponseForbidden()
return super(UpdateView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
"""
Redirect back to overview page
"""
return reverse('gym:contract-option:list', kwargs={'gym_pk': self.object.gym_id})
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(UpdateView, self).get_context_data(**kwargs)
context['title'] = _('Edit {0}').format(self.object)
return context
class DeleteView(WgerDeleteMixin, LoginRequiredMixin, PermissionRequiredMixin, DeleteView):
"""
View to delete an existing contract option
"""
model = ContractOption
fields = ('name', 'description')
permission_required = 'gym.delete_contractoption'
def dispatch(self, request, *args, **kwargs):
"""
Can only add contract option in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
contract_type = self.get_object()
if request.user.userprofile.gym_id != contract_type.gym_id:
return HttpResponseForbidden()
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
"""
Redirect back to overview page
"""
return reverse('gym:contract-option:list', kwargs={'gym_pk': self.object.gym_id})
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(DeleteView, self).get_context_data(**kwargs)
context['title'] = _('Delete {0}').format(self.object)
return context
class ListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
"""
Overview of all available contract options
"""
model = ContractOption
permission_required = 'gym.add_contractoption'
template_name = 'contract_option/list.html'
gym = None
def get_queryset(self):
"""
Only contract options for the current gym
"""
return ContractOption.objects.filter(gym=self.gym)
def dispatch(self, request, *args, **kwargs):
"""
Can only list contract types in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
self.gym = get_object_or_404(Gym, id=self.kwargs['gym_pk'])
if request.user.userprofile.gym_id != self.gym.id:
return HttpResponseForbidden()
return super(ListView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(ListView, self).get_context_data(**kwargs)
context['gym'] = self.gym
return context
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package p2p
import (
"errors"
"fmt"
)
const (
errInvalidMsgCode = iota
errInvalidMsg
)
var errorToString = map[int]string{
errInvalidMsgCode: "invalid message code",
errInvalidMsg: "invalid message",
}
type peerError struct {
code int
message string
}
func newPeerError(code int, format string, v ...interface{}) *peerError {
desc, ok := errorToString[code]
if !ok {
panic("invalid error code")
}
err := &peerError{code, desc}
if format != "" {
err.message += ": " + fmt.Sprintf(format, v...)
}
return err
}
func (pe *peerError) Error() string {
return pe.message
}
var errProtocolReturned = errors.New("protocol returned")
type DiscReason uint
const (
DiscRequested DiscReason = iota
DiscNetworkError
DiscProtocolError
DiscUselessPeer
DiscTooManyPeers
DiscAlreadyConnected
DiscIncompatibleVersion
DiscInvalidIdentity
DiscQuitting
DiscUnexpectedIdentity
DiscSelf
DiscReadTimeout
DiscSubprotocolError = 0x10
)
var discReasonToString = [...]string{
DiscRequested: "disconnect requested",
DiscNetworkError: "network error",
DiscProtocolError: "breach of protocol",
DiscUselessPeer: "useless peer",
DiscTooManyPeers: "too many peers",
DiscAlreadyConnected: "already connected",
DiscIncompatibleVersion: "incompatible p2p protocol version",
DiscInvalidIdentity: "invalid node identity",
DiscQuitting: "client quitting",
DiscUnexpectedIdentity: "unexpected identity",
DiscSelf: "connected to self",
DiscReadTimeout: "read timeout",
DiscSubprotocolError: "subprotocol error",
}
func (d DiscReason) String() string {
if int(d) >= len(discReasonToString) || discReasonToString[d] == "" {
return fmt.Sprintf("unknown disconnect reason %d", d)
}
return discReasonToString[d]
}
func (d DiscReason) Error() string {
return d.String()
}
func discReasonForError(err error) DiscReason {
if reason, ok := err.(DiscReason); ok {
return reason
}
if err == errProtocolReturned {
return DiscQuitting
}
peerError, ok := err.(*peerError)
if ok {
switch peerError.code {
case errInvalidMsgCode, errInvalidMsg:
return DiscProtocolError
default:
return DiscSubprotocolError
}
}
return DiscSubprotocolError
}
| {
"pile_set_name": "Github"
} |
<?php
/**
* This file is part of qaReview
*
* @version $Revision$
* @copyright Qafoo GmbH
*/
namespace Qafoo\Review;
use Qafoo\RMF;
try
{
require __DIR__ . '/../main/Qafoo/Review/bootstrap.php';
$dic = new DIC\Base();
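    // Accessing the property presumably triggers the DIC's lazy initialization of the database connection.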
$dic->mysqli;
$dispatcher = new RMF\Dispatcher\Simple(
new RMF\Router\Regexp( array(
'(^/$)' => array(
'GET' => array( $dic->reviewController, 'showOverview' ),
),
'(^/show/(?P<analyzer>[A-Za-z_]+))' => array(
'GET' => array( $dic->reviewController, 'showAnalyzer' ),
),
'(^/source/annotate$)' => array(
'POST' => array( $dic->sourceController, 'annotate' ),
),
'(^/source/?(?P<path>.*)$)' => array(
'GET' => array( $dic->reviewController, 'showSource' ),
),
'(^/(?:images|styles|scripts)/)' => array(
'GET' => array( $dic->assetController, 'show' ),
),
) ),
$dic->view
);
$request = new RMF\Request\HTTP();
$request->addHandler( 'body', new RMF\Request\PropertyHandler\PostBody() );
$request->addHandler( 'session', new RMF\Request\PropertyHandler\Session() );
$dispatcher->dispatch( $request );
}
catch ( \Exception $e )
{
header( 'Status: 500 Internal Server Error' );
echo "<h1>Internal Server Error</h1>\n";
echo "<pre>$e</pre>";
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2006-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Memory allocator with per-CPU caching, derived from the kmem magazine
* concept and implementation as described in the following paper:
* http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick.pdf
* That implementation is Copyright 2006 Sun Microsystems, Inc. All rights
* reserved. Use is subject to license terms.
*
* There are several major differences between this and the original kmem
* magazine: this derivative implementation allows for multiple objects to
* be allocated and freed from/to the object cache in one call; in addition,
* it provides for better flexibility where the user is allowed to define
* its own slab allocator (instead of the default zone allocator). Finally,
* no object construction/destruction takes place at the moment, although
* this could be added in future to improve efficiency.
*/
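
/*
 * Example usage (an illustrative sketch only, not compiled as part of
 * this file; the object size, element count and flags below are
 * arbitrary assumptions):
 */
#if 0
static void
mcache_example_usage(void)
{
	mcache_t *cp;
	mcache_obj_t *list;
	unsigned int n;

	/* 128-byte objects, default alignment, zone-backed slab layer */
	cp = mcache_create("example", 128, 0, 0, MCR_SLEEP);

	/* Allocate up to 4 objects in one call; they come back chained */
	n = mcache_alloc_ext(cp, &list, 4, MCR_SLEEP);

	/* ... use the n objects linked through obj_next ... */

	/* Free the whole chain back to the cache in a single call */
	mcache_free_ext(cp, list);
}
#endif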
#include <sys/param.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <kern/debug.h>
#include <kern/zalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/thread_call.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <mach/vm_param.h>
#include <machine/limits.h>
#include <machine/machine_routines.h>
#include <string.h>
#include <sys/mcache.h>
#define MCACHE_SIZE(n) \
__builtin_offsetof(mcache_t, mc_cpu[n])
/* Allocate extra in case we need to manually align the pointer */
#define MCACHE_ALLOC_SIZE \
(sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_LINE_SIZE)
#define MCACHE_CPU(c) \
(mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number())))
/*
* MCACHE_LIST_LOCK() and MCACHE_LIST_UNLOCK() are macros used
* to serialize accesses to the global list of caches in the system.
* They also record the thread currently running in the critical
* section, so that we can avoid recursive requests to reap the
* caches when memory runs low.
*/
#define MCACHE_LIST_LOCK() { \
lck_mtx_lock(mcache_llock); \
mcache_llock_owner = current_thread(); \
}
#define MCACHE_LIST_UNLOCK() { \
mcache_llock_owner = NULL; \
lck_mtx_unlock(mcache_llock); \
}
#define MCACHE_LOCK(l) lck_mtx_lock(l)
#define MCACHE_UNLOCK(l) lck_mtx_unlock(l)
#define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l)
static int ncpu;
static unsigned int cache_line_size;
static lck_mtx_t *mcache_llock;
static struct thread *mcache_llock_owner;
static lck_attr_t *mcache_llock_attr;
static lck_grp_t *mcache_llock_grp;
static lck_grp_attr_t *mcache_llock_grp_attr;
static struct zone *mcache_zone;
static const uint32_t mcache_reap_interval = 15;
static const uint32_t mcache_reap_interval_leeway = 2;
static UInt32 mcache_reaping;
static int mcache_ready;
static int mcache_updating;
static int mcache_bkt_contention = 3;
#if DEBUG
static unsigned int mcache_flags = MCF_DEBUG;
#else
static unsigned int mcache_flags = 0;
#endif
int mca_trn_max = MCA_TRN_MAX;
#define DUMP_MCA_BUF_SIZE 512
static char *mca_dump_buf;
static mcache_bkttype_t mcache_bkttype[] = {
{ 1, 4096, 32768, NULL },
{ 3, 2048, 16384, NULL },
{ 7, 1024, 12288, NULL },
{ 15, 256, 8192, NULL },
{ 31, 64, 4096, NULL },
{ 47, 0, 2048, NULL },
{ 63, 0, 1024, NULL },
{ 95, 0, 512, NULL },
{ 143, 0, 256, NULL },
{ 165, 0, 0, NULL },
};
static mcache_t *mcache_create_common(const char *, size_t, size_t,
mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
mcache_notifyfn_t, void *, u_int32_t, int, int);
static unsigned int mcache_slab_alloc(void *, mcache_obj_t ***,
unsigned int, int);
static void mcache_slab_free(void *, mcache_obj_t *, boolean_t);
static void mcache_slab_audit(void *, mcache_obj_t *, boolean_t);
static void mcache_cpu_refill(mcache_cpu_t *, mcache_bkt_t *, int);
static mcache_bkt_t *mcache_bkt_alloc(mcache_t *, mcache_bktlist_t *,
mcache_bkttype_t **);
static void mcache_bkt_free(mcache_t *, mcache_bktlist_t *, mcache_bkt_t *);
static void mcache_cache_bkt_enable(mcache_t *);
static void mcache_bkt_purge(mcache_t *);
static void mcache_bkt_destroy(mcache_t *, mcache_bkttype_t *,
mcache_bkt_t *, int);
static void mcache_bkt_ws_update(mcache_t *);
static void mcache_bkt_ws_zero(mcache_t *);
static void mcache_bkt_ws_reap(mcache_t *);
static void mcache_dispatch(void (*)(void *), void *);
static void mcache_cache_reap(mcache_t *);
static void mcache_cache_update(mcache_t *);
static void mcache_cache_bkt_resize(void *);
static void mcache_cache_enable(void *);
static void mcache_update(thread_call_param_t __unused, thread_call_param_t __unused);
static void mcache_update_timeout(void *);
static void mcache_applyall(void (*)(mcache_t *));
static void mcache_reap_start(void *);
static void mcache_reap_done(void *);
static void mcache_reap_timeout(thread_call_param_t __unused, thread_call_param_t);
static void mcache_notify(mcache_t *, u_int32_t);
static void mcache_purge(void *);
static LIST_HEAD(, mcache) mcache_head;
mcache_t *mcache_audit_cache;
static thread_call_t mcache_reap_tcall;
static thread_call_t mcache_update_tcall;
/*
* Initialize the framework; this is currently called as part of BSD init.
*/
__private_extern__ void
mcache_init(void)
{
mcache_bkttype_t *btp;
unsigned int i;
char name[32];
VERIFY(mca_trn_max >= 2);
ncpu = ml_get_max_cpus();
(void) mcache_cache_line_size(); /* prime it */
mcache_llock_grp_attr = lck_grp_attr_alloc_init();
mcache_llock_grp = lck_grp_alloc_init("mcache.list",
mcache_llock_grp_attr);
mcache_llock_attr = lck_attr_alloc_init();
mcache_llock = lck_mtx_alloc_init(mcache_llock_grp, mcache_llock_attr);
mcache_reap_tcall = thread_call_allocate(mcache_reap_timeout, NULL);
mcache_update_tcall = thread_call_allocate(mcache_update, NULL);
if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL)
panic("mcache_init: thread_call_allocate failed");
mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE,
PAGE_SIZE, "mcache");
if (mcache_zone == NULL)
panic("mcache_init: failed to allocate mcache zone\n");
zone_change(mcache_zone, Z_CALLERACCT, FALSE);
LIST_INIT(&mcache_head);
for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) {
btp = &mcache_bkttype[i];
(void) snprintf(name, sizeof (name), "bkt_%d",
btp->bt_bktsize);
btp->bt_cache = mcache_create(name,
(btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP);
}
PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof(mcache_flags));
mcache_flags &= MCF_FLAGS_MASK;
mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t),
0, 0, MCR_SLEEP);
mcache_applyall(mcache_cache_bkt_enable);
mcache_ready = 1;
printf("mcache: %d CPU(s), %d bytes CPU cache line size\n",
ncpu, CPU_CACHE_LINE_SIZE);
}
/*
* Return the global mcache flags.
*/
__private_extern__ unsigned int
mcache_getflags(void)
{
return (mcache_flags);
}
/*
* Return the CPU cache line size.
*/
__private_extern__ unsigned int
mcache_cache_line_size(void)
{
if (cache_line_size == 0) {
ml_cpu_info_t cpu_info;
ml_cpu_get_info(&cpu_info);
cache_line_size = cpu_info.cache_line_size;
}
return (cache_line_size);
}
/*
* Create a cache using the zone allocator as the backend slab allocator.
* The caller may specify any alignment for the object; if it specifies 0
* the default alignment (MCACHE_ALIGN) will be used.
*/
__private_extern__ mcache_t *
mcache_create(const char *name, size_t bufsize, size_t align,
u_int32_t flags, int wait)
{
return (mcache_create_common(name, bufsize, align, mcache_slab_alloc,
mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1,
wait));
}
/*
* Create a cache using a custom backend slab allocator. Since the caller
* is responsible for allocation, no alignment guarantee will be provided
* by this framework.
*/
__private_extern__ mcache_t *
mcache_create_ext(const char *name, size_t bufsize,
mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
u_int32_t flags, int wait)
{
return (mcache_create_common(name, bufsize, 0, allocfn,
freefn, auditfn, logfn, notifyfn, arg, flags, 0, wait));
}
/*
* Common cache creation routine.
*/
static mcache_t *
mcache_create_common(const char *name, size_t bufsize, size_t align,
mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
u_int32_t flags, int need_zone, int wait)
{
mcache_bkttype_t *btp;
mcache_t *cp = NULL;
size_t chunksize;
void *buf, **pbuf;
int c;
char lck_name[64];
/* If auditing is on and print buffer is NULL, allocate it now */
if ((flags & MCF_DEBUG) && mca_dump_buf == NULL) {
int malloc_wait = (wait & MCR_NOSLEEP) ? M_NOWAIT : M_WAITOK;
MALLOC(mca_dump_buf, char *, DUMP_MCA_BUF_SIZE, M_TEMP,
malloc_wait | M_ZERO);
if (mca_dump_buf == NULL)
return (NULL);
}
buf = zalloc(mcache_zone);
if (buf == NULL)
goto fail;
bzero(buf, MCACHE_ALLOC_SIZE);
/*
* In case we didn't get a cache-aligned memory, round it up
* accordingly. This is needed in order to get the rest of
* structure members aligned properly. It also means that
* the memory span gets shifted due to the round up, but it
* is okay since we've allocated extra space for this.
*/
cp = (mcache_t *)
P2ROUNDUP((intptr_t)buf + sizeof (void *), CPU_CACHE_LINE_SIZE);
pbuf = (void **)((intptr_t)cp - sizeof (void *));
*pbuf = buf;
/*
* Guaranteed alignment is valid only when we use the internal
* slab allocator (currently set to use the zone allocator).
*/
if (!need_zone) {
align = 1;
} else {
/* Enforce 64-bit minimum alignment for zone-based buffers */
if (align == 0)
align = MCACHE_ALIGN;
align = P2ROUNDUP(align, MCACHE_ALIGN);
}
if ((align & (align - 1)) != 0)
panic("mcache_create: bad alignment %lu", align);
cp->mc_align = align;
cp->mc_slab_alloc = allocfn;
cp->mc_slab_free = freefn;
cp->mc_slab_audit = auditfn;
cp->mc_slab_log = logfn;
cp->mc_slab_notify = notifyfn;
cp->mc_private = need_zone ? cp : arg;
cp->mc_bufsize = bufsize;
cp->mc_flags = (flags & MCF_FLAGS_MASK) | mcache_flags;
(void) snprintf(cp->mc_name, sizeof (cp->mc_name), "mcache.%s", name);
(void) snprintf(lck_name, sizeof (lck_name), "%s.cpu", cp->mc_name);
cp->mc_cpu_lock_grp_attr = lck_grp_attr_alloc_init();
cp->mc_cpu_lock_grp = lck_grp_alloc_init(lck_name,
cp->mc_cpu_lock_grp_attr);
cp->mc_cpu_lock_attr = lck_attr_alloc_init();
/*
* Allocation chunk size is the object's size plus any extra size
* needed to satisfy the object's alignment. It is enforced to be
* at least the size of an LP64 pointer to simplify auditing and to
* handle multiple-element allocation requests, where the elements
* returned are linked together in a list.
*/
chunksize = MAX(bufsize, sizeof (u_int64_t));
if (need_zone) {
VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0);
chunksize += sizeof (uint64_t) + align;
chunksize = P2ROUNDUP(chunksize, align);
if ((cp->mc_slab_zone = zinit(chunksize, 64 * 1024 * ncpu,
PAGE_SIZE, cp->mc_name)) == NULL)
goto fail;
zone_change(cp->mc_slab_zone, Z_EXPAND, TRUE);
}
cp->mc_chunksize = chunksize;
/*
* Initialize the bucket layer.
*/
(void) snprintf(lck_name, sizeof (lck_name), "%s.bkt", cp->mc_name);
cp->mc_bkt_lock_grp_attr = lck_grp_attr_alloc_init();
cp->mc_bkt_lock_grp = lck_grp_alloc_init(lck_name,
cp->mc_bkt_lock_grp_attr);
cp->mc_bkt_lock_attr = lck_attr_alloc_init();
lck_mtx_init(&cp->mc_bkt_lock, cp->mc_bkt_lock_grp,
cp->mc_bkt_lock_attr);
(void) snprintf(lck_name, sizeof (lck_name), "%s.sync", cp->mc_name);
cp->mc_sync_lock_grp_attr = lck_grp_attr_alloc_init();
cp->mc_sync_lock_grp = lck_grp_alloc_init(lck_name,
cp->mc_sync_lock_grp_attr);
cp->mc_sync_lock_attr = lck_attr_alloc_init();
lck_mtx_init(&cp->mc_sync_lock, cp->mc_sync_lock_grp,
cp->mc_sync_lock_attr);
for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++)
continue;
cp->cache_bkttype = btp;
/*
* Initialize the CPU layer. Each per-CPU structure is aligned
* on the CPU cache line boundary to prevent false sharing.
*/
for (c = 0; c < ncpu; c++) {
mcache_cpu_t *ccp = &cp->mc_cpu[c];
VERIFY(IS_P2ALIGNED(ccp, CPU_CACHE_LINE_SIZE));
lck_mtx_init(&ccp->cc_lock, cp->mc_cpu_lock_grp,
cp->mc_cpu_lock_attr);
ccp->cc_objs = -1;
ccp->cc_pobjs = -1;
}
if (mcache_ready)
mcache_cache_bkt_enable(cp);
/* TODO: dynamically create sysctl for stats */
MCACHE_LIST_LOCK();
LIST_INSERT_HEAD(&mcache_head, cp, mc_list);
MCACHE_LIST_UNLOCK();
/*
* If cache buckets are enabled and this is the first cache
* created, start the periodic cache update.
*/
if (!(mcache_flags & MCF_NOCPUCACHE) && !mcache_updating) {
mcache_updating = 1;
mcache_update_timeout(NULL);
}
if (cp->mc_flags & MCF_DEBUG) {
printf("mcache_create: %s (%s) arg %p bufsize %lu align %lu "
"chunksize %lu bktsize %d\n", name, need_zone ? "i" : "e",
arg, bufsize, cp->mc_align, chunksize, btp->bt_bktsize);
}
return (cp);
fail:
if (buf != NULL)
zfree(mcache_zone, buf);
return (NULL);
}
/*
* Allocate one or more objects from a cache.
*/
__private_extern__ unsigned int
mcache_alloc_ext(mcache_t *cp, mcache_obj_t **list, unsigned int num, int wait)
{
mcache_cpu_t *ccp;
mcache_obj_t **top = &(*list);
mcache_bkt_t *bkt;
unsigned int need = num;
boolean_t nwretry = FALSE;
/* MCR_NOSLEEP and MCR_FAILOK are mutually exclusive */
VERIFY((wait & (MCR_NOSLEEP|MCR_FAILOK)) != (MCR_NOSLEEP|MCR_FAILOK));
ASSERT(list != NULL);
*list = NULL;
if (num == 0)
return (0);
retry_alloc:
/* We may not always be running in the same CPU in case of retries */
ccp = MCACHE_CPU(cp);
MCACHE_LOCK(&ccp->cc_lock);
for (;;) {
/*
* If we have an object in the current CPU's filled bucket,
* chain the object to any previous objects and return if
* we've satisfied the number of requested objects.
*/
if (ccp->cc_objs > 0) {
mcache_obj_t *tail;
int objs;
/*
* Objects in the bucket are already linked together
* with the most recently freed object at the head of
* the list; grab as many objects as we can.
*/
objs = MIN((unsigned int)ccp->cc_objs, need);
*list = ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
ccp->cc_objs -= objs;
ccp->cc_alloc += objs;
tail = ccp->cc_filled->bkt_obj[ccp->cc_objs];
list = &tail->obj_next;
*list = NULL;
/* If we got them all, return to caller */
if ((need -= objs) == 0) {
MCACHE_UNLOCK(&ccp->cc_lock);
if (!(cp->mc_flags & MCF_NOLEAKLOG) &&
cp->mc_slab_log != NULL)
(*cp->mc_slab_log)(num, *top, TRUE);
if (cp->mc_flags & MCF_DEBUG)
goto debug_alloc;
return (num);
}
}
/*
* The CPU's filled bucket is empty. If the previous filled
* bucket was full, exchange and try again.
*/
if (ccp->cc_pobjs > 0) {
mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
continue;
}
/*
* If the bucket layer is disabled, allocate from slab. This
* can happen either because MCF_NOCPUCACHE is set, or because
* the bucket layer is currently being resized.
*/
if (ccp->cc_bktsize == 0)
break;
/*
* Both of the CPU's buckets are empty; try to get a full
* bucket from the bucket layer. Upon success, refill this
* CPU and place any empty bucket into the empty list.
*/
bkt = mcache_bkt_alloc(cp, &cp->mc_full, NULL);
if (bkt != NULL) {
if (ccp->cc_pfilled != NULL)
mcache_bkt_free(cp, &cp->mc_empty,
ccp->cc_pfilled);
mcache_cpu_refill(ccp, bkt, ccp->cc_bktsize);
continue;
}
/*
* The bucket layer has no full buckets; allocate the
* object(s) directly from the slab layer.
*/
break;
}
MCACHE_UNLOCK(&ccp->cc_lock);
need -= (*cp->mc_slab_alloc)(cp->mc_private, &list, need, wait);
/*
* If this is a blocking allocation, or if it is non-blocking and
* the cache's full bucket is non-empty, then retry the allocation.
*/
if (need > 0) {
if (!(wait & MCR_NONBLOCKING)) {
atomic_add_32(&cp->mc_wretry_cnt, 1);
goto retry_alloc;
} else if ((wait & (MCR_NOSLEEP | MCR_TRYHARD)) &&
!mcache_bkt_isempty(cp)) {
if (!nwretry)
nwretry = TRUE;
atomic_add_32(&cp->mc_nwretry_cnt, 1);
goto retry_alloc;
} else if (nwretry) {
atomic_add_32(&cp->mc_nwfail_cnt, 1);
}
}
if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL)
(*cp->mc_slab_log)((num - need), *top, TRUE);
if (!(cp->mc_flags & MCF_DEBUG))
return (num - need);
debug_alloc:
if (cp->mc_flags & MCF_DEBUG) {
mcache_obj_t **o = top;
unsigned int n;
n = 0;
/*
* Verify that the chain of objects has the same count as
* what we are about to report to the caller. Any mismatch
* here means that the object list is insanely broken and
* therefore we must panic.
*/
while (*o != NULL) {
o = &(*o)->obj_next;
++n;
}
if (n != (num - need)) {
panic("mcache_alloc_ext: %s cp %p corrupted list "
"(got %d actual %d)\n", cp->mc_name,
(void *)cp, num - need, n);
}
}
/* Invoke the slab layer audit callback if auditing is enabled */
if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL)
(*cp->mc_slab_audit)(cp->mc_private, *top, TRUE);
return (num - need);
}
/*
* Allocate a single object from a cache.
*/
__private_extern__ void *
mcache_alloc(mcache_t *cp, int wait)
{
mcache_obj_t *buf;
(void) mcache_alloc_ext(cp, &buf, 1, wait);
return (buf);
}
__private_extern__ void
mcache_waiter_inc(mcache_t *cp)
{
atomic_add_32(&cp->mc_waiter_cnt, 1);
}
__private_extern__ void
mcache_waiter_dec(mcache_t *cp)
{
atomic_add_32(&cp->mc_waiter_cnt, -1);
}
__private_extern__ boolean_t
mcache_bkt_isempty(mcache_t *cp)
{
/*
* This isn't meant to accurately tell whether there are
* any full buckets in the cache; it is simply a way to
* obtain "hints" about the state of the cache.
*/
return (cp->mc_full.bl_total == 0);
}
/*
* Notify the slab layer about an event.
*/
static void
mcache_notify(mcache_t *cp, u_int32_t event)
{
if (cp->mc_slab_notify != NULL)
(*cp->mc_slab_notify)(cp->mc_private, event);
}
/*
* Purge the cache and disable its buckets.
*/
static void
mcache_purge(void *arg)
{
mcache_t *cp = arg;
mcache_bkt_purge(cp);
/*
* We cannot simply call mcache_cache_bkt_enable() from here as
* a bucket resize may be in flight and we would cause the CPU
* layers of the cache to point to different sizes. Therefore,
* we simply increment the enable count so that during the next
* periodic cache update the buckets can be reenabled.
*/
lck_mtx_lock_spin(&cp->mc_sync_lock);
cp->mc_enable_cnt++;
lck_mtx_unlock(&cp->mc_sync_lock);
}
__private_extern__ boolean_t
mcache_purge_cache(mcache_t *cp, boolean_t async)
{
/*
* Purging a cache that has no per-CPU caches or is already
* in the process of being purged is rather pointless.
*/
if (cp->mc_flags & MCF_NOCPUCACHE)
return (FALSE);
lck_mtx_lock_spin(&cp->mc_sync_lock);
if (cp->mc_purge_cnt > 0) {
lck_mtx_unlock(&cp->mc_sync_lock);
return (FALSE);
}
cp->mc_purge_cnt++;
lck_mtx_unlock(&cp->mc_sync_lock);
if (async)
mcache_dispatch(mcache_purge, cp);
else
mcache_purge(cp);
return (TRUE);
}
/*
* Free a single object to a cache.
*/
__private_extern__ void
mcache_free(mcache_t *cp, void *buf)
{
((mcache_obj_t *)buf)->obj_next = NULL;
mcache_free_ext(cp, (mcache_obj_t *)buf);
}
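/*
 * Illustrative usage sketch for the cache API in this file; not part of
 * the implementation. The cache name, buffer size and alignment below
 * are hypothetical, but the create/alloc/free/destroy sequence is the
 * one the routines above implement.
 *
 * mcache_t *mc;
 * void *obj;
 *
 * mc = mcache_create("example.cache", 128, sizeof (u_int64_t),
 *     0, MCR_SLEEP);
 * if (mc != NULL) {
 *     obj = mcache_alloc(mc, MCR_SLEEP);
 *     if (obj != NULL)
 *         mcache_free(mc, obj);
 *     mcache_destroy(mc);
 * }
 */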
/*
* Free one or more objects to a cache.
*/
__private_extern__ void
mcache_free_ext(mcache_t *cp, mcache_obj_t *list)
{
mcache_cpu_t *ccp = MCACHE_CPU(cp);
mcache_bkttype_t *btp;
mcache_obj_t *nlist;
mcache_bkt_t *bkt;
if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL)
(*cp->mc_slab_log)(0, list, FALSE);
/* Invoke the slab layer audit callback if auditing is enabled */
if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL)
(*cp->mc_slab_audit)(cp->mc_private, list, FALSE);
MCACHE_LOCK(&ccp->cc_lock);
for (;;) {
/*
* If there is space in the current CPU's filled bucket, put
* the object there and return once all objects are freed.
* Note the cast to unsigned integer takes care of the case
* where the bucket layer is disabled (when cc_objs is -1).
*/
if ((unsigned int)ccp->cc_objs <
(unsigned int)ccp->cc_bktsize) {
/*
* Reverse the list while we place the object into the
* bucket; this effectively causes the most recently
* freed object(s) to be reused during allocation.
*/
nlist = list->obj_next;
list->obj_next = (ccp->cc_objs == 0) ? NULL :
ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
ccp->cc_filled->bkt_obj[ccp->cc_objs++] = list;
ccp->cc_free++;
if ((list = nlist) != NULL)
continue;
/* We are done; return to caller */
MCACHE_UNLOCK(&ccp->cc_lock);
/* If there is a waiter below, notify it */
if (cp->mc_waiter_cnt > 0)
mcache_notify(cp, MCN_RETRYALLOC);
return;
}
/*
* The CPU's filled bucket is full. If the previous filled
* bucket was empty, exchange and try again.
*/
if (ccp->cc_pobjs == 0) {
mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
continue;
}
/*
* If the bucket layer is disabled, free to slab. This can
* happen either because MCF_NOCPUCACHE is set, or because
* the bucket layer is currently being resized.
*/
if (ccp->cc_bktsize == 0)
break;
/*
* Both of the CPU's buckets are full; try to get an empty
* bucket from the bucket layer. Upon success, empty this
* CPU and place any full bucket into the full list.
*/
bkt = mcache_bkt_alloc(cp, &cp->mc_empty, &btp);
if (bkt != NULL) {
if (ccp->cc_pfilled != NULL)
mcache_bkt_free(cp, &cp->mc_full,
ccp->cc_pfilled);
mcache_cpu_refill(ccp, bkt, 0);
continue;
}
/*
* We need an empty bucket to put our freed objects into
* but couldn't get an empty bucket from the bucket layer;
* attempt to allocate one. We do not want to block for
* allocation here, and if the bucket allocation fails
* we will simply fall through to the slab layer.
*/
MCACHE_UNLOCK(&ccp->cc_lock);
bkt = mcache_alloc(btp->bt_cache, MCR_NOSLEEP);
MCACHE_LOCK(&ccp->cc_lock);
if (bkt != NULL) {
/*
* We have an empty bucket, but since we drop the
* CPU lock above, the cache's bucket size may have
* changed. If so, free the bucket and try again.
*/
if (ccp->cc_bktsize != btp->bt_bktsize) {
MCACHE_UNLOCK(&ccp->cc_lock);
mcache_free(btp->bt_cache, bkt);
MCACHE_LOCK(&ccp->cc_lock);
continue;
}
/*
* We have an empty bucket of the right size;
* add it to the bucket layer and try again.
*/
mcache_bkt_free(cp, &cp->mc_empty, bkt);
continue;
}
/*
* The bucket layer has no empty buckets; free the
* object(s) directly to the slab layer.
*/
break;
}
MCACHE_UNLOCK(&ccp->cc_lock);
/* If there is a waiter below, notify it */
if (cp->mc_waiter_cnt > 0)
mcache_notify(cp, MCN_RETRYALLOC);
/* Advise the slab layer to purge the object(s) */
(*cp->mc_slab_free)(cp->mc_private, list,
(cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
}
/*
* Cache destruction routine.
*/
__private_extern__ void
mcache_destroy(mcache_t *cp)
{
void **pbuf;
MCACHE_LIST_LOCK();
LIST_REMOVE(cp, mc_list);
MCACHE_LIST_UNLOCK();
mcache_bkt_purge(cp);
/*
* This cache is dead; there should be no further transactions.
* If it's still invoked, make sure that it induces a fault.
*/
cp->mc_slab_alloc = NULL;
cp->mc_slab_free = NULL;
cp->mc_slab_audit = NULL;
lck_attr_free(cp->mc_bkt_lock_attr);
lck_grp_free(cp->mc_bkt_lock_grp);
lck_grp_attr_free(cp->mc_bkt_lock_grp_attr);
lck_attr_free(cp->mc_cpu_lock_attr);
lck_grp_free(cp->mc_cpu_lock_grp);
lck_grp_attr_free(cp->mc_cpu_lock_grp_attr);
lck_attr_free(cp->mc_sync_lock_attr);
lck_grp_free(cp->mc_sync_lock_grp);
lck_grp_attr_free(cp->mc_sync_lock_grp_attr);
/*
* TODO: We need to destroy the zone here, but cannot do so
* because the zone allocator provides no way to achieve that.
* Until then the memory allocated for the zone structure is
* leaked. Once zone destruction becomes available, uncomment
* these lines:
*
* if (cp->mc_slab_zone != NULL) {
* zdestroy(cp->mc_slab_zone);
* cp->mc_slab_zone = NULL;
* }
*/
/* Get the original address since we're about to free it */
pbuf = (void **)((intptr_t)cp - sizeof (void *));
zfree(mcache_zone, *pbuf);
}
/*
* Internal slab allocator used as a backend for simple caches. The current
* implementation uses the zone allocator for simplicity.
*/
static unsigned int
mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
int wait)
{
#pragma unused(wait)
mcache_t *cp = arg;
unsigned int need = num;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
u_int32_t flags = cp->mc_flags;
void *buf, *base, **pbuf;
mcache_obj_t **list = *plist;
*list = NULL;
for (;;) {
buf = zalloc(cp->mc_slab_zone);
if (buf == NULL)
break;
/* Get the aligned base address for this object */
base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t),
cp->mc_align);
/*
* Wind back a pointer size from the aligned base and
* save the original address so we can free it later.
*/
pbuf = (void **)((intptr_t)base - sizeof (void *));
*pbuf = buf;
VERIFY(((intptr_t)base + cp->mc_bufsize) <=
((intptr_t)buf + cp->mc_chunksize));
/*
* If auditing is enabled, patternize the contents of
* the buffer starting from the 64-bit aligned base to
* the end of the buffer; the length is rounded up to
* the nearest 64-bit multiple; this is because we use
* 64-bit memory access to set/check the pattern.
*/
if (flags & MCF_DEBUG) {
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)buf + cp->mc_chunksize));
mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
}
VERIFY(IS_P2ALIGNED(base, cp->mc_align));
*list = (mcache_obj_t *)base;
(*list)->obj_next = NULL;
list = *plist = &(*list)->obj_next;
/* If we got them all, return to mcache */
if (--need == 0)
break;
}
return (num - need);
}
/*
* Internal slab deallocator used as a backend for simple caches.
*/
static void
mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged)
{
mcache_t *cp = arg;
mcache_obj_t *nlist;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
u_int32_t flags = cp->mc_flags;
void *base;
void **pbuf;
for (;;) {
nlist = list->obj_next;
list->obj_next = NULL;
base = list;
VERIFY(IS_P2ALIGNED(base, cp->mc_align));
/* Get the original address since we're about to free it */
pbuf = (void **)((intptr_t)base - sizeof (void *));
VERIFY(((intptr_t)base + cp->mc_bufsize) <=
((intptr_t)*pbuf + cp->mc_chunksize));
if (flags & MCF_DEBUG) {
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)*pbuf + cp->mc_chunksize));
mcache_audit_free_verify(NULL, base, 0, rsize);
}
/* Free it to zone */
zfree(cp->mc_slab_zone, *pbuf);
/* No more objects to free; return to mcache */
if ((list = nlist) == NULL)
break;
}
}
/*
* Internal slab auditor for simple caches.
*/
static void
mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
mcache_t *cp = arg;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
void *base, **pbuf;
while (list != NULL) {
mcache_obj_t *next = list->obj_next;
base = list;
VERIFY(IS_P2ALIGNED(base, cp->mc_align));
/* Get the original address */
pbuf = (void **)((intptr_t)base - sizeof (void *));
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)*pbuf + cp->mc_chunksize));
if (!alloc)
mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
else
mcache_audit_free_verify_set(NULL, base, 0, rsize);
list = list->obj_next = next;
}
}
/*
* Refill the CPU's filled bucket with bkt and save the previous one.
*/
static void
mcache_cpu_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
{
ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
(ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
ASSERT(ccp->cc_bktsize > 0);
ccp->cc_pfilled = ccp->cc_filled;
ccp->cc_pobjs = ccp->cc_objs;
ccp->cc_filled = bkt;
ccp->cc_objs = objs;
}
/*
* Allocate a bucket from the bucket layer.
*/
static mcache_bkt_t *
mcache_bkt_alloc(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkttype_t **btp)
{
mcache_bkt_t *bkt;
if (!MCACHE_LOCK_TRY(&cp->mc_bkt_lock)) {
/*
* The bucket layer lock is held by another CPU; increase
* the contention count so that we can later resize the
* bucket size accordingly.
*/
MCACHE_LOCK(&cp->mc_bkt_lock);
cp->mc_bkt_contention++;
}
if ((bkt = blp->bl_list) != NULL) {
blp->bl_list = bkt->bkt_next;
if (--blp->bl_total < blp->bl_min)
blp->bl_min = blp->bl_total;
blp->bl_alloc++;
}
if (btp != NULL)
*btp = cp->cache_bkttype;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
return (bkt);
}
/*
* Free a bucket to the bucket layer.
*/
static void
mcache_bkt_free(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t *bkt)
{
MCACHE_LOCK(&cp->mc_bkt_lock);
bkt->bkt_next = blp->bl_list;
blp->bl_list = bkt;
blp->bl_total++;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
/*
* Enable the bucket layer of a cache.
*/
static void
mcache_cache_bkt_enable(mcache_t *cp)
{
mcache_cpu_t *ccp;
int cpu;
if (cp->mc_flags & MCF_NOCPUCACHE)
return;
for (cpu = 0; cpu < ncpu; cpu++) {
ccp = &cp->mc_cpu[cpu];
MCACHE_LOCK(&ccp->cc_lock);
ccp->cc_bktsize = cp->cache_bkttype->bt_bktsize;
MCACHE_UNLOCK(&ccp->cc_lock);
}
}
/*
* Purge all buckets from a cache and disable its bucket layer.
*/
static void
mcache_bkt_purge(mcache_t *cp)
{
mcache_cpu_t *ccp;
mcache_bkt_t *bp, *pbp;
mcache_bkttype_t *btp;
int cpu, objs, pobjs;
for (cpu = 0; cpu < ncpu; cpu++) {
ccp = &cp->mc_cpu[cpu];
MCACHE_LOCK(&ccp->cc_lock);
btp = cp->cache_bkttype;
bp = ccp->cc_filled;
pbp = ccp->cc_pfilled;
objs = ccp->cc_objs;
pobjs = ccp->cc_pobjs;
ccp->cc_filled = NULL;
ccp->cc_pfilled = NULL;
ccp->cc_objs = -1;
ccp->cc_pobjs = -1;
ccp->cc_bktsize = 0;
MCACHE_UNLOCK(&ccp->cc_lock);
if (bp != NULL)
mcache_bkt_destroy(cp, btp, bp, objs);
if (pbp != NULL)
mcache_bkt_destroy(cp, btp, pbp, pobjs);
}
mcache_bkt_ws_zero(cp);
mcache_bkt_ws_reap(cp);
}
/*
* Free one or more objects in the bucket to the slab layer,
* and also free the bucket itself.
*/
static void
mcache_bkt_destroy(mcache_t *cp, mcache_bkttype_t *btp, mcache_bkt_t *bkt,
int nobjs)
{
if (nobjs > 0) {
mcache_obj_t *top = bkt->bkt_obj[nobjs - 1];
if (cp->mc_flags & MCF_DEBUG) {
mcache_obj_t *o = top;
int cnt = 0;
/*
* Verify that the chain of objects in the bucket is
* valid. Any mismatch here means a mistake when the
* object(s) were freed to the CPU layer, so we panic.
*/
while (o != NULL) {
o = o->obj_next;
++cnt;
}
if (cnt != nobjs) {
panic("mcache_bkt_destroy: %s cp %p corrupted "
"list in bkt %p (nobjs %d actual %d)\n",
cp->mc_name, (void *)cp, (void *)bkt,
nobjs, cnt);
}
}
/* Advise the slab layer to purge the object(s) */
(*cp->mc_slab_free)(cp->mc_private, top,
(cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
}
mcache_free(btp->bt_cache, bkt);
}
/*
* Update the bucket layer working set statistics.
*/
static void
mcache_bkt_ws_update(mcache_t *cp)
{
MCACHE_LOCK(&cp->mc_bkt_lock);
cp->mc_full.bl_reaplimit = cp->mc_full.bl_min;
cp->mc_full.bl_min = cp->mc_full.bl_total;
cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_min;
cp->mc_empty.bl_min = cp->mc_empty.bl_total;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
/*
* Mark everything as eligible for reaping (working set is zero).
*/
static void
mcache_bkt_ws_zero(mcache_t *cp)
{
MCACHE_LOCK(&cp->mc_bkt_lock);
cp->mc_full.bl_reaplimit = cp->mc_full.bl_total;
cp->mc_full.bl_min = cp->mc_full.bl_total;
cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_total;
cp->mc_empty.bl_min = cp->mc_empty.bl_total;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
/*
* Reap all buckets that are beyond the working set.
*/
static void
mcache_bkt_ws_reap(mcache_t *cp)
{
long reap;
mcache_bkt_t *bkt;
mcache_bkttype_t *btp;
reap = MIN(cp->mc_full.bl_reaplimit, cp->mc_full.bl_min);
while (reap-- &&
(bkt = mcache_bkt_alloc(cp, &cp->mc_full, &btp)) != NULL)
mcache_bkt_destroy(cp, btp, bkt, btp->bt_bktsize);
reap = MIN(cp->mc_empty.bl_reaplimit, cp->mc_empty.bl_min);
while (reap-- &&
(bkt = mcache_bkt_alloc(cp, &cp->mc_empty, &btp)) != NULL)
mcache_bkt_destroy(cp, btp, bkt, 0);
}
static void
mcache_reap_timeout(thread_call_param_t dummy __unused,
thread_call_param_t arg)
{
volatile UInt32 *flag = arg;
ASSERT(flag == &mcache_reaping);
*flag = 0;
}
static void
mcache_reap_done(void *flag)
{
uint64_t deadline, leeway;
clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
&deadline);
clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
NSEC_PER_SEC, &leeway);
thread_call_enter_delayed_with_leeway(mcache_reap_tcall, flag,
deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
static void
mcache_reap_start(void *arg)
{
UInt32 *flag = arg;
ASSERT(flag == &mcache_reaping);
mcache_applyall(mcache_cache_reap);
mcache_dispatch(mcache_reap_done, flag);
}
__private_extern__ void
mcache_reap(void)
{
UInt32 *flag = &mcache_reaping;
if (mcache_llock_owner == current_thread() ||
!OSCompareAndSwap(0, 1, flag))
return;
mcache_dispatch(mcache_reap_start, flag);
}
__private_extern__ void
mcache_reap_now(mcache_t *cp, boolean_t purge)
{
if (purge) {
mcache_bkt_purge(cp);
mcache_cache_bkt_enable(cp);
} else {
mcache_bkt_ws_zero(cp);
mcache_bkt_ws_reap(cp);
}
}
static void
mcache_cache_reap(mcache_t *cp)
{
mcache_bkt_ws_reap(cp);
}
/*
* Performs periodic maintenance on a cache.
*/
static void
mcache_cache_update(mcache_t *cp)
{
int need_bkt_resize = 0;
int need_bkt_reenable = 0;
lck_mtx_assert(mcache_llock, LCK_MTX_ASSERT_OWNED);
mcache_bkt_ws_update(cp);
/*
* Cache resize and post-purge reenable are mutually exclusive.
* If the cache was previously purged, there is no point in
* increasing the bucket size as there was an indication of
* memory pressure on the system.
*/
lck_mtx_lock_spin(&cp->mc_sync_lock);
if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt)
need_bkt_reenable = 1;
lck_mtx_unlock(&cp->mc_sync_lock);
MCACHE_LOCK(&cp->mc_bkt_lock);
/*
* If the contention count is greater than the threshold, and if
* we are not already at the maximum bucket size, increase it.
* Otherwise, if this cache was previously purged by the user
* then we simply reenable it.
*/
if ((unsigned int)cp->mc_chunksize < cp->cache_bkttype->bt_maxbuf &&
(int)(cp->mc_bkt_contention - cp->mc_bkt_contention_prev) >
mcache_bkt_contention && !need_bkt_reenable)
need_bkt_resize = 1;
cp->mc_bkt_contention_prev = cp->mc_bkt_contention;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
if (need_bkt_resize)
mcache_dispatch(mcache_cache_bkt_resize, cp);
else if (need_bkt_reenable)
mcache_dispatch(mcache_cache_enable, cp);
}
/*
* Recompute a cache's bucket size. This is an expensive operation
* and should not be done frequently; larger buckets provide for a
* higher transfer rate with the bucket while smaller buckets reduce
* the memory consumption.
*/
static void
mcache_cache_bkt_resize(void *arg)
{
mcache_t *cp = arg;
mcache_bkttype_t *btp = cp->cache_bkttype;
if ((unsigned int)cp->mc_chunksize < btp->bt_maxbuf) {
mcache_bkt_purge(cp);
/*
* Upgrade to the next bucket type with larger bucket size;
* temporarily set the previous contention snapshot to a
* negative number to prevent unnecessary resize requests.
*/
MCACHE_LOCK(&cp->mc_bkt_lock);
cp->cache_bkttype = ++btp;
cp->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX;
MCACHE_UNLOCK(&cp->mc_bkt_lock);
mcache_cache_enable(cp);
}
}
/*
* Reenable a previously disabled cache due to purge.
*/
static void
mcache_cache_enable(void *arg)
{
mcache_t *cp = arg;
lck_mtx_lock_spin(&cp->mc_sync_lock);
cp->mc_purge_cnt = 0;
cp->mc_enable_cnt = 0;
lck_mtx_unlock(&cp->mc_sync_lock);
mcache_cache_bkt_enable(cp);
}
static void
mcache_update_timeout(__unused void *arg)
{
uint64_t deadline, leeway;
clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
&deadline);
clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
NSEC_PER_SEC, &leeway);
thread_call_enter_delayed_with_leeway(mcache_update_tcall, NULL,
deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
static void
mcache_update(thread_call_param_t arg __unused,
thread_call_param_t dummy __unused)
{
mcache_applyall(mcache_cache_update);
mcache_update_timeout(NULL);
}
static void
mcache_applyall(void (*func)(mcache_t *))
{
mcache_t *cp;
MCACHE_LIST_LOCK();
LIST_FOREACH(cp, &mcache_head, mc_list) {
func(cp);
}
MCACHE_LIST_UNLOCK();
}
static void
mcache_dispatch(void (*func)(void *), void *arg)
{
ASSERT(func != NULL);
timeout(func, arg, hz/1000);
}
__private_extern__ void
mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp,
struct timeval *base_ts)
{
struct timeval now, base = { 0, 0 };
void *stack[MCACHE_STACK_DEPTH + 1];
struct mca_trn *transaction;
transaction = &mca->mca_trns[mca->mca_next_trn];
mca->mca_addr = addr;
mca->mca_cache = cp;
transaction->mca_thread = current_thread();
bzero(stack, sizeof (stack));
transaction->mca_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
bcopy(&stack[1], transaction->mca_stack,
sizeof (transaction->mca_stack));
microuptime(&now);
if (base_ts != NULL)
base = *base_ts;
/* tstamp is in ms relative to base_ts */
transaction->mca_tstamp = ((now.tv_usec - base.tv_usec) / 1000);
if ((now.tv_sec - base.tv_sec) > 0)
transaction->mca_tstamp += ((now.tv_sec - base.tv_sec) * 1000);
mca->mca_next_trn =
(mca->mca_next_trn + 1) % mca_trn_max;
}
__private_extern__ void
mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size)
{
u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf = (u_int64_t *)buf_arg;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t)));
while (buf < buf_end)
*buf++ = pattern;
}
__private_extern__ void *
mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size)
{
u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t)));
for (buf = buf_arg; buf < buf_end; buf++) {
if (*buf != pattern)
return (buf);
}
return (NULL);
}
__private_extern__ void *
mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg,
size_t size)
{
u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t)));
for (buf = buf_arg; buf < buf_end; buf++) {
if (*buf != old) {
mcache_set_pattern(old, buf_arg,
(uintptr_t)buf - (uintptr_t)buf_arg);
return (buf);
}
*buf = new;
}
return (NULL);
}
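/*
 * Illustrative sketch of how the pattern helpers above compose; the
 * 64-byte buffer is hypothetical. mcache_verify_pattern() returns NULL
 * when every word still carries the expected pattern.
 *
 * u_int64_t pbuf[8];
 *
 * mcache_set_pattern(MCACHE_FREE_PATTERN, pbuf, sizeof (pbuf));
 * VERIFY(mcache_verify_pattern(MCACHE_FREE_PATTERN, pbuf,
 *     sizeof (pbuf)) == NULL);
 */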
__private_extern__ void
mcache_audit_free_verify(mcache_audit_t *mca, void *base, size_t offset,
size_t size)
{
void *addr;
u_int64_t *oaddr64;
mcache_obj_t *next;
addr = (void *)((uintptr_t)base + offset);
next = ((mcache_obj_t *)addr)->obj_next;
/* For the "obj_next" pointer in the buffer */
oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof (u_int64_t));
*oaddr64 = MCACHE_FREE_PATTERN;
if ((oaddr64 = mcache_verify_pattern(MCACHE_FREE_PATTERN,
(caddr_t)base, size)) != NULL) {
mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
(int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
/* NOTREACHED */
}
((mcache_obj_t *)addr)->obj_next = next;
}
__private_extern__ void
mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset,
size_t size)
{
void *addr;
u_int64_t *oaddr64;
mcache_obj_t *next;
addr = (void *)((uintptr_t)base + offset);
next = ((mcache_obj_t *)addr)->obj_next;
/* For the "obj_next" pointer in the buffer */
oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof (u_int64_t));
*oaddr64 = MCACHE_FREE_PATTERN;
if ((oaddr64 = mcache_verify_set_pattern(MCACHE_FREE_PATTERN,
MCACHE_UNINITIALIZED_PATTERN, (caddr_t)base, size)) != NULL) {
mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
(int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
/* NOTREACHED */
}
((mcache_obj_t *)addr)->obj_next = next;
}
#undef panic
#define DUMP_TRN_FMT() \
"%s transaction thread %p saved PC stack (%d deep):\n" \
"\t%p, %p, %p, %p, %p, %p, %p, %p\n" \
"\t%p, %p, %p, %p, %p, %p, %p, %p\n"
#define DUMP_TRN_FIELDS(s, x) \
s, \
mca->mca_trns[x].mca_thread, mca->mca_trns[x].mca_depth, \
mca->mca_trns[x].mca_stack[0], mca->mca_trns[x].mca_stack[1], \
mca->mca_trns[x].mca_stack[2], mca->mca_trns[x].mca_stack[3], \
mca->mca_trns[x].mca_stack[4], mca->mca_trns[x].mca_stack[5], \
mca->mca_trns[x].mca_stack[6], mca->mca_trns[x].mca_stack[7], \
mca->mca_trns[x].mca_stack[8], mca->mca_trns[x].mca_stack[9], \
mca->mca_trns[x].mca_stack[10], mca->mca_trns[x].mca_stack[11], \
mca->mca_trns[x].mca_stack[12], mca->mca_trns[x].mca_stack[13], \
mca->mca_trns[x].mca_stack[14], mca->mca_trns[x].mca_stack[15]
#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max)
#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max)
__private_extern__ char *
mcache_dump_mca(mcache_audit_t *mca)
{
if (mca_dump_buf == NULL)
return (NULL);
snprintf(mca_dump_buf, DUMP_MCA_BUF_SIZE,
"mca %p: addr %p, cache %p (%s) nxttrn %d\n"
DUMP_TRN_FMT()
DUMP_TRN_FMT(),
mca, mca->mca_addr, mca->mca_cache,
mca->mca_cache ? mca->mca_cache->mc_name : "?",
mca->mca_next_trn,
DUMP_TRN_FIELDS("last", MCA_TRN_LAST),
DUMP_TRN_FIELDS("previous", MCA_TRN_PREV));
return (mca_dump_buf);
}
__private_extern__ void
mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
int64_t expected, int64_t got)
{
if (mca == NULL) {
panic("mcache_audit: buffer %p modified after free at "
"offset 0x%lx (0x%llx instead of 0x%llx)\n", addr,
offset, got, expected);
/* NOTREACHED */
}
panic("mcache_audit: buffer %p modified after free at offset 0x%lx "
"(0x%llx instead of 0x%llx)\n%s\n",
addr, offset, got, expected, mcache_dump_mca(mca));
/* NOTREACHED */
}
__private_extern__ int
assfail(const char *a, const char *f, int l)
{
panic("assertion failed: %s, file: %s, line: %d", a, f, l);
return (0);
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package multierr
import (
"errors"
"fmt"
"io"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// richFormatError is an error that prints a different output depending on
// whether %v or %+v was used.
type richFormatError struct{}
func (r richFormatError) Error() string {
return fmt.Sprint(r)
}
func (richFormatError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
io.WriteString(f, "multiline\nmessage\nwith plus")
} else {
io.WriteString(f, "without plus")
}
}
func appendN(initial, err error, n int) error {
errs := initial
for i := 0; i < n; i++ {
errs = Append(errs, err)
}
return errs
}
func newMultiErr(errors ...error) error {
return &multiError{errors: errors}
}
func TestCombine(t *testing.T) {
tests := []struct {
// Input
giveErrors []error
// Resulting error
wantError error
// %+v and %v string representations
wantMultiline string
wantSingleline string
}{
{
giveErrors: nil,
wantError: nil,
},
{
giveErrors: []error{},
wantError: nil,
},
{
giveErrors: []error{
errors.New("foo"),
nil,
newMultiErr(
errors.New("bar"),
),
nil,
},
wantError: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
wantMultiline: "the following errors occurred:\n" +
" - foo\n" +
" - bar",
wantSingleline: "foo; bar",
},
{
giveErrors: []error{
errors.New("foo"),
newMultiErr(
errors.New("bar"),
),
},
wantError: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
wantMultiline: "the following errors occurred:\n" +
" - foo\n" +
" - bar",
wantSingleline: "foo; bar",
},
{
giveErrors: []error{errors.New("great sadness")},
wantError: errors.New("great sadness"),
wantMultiline: "great sadness",
wantSingleline: "great sadness",
},
{
giveErrors: []error{
errors.New("foo"),
errors.New("bar"),
},
wantError: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
wantMultiline: "the following errors occurred:\n" +
" - foo\n" +
" - bar",
wantSingleline: "foo; bar",
},
{
giveErrors: []error{
errors.New("great sadness"),
errors.New("multi\n line\nerror message"),
errors.New("single line error message"),
},
wantError: newMultiErr(
errors.New("great sadness"),
errors.New("multi\n line\nerror message"),
errors.New("single line error message"),
),
wantMultiline: "the following errors occurred:\n" +
" - great sadness\n" +
" - multi\n" +
" line\n" +
" error message\n" +
" - single line error message",
wantSingleline: "great sadness; " +
"multi\n line\nerror message; " +
"single line error message",
},
{
giveErrors: []error{
errors.New("foo"),
newMultiErr(
errors.New("bar"),
errors.New("baz"),
),
errors.New("qux"),
},
wantError: newMultiErr(
errors.New("foo"),
errors.New("bar"),
errors.New("baz"),
errors.New("qux"),
),
wantMultiline: "the following errors occurred:\n" +
" - foo\n" +
" - bar\n" +
" - baz\n" +
" - qux",
wantSingleline: "foo; bar; baz; qux",
},
{
giveErrors: []error{
errors.New("foo"),
richFormatError{},
errors.New("bar"),
},
wantError: newMultiErr(
errors.New("foo"),
richFormatError{},
errors.New("bar"),
),
wantMultiline: "the following errors occurred:\n" +
" - foo\n" +
" - multiline\n" +
" message\n" +
" with plus\n" +
" - bar",
wantSingleline: "foo; without plus; bar",
},
}
for i, tt := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
err := Combine(tt.giveErrors...)
require.Equal(t, tt.wantError, err)
if tt.wantMultiline != "" {
t.Run("Sprintf/multiline", func(t *testing.T) {
assert.Equal(t, tt.wantMultiline, fmt.Sprintf("%+v", err))
})
}
if tt.wantSingleline != "" {
t.Run("Sprintf/singleline", func(t *testing.T) {
assert.Equal(t, tt.wantSingleline, fmt.Sprintf("%v", err))
})
t.Run("Error()", func(t *testing.T) {
assert.Equal(t, tt.wantSingleline, err.Error())
})
if s, ok := err.(fmt.Stringer); ok {
t.Run("String()", func(t *testing.T) {
assert.Equal(t, tt.wantSingleline, s.String())
})
}
}
})
}
}
func TestCombineDoesNotModifySlice(t *testing.T) {
errors := []error{
errors.New("foo"),
nil,
errors.New("bar"),
}
assert.NotNil(t, Combine(errors...))
assert.Len(t, errors, 3)
assert.Nil(t, errors[1])
}
func TestAppend(t *testing.T) {
tests := []struct {
left error
right error
want error
}{
{
left: nil,
right: nil,
want: nil,
},
{
left: nil,
right: errors.New("great sadness"),
want: errors.New("great sadness"),
},
{
left: errors.New("great sadness"),
right: nil,
want: errors.New("great sadness"),
},
{
left: errors.New("foo"),
right: errors.New("bar"),
want: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
},
{
left: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
right: errors.New("baz"),
want: newMultiErr(
errors.New("foo"),
errors.New("bar"),
errors.New("baz"),
),
},
{
left: errors.New("baz"),
right: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
want: newMultiErr(
errors.New("baz"),
errors.New("foo"),
errors.New("bar"),
),
},
{
left: newMultiErr(
errors.New("foo"),
),
right: newMultiErr(
errors.New("bar"),
),
want: newMultiErr(
errors.New("foo"),
errors.New("bar"),
),
},
}
for _, tt := range tests {
assert.Equal(t, tt.want, Append(tt.left, tt.right))
}
}
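// ExampleAppend is an illustrative sketch of the accumulation pattern the
// tests above exercise; it relies only on Append and on the single-line
// formatting verified in TestCombine.
func ExampleAppend() {
var err error
err = Append(err, errors.New("step one failed"))
err = Append(err, errors.New("step two failed"))
fmt.Println(err)
// Output: step one failed; step two failed
}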
type notMultiErr struct{}
var _ errorGroup = notMultiErr{}
func (notMultiErr) Error() string {
return "great sadness"
}
func (notMultiErr) Errors() []error {
return []error{errors.New("great sadness")}
}
func TestErrors(t *testing.T) {
tests := []struct {
give error
want []error
// Don't attempt to cast to errorGroup or *multiError
dontCast bool
}{
{dontCast: true}, // nil
{
give: errors.New("hi"),
want: []error{errors.New("hi")},
dontCast: true,
},
{
// We don't yet support non-multierr errors.
give: notMultiErr{},
want: []error{notMultiErr{}},
dontCast: true,
},
{
give: Combine(
errors.New("foo"),
errors.New("bar"),
),
want: []error{
errors.New("foo"),
errors.New("bar"),
},
},
{
give: Append(
errors.New("foo"),
errors.New("bar"),
),
want: []error{
errors.New("foo"),
errors.New("bar"),
},
},
{
give: Append(
errors.New("foo"),
Combine(
errors.New("bar"),
),
),
want: []error{
errors.New("foo"),
errors.New("bar"),
},
},
{
give: Combine(
errors.New("foo"),
Append(
errors.New("bar"),
errors.New("baz"),
),
errors.New("qux"),
),
want: []error{
errors.New("foo"),
errors.New("bar"),
errors.New("baz"),
errors.New("qux"),
},
},
}
for i, tt := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Run("Errors()", func(t *testing.T) {
require.Equal(t, tt.want, Errors(tt.give))
})
if tt.dontCast {
return
}
t.Run("multiError", func(t *testing.T) {
require.Equal(t, tt.want, tt.give.(*multiError).Errors())
})
t.Run("errorGroup", func(t *testing.T) {
require.Equal(t, tt.want, tt.give.(errorGroup).Errors())
})
})
}
}
func createMultiErrWithCapacity() error {
// Create a multiError that has capacity for more errors so Append will
// modify the underlying array that may be shared.
return appendN(nil, errors.New("append"), 50)
}
func TestAppendDoesNotModify(t *testing.T) {
initial := createMultiErrWithCapacity()
err1 := Append(initial, errors.New("err1"))
err2 := Append(initial, errors.New("err2"))
// Make sure the error messages match; since we modify the copyNeeded
// atomic, the values themselves cannot be compared directly.
assert.EqualError(t, initial, createMultiErrWithCapacity().Error(), "Initial should not be modified")
assert.EqualError(t, err1, Append(createMultiErrWithCapacity(), errors.New("err1")).Error())
assert.EqualError(t, err2, Append(createMultiErrWithCapacity(), errors.New("err2")).Error())
}
func TestAppendRace(t *testing.T) {
initial := createMultiErrWithCapacity()
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
err := initial
for j := 0; j < 10; j++ {
err = Append(err, errors.New("err"))
}
}()
}
wg.Wait()
}
func TestErrorsSliceIsImmutable(t *testing.T) {
err1 := errors.New("err1")
err2 := errors.New("err2")
err := Append(err1, err2)
gotErrors := Errors(err)
require.Equal(t, []error{err1, err2}, gotErrors, "errors must match")
gotErrors[0] = nil
gotErrors[1] = errors.New("err3")
require.Equal(t, []error{err1, err2}, Errors(err),
"errors must match after modification")
}
func TestNilMultierror(t *testing.T) {
// All operations on a multiError should be safe even when it is nil.
var err *multiError
require.Empty(t, err.Error())
require.Empty(t, err.Errors())
}
| {
"pile_set_name": "Github"
} |
#! /bin/sh
# Remove build directory if exist
if cd build; then
echo
echo "********** Removing older build directory **********"
cd ..
sudo rm -rf build
fi
echo
echo "**************************************************"
echo "****************** build library *****************"
echo "**************************************************"
echo
sudo python setup.py install --use-double --use-jack
| {
"pile_set_name": "Github"
} |
# These are supported funding model platforms
github: mansona
| {
"pile_set_name": "Github"
} |
#include "relabel_cpu.h"
#include "utils.h"
std::tuple<torch::Tensor, torch::Tensor> relabel_cpu(torch::Tensor col,
torch::Tensor idx) {
CHECK_CPU(col);
CHECK_CPU(idx);
CHECK_INPUT(idx.dim() == 1);
auto col_data = col.data_ptr<int64_t>();
auto idx_data = idx.data_ptr<int64_t>();
std::vector<int64_t> cols;
std::vector<int64_t> n_ids;
std::unordered_map<int64_t, int64_t> n_id_map;
int64_t i;
for (int64_t n = 0; n < idx.size(0); n++) {
i = idx_data[n];
n_id_map[i] = n;
n_ids.push_back(i);
}
int64_t c;
for (int64_t e = 0; e < col.size(0); e++) {
c = col_data[e];
if (n_id_map.count(c) == 0) {
n_id_map[c] = n_ids.size();
n_ids.push_back(c);
}
cols.push_back(n_id_map[c]);
}
int64_t n_len = n_ids.size(), e_len = cols.size();
auto out_col = torch::from_blob(cols.data(), {e_len}, col.options()).clone();
auto out_idx = torch::from_blob(n_ids.data(), {n_len}, col.options()).clone();
return std::make_tuple(out_col, out_idx);
}
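// Worked example (hypothetical tensor contents): with col = [10, 3, 10, 7]
// and idx = [10, 3], the loop over idx seeds n_id_map = {10:0, 3:1}. The
// loop over col then relabels to out_col = [0, 1, 0, 2], appending the
// previously unseen node 7, so out_idx = [10, 3, 7].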
| {
"pile_set_name": "Github"
} |
var baseFlatten = require('./_baseFlatten'),
map = require('./map');
/**
* Creates a flattened array of values by running each element in `collection`
* thru `iteratee` and flattening the mapped results. The iteratee is invoked
* with three arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [n, n];
* }
*
* _.flatMap([1, 2], duplicate);
* // => [1, 1, 2, 2]
*/
function flatMap(collection, iteratee) {
return baseFlatten(map(collection, iteratee), 1);
}
module.exports = flatMap;
| {
"pile_set_name": "Github"
} |
reading
on: s fileName: f
^ (self on: s)
packageDirectory: f;
yourself | {
"pile_set_name": "Github"
} |
#ifndef BENCHMARK_MUTEX_H_
#define BENCHMARK_MUTEX_H_
#include <condition_variable>
#include <mutex>
#include "check.h"
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark {
typedef std::condition_variable Condition;
// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
public:
Mutex() {}
void lock() ACQUIRE() { mut_.lock(); }
void unlock() RELEASE() { mut_.unlock(); }
std::mutex& native_handle() { return mut_; }
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock {
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
~MutexLock() RELEASE() {}
MutexLockImp& native_handle() { return ml_; }
private:
MutexLockImp ml_;
};
class Barrier {
public:
Barrier(int num_threads) : running_threads_(num_threads) {}
// Called by each thread
bool wait() EXCLUDES(lock_) {
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread) phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_) {
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0) phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
// entered the barrier. Returns true iff this is the last thread
// to enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_) {
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
};
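// Illustrative sketch, not part of the library: worker threads rendezvous
// on Barrier::wait(), and exactly one caller per phase observes a true
// return. kNumThreads is hypothetical.
//
// Barrier barrier(kNumThreads);
// // ... in each worker thread:
// if (barrier.wait()) {
//   // last thread to arrive; per-phase bookkeeping goes here
// }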
} // end namespace benchmark
#endif // BENCHMARK_MUTEX_H_
| {
"pile_set_name": "Github"
} |
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
/**
* Implement Stack using Linked List.
*/
struct StackNode {
int val;
struct StackNode *next;
};
struct Stack {
struct StackNode *top;
};
void push(struct Stack *stack, int new_val) {
if (stack == NULL) return;
struct StackNode *new_node
= (struct StackNode *)malloc(sizeof(struct StackNode));
new_node->val = new_val;
new_node->next = stack->top;
stack->top = new_node;
}
int pop(struct Stack *stack) {
if (stack == NULL || stack->top == NULL) return 0;
struct StackNode *t = stack->top;
int ans = t->val;
stack->top = stack->top->next;
free(t);
return ans;
}
bool isEmpty(struct Stack *stack) {
if (stack == NULL) return true;
return (stack->top == NULL) ? true : false;
}
/**
* Implement Queue using two stacks.
*/
typedef struct {
struct Stack in;
struct Stack out;
} Queue;
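/*
 * Note on this particular implementation: queuePush first drains `out`
 * back into `in` so the newest element always ends up on top of `in`,
 * while queuePop/queuePeek drain `in` into `out` so the oldest element
 * surfaces on top of `out`. This preserves FIFO order at the cost of
 * O(n) element moves per operation in the worst case; the classic lazy
 * two-stack queue moves elements only when `out` is empty.
 */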
/* Create a queue */
void queueCreate(Queue *queue, int maxSize) {
if (queue == NULL) return;
/* This queue is implemented using linked stacks, so maxSize is not needed */
queue->in.top = queue->out.top = NULL;
}
/* Push element x to the back of queue */
void queuePush(Queue *queue, int element) {
if (queue == NULL) return;
while (!isEmpty(&queue->out)) {
int top = pop(&queue->out);
push(&queue->in, top);
}
push(&queue->in, element);
}
/* Removes the element from front of queue */
void queuePop(Queue *queue) {
if (queue == NULL) return;
if (isEmpty(&queue->out)) {
while (!isEmpty(&queue->in)) {
int top = pop(&queue->in);
push(&queue->out, top);
}
}
pop(&queue->out);
}
/* Get the front element */
int queuePeek(Queue *queue) {
if (isEmpty(&queue->out)) {
while (!isEmpty(&queue->in)) {
int top = pop(&queue->in);
push(&queue->out, top);
}
}
if (!isEmpty(&queue->out) && queue->out.top) {
return queue->out.top->val;
}
else {
return 0;
}
}
/* Return whether the queue is empty */
bool queueEmpty(Queue *queue) {
if (isEmpty(&queue->in) && isEmpty(&queue->out)) {
return true;
}
else {
return false;
}
}
/* Destroy the queue */
void queueDestroy(Queue *queue) {
while (!isEmpty(&queue->in)) {
pop(&queue->in);
}
while (!isEmpty(&queue->out)) {
pop(&queue->out);
}
}
int main() {
int maxSize = 5;
Queue q;
printf("Create a queue.\n"); queueCreate(&q, maxSize);
printf("Push 1\n"); queuePush(&q, 1);
printf("Push 2\n"); queuePush(&q, 2);
printf("Push 3\n"); queuePush(&q, 3);
printf("Push 4\n"); queuePush(&q, 4);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Pop\n"); queuePop(&q);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Push 5\n"); queuePush(&q, 5);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Pop\n"); queuePop(&q);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Pop\n"); queuePop(&q);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Pop\n"); queuePop(&q);
printf("Peek of queue: %d\n", queuePeek(&q));
printf("Destroy\n"); queueDestroy(&q);
return 0;
}
| {
"pile_set_name": "Github"
} |
function HARError (errors) {
var message = 'validation failed'
this.name = 'HARError'
this.message = message
this.errors = errors
if (typeof Error.captureStackTrace === 'function') {
Error.captureStackTrace(this, this.constructor)
} else {
this.stack = (new Error(message)).stack
}
}
HARError.prototype = Error.prototype
module.exports = HARError
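// Illustrative usage sketch; the require path and the validation error
// shape below are hypothetical:
//
//   var HARError = require('./error')
//   throw new HARError([{ field: 'log.entries', message: 'is required' }])
//
// The thrown error exposes `name`, `message` and the original `errors`
// array for callers to inspect.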
| {
"pile_set_name": "Github"
} |
/**
* @file coreConfig.h
* @version $Format:%h%d$
*
* Configuration settings for Matrix core module.
*/
/*
* Copyright (c) 2013-2018 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/******************************************************************************/
#ifndef _h_PS_CORECONFIG
# define _h_PS_CORECONFIG
/******************************************************************************/
/* Debug and tracing configuration */
/******************************************************************************/
/**
Enable various levels of trace.
When this option is turned off, messages are silently
discarded and their text does not take space in the binary image.
*/
/* #define USE_CORE_TRACE */
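/* Illustrative only: tracing can be enabled either by uncommenting the
   define above or from the compiler command line, e.g. cc -DUSE_CORE_TRACE */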
# ifndef NO_CORE_ERROR
# define USE_CORE_ERROR
# endif
# ifndef NO_CORE_ASSERT
# define USE_CORE_ASSERT
# endif
/** Allow target file of psTrace output to be chosen with the
PSCORE_DEBUG_FILE and PSCORE_DEBUG_FILE_APPEND environment variables.
By default, stdout is used. Disable to minimize footprint. */
/* #define USE_TRACE_FILE */
/** Experimental, extensible logging facility. Only used by the SL/CL
crypto libraries; not used by the TLS library. Disable to minimize
footprint. */
/* #define PS_LOGF */
/******************************************************************************/
/* Other Configurable features */
/******************************************************************************/
/**
If enabled, calls to the psError set of APIs will perform a platform
abort on the exeutable to aid in debugging.
*/
# ifdef DEBUG
/* #define HALT_ON_PS_ERROR *//* NOT RECOMMENDED FOR PRODUCTION BUILDS */
# endif
/** Define to disable file IO related APIs, such as psGetFileBuf
and psParseCertFile. This helps to minimize footprint when no file IO
is needed. */
/* #define NO_FILE_SYSTEM */
/**
Include the psCoreOsdepMutex family of APIs
@note If intending to compile crypto-cl, then this flag should
always be set.
*/
# ifndef NO_MULTITHREADING
# define USE_MULTITHREADING
# endif /* NO_MULTITHREADING */
/**
Include the psNetwork family of APIs.
These APIs provide a simple high-level socket API.
The API derives from BSD Sockets, and therefore it can only be used
on devices which have the prerequisite APIs.
MatrixSSL itself can also be used without PS networking, but
many of the example programs and MatrixSSLNet are based on PS networking.
*/
# ifndef NO_PS_NETWORKING
# define USE_PS_NETWORKING
# endif /* NO_PS_NETWORKING */
/**
Use the psStat statistics measurement for CL/SL.
psStat is a generic statistics module. It contains features
required e.g. for measuring performance.
These capabilities can only be used on platforms with support for
thread-local storage and pthreads, such as Linux. Currently the support
will only be enabled for x86-64 Linux systems.
If the statistics feature is not in use, the performance effect is
minimal, but for optimal performance in a production environment you
may define NO_PS_STAT_CL.
*/
# ifdef __x86_64__
# ifndef NO_PS_STAT_CL
# define USE_PS_STAT_CL
# endif /* NO_PS_STAT_CL */
# endif /* __x86_64__ */
/**
Use the psStat statistics measurement for CL/SL by default.
When psStat support has been compiled in via USE_PS_STAT_CL (see
above), it is disabled by default. To enable the statistics framework,
set the environment variable PS_ENABLE_STATS to any value. Enable the
setting below to collect statistics without any environment variable.
When statistics are on by default, they can still be disabled with the
environment variable PS_SKIP_STATS. Disabling statistics is recommended
to minimize footprint.
*/
/* # define USE_PS_STAT_CL_BY_DEFAULT */
#endif /* _h_PS_CORECONFIG */
/******************************************************************************/
| {
"pile_set_name": "Github"
} |
package parser
import (
"fmt"
"github.com/hashicorp/hcl/hcl/token"
)
// PosError is a parse error that contains a position.
type PosError struct {
Pos token.Pos
Err error
}
func (e *PosError) Error() string {
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}
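// Illustrative sketch (the position values are hypothetical): wrapping a
// scanner error with its location produces output along the lines of
// "At 3:9: illegal character".
//
// err := &PosError{
// 	Pos: token.Pos{Line: 3, Column: 9},
// 	Err: errors.New("illegal character"),
// }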
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
class Google_Service_BigtableAdmin_GenerateConsistencyTokenRequest extends Google_Model
{
}
| {
"pile_set_name": "Github"
} |
libc {
GLIBC_2.0 {
# c*
catclose; catgets; catopen;
}
GLIBC_PRIVATE {
# functions with required interface outside normal name space
__open_catalog;
}
}
| {
"pile_set_name": "Github"
} |
This folder contains some sample formatters that may be helpful.
Feel free to change them, extend them, or use them as the basis for your own custom formatter(s).
More information about creating your own custom formatters can be found on the wiki:
https://github.com/robbiehanson/CocoaLumberjack/wiki/CustomFormatters
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2020 Martin Denham, Tuomas Airaksinen and the And Bible contributors.
*
* This file is part of And Bible (http://github.com/AndBible/and-bible).
*
* And Bible is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software Foundation,
* either version 3 of the License, or (at your option) any later version.
*
* And Bible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with And Bible.
* If not, see http://www.gnu.org/licenses/.
*
*/
package net.bible.android.view.activity.page.screen
import android.view.View
import android.view.ViewGroup
import android.widget.LinearLayout
import net.bible.android.activity.R
import net.bible.android.control.event.ABEventBus
import net.bible.android.control.event.passage.PassageChangeStartedEvent
import net.bible.android.control.event.window.NumberOfWindowsChangedEvent
import net.bible.android.control.page.window.Window
import net.bible.android.control.page.window.WindowControl
import net.bible.android.view.activity.MainBibleActivityScope
import net.bible.android.view.activity.base.DocumentView
import net.bible.android.view.activity.mynote.MyNoteViewBuilder
import net.bible.android.view.activity.page.MainBibleActivity
import javax.inject.Inject
class WebViewsBuiltEvent
class AfterRemoveWebViewEvent
/**
* Create Views for displaying documents
*
* @author Martin Denham [mjdenham at gmail dot com]
*/
@MainBibleActivityScope
class DocumentViewManager @Inject constructor(
private val mainBibleActivity: MainBibleActivity,
private val myNoteViewBuilder: MyNoteViewBuilder,
private val windowControl: WindowControl
) {
private val parent: LinearLayout = mainBibleActivity.findViewById(R.id.mainBibleView)
private var lastView: View? = null
private var splitBibleArea: SplitBibleArea? = null
fun destroy() {
ABEventBus.getDefault().unregister(this)
splitBibleArea?.destroy()
}
fun onEvent(event: NumberOfWindowsChangedEvent) {
buildView()
}
/**
* called just before starting work to change the current passage
*/
fun onEventMainThread(event: PassageChangeStartedEvent) {
buildView()
}
private fun removeView() {
parent.removeAllViews()
ABEventBus.getDefault().post(AfterRemoveWebViewEvent())
myNoteViewBuilder.afterRemove()
}
private fun buildWebViews(forceUpdate: Boolean): SplitBibleArea {
val topView = splitBibleArea?: SplitBibleArea().also {
splitBibleArea = it
}
topView.update(forceUpdate)
return topView
}
@Synchronized
fun buildView(forceUpdate: Boolean = false) {
if (myNoteViewBuilder.isMyNoteViewType) {
removeView()
mainBibleActivity.resetSystemUi()
lastView = myNoteViewBuilder.addMyNoteView(parent)
} else {
val view = buildWebViews(forceUpdate)
if(lastView != view) {
removeView()
lastView = view
parent.addView(view,
LinearLayout.LayoutParams(
ViewGroup.LayoutParams.MATCH_PARENT,
ViewGroup.LayoutParams.MATCH_PARENT)
)
}
ABEventBus.getDefault().post(WebViewsBuiltEvent())
}
}
val documentView: DocumentView?
get() = getDocumentView(windowControl.activeWindow)
private fun getDocumentView(window: Window): DocumentView? {
return if (myNoteViewBuilder.isMyNoteViewType) {
myNoteViewBuilder.view
} else { // a specific screen is specified to prevent content going to the wrong screen if the active screen is changed quickly
splitBibleArea?.bibleViewFactory?.getOrCreateBibleView(window)
}
}
fun clearBibleViewFactory() {
splitBibleArea!!.bibleViewFactory.clear()
}
init {
ABEventBus.getDefault().register(this)
}
}
| {
"pile_set_name": "Github"
} |
[4.0.2](../../releases/tag/4.0.2) 2019-07-21 20:57:09
---------------------------------------------------------
- [01a3ec4](../../commit/01a3ec4) ✨ [feature] add characteristic of onchange and showFooter
[4.0.1](../../releases/tag/4.0.1) 2019-02-21 11:11:09
---------------------------------------------------------
-
[4.0.0](../../releases/tag/4.0.0) 2018-10-26 12:39:53
---------------------------------------------------------
- [88e8b68](../../commit/88e8b68) 📚 [document] edit readme document.
- [daf0b98](../../commit/daf0b98) ✨ [feature] add feature of caption customized(https://github.com/lanjingling0510/react-mobile-datepicker/issues/31)
[3.0.12](../../releases/tag/3.0.12) 2018-05-17 21:29:57
-----------------------------------------------------------
- [a6ec20b](../../commit/a6ec20b) 🐛 [bug]fix when scrolling 2 wheels at the same time it is possible to set a date outside min/maxDate(https://github.com/lanjingling0510/react-mobile-datepicker/issues/27)
- [3f6ef0d](../../commit/3f6ef0d) 🐛 [bug]fix readme.md error
- [f4f3f01](../../commit/f4f3f01) 📚 [document] update readme.md
[3.0.11](../../releases/tag/3.0.11) 2018-05-12 10:57:46
-----------------------------------------------------------
- [b6d6b5e](../../commit/b6d6b5e) ✨ [feature] map month number to month name(https://github.com/lanjingling0510/react-mobile-datepicker/issues/26)
[3.0.10](../../releases/tag/3.0.10) 2018-04-24 22:50:29
-----------------------------------------------------------
- [99ce5da](../../commit/99ce5da) 🐛 [bug] fix the current date not being between the minimum and the maximum date (https://github.com/lanjingling0510/react-mobile-datepicker/pull/17)
- [8582c7e](../../commit/8582c7e) ✨ [feature] add test case for dateSteps(https://github.com/lanjingling0510/react-mobile-datepicker/issues/21)
[3.0.9](../../releases/tag/3.0.9) 2018-04-23 10:48:04
---------------------------------------------------------
- [8508a7e](../../commit/8508a7e) ✨ [feature] add test case for dateSteps
- [34ff8c6](../../commit/34ff8c6) ✨ [feature] Add support for setting a time step
[3.0.8](../../releases/tag/3.0.8) 2017-10-15 23:17:08
---------------------------------------------------------
- [3297180](../../commit/3297180) ✨ [feature] Add a function: Customize the header(https://github.com/lanjingling0510/react-mobile-datepicker/pull/16)
[3.0.7](../../releases/tag/3.0.7) 2017-10-12 08:11:11
---------------------------------------------------------
- [9155be0](../../commit/9155be0) ✨ [feature] update some depend packages(https://github.com/lanjingling0510/react-mobile-datepicker/issues/15)
- [1feaa24](../../commit/1feaa24) 🔧 [config] add react storybook.
- [81dd701](../../commit/81dd701) 🔧 [config] add package-lock.json file.
- [e925080](../../commit/e925080) 📚 [document] edit readme document.
[3.0.6](../../releases/tag/3.0.6) 2017-07-08 20:21:30
---------------------------------------------------------
- [1d847d8](../../commit/1d847d8) 📚 [document] edit readme document.
- [d2c3372](../../commit/d2c3372) ✨ [feature] Add support for automatically configuring Year, Month, Day, Hour, Minute and Second.
[3.0.5](../../releases/tag/3.0.5) 2017-07-03 20:47:51
---------------------------------------------------------
- [cec3c8a](../../commit/cec3c8a) ✨ [feature] add support for customized buttons. (https://github.com/lanjingling0510/react-mobile-datepicker/issues/3)
- [49d2fc7](../../commit/49d2fc7) 🐛 [bug] fix handling of the TouchEvent pageY property on Android 4.4 (https://github.com/lanjingling0510/react-mobile-datepicker/issues/9)
- [436906c](../../commit/436906c) 📚 [document] edit LICENSE.md fullname.
[3.0.4](../../releases/tag/3.0.4) 2017-04-09 17:12:18
---------------------------------------------------------
- [e2a2935](../../commit/e2a2935) 📦 [refact] edit test code for simulate event.
- [b743448](../../commit/b743448) 🐛 [bug] fix: scrolling up would refresh the page.
[3.0.3](../../releases/tag/3.0.3) 2017-01-05 15:06:48
---------------------------------------------------------
- [61d569f](../../commit/61d569f) ✨ [feature] Support server rendering (https://github.com/lanjingling0510/react-mobile-datepicker/issues/4)
[3.0.2](../../releases/tag/3.0.2) 2016-12-18 14:45:39
---------------------------------------------------------
- [45bcd3e](../../commit/45bcd3e) 🐛 [bug]fix Cannot find module jsdom
- [a167120](../../commit/a167120) ✨ [feature] Added an option (isPopup) [(#2)](https://github.com/lanjingling0510/react-mobile-datepicker/issues/2)
v3.0.1 - Sun, 18 Sep 2016 09:37:34 GMT
--------------------------------------
-
v3.0.0 - Sun, 18 Sep 2016 09:28:28 GMT
--------------------------------------
- [14b868c](../../commit/14b868c) [changed] ✅ update version; add five new themes; a single swipe can move across multiple dates.
v2.0.7 - Tue, 13 Sep 2016 04:44:41 GMT
--------------------------------------
-
v2.0.7 - Sat, 10 Sep 2016 15:52:02 GMT
--------------------------------------
-
v2.0.6 - Sat, 10 Sep 2016 10:23:41 GMT
--------------------------------------
-
v2.0.5 - Sat, 10 Sep 2016 10:16:55 GMT
--------------------------------------
- [9e2df2f](../../commit/9e2df2f) [changed] add modal layer and add rollup for production
v2.0.4 - Tue, 12 Jul 2016 09:16:42 GMT
--------------------------------------
-
v2.0.3 - Tue, 12 Jul 2016 09:15:00 GMT
--------------------------------------
-
v2.0.2 - Tue, 05 Jul 2016 00:48:26 GMT
--------------------------------------
-
v2.0.1 - Mon, 04 Jul 2016 14:45:41 GMT
--------------------------------------
-
v2.0.0 - Mon, 04 Jul 2016 10:48:22 GMT
--------------------------------------
-
v1.0.16 - Mon, 27 Jun 2016 09:08:47 GMT
---------------------------------------
- [4516b14](../../commit/4516b14) [changed] Adjust the finish-btn line height
v1.0.15 - Sun, 26 Jun 2016 04:09:49 GMT
---------------------------------------
-
v1.0.14 - Sun, 26 Jun 2016 03:38:28 GMT
---------------------------------------
- [2025c43](../../commit/2025c43) [added] Add keywords to README.md
v1.0.13 - Sun, 26 Jun 2016 03:20:53 GMT
---------------------------------------
-
v1.0.12 - Sun, 26 Jun 2016 02:20:39 GMT
---------------------------------------
- [37441d7](../../commit/37441d7) [added] Add comments and test cases
v1.0.11 - Fri, 24 Jun 2016 02:35:43 GMT
---------------------------------------
-
v1.0.10 - Fri, 24 Jun 2016 01:55:02 GMT
---------------------------------------
- [1687e8e](../../commit/1687e8e) [fixed] Remove the isOpen and onCancel props
v1.0.9 - Fri, 24 Jun 2016 01:33:47 GMT
--------------------------------------
-
v1.0.8 - Fri, 24 Jun 2016 01:32:53 GMT
--------------------------------------
-
v1.0.7 - Fri, 24 Jun 2016 01:29:41 GMT
--------------------------------------
- [305fb68](../../commit/305fb68) [changed] Update the README
v1.0.6 - Fri, 24 Jun 2016 01:25:40 GMT
--------------------------------------
- [a1f1db9](../../commit/a1f1db9) [fixed] Fix a bug that occurred when scrolling quickly
v1.0.5 - Thu, 23 Jun 2016 13:37:16 GMT
--------------------------------------
-
v1.0.4 - Thu, 23 Jun 2016 13:34:36 GMT
--------------------------------------
-
v1.0.3 - Thu, 23 Jun 2016 13:22:13 GMT
--------------------------------------
- [5a93fe9](../../commit/5a93fe9) [changed] Updated the README
v1.0.2 - Thu, 23 Jun 2016 13:12:08 GMT
--------------------------------------
-
v1.0.14 - Fri, 17 Jun 2016 07:30:27 GMT
---------------------------------------
-
v1.0.13 - Fri, 17 Jun 2016 06:26:17 GMT
---------------------------------------
-
v1.0.12 - Thu, 16 Jun 2016 15:42:47 GMT
---------------------------------------
-
v1.0.11 - Thu, 16 Jun 2016 14:15:13 GMT
---------------------------------------
-
v1.0.9 - Thu, 16 Jun 2016 12:47:16 GMT
--------------------------------------
-
v1.0.8 - Thu, 16 Jun 2016 12:10:32 GMT
--------------------------------------
-
v1.0.7 - Thu, 16 Jun 2016 09:09:24 GMT
--------------------------------------
- [6d2a00b](../../commit/6d2a00b) [added] Add README.md
v1.0.6 - Thu, 16 Jun 2016 08:54:53 GMT
--------------------------------------
- [9be9fe6](../../commit/9be9fe6) [added] Add .travis.yml
v1.0.5 - Thu, 16 Jun 2016 08:01:06 GMT
--------------------------------------
- [a2cd387](../../commit/a2cd387) [fixed] Fix the issue of the changelog not taking effect
v1.0.4 - Thu, 16 Jun 2016 07:40:50 GMT
--------------------------------------
-
v1.0.3 - Thu, 16 Jun 2016 07:40:47 GMT
--------------------------------------
-
1.0.3 - Thu, 16 Jun 2016 07:40:35 GMT
-------------------------------------
-
v1.0.4 - Thu, 16 Jun 2016 07:21:51 GMT
--------------------------------------
-
v1.0.3 - Thu, 16 Jun 2016 06:20:24 GMT
--------------------------------------
-
v1.0.2 - Thu, 16 Jun 2016 06:20:14 GMT
--------------------------------------
-
v1.0.2 - Thu, 16 Jun 2016 01:29:56 GMT
--------------------------------------
-
v1.0.1 - Thu, 16 Jun 2016 01:12:11 GMT
--------------------------------------
-
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: c1f765b2bd3d2ad49b2677f6478a9ba3
folderAsset: yes
timeCreated: 1466585494
licenseType: Store
DefaultImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
********************************************************
Freescale i.MX6 Q, DL and SoloX SABRE development boards
********************************************************
This file documents the Buildroot support for the Freescale SABRE Board
for Smart Devices Based on the i.MX 6 and i.MX 6SoloX Series (SABRESD),
as well as the Freescale SABRE Board for Automotive Infotainment.
Read the i.MX 6 SABRESD Quick Start Guide for an introduction to the
board:
http://cache.freescale.com/files/32bit/doc/quick_start_guide/SABRESDB_IMX6_QSG.pdf
Read the i.MX 6 SoloX SABRESD Quick Start Guide for an introduction to
the board:
http://cache.freescale.com/files/32bit/doc/user_guide/IMX6SOLOXQSG.pdf
Read the SABRE for Automotive Infotainment Quick Start Guide for an
introduction to the board:
http://cache.freescale.com/files/32bit/doc/user_guide/IMX6SABREINFOQSG.pdf
Building with NXP kernel and NXP U-Boot
=======================================
First, configure Buildroot for your SABRE board.
For i.MX6Q SABRE SD board:
make freescale_imx6qsabresd_defconfig
For i.MX6DL SABRE SD board:
make freescale_imx6dlsabresd_defconfig
For i.MX6 SoloX SABRE SD board:
make freescale_imx6sxsabresd_defconfig
For i.MX6Q SABRE Auto board:
make freescale_imx6qsabreauto_defconfig
For i.MX6DL SABRE Auto board:
make freescale_imx6dlsabreauto_defconfig
Build all components:
make
You will find in ./output/images/ the following files:
- imx6dl-sabresd.dtb or imx6q-sabresd.dtb or imx6sx-sdb.dtb or
imx6q-sabreauto.dtb or imx6dl-sabreauto.dtb
- rootfs.ext2
- rootfs.tar
- u-boot.imx
- uImage, or zImage for i.MX6 SoloX
Building with mainline kernel and mainline U-Boot
=================================================
Mainline U-Boot uses SPL and can support the three
variants of mx6sabreauto boards: mx6q, mx6dl and mx6qp.
First, configure Buildroot for your mx6sabreauto board
make imx6-sabreauto_defconfig
Build all components:
make
You will find in output/images/ the following files:
- imx6dl-sabreauto.dtb, imx6q-sabreauto.dtb, imx6qp-sabreauto.dtb
- rootfs.ext2
- SPL and u-boot.img
- u-boot.imx
- zImage
Create a bootable SD card
=========================
To determine the device associated to the SD card have a look in the
/proc/partitions file:
cat /proc/partitions
Buildroot prepares a bootable "sdcard.img" image in the output/images/
directory, ready to be dumped on a microSD card. Launch the following
command as root:
dd if=./output/images/sdcard.img of=/dev/<your-microsd-device>
*** WARNING! This command will destroy all data on the card. Use with care! ***
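If the host's dd supports it (GNU coreutils 8.24 or later), the optional
status=progress flag shows how far the copy has got, and running sync
afterwards makes sure all data reaches the card before you remove it:

  dd if=./output/images/sdcard.img of=/dev/<your-microsd-device> status=progress && sync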
For details about the medium image layout, see the definition in
board/freescale/common/imx/genimage.cfg.template.
Boot the SABRE board
====================
i.MX6 SABRE SD
--------------
To boot your newly created system on an i.MX6 SABRE SD Board (refer to
the i.MX6 SABRE SD Quick Start Guide for guidance):
- insert the SD card in the SD3 slot of the board;
- locate the BOOT dip switches (SW6), set dips 2 and 7 to ON, all others to OFF;
- connect a Micro USB cable to Debug Port and connect using a terminal emulator
at 115200 bps, 8n1;
- power on the board.
i.MX6 SoloX SABRE SD
--------------------
To boot your newly created system on an i.MX6 SoloX SABRE SD Board
(refer to the i.MX6 SoloX SABRE SD Quick Start Guide for guidance):
- insert the SD card in the J4-SD4 socket at the bottom of the board;
- Set the SW10, SW11 and SW12 DIP switches at the top of the board in
their default position, to boot from SD card. Reference configuration:
SW10
1 2 3 4 5 6 7 8
off off off off off off off off
SW11
1 2 3 4 5 6 7 8
off off ON ON ON off off off
SW12
1 2 3 4 5 6 7 8
off ON off off off off off off
- connect a Micro USB cable to the J16 Debug Port at the bottom of the
board. This is a dual UART debug port; connect to the first tty using
a terminal emulator at 115200 bps, 8n1;
- power on the board with the SW1-PWR switch at the top of the board.
SABRE Auto
----------
To boot your newly created system on a SABRE Auto Board (refer to the SABRE for
Automotive Infotainment Quick Start Guide for guidance):
- insert the SD card in the CPU card SD card socket J14;
- Set the S1, S2 and S3 DIP switches and J3 jumper to boot from SD on CPU card.
Reference configuration:
S1
1 2 3 4 5 6 7 8 9 10
off ON off off ON off off off off off
S2
1 2 3 4
off off ON off
S3
1 2 3 4
off off ON ON
J3: 1-2
- connect an RS-232 UART cable to CPU card debug port J18 UART DB9 and
connect using a terminal emulator at 115200 bps, 8n1;
- power on the board.
Enjoy!
References
==========
https://community.freescale.com/docs/DOC-95015
https://community.freescale.com/docs/DOC-95017
https://community.freescale.com/docs/DOC-99218
| {
"pile_set_name": "Github"
} |
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <[email protected]>
*
*/
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_dp_helper.h"
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
#define DP_LINK_CONFIGURATION_SIZE 9
#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
uint32_t save_DP;
uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
struct intel_output *intel_output;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
};
static void
intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
static void
intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
void
intel_edp_link_config (struct intel_output *intel_output,
int *lane_num, int *link_bw)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
*lane_num = dp_priv->lane_count;
if (dp_priv->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
else if (dp_priv->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
intel_dp_max_lane_count(struct intel_output *intel_output)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
int max_lane_count = 4;
if (dp_priv->dpcd[0] >= 0x11) {
max_lane_count = dp_priv->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
default:
max_lane_count = 4;
}
}
return max_lane_count;
}
static int
intel_dp_max_link_bw(struct intel_output *intel_output)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
int max_link_bw = dp_priv->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
break;
default:
max_link_bw = DP_LINK_BW_1_62;
break;
}
return max_link_bw;
}
static int
intel_dp_link_clock(uint8_t link_bw)
{
if (link_bw == DP_LINK_BW_2_7)
return 270000;
else
return 162000;
}
/* I think this is a fiction */
static int
intel_dp_link_required(struct drm_device *dev,
struct intel_output *intel_output, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_eDP(intel_output))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
}
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
return (max_link_clock * max_lanes * 8) / 10;
}
static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_output *intel_output = to_intel_output(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
int max_lanes = intel_dp_max_lane_count(intel_output);
	/* only refuse the mode on non-eDP since we have seen some weird eDP panels
	   which are outside spec tolerances but somehow work by magic */
if (!IS_eDP(intel_output) &&
(intel_dp_link_required(connector->dev, intel_output, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
return MODE_OK;
}
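/*
 * Pack up to four bytes from 'src' into one 32-bit AUX channel data word,
 * most significant byte first, matching the layout of the hardware data
 * registers.
 */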
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
if (src_bytes > 4)
src_bytes = 4;
for (i = 0; i < src_bytes; i++)
v |= ((uint32_t) src[i]) << ((3-i) * 8);
return v;
}
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
dst_bytes = 4;
for (i = 0; i < dst_bytes; i++)
dst[i] = src >> ((3-i) * 8);
}
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100;
case CLKCFG_FSB_533:
return 133;
case CLKCFG_FSB_667:
return 166;
case CLKCFG_FSB_800:
return 200;
case CLKCFG_FSB_1067:
return 266;
case CLKCFG_FSB_1333:
return 333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
return 400;
default:
return 133;
}
}
static int
intel_dp_aux_ch(struct intel_output *intel_output,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
if (IS_eDP(intel_output))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
else if (IS_IRONLAKE(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4) {
uint32_t d = pack_aux(send + i, send_bytes - i);
I915_WRITE(ch_data + i, d);
}
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, ctl);
(void) I915_READ(ch_ctl);
for (;;) {
udelay(100);
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
}
/* Clear done status and any errors */
I915_WRITE(ch_ctl, (status |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR));
(void) I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
break;
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
return -EBUSY;
}
/* Check for timeout or receive error.
* Timeouts occur when the sink is not connected
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
return -EIO;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
}
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
if (recv_bytes > recv_size)
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4) {
uint32_t d = I915_READ(ch_data + i);
unpack_aux(d, recv + i, recv_bytes - i);
}
return recv_bytes;
}
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_output *intel_output,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
return send_bytes;
}
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_output *intel_output,
uint16_t address, uint8_t byte)
{
return intel_dp_aux_native_write(intel_output, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_output *intel_output,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
int msg_bytes;
uint8_t reply[20];
int reply_bytes;
uint8_t ack;
int ret;
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
for (;;) {
ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
ack = reply[0];
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
}
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
struct intel_output *intel_output = dp_priv->intel_output;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
int msg_bytes;
int reply_bytes;
int ret;
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = AUX_I2C_READ << 4;
else
msg[0] = AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
msg[0] |= AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
switch (mode) {
case MODE_I2C_WRITE:
msg[3] = 0;
msg[4] = write_byte;
msg_bytes = 5;
reply_bytes = 1;
break;
case MODE_I2C_READ:
msg[3] = 0;
msg_bytes = 4;
reply_bytes = 2;
break;
default:
msg_bytes = 3;
reply_bytes = 1;
break;
}
for (;;) {
ret = intel_dp_aux_ch(intel_output,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
}
static int
intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false;
dp_priv->algo.address = 0;
dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
dp_priv->adapter.owner = THIS_MODULE;
dp_priv->adapter.class = I2C_CLASS_DDC;
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
dp_priv->adapter.dev.parent = &intel_output->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_output);
int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
<= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
}
}
}
if (IS_eDP(intel_output)) {
/* okay we failed just pick the highest */
dp_priv->lane_count = max_lane_count;
dp_priv->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
}
return false;
}
struct intel_dp_m_n {
uint32_t tu;
uint32_t gmch_m;
uint32_t gmch_n;
uint32_t link_m;
uint32_t link_n;
};
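/*
 * Halve both terms of the ratio until each fits in 24 bits; the M/N values
 * are programmed into 24-bit wide hardware registers, so the ratio is only
 * preserved approximately.
 */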
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
*den >>= 1;
}
}
static void
intel_dp_compute_m_n(int bytes_per_pixel,
int nlanes,
int pixel_clock,
int link_clock,
struct intel_dp_m_n *m_n)
{
m_n->tu = 64;
m_n->gmch_m = pixel_clock * bytes_per_pixel;
m_n->gmch_n = link_clock * nlanes;
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
/*
* Find the lane count in the intel_output private
*/
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
}
}
/*
* Compute the GMCH and Link ratios. The '3' here is
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (IS_IRONLAKE(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
} else {
I915_WRITE(TRANSB_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
}
} else {
if (intel_crtc->pipe == 0) {
I915_WRITE(PIPEA_GMCH_DATA_M,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(PIPEA_GMCH_DATA_N,
m_n.gmch_n);
I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
} else {
I915_WRITE(PIPEB_GMCH_DATA_M,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(PIPEB_GMCH_DATA_N,
m_n.gmch_n);
I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
}
}
}
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
struct drm_crtc *crtc = intel_output->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dp_priv->DP = (DP_LINK_TRAIN_OFF |
DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0 |
DP_SYNC_VS_HIGH |
DP_SYNC_HS_HIGH);
switch (dp_priv->lane_count) {
case 1:
dp_priv->DP |= DP_PORT_WIDTH_1;
break;
case 2:
dp_priv->DP |= DP_PORT_WIDTH_2;
break;
case 4:
dp_priv->DP |= DP_PORT_WIDTH_4;
break;
}
if (dp_priv->has_audio)
dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
dp_priv->link_configuration[0] = dp_priv->link_bw;
dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
* Check for DPCD version > 1.1,
	 * enable enhanced framing in that case
*/
if (dp_priv->dpcd[0] >= 0x11) {
dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
if (intel_crtc->pipe == 1)
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_output)) {
		/* don't miss the required settings for eDP */
dp_priv->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
dp_priv->DP |= DP_PLL_FREQ_160MHZ;
else
dp_priv->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
pp_status = I915_READ(PCH_PP_STATUS);
if (pp_status & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel off wait timed out\n");
/* Make sure VDD is enabled so DP AUX will work */
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_backlight_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_output, dp_priv->DP);
if (IS_eDP(intel_output)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
}
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_output)) {
ironlake_edp_panel_on(dev);
ironlake_edp_backlight_on(dev);
}
}
}
dp_priv->dpms_mode = mode;
}
/*
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
static bool
intel_dp_get_link_status(struct intel_output *intel_output,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = intel_dp_aux_native_read(intel_output,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
return false;
return true;
}
static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static void
intel_dp_save(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
dp_priv->save_DP = I915_READ(dp_priv->output_reg);
intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
dp_priv->save_link_configuration,
sizeof (dp_priv->save_link_configuration));
}
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
"pattern 1", "pattern 2", "idle", "off"
};
#endif
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
static void
intel_get_adjust_train(struct intel_output *intel_output,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
for (lane = 0; lane < lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
if (v >= I830_DP_VOLTAGE_MAX)
v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
train_set[lane] = v | p;
}
static uint32_t
intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
uint32_t signal_levels = 0;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
signal_levels |= DP_VOLTAGE_0_6;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
signal_levels |= DP_VOLTAGE_0_8;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
signal_levels |= DP_VOLTAGE_1_2;
break;
}
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
signal_levels |= DP_PRE_EMPHASIS_3_5;
break;
case DP_TRAIN_PRE_EMPHASIS_6:
signal_levels |= DP_PRE_EMPHASIS_6;
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
signal_levels |= DP_PRE_EMPHASIS_9_5;
break;
}
return signal_levels;
}
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
uint8_t l = intel_dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
/* Check whether clock recovery is done on all channels */
static bool
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
int lane;
uint8_t lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
return true;
}
static bool
intel_dp_set_link_train(struct intel_output *intel_output,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
int ret;
I915_WRITE(dp_priv->output_reg, dp_reg_value);
POSTING_READ(dp_priv->output_reg);
if (first)
intel_wait_for_vblank(dev);
intel_dp_aux_native_write_1(intel_output,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_output,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
return true;
}
static void
intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
uint8_t voltage;
bool clock_recovery = false;
bool channel_eq = false;
bool first = true;
int tries;
/* Write the link configuration data */
intel_dp_aux_native_write(intel_output, 0x100,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
if (!intel_dp_get_link_status(intel_output, link_status))
break;
if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < dp_priv->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == dp_priv->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
}
/* channel equalization */
tries = 0;
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
if (!intel_dp_get_link_status(intel_output, link_status))
break;
if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
channel_eq = true;
break;
}
/* Try 5 times */
if (tries > 5)
break;
/* Compute new train_set as requested by target */
intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
++tries;
}
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_output,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
{
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_output)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(dp_priv->output_reg, DP);
POSTING_READ(dp_priv->output_reg);
udelay(100);
}
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
POSTING_READ(dp_priv->output_reg);
udelay(17000);
if (IS_eDP(intel_output))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(dp_priv->output_reg);
}
static void
intel_dp_restore(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
if (dp_priv->save_DP & DP_PORT_EN)
intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
else
intel_dp_link_down(intel_output, dp_priv->save_DP);
}
/*
* According to DP spec
* 5.1.2:
* 1. Read DPCD
* 2. Configure link according to Receiver Capabilities
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
static void
intel_dp_check_link_status(struct intel_output *intel_output)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (!intel_output->enc.crtc)
return;
if (!intel_dp_get_link_status(intel_output, link_status)) {
intel_dp_link_down(intel_output, dp_priv->DP);
return;
}
if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_output,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
}
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
dp_priv->has_audio = false;
if (IS_IRONLAKE(dev))
return ironlake_dp_detect(connector);
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
case DP_C:
bit = DPC_HOTPLUG_INT_STATUS;
break;
case DP_D:
bit = DPD_HOTPLUG_INT_STATUS;
break;
default:
return connector_status_unknown;
}
temp = I915_READ(PORT_HOTPLUG_STAT);
if ((temp & bit) == 0)
return connector_status_disconnected;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_output,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct drm_device *dev = intel_output->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_ddc_get_modes(intel_output);
if (ret)
return ret;
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (IS_eDP(intel_output)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
}
return 0;
}
static void
intel_dp_destroy (struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
if (intel_output->i2c_bus)
intel_i2c_destroy(intel_output->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_output);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_dp_mode_set,
.commit = intel_encoder_commit,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_dp_save,
.restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
.best_encoder = intel_best_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_enc_destroy,
};
void
intel_dp_hot_plug(struct intel_output *intel_output)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
return;
dp_priv = (struct intel_dp_priv *)(intel_output + 1);
connector = &intel_output->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
if (output_reg == DP_A)
intel_output->type = INTEL_OUTPUT_EDP;
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_output))
intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
dp_priv->intel_output = intel_output;
dp_priv->output_reg = output_reg;
dp_priv->has_audio = false;
dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
intel_output->dev_priv = dp_priv;
drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
drm_mode_connector_attach_encoder(&intel_output->base,
&intel_output->enc);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
switch (output_reg) {
case DP_A:
name = "DPDDC-A";
break;
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
intel_dp_i2c_init(intel_output, name);
intel_output->ddc_bus = &dp_priv->adapter;
intel_output->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
}
}
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
if (IS_G4X(dev) && !IS_GM45(dev)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
| {
"pile_set_name": "Github"
} |
{
"user": {
"_version" : "42",
"accountRef" : [ {
"_oid" : "c0c010c0-d34d-b33f-f00d-aaaaaaaa1111",
"_type" : {
"namespace" : "http://midpoint.evolveum.com/xml/ns/test/foo-1.xsd",
"localPart" : "AccountShadowType"
}
}, {
"description" : "This is a reference with a filter",
"_oid" : "c0c010c0-d34d-b33f-f00d-aaaaaaaa1112",
"_type" : {
"namespace" : "http://midpoint.evolveum.com/xml/ns/test/foo-1.xsd",
"localPart" : "AccountShadowType"
}
}, {
"_type" : {
"namespace" : "http://midpoint.evolveum.com/xml/ns/test/foo-1.xsd",
"localPart" : "AccountType"
},
"_oid" : "c0c010c0-d34d-b33f-f00d-aaaaaaaa1113"
} ],
"description" : "This must be the best pirate the world has ever seen",
"name" : "jack",
"assignment" : [ {
"_id" : 1111,
"description" : "Assignment 1"
}, {
"_id" : 1112,
"description" : "Assignment 2",
"accountConstruction" : {
"when" : "2012-02-24T10:48:52.000Z",
"howto" : "Just do it"
}
} ],
"polyName" : "Džek Sperou",
"activation" : {
"enabled" : true,
"validFrom" : "1975-05-30T21:30:00.000Z"
},
"extension" : {
"num" : [ 42 ],
"singleStringType" : "foobar",
"bar" : [ "BAR" ],
"multi" : [ "raz", "dva", "tri" ],
"indexedString" : [ "alpha", "bravo" ]
},
"familyName" : "Sparrow",
"_oid" : "c0c010c0-d34d-b33f-f00d-111111111111",
"givenName" : "Jack",
"additionalNames" : [ "Captain", "Jackie" ],
"fullName" : "cpt. Jack Sparrow"
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.ui;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.util.AttributeSet;
import android.widget.Button;
/**
* Simple class that draws a white border around a button, purely for a UI change.
*/
public class ColorPickerMoreButton extends Button {
// A cache for the paint used to draw the border, so it doesn't have to be created in
// every onDraw() call.
private Paint mBorderPaint;
public ColorPickerMoreButton(Context context, AttributeSet attrs) {
super(context, attrs);
init();
}
public ColorPickerMoreButton(Context context, AttributeSet attrs, int defStyle) {
super(context, attrs, defStyle);
init();
}
/**
* Sets up the paint to use for drawing the border.
*/
public void init() {
mBorderPaint = new Paint();
mBorderPaint.setStyle(Paint.Style.STROKE);
mBorderPaint.setColor(Color.WHITE);
// Set the width to one pixel.
mBorderPaint.setStrokeWidth(1.0f);
// And make sure the border doesn't bleed into the outside.
mBorderPaint.setAntiAlias(false);
}
/**
* Draws the border around the edge of the button.
*
* @param canvas The canvas to draw on.
*/
@Override
protected void onDraw(Canvas canvas) {
canvas.drawRect(0.5f, 0.5f, getWidth() - 1.5f, getHeight() - 1.5f, mBorderPaint);
super.onDraw(canvas);
}
}
| {
"pile_set_name": "Github"
} |
---
title: seoHrefOptimize Plugin
published: true
lang: en
position: 100
---
# `seoHrefOptimize` Plugin
## Overview
Adds a trailing slash (`/`) to all routes registered in the routeService, which can improve SEO scoring.
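
Below is a minimal sketch of what such a plugin could look like. It is illustrative only — the actual implementation is not reproduced here, and the route shape and export style are assumptions:

```js
// Hypothetical sketch of a trailing-slash optimizer.
// `routes` is assumed to be an array of { path: string, ... } records.
export default function seoHrefOptimize(routes) {
  return routes.map((route) => {
    // Leave paths that already end in "/" or point at a file untouched.
    if (route.path.endsWith('/') || /\.[a-z0-9]+$/i.test(route.path)) {
      return route
    }
    return { ...route, path: `${route.path}/` }
  })
}
```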
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd
package terminal
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
const ioctlWriteTermios = syscall.TIOCSETA
| {
"pile_set_name": "Github"
} |
<vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="24dp"
android:height="24dp"
android:viewportWidth="24.0"
android:viewportHeight="24.0">
<path
android:fillColor="#FF000000"
android:pathData="M15,7v4h1v2h-3V5h2l-3,-4 -3,4h2v8H8v-2.07c0.7,-0.37 1.2,-1.08 1.2,-1.93 0,-1.21 -0.99,-2.2 -2.2,-2.2 -1.21,0 -2.2,0.99 -2.2,2.2 0,0.85 0.5,1.56 1.2,1.93V13c0,1.11 0.89,2 2,2h3v3.05c-0.71,0.37 -1.2,1.1 -1.2,1.95 0,1.22 0.99,2.2 2.2,2.2 1.21,0 2.2,-0.98 2.2,-2.2 0,-0.85 -0.49,-1.58 -1.2,-1.95V15h3c1.11,0 2,-0.89 2,-2v-2h1V7h-4z"/>
</vector>
| {
"pile_set_name": "Github"
} |
/* Soot - a J*va Optimization Framework
* Copyright (C) 1997-2014 Raja Vallee-Rai and others
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
package soot.asm;
import java.io.IOException;
import java.io.InputStream;
import org.objectweb.asm.ClassReader;
import soot.ClassSource;
import soot.SootClass;
import soot.javaToJimple.IInitialResolver.Dependencies;
/**
* ASM class source implementation.
*
* @author Aaloan Miftah
*/
class AsmClassSource extends ClassSource {
private InputStream data;
/**
* Constructs a new ASM class source.
* @param cls fully qualified name of the class.
* @param data stream containing data for class.
*/
AsmClassSource(String cls, InputStream data) {
super(cls);
this.data = data;
}
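	/**
	 * Creates a ClassReader over the underlying stream. The stream is closed
	 * and released afterwards, so the class data can only be read once.
	 */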
private ClassReader read() throws IOException {
InputStream d = data;
if (d == null)
throw new IllegalStateException();
data = null;
try {
return new ClassReader(d);
} finally {
d.close();
}
}
@Override
public Dependencies resolve(SootClass sc) {
ClassReader clsr;
try {
clsr = read();
} catch (IOException e) {
throw new RuntimeException(e);
}
SootClassBuilder scb = new SootClassBuilder(sc);
clsr.accept(scb, ClassReader.SKIP_FRAMES);
Dependencies deps = new Dependencies();
deps.typesToSignature.addAll(scb.deps);
return deps;
}
} | {
"pile_set_name": "Github"
} |
#include <vector>
#include "unary.h"
#include <algorithm>
#include "cppunit/cppunit_proxy.h"
#if !defined (STLPORT) || defined(_STLP_USE_NAMESPACES)
using namespace std;
#endif
//
// TestCase class
//
class UnaryTest : public CPPUNIT_NS::TestCase
{
CPPUNIT_TEST_SUITE(UnaryTest);
#if !defined (STLPORT) || defined (_STLP_NO_EXTENSIONS)
CPPUNIT_IGNORE;
#endif
CPPUNIT_TEST(ucompos1);
CPPUNIT_TEST(ucompos2);
CPPUNIT_STOP_IGNORE;
CPPUNIT_TEST(unegate1);
CPPUNIT_TEST(unegate2);
#if defined (STLPORT) && !defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
CPPUNIT_IGNORE;
#endif
CPPUNIT_TEST(unegate3);
CPPUNIT_TEST_SUITE_END();
protected:
void ucompos1();
void ucompos2();
void unegate1();
void unegate2();
void unegate3();
};
CPPUNIT_TEST_SUITE_REGISTRATION(UnaryTest);
//
// tests implementation
//
void UnaryTest::unegate1()
{
int array [3] = { 1, 2, 3 };
//unary_negate<odd>::argument_type arg_val = 0;
int* p = find_if((int*)array, (int*)array + 3, unary_negate<odd>(odd()));
CPPUNIT_ASSERT((p != array + 3));
CPPUNIT_ASSERT(*p==2);
}
void UnaryTest::unegate2()
{
int array [3] = { 1, 2, 3 };
int* p = find_if((int*)array, (int*)array + 3, not1(odd()));
CPPUNIT_ASSERT(p != array + 3);
CPPUNIT_ASSERT(*p==2);
}
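// Free function predicate; exercised through not1(ptr_fun(test_func)) below.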
bool test_func(int param) {
return param < 3;
}
void UnaryTest::unegate3()
{
#if !defined (STLPORT) || defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
int array [3] = { 1, 2, 3 };
int* p = find_if((int*)array, (int*)array + 3, not1(ptr_fun(test_func)));
CPPUNIT_ASSERT(p != array + 3);
CPPUNIT_ASSERT(*p==3);
#endif
}
void UnaryTest::ucompos1()
{
#if defined (STLPORT) && !defined (_STLP_NO_EXTENSIONS)
int input [3] = { -1, -4, -16 };
double output[3];
transform((int*)input, (int*)input + 3, output, unary_compose<square_root, negate<int> >(square_root(), negate<int>()));
CPPUNIT_ASSERT(output[0]==1);
CPPUNIT_ASSERT(output[1]==2);
CPPUNIT_ASSERT(output[2]==4);
#endif
}
void UnaryTest::ucompos2()
{
#if defined (STLPORT) && !defined (_STLP_NO_EXTENSIONS)
int input [3] = { -1, -4, -16 };
double output [3];
transform((int*)input, (int*)input + 3, output, compose1(square_root(), negate<int>()));
CPPUNIT_ASSERT(output[0]==1);
CPPUNIT_ASSERT(output[1]==2);
CPPUNIT_ASSERT(output[2]==4);
#endif
}
| {
"pile_set_name": "Github"
} |
<div class="reorder_pages">
<div class="editor_toolbar_controls reorder_pages_holder">
<div class="minibutton not_enabled reorder_pages_confirm_input default"><%= _.t('save_page_order') %></div>
<div class="minibutton close_editor"><%= _.t('cancel') %></div>
</div>
<div class="editor_hint"><%= _.t('reorder_hint') %></div>
</div> | {
"pile_set_name": "Github"
} |
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include <cassert>
#include <jni.h>
#include "include/org_rocksdb_WriteBufferManager.h"
#include "rocksdb/cache.h"
#include "rocksdb/write_buffer_manager.h"
/*
* Class: org_rocksdb_WriteBufferManager
* Method: newWriteBufferManager
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_WriteBufferManager_newWriteBufferManager(
JNIEnv* /*env*/, jclass /*jclazz*/, jlong jbuffer_size, jlong jcache_handle) {
auto* cache_ptr =
reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(
jcache_handle);
auto* write_buffer_manager =
new std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>(
std::make_shared<ROCKSDB_NAMESPACE::WriteBufferManager>(jbuffer_size,
*cache_ptr));
return reinterpret_cast<jlong>(write_buffer_manager);
}
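/*
 * Note on the handle pattern above: the jlong handed back to Java is the
 * address of a heap-allocated std::shared_ptr<WriteBufferManager>, not of the
 * manager itself. Java thus owns one reference, and disposeInternal() below
 * releases exactly that reference by deleting the shared_ptr.
 */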
/*
* Class: org_rocksdb_WriteBufferManager
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_WriteBufferManager_disposeInternal(
JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
auto* write_buffer_manager =
reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>*>(
jhandle);
assert(write_buffer_manager != nullptr);
delete write_buffer_manager;
}
| {
"pile_set_name": "Github"
} |
-----BEGIN ENCRYPTED PRIVATE KEY-----
MIIFLTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQIG+MeTStZSz8CAggA
MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBDl6AHXEtGaJzi08s/85HIpBIIE
0M4fBopP+Mwj8kJzXge0/FGm8gbug9wl4OFsfvKQRpZyt8g9WJsw3jjI47kiIQt5
8fnrIiB08Eu1GkWxS/gBkVJkSfG7Ac90Tx4Xr+KheysG8aODUNPBSdt1NLc1wWen
qiInky0IXG2+SUU5s5rmQ45RFBeYTZD3mipyt1vVF5jIfaD1xooA1CoIIwxZ1uHZ
zbrV+pYGYxHjJUJtDMOQQbOwenPREznVdpIFNM/PfRcR9BzFONkA2y75Onn8U4sg
yqvCjzf1gMGFGh6s/52iddjMsi7CIkBV7SyFw4uBowrgaYd1hXW0pN84EC5psdAV
k1AzR9sLpcv7KJOy15qgm9Tz24J+TwQ4DMo8BFGdxHxyTbcQcxbqu38NlSp/LENs
SVh/hOrUxwqAnVoDHCb6+wnTKa/gdmMt0tFvn+nUsjgDMwXrluyc2UxCeKfzfmCs
jyGF5SLh1nKxuZv+7lAPP1VlCkUEdPw3RUaybOjkkd3a30n8Dcl1425LlIdaB1FK
n0aK+wzwOzbmMX1v481W4CYHWScvit8kaucZ7sEomvACLOTF3J4pEYy7AZoDe4AQ
zEA7aPRp67uoAFWAABlbK9O5w+bL2TBZpIGSlrMKRP2cLWANaZkWMPgXtVGKFFOO
+qB+IYDOKpxf1T186QDMQeh8o/d5xLqlynw20DE4jm+qCeGGmNZbIe53/Flj7Asw
dlQP2+wXQdFVlyZ7vLIqyVEJ+tZS/8Jjo1mNZLdnesNsNzH0KKb4lYfx/fXjorXG
YHOzfRGrwkn9RsH6A074X0Z6KJzsDR0wrPXBB0K4MsR4LQzJdkgBdJ2hXFuuitOB
Gy0mdPE6Mo4vpS499uyqDs2Fh3sodsVKz15+QRnrk6RNQ5ydqb8nrydQsG0ZEXQU
2VnfN2R5rHv4KlUbyRxZWxeP1IRlkMFjpXpXzjMIsmye2M7DCGkjPVyd5i5ulS33
kHPQd+L7EWRpUDYLrzRDnslDSNwdhNuYd2kiDkGWf3hXrRjeMBUbCLfQBPZi/o2O
AIb1+60ItjA9koDjWd9ppvz4N9d8Mnb5Watz4El/Uuj24rmNxEautNO4nQfyt5FS
2dIo4+MG/QqnOq1Q8NACeSwyHqvjS4F032XlwpPsaWhM0TSVxt1E3xVUVJ4Vebp2
jC52o5/Awifjlg7wHWRqqi3gVwdTshIsOUQspD9c2Sa8x0wbQM6lKDmm6b4s1mlX
ETkTSXlxJcfYzFQVdFMSd6qvWVj2oN/gWfSB7w7n6SqVS9G7858bCtNVBQPmN4RS
GAe5dZSN65Q6LtRcDDz3ttNjxasjsbeB986t/9h5fb4D+iRM7c7qIpd2U0frT3sY
p31gC0F1spwdTiJ3yW46q3m7MUXThMjqWAvP0OEIO4z71oezIhWfk9eiGnkuJUIh
r+B7rpbLkYpi8gEtiq4FTZoqgX1rbxcgAtMyQwdsUfZHp9zR42zs+XtXJdaHrb12
hb+D8pmc/FY33ApsioJOv/QjQC8eRUfu+6AQu4AUhgIH0YfO9LD8eMRsCaYrX5ac
P1IV0xBBrrnmnTacFrjtEw4k4YdAKPO8v/x41PP9zeHcuAfQ4Zi7RS3ea3qey6C3
6GoB3zuOG68yUYu5eSbDnctX1JnXNFCqxFB26fHiv33Q
-----END ENCRYPTED PRIVATE KEY-----
| {
"pile_set_name": "Github"
} |
/*
* ProbableIntersectionCursorTest.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2015-2019 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb.record.provider.foundationdb.cursors;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.record.RecordCoreException;
import com.apple.foundationdb.record.RecordCursor;
import com.apple.foundationdb.record.RecordCursorProto;
import com.apple.foundationdb.record.RecordCursorResult;
import com.apple.foundationdb.record.RecordCursorTest;
import com.apple.foundationdb.record.cursors.FirableCursor;
import com.apple.foundationdb.record.cursors.RowLimitedCursor;
import com.apple.foundationdb.record.logging.KeyValueLogMessage;
import com.apple.foundationdb.record.logging.TestLogMessageKeys;
import com.apple.foundationdb.record.provider.foundationdb.FDBStoreTimer;
import com.google.common.collect.Iterators;
import com.google.common.hash.BloomFilter;
import com.google.protobuf.InvalidProtocolBufferException;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Tests of the {@link ProbableIntersectionCursor} class. This class is somewhat difficult to test because of its
* relatively weak contract. In particular, because it is allowed to return values even if they aren't actually
* in all child cursors, the result set is a little hard to predict.
*/
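// Background for the assertions in this suite: each child cursor state records
// the comparison keys it has seen in a Bloom filter (see getBloomFilter() in
// resumeFromContinuation()), and a value is emitted once it "probably" appears
// in every child. A Bloom filter never misses a real match but may report
// false positives, so these tests bound the number of stray results instead of
// asserting an exact result set.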
public class ProbableIntersectionCursorTest {
private static final Logger LOGGER = LoggerFactory.getLogger(ProbableIntersectionCursorTest.class);
@Nonnull
private <T, C extends RecordCursor<T>> List<Function<byte[], RecordCursor<T>>> cursorsToFunctions(@Nonnull List<C> cursors) {
return cursors.stream()
.map(cursor -> (Function<byte[], RecordCursor<T>>)(bignore -> cursor))
.collect(Collectors.toList());
}
@Nonnull
private <T, L extends List<T>> List<Function<byte[], RecordCursor<T>>> listsToFunctions(@Nonnull List<L> lists) {
return lists.stream()
.map(list -> (Function<byte[], RecordCursor<T>>)(continuation -> RecordCursor.fromList(list, continuation)))
.collect(Collectors.toList());
}
/**
* Show that a basic intersection succeeds.
*/
@Test
public void basicIntersection() {
final FDBStoreTimer timer = new FDBStoreTimer();
final Iterator<Integer> iterator1 = IntStream.iterate(0, x -> x + 2).limit(150).iterator();
final Iterator<Integer> iterator2 = IntStream.iterate(0, x -> x + 3).limit(100).iterator();
final FirableCursor<Integer> cursor1 = new FirableCursor<>(RecordCursor.fromIterator(iterator1));
final FirableCursor<Integer> cursor2 = new FirableCursor<>(RecordCursor.fromIterator(iterator2));
final RecordCursor<Integer> intersectionCursor = ProbableIntersectionCursor.create(
Collections::singletonList,
Arrays.asList(bignore -> cursor1, bignore -> cursor2),
null,
timer
);
cursor1.fireAll(); // Intersection consumes first cursor
CompletableFuture<RecordCursorResult<Integer>> firstFuture = intersectionCursor.onNext();
cursor2.fire();
RecordCursorResult<Integer> firstResult = firstFuture.join();
assertEquals(0, (int)firstResult.get());
assertThat(firstResult.hasNext(), is(true));
assertEquals(RecordCursor.NoNextReason.SOURCE_EXHAUSTED, cursor1.getNext().getNoNextReason());
cursor2.fireAll(); // Intersection consumes second cursor as they come
AtomicInteger falsePositives = new AtomicInteger();
AsyncUtil.whileTrue(() -> intersectionCursor.onNext().thenApply(result -> {
if (result.hasNext()) {
int value = result.get();
assertEquals(0, value % 3); // every result *must* be divisible by 3
if (value % 2 != 0) {
falsePositives.incrementAndGet(); // most results should be divisible by 2
}
assertThat(result.getContinuation().isEnd(), is(false));
assertNotNull(result.getContinuation().toBytes());
try {
RecordCursorProto.ProbableIntersectionContinuation protoContinuation = RecordCursorProto.ProbableIntersectionContinuation.parseFrom(result.getContinuation().toBytes());
assertEquals(2, protoContinuation.getChildStateCount());
assertThat(protoContinuation.getChildState(0).getExhausted(), is(true));
assertThat(protoContinuation.getChildState(0).hasContinuation(), is(false));
assertThat(protoContinuation.getChildState(1).getExhausted(), is(false));
assertThat(protoContinuation.getChildState(1).hasContinuation(), is(true));
} catch (InvalidProtocolBufferException e) {
throw new RecordCoreException("error parsing proto continuation", e);
}
} else {
assertThat(result.getNoNextReason().isSourceExhausted(), is(true));
assertThat(result.getContinuation().isEnd(), is(true));
assertNull(result.getContinuation().toBytes());
}
return result.hasNext();
}), intersectionCursor.getExecutor()).join();
assertThat(falsePositives.get(), lessThan(5));
assertEquals(50 + falsePositives.get(), timer.getCount(FDBStoreTimer.Counts.QUERY_INTERSECTION_PLAN_MATCHES));
assertEquals(200 - falsePositives.get(), timer.getCount(FDBStoreTimer.Counts.QUERY_INTERSECTION_PLAN_NONMATCHES));
}
/**
* Test that the cursor can be resumed by deserializing its state from the continuation object.
*/
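// Resumption contract exercised here: the continuation bytes captured after
// any result are enough to rebuild the cursor mid-stream, including each
// child's Bloom filter state, which is asserted equal across restarts.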
@Test
public void resumeFromContinuation() {
final FDBStoreTimer timer = new FDBStoreTimer();
final List<Integer> list1 = Arrays.asList(10, 2, 5, 6, 8, 19, 0);
final List<Integer> list2 = Arrays.asList( 9, 1, 3, 5, 2, 4, 8);
final List<Function<byte[], RecordCursor<Integer>>> cursorFuncs = listsToFunctions(Arrays.asList(list1, list2));
final Function<byte[], ProbableIntersectionCursor<Integer>> intersectionCursorFunction = continuation ->
ProbableIntersectionCursor.create(Collections::singletonList, cursorFuncs, continuation, timer);
final Iterator<Integer> resultIterator = Iterators.forArray(5, 2, 8);
byte[] continuation = null;
boolean done = false;
List<BloomFilter<List<Object>>> lastBloomFilters = null;
while (!done) {
ProbableIntersectionCursor<Integer> intersectionCursor = intersectionCursorFunction.apply(continuation);
List<BloomFilter<List<Object>>> bloomFilters = intersectionCursor.getCursorStates().stream()
.map(ProbableIntersectionCursorState::getBloomFilter)
.collect(Collectors.toList());
if (lastBloomFilters != null) {
assertEquals(lastBloomFilters, bloomFilters);
}
lastBloomFilters = bloomFilters;
RecordCursorResult<Integer> result = intersectionCursor.getNext();
if (resultIterator.hasNext()) {
assertThat(result.hasNext(), is(true));
assertEquals(resultIterator.next(), result.get());
assertThat(result.getContinuation().isEnd(), is(false));
assertNotNull(result.getContinuation().toBytes());
} else {
assertThat(result.hasNext(), is(false));
assertEquals(RecordCursor.NoNextReason.SOURCE_EXHAUSTED, result.getNoNextReason());
assertThat(result.getContinuation().isEnd(), is(true));
assertNull(result.getContinuation().toBytes());
done = true;
}
continuation = result.getContinuation().toBytes();
}
assertEquals(3, timer.getCount(FDBStoreTimer.Counts.QUERY_INTERSECTION_PLAN_MATCHES));
assertEquals(list1.size() + list2.size() - 3, timer.getCount(FDBStoreTimer.Counts.QUERY_INTERSECTION_PLAN_NONMATCHES));
}
@Test
public void longLists() {
final Random r = new Random(0xba5eba11);
for (int itr = 0; itr < 50; itr++) {
long seed = r.nextLong();
LOGGER.info(KeyValueLogMessage.of("running intersection with large lists",
TestLogMessageKeys.SEED, seed,
TestLogMessageKeys.ITERATION, itr));
r.setSeed(seed);
final List<List<Integer>> lists = Stream.generate(
() -> IntStream.generate(() -> r.nextInt(500)).limit(1000).boxed().collect(Collectors.toList())
).limit(5).collect(Collectors.toList());
final List<Function<byte[], RecordCursor<Integer>>> cursorFuncs = lists.stream()
.map(list -> (Function<byte[], RecordCursor<Integer>>)((byte[] continuation) -> new RowLimitedCursor<>(RecordCursor.fromList(list, continuation), r.nextInt(50) + 10)))
.collect(Collectors.toList());
final List<Set<Integer>> sets = lists.stream().map(HashSet::new).collect(Collectors.toList());
final Set<Integer> actualIntersection = new HashSet<>(sets.get(0));
sets.forEach(actualIntersection::retainAll);
Set<Integer> found = new HashSet<>();
AtomicInteger falsePositives = new AtomicInteger();
boolean done = false;
byte[] continuation = null;
while (!done) {
RecordCursor<Integer> intersectionCursor = ProbableIntersectionCursor.create(Collections::singletonList, cursorFuncs, continuation, null);
AsyncUtil.whileTrue(() -> intersectionCursor.onNext().thenApply(result -> {
if (result.hasNext()) {
// Each value should be in at least one set and hopefully all
int value = result.get();
assertThat(sets.stream().anyMatch(set -> set.contains(value)), is(true));
if (!actualIntersection.contains(value)) {
falsePositives.incrementAndGet();
}
found.add(value);
}
return result.hasNext();
}), intersectionCursor.getExecutor()).join();
RecordCursorResult<Integer> result = intersectionCursor.getNext();
assertThat(result.hasNext(), is(false));
if (result.getNoNextReason().isSourceExhausted()) {
done = true;
} else {
assertEquals(RecordCursor.NoNextReason.RETURN_LIMIT_REACHED, result.getNoNextReason());
}
continuation = result.getContinuation().toBytes();
}
assertThat(found.containsAll(actualIntersection), is(true));
LOGGER.info(KeyValueLogMessage.of("intersection false positives",
"false_positives", falsePositives.get(),
"actual_intersection_size", actualIntersection.size(),
"iteration", itr));
assertThat(falsePositives.get(), lessThan(20));
}
}
private void verifyResults(@Nonnull RecordCursor<Integer> cursor, @Nonnull RecordCursor.NoNextReason expectedReason, int...expectedResults) {
for (int expectedResult : expectedResults) {
RecordCursorResult<Integer> result = cursor.getNext();
assertThat(result.hasNext(), is(true));
assertEquals(expectedResult, (int)result.get());
assertThat(result.getContinuation().isEnd(), is(false));
assertNotNull(result.getContinuation().toBytes());
}
RecordCursorResult<Integer> result = cursor.getNext();
assertThat(result.hasNext(), is(false));
assertEquals(expectedReason, result.getNoNextReason());
assertThat(result.getContinuation().isEnd(), is(expectedReason.isSourceExhausted()));
if (expectedReason.isSourceExhausted()) {
assertNull(result.getContinuation().toBytes());
} else {
assertNotNull(result.getContinuation().toBytes());
}
}
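// NoNextReason precedence verified below: an out-of-band stop (e.g. a time
// limit) on any child dominates the combined reason; if the only stops are
// in-band row limits the cursor reports RETURN_LIMIT_REACHED; and
// SOURCE_EXHAUSTED is returned only once every child is exhausted.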
@Test
public void noNextReasons() {
// Both one out of band limit reached
RecordCursor<Integer> cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
new RecordCursorTest.FakeOutOfBandCursor<>(RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)), 3),
new RecordCursorTest.FakeOutOfBandCursor<>(RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1)), 2)
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.TIME_LIMIT_REACHED, 3);
// One in-band limit reached, one out of band
cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
new RecordCursorTest.FakeOutOfBandCursor<>(RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)), 3),
RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1)).limitRowsTo(2)
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.TIME_LIMIT_REACHED, 3);
// Both in-band limit reached
cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)).limitRowsTo(3),
RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1)).limitRowsTo(2)
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.RETURN_LIMIT_REACHED, 3);
// One out-of-band limit reached, one exhausted
cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
new RecordCursorTest.FakeOutOfBandCursor<>(RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)), 3),
RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1))
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.TIME_LIMIT_REACHED, 3, 4, 1);
// One in band limit reached, one exhausted
cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)).limitRowsTo(3),
RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1))
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.RETURN_LIMIT_REACHED, 3, 4, 1);
// Both exhausted
cursor = ProbableIntersectionCursor.create(Collections::singletonList,
cursorsToFunctions(Arrays.asList(
RecordCursor.fromList(Arrays.asList(1, 4, 3, 7, 9)),
RecordCursor.fromList(Arrays.asList(3, 7, 8, 4, 1))
)),
null,
null);
verifyResults(cursor, RecordCursor.NoNextReason.SOURCE_EXHAUSTED, 3, 7, 4, 1);
}
@Test
public void errorInChild() {
CompletableFuture<Integer> future = new CompletableFuture<>();
RecordCursor<Integer> cursor = ProbableIntersectionCursor.create(Collections::singletonList, Arrays.asList(
continuation -> RecordCursor.fromList(Arrays.asList(1, 2), continuation),
continuation -> RecordCursor.fromFuture(future)
), null, null);
CompletableFuture<RecordCursorResult<Integer>> cursorResultFuture = cursor.onNext();
final RecordCoreException ex = new RecordCoreException("something bad happened!");
future.completeExceptionally(ex);
ExecutionException executionException = assertThrows(ExecutionException.class, cursorResultFuture::get);
assertNotNull(executionException.getCause());
assertSame(ex, executionException.getCause());
}
@Test
public void errorAndLimitInChild() {
CompletableFuture<Integer> future = new CompletableFuture<>();
RecordCursor<Integer> cursor = ProbableIntersectionCursor.create(Collections::singletonList, Arrays.asList(
continuation -> RecordCursor.fromList(Arrays.asList(1, 2), continuation).limitRowsTo(1),
continuation -> RecordCursor.fromFuture(future)
), null, null);
CompletableFuture<RecordCursorResult<Integer>> cursorResultFuture = cursor.onNext();
final RecordCoreException ex = new RecordCoreException("something bad happened!");
future.completeExceptionally(ex);
ExecutionException executionException = assertThrows(ExecutionException.class, cursorResultFuture::get);
assertNotNull(executionException.getCause());
assertSame(ex, executionException.getCause());
}
@Test
public void loopIterationWithLimit() throws ExecutionException, InterruptedException {
FDBStoreTimer timer = new FDBStoreTimer();
FirableCursor<Integer> secondCursor = new FirableCursor<>(RecordCursor.fromList(Arrays.asList(2, 1)));
RecordCursor<Integer> cursor = ProbableIntersectionCursor.create(Collections::singletonList, Arrays.asList(
continuation -> RecordCursor.fromList(Arrays.asList(1, 2), continuation).limitRowsTo(1),
continuation -> secondCursor
), null, timer);
CompletableFuture<RecordCursorResult<Integer>> cursorResultFuture = cursor.onNext();
secondCursor.fire();
assertFalse(cursorResultFuture.isDone());
secondCursor.fire();
RecordCursorResult<Integer> cursorResult = cursorResultFuture.get();
assertEquals(1, (int)cursorResult.get());
secondCursor.fire();
cursorResult = cursor.getNext();
assertEquals(RecordCursor.NoNextReason.RETURN_LIMIT_REACHED, cursorResult.getNoNextReason());
assertThat(timer.getCount(FDBStoreTimer.Events.QUERY_INTERSECTION), lessThanOrEqualTo(5));
}
}
| {
"pile_set_name": "Github"
} |
from sage.structure.parent cimport Parent
cdef class Group(Parent):
pass
cdef class AbelianGroup(Group):
pass
cdef class FiniteGroup(Group):
pass
cdef class AlgebraicGroup(Group):
pass
| {
"pile_set_name": "Github"
} |
package models_test
import (
"bytes"
"fmt"
"io"
"math"
"math/rand"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/influxdata/influxdb/models"
)
var (
tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"})
fields = models.Fields{
"int64": int64(math.MaxInt64),
"uint32": uint32(math.MaxUint32),
"string": "String field that has a decent length, probably some log message or something",
"boolean": false,
"float64-tiny": float64(math.SmallestNonzeroFloat64),
"float64-large": float64(math.MaxFloat64),
}
maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64)
minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64)
sink interface{}
)
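// HashKey serializes a tag set as a leading-comma, key-sorted "k=v" list;
// TestMarshal locks in that canonical ordering.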
func TestMarshal(t *testing.T) {
got := tags.HashKey()
if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp {
t.Log("got: ", string(got))
t.Log("exp: ", exp)
t.Error("invalid match")
}
}
func BenchmarkMarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
tags.HashKey()
}
}
func TestPoint_StringSize(t *testing.T) {
testPoint_cube(t, func(p models.Point) {
l := p.StringSize()
s := p.String()
if l != len(s) {
t.Errorf("Incorrect length for %q. got %v, exp %v", s, l, len(s))
}
})
}
func TestPoint_AppendString(t *testing.T) {
testPoint_cube(t, func(p models.Point) {
got := p.AppendString(nil)
exp := []byte(p.String())
if !reflect.DeepEqual(exp, got) {
t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp)
}
})
}
func testPoint_cube(t *testing.T, f func(p models.Point)) {
// heard of a table-driven test? let's make a cube-driven test...
tagList := []models.Tags{nil, {models.Tag{Key: []byte("foo"), Value: []byte("bar")}}, tags}
fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields}
timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()}
for _, tagSet := range tagList {
for _, fieldSet := range fieldList {
for _, pointTime := range timeList {
p, err := models.NewPoint("test", tagSet, fieldSet, pointTime)
if err != nil {
t.Errorf("unexpected error creating point: %v", err)
continue
}
f(p)
}
}
}
}
func TestTag_Clone(t *testing.T) {
tag := models.Tag{Key: []byte("key"), Value: []byte("value")}
c := tag.Clone()
if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) {
t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key)
}
if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) {
t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value)
}
}
func TestTags_Clone(t *testing.T) {
tags := models.NewTags(map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"})
clone := tags.Clone()
for i := range tags {
tag := tags[i]
c := clone[i]
if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) {
t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key)
}
if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) {
t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value)
}
}
}
var p models.Point
func BenchmarkNewPoint(b *testing.B) {
ts := time.Now()
for i := 0; i < b.N; i++ {
p, _ = models.NewPoint("measurement", tags, fields, ts)
}
}
func BenchmarkParsePointNoTags5000(b *testing.B) {
var batch [5000]string
for i := 0; i < len(batch); i++ {
batch[i] = `cpu value=1i 1000000000`
}
lines := strings.Join(batch[:], "\n")
b.ResetTimer()
for i := 0; i < b.N; i++ {
models.ParsePoints([]byte(lines))
b.SetBytes(int64(len(lines)))
}
}
func BenchmarkParsePointNoTags(b *testing.B) {
line := `cpu value=1i 1000000000`
for i := 0; i < b.N; i++ {
models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointWithPrecisionN(b *testing.B) {
line := `cpu value=1i 1000000000`
defaultTime := time.Now().UTC()
for i := 0; i < b.N; i++ {
models.ParsePointsWithPrecision([]byte(line), defaultTime, "n")
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointWithPrecisionU(b *testing.B) {
line := `cpu value=1i 1000000000`
defaultTime := time.Now().UTC()
for i := 0; i < b.N; i++ {
models.ParsePointsWithPrecision([]byte(line), defaultTime, "u")
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointsTagsSorted2(b *testing.B) {
line := `cpu,host=serverA,region=us-west value=1i 1000000000`
for i := 0; i < b.N; i++ {
models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointsTagsSorted5(b *testing.B) {
line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 1000000000`
for i := 0; i < b.N; i++ {
models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointsTagsSorted10(b *testing.B) {
line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000`
for i := 0; i < b.N; i++ {
models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
}
}
func BenchmarkParsePointsTagsUnSorted2(b *testing.B) {
line := `cpu,region=us-west,host=serverA value=1i 1000000000`
for i := 0; i < b.N; i++ {
pt, _ := models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
pt[0].Key()
}
}
func BenchmarkParsePointsTagsUnSorted5(b *testing.B) {
line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000`
for i := 0; i < b.N; i++ {
pt, _ := models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
pt[0].Key()
}
}
func BenchmarkParsePointsTagsUnSorted10(b *testing.B) {
line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000`
for i := 0; i < b.N; i++ {
pt, _ := models.ParsePoints([]byte(line))
b.SetBytes(int64(len(line)))
pt[0].Key()
}
}
func BenchmarkParseKey(b *testing.B) {
line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5`
for i := 0; i < b.N; i++ {
models.ParseKey([]byte(line))
}
}
// TestPoint wraps a models.Point but also makes available the raw
// arguments to the Point.
//
// This is useful for ensuring that comparisons between results of
// operations on Points match the expected input data to the Point,
// since models.Point does not expose the raw input data (e.g., tags)
// via its API.
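//
// For example, the test() helper below compares pts[0].Tags() against
// TestPoint.RawTags directly rather than re-parsing the input line.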
type TestPoint struct {
RawFields models.Fields
RawTags models.Tags
RawTime time.Time
models.Point
}
// NewTestPoint returns a new TestPoint.
//
// NewTestPoint panics if it is not a valid models.Point.
func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint {
return TestPoint{
RawTags: tags,
RawFields: fields,
RawTime: time,
Point: models.MustNewPoint(name, tags, fields, time),
}
}
func test(t *testing.T, line string, point TestPoint) {
pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n")
if err != nil {
t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err)
}
if exp := 1; len(pts) != exp {
t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp)
}
if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) {
t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp))
}
if exp := len(point.Tags()); len(pts[0].Tags()) != exp {
t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp)
}
for _, tag := range pts[0].Tags() {
if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) {
t.Errorf(`ParsePoints("%s") tags mismatch. got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key))
}
}
for name, value := range point.RawFields {
fields, err := pts[0].Fields()
if err != nil {
t.Fatal(err)
}
val := fields[name]
expfval, ok := val.(float64)
if ok && math.IsNaN(expfval) {
gotfval, ok := value.(float64)
if ok && !math.IsNaN(gotfval) {
t.Errorf(`ParsePoints("%s") field '%s' mismatch. exp NaN`, line, name)
}
}
if !reflect.DeepEqual(val, value) {
t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, val, value)
}
}
if !pts[0].Time().Equal(point.Time()) {
t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time())
}
if !strings.HasPrefix(pts[0].String(), line) {
t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line)
}
}
func TestParsePointNoValue(t *testing.T) {
pts, err := models.ParsePointsString("")
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err)
}
if exp := 0; len(pts) != exp {
t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp)
}
}
func TestParsePointWhitespaceValue(t *testing.T) {
pts, err := models.ParsePointsString(" ")
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err)
}
if exp := 0; len(pts) != exp {
t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp)
}
}
func TestParsePointNoFields(t *testing.T) {
expectedSuffix := "missing fields"
examples := []string{
"cpu_load_short,host=server01,region=us-west",
"cpu",
"cpu,host==",
"=",
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
}
func TestParsePointNoTimestamp(t *testing.T) {
test(t, "cpu value=1", NewTestPoint("cpu", nil, models.Fields{"value": 1.0}, time.Unix(0, 0)))
}
func TestParsePointMissingQuote(t *testing.T) {
expectedSuffix := "unbalanced quotes"
examples := []string{
`cpu,host=serverA value="test`,
`cpu,host=serverA value="test""`,
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
}
func TestParsePointMissingTagKey(t *testing.T) {
expectedSuffix := "missing tag key"
examples := []string{
`cpu, value=1`,
`cpu,`,
`cpu,,,`,
`cpu,host=serverA,=us-east value=1i`,
`cpu,host=serverAa\,,=us-east value=1i`,
`cpu,host=serverA\,,=us-east value=1i`,
`cpu, =serverA value=1i`,
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
_, err := models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err)
}
}
func TestParsePointMissingTagValue(t *testing.T) {
expectedSuffix := "missing tag value"
examples := []string{
`cpu,host`,
`cpu,host,`,
`cpu,host=`,
`cpu,host value=1i`,
`cpu,host=serverA,region value=1i`,
`cpu,host=serverA,region= value=1i`,
`cpu,host=serverA,region=,zone=us-west value=1i`,
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
}
func TestParsePointInvalidTagFormat(t *testing.T) {
expectedSuffix := "invalid tag format"
examples := []string{
`cpu,host=f=o,`,
`cpu,host=f\==o,`,
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example)
} else if !strings.HasSuffix(err.Error(), expectedSuffix) {
t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)
}
}
}
func TestParsePointMissingFieldName(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\ =123i`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`)
}
}
func TestParsePointMissingFieldValue(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`)
}
_, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`)
}
_, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`)
}
}
func TestParsePointBadNumber(t *testing.T) {
for _, tt := range []string{
"cpu v=- ",
"cpu v=-i ",
"cpu v=-. ",
"cpu v=. ",
"cpu v=1.0i ",
"cpu v=1ii ",
"cpu v=1a ",
"cpu v=-e-e-e ",
"cpu v=42+3 ",
"cpu v= ",
} {
_, err := models.ParsePointsString(tt)
if err == nil {
t.Errorf("Point %q should be invalid", tt)
}
}
}
func TestParsePointMaxInt64(t *testing.T) {
// out of range
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`)
exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range`
if err == nil || err.Error() != exp {
t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err)
}
// max int
p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`)
if err != nil {
t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err)
}
fields, err := p[0].Fields()
if err != nil {
t.Fatal(err)
}
if exp, got := int64(9223372036854775807), fields["value"].(int64); exp != got {
t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got)
}
// leading zeros
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`)
if err != nil {
t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err)
}
}
func TestParsePointMinInt64(t *testing.T) {
// out of range
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`)
}
// min int
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err)
}
// leading zeros
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err)
}
}
func TestParsePointMaxFloat64(t *testing.T) {
// out of range
_, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64)))
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`)
}
// max float
_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64)))
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err)
}
// leading zeros
_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64)))
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err)
}
}
func TestParsePointMinFloat64(t *testing.T) {
// out of range
_, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:]))
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`)
}
// min float
_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64)))
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err)
}
// leading zeros
_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:]))
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err)
}
}
func TestParsePointNumberNonNumeric(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`)
}
}
func TestParsePointNegativeWrongPlace(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`)
}
}
func TestParsePointOnlyNegativeSign(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`)
}
}
func TestParsePointFloatMultipleDecimals(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`)
}
}
func TestParsePointInteger(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err)
}
}
func TestParsePointNegativeInteger(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err)
}
}
func TestParsePointNegativeFloat(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err)
}
}
func TestParsePointFloatNoLeadingDigit(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err)
}
}
func TestParsePointFloatScientific(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err)
}
pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err)
}
fields, err := pts[0].Fields()
if err != nil {
t.Fatal(err)
}
if fields["value"] != 1e4 {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err)
}
}
func TestParsePointFloatScientificUpper(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err)
}
pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err)
}
fields, err := pts[0].Fields()
if err != nil {
t.Fatal(err)
}
if fields["value"] != 1e4 {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err)
}
}
func TestParsePointFloatScientificDecimal(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err)
}
}
func TestParsePointFloatNegativeScientific(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`)
if err != nil {
t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err)
}
}
func TestParsePointBooleanInvalid(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`)
}
}
func TestParsePointScientificIntInvalid(t *testing.T) {
_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`)
}
_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`)
if err == nil {
t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`)
}
}
func TestParsePointWhitespace(t *testing.T) {
examples := []string{
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000 `,
`cpu value=1.0 1257894000000000000
`,
`cpu value=1.0 1257894000000000000
`,
}
expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000))
for i, example := range examples {
pts, err := models.ParsePoints([]byte(example))
if err != nil {
t.Fatalf(`[Example %d] ParsePoints("%s") error. got %v, exp nil`, i, example, err)
}
if got, exp := len(pts), 1; got != exp {
t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp)
}
if got, exp := pts[0].Name(), expPoint.Name(); got != exp {
t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp)
}
fields, err := pts[0].Fields()
if err != nil {
t.Fatal(err)
}
eFields, err := expPoint.Fields()
if err != nil {
t.Fatal(err)
}
if got, exp := len(fields), len(eFields); got != exp {
t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp)
}
if got, exp := fields["value"], eFields["value"]; got != exp {
t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp)
}
if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp {
t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp)
}
}
}
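// The cases below pin down line-protocol escaping: "\," and "\ " unescape to
// the literal character everywhere, "\=" additionally unescapes in tag keys
// and values, measurement names keep "\=" literal, and unrecognized escapes
// such as "\a" or the "\t" in `eas\t` are preserved verbatim.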
func TestParsePointUnescape(t *testing.T) {
// commas in measurement name
test(t, `foo\,bar value=1i`,
NewTestPoint(
"foo,bar", // comma in the name
models.NewTags(map[string]string{}),
models.Fields{
"value": int64(1),
},
time.Unix(0, 0)))
// comma in measurement name with tags
test(t, `cpu\,main,regions=east value=1.0`,
NewTestPoint(
"cpu,main", // comma in the name
models.NewTags(map[string]string{
"regions": "east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// spaces in measurement name
test(t, `cpu\ load,region=east value=1.0`,
NewTestPoint(
"cpu load", // space in the name
models.NewTags(map[string]string{
"region": "east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// equals in measurement name
test(t, `cpu\=load,region=east value=1.0`,
NewTestPoint(
`cpu\=load`, // backslash is literal
models.NewTags(map[string]string{
"region": "east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// equals in measurement name
test(t, `cpu=load,region=east value=1.0`,
NewTestPoint(
`cpu=load`, // literal equals is fine in measurement name
models.NewTags(map[string]string{
"region": "east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// commas in tag names
test(t, `cpu,region\,zone=east value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"region,zone": "east", // comma in the tag key
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// spaces in tag name
test(t, `cpu,region\ zone=east value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"region zone": "east", // space in the tag name
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// backslash with escaped equals in tag name
test(t, `cpu,reg\\=ion=east value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
`reg\=ion`: "east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// space is tag name
test(t, `cpu,\ =east value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
" ": "east", // tag name is single space
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// commas in tag values
test(t, `cpu,regions=east\,west value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east,west", // comma in the tag value
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// backslash literal followed by escaped space
test(t, `cpu,regions=\\ east value=1.0`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"regions": `\ east`,
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// backslash literal followed by escaped space
test(t, `cpu,regions=eas\\ t value=1.0`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"regions": `eas\ t`,
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// backslash literal followed by trailing space
test(t, `cpu,regions=east\\ value=1.0`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"regions": `east\ `,
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// spaces in tag values
test(t, `cpu,regions=east\ west value=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east west", // comma in the tag value
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// commas in field keys
test(t, `cpu,regions=east value\,ms=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east",
}),
models.Fields{
"value,ms": 1.0, // comma in the field keys
},
time.Unix(0, 0)))
// spaces in field keys
test(t, `cpu,regions=east value\ ms=1.0`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east",
}),
models.Fields{
"value ms": 1.0, // comma in the field keys
},
time.Unix(0, 0)))
// tag with no value is dropped, so it is not encoded into the parsed point
test(t, `cpu,regions=east value="1"`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east",
"foobar": "",
}),
models.Fields{
"value": "1",
},
time.Unix(0, 0)))
// commas in field values
test(t, `cpu,regions=east value="1,0"`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"regions": "east",
}),
models.Fields{
"value": "1,0", // comma in the field value
},
time.Unix(0, 0)))
// random character escaped
test(t, `cpu,regions=eas\t value=1.0`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"regions": "eas\\t",
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// backslash literal followed by escaped characters
test(t, `cpu,regions=\\,\,\=east value=1.0`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"regions": `\,,=east`,
}),
models.Fields{
"value": 1.0,
},
time.Unix(0, 0)))
// field keys using escape char.
test(t, `cpu \a=1i`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"\\a": int64(1), // Left as parsed since it's not a known escape sequence.
},
time.Unix(0, 0)))
// measurement, tag and tag value with equals
test(t, `cpu=load,equals\=foo=tag\=value value=1i`,
NewTestPoint(
"cpu=load", // Not escaped
models.NewTags(map[string]string{
"equals=foo": "tag=value", // Tag and value unescaped
}),
models.Fields{
"value": int64(1),
},
time.Unix(0, 0)))
}
func TestParsePointWithTags(t *testing.T) {
test(t,
"cpu,host=serverA,region=us-east value=1.0 1000000000",
NewTestPoint("cpu",
models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}),
models.Fields{"value": 1.0}, time.Unix(1, 0)))
}
func TestParsePointWithDuplicateTags(t *testing.T) {
for i, tt := range []struct {
line string
err string
}{
{
line: `cpu,host=serverA,host=serverB value=1i 1000000000`,
err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`,
},
{
line: `cpu,b=2,b=1,c=3 value=1i 1000000000`,
err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`,
},
{
line: `cpu,b=2,c=3,b=1 value=1i 1000000000`,
err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`,
},
} {
_, err := models.ParsePointsString(tt.line)
if err == nil || tt.err != err.Error() {
t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err)
}
}
}
func TestParsePointWithStringField(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": "foo",
"str2": "bar",
},
time.Unix(1, 0)),
)
test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`,
NewTestPoint("cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"str": `foo " bar`,
},
time.Unix(1, 0)),
)
}
func TestParsePointWithStringWithSpaces(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": "foo bar", // spaces in string value
},
time.Unix(1, 0)),
)
}
func TestParsePointWithStringWithNewline(t *testing.T) {
test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000",
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": "foo\nbar", // newline in string value
},
time.Unix(1, 0)),
)
}
func TestParsePointWithStringWithCommas(t *testing.T) {
// escaped comma
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": `foo\,bar`, // commas in string value
},
time.Unix(1, 0)),
)
// non-escaped comma
test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": "foo,bar", // commas in string value
},
time.Unix(1, 0)),
)
// string w/ trailing escape chars
test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"str": "foo\\", // trailing escape char
"str2": "bar",
},
time.Unix(1, 0)),
)
}
func TestParsePointQuotedMeasurement(t *testing.T) {
// non-escaped comma
test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`,
NewTestPoint(
`"cpu"`,
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(1, 0)),
)
}
func TestParsePointQuotedTags(t *testing.T) {
test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
`"host"`: `"serverA"`,
"region": "us-east",
}),
models.Fields{
"value": 1.0,
},
time.Unix(1, 0)),
)
}
func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126")
if err != nil {
t.Fatalf("ParsePoints failed: %v", err)
}
if exp := 2; len(pts) != exp {
t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp)
}
// Expected " in the tag value
exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}),
models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125))
if pts[0].String() != exp.String() {
t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String())
}
// Expected two points to ensure we did not overscan the line
exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}),
models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126))
if pts[1].String() != exp.String() {
t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String())
}
}
func TestParsePointEscapedStringsAndCommas(t *testing.T) {
// non-escaped comma and quotes
test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": `{Hello"{,}" World}`,
},
time.Unix(1, 0)),
)
// escaped comma and quotes
test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": `{Hello"{\,}" World}`,
},
time.Unix(1, 0)),
)
}
func TestParsePointWithStringWithEquals(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": 1.0,
"str": "foo=bar", // spaces in string value
},
time.Unix(1, 0)),
)
}
func TestParsePointWithStringWithBackslash(t *testing.T) {
test(t, `cpu value="test\\\"" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": `test\"`,
},
time.Unix(1, 0)),
)
test(t, `cpu value="test\\" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": `test\`,
},
time.Unix(1, 0)),
)
test(t, `cpu value="test\\\"" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": `test\"`,
},
time.Unix(1, 0)),
)
test(t, `cpu value="test\"" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": `test"`,
},
time.Unix(1, 0)),
)
}
func TestParsePointWithBoolField(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"t": true,
"T": true,
"true": true,
"True": true,
"TRUE": true,
"f": false,
"F": false,
"false": false,
"False": false,
"FALSE": false,
},
time.Unix(1, 0)),
)
}
func TestParsePointUnicodeString(t *testing.T) {
test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{
"host": "serverA",
"region": "us-east",
}),
models.Fields{
"value": "wè",
},
time.Unix(1, 0)),
)
}
func TestParsePointNegativeTimestamp(t *testing.T) {
test(t, `cpu value=1 -1`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": 1.0,
},
time.Unix(0, -1)),
)
}
func TestParsePointMaxTimestamp(t *testing.T) {
test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime),
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": 1.0,
},
time.Unix(0, models.MaxNanoTime)),
)
}
func TestParsePointMinTimestamp(t *testing.T) {
test(t, `cpu value=1 -9223372036854775806`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": 1.0,
},
time.Unix(0, models.MinNanoTime)),
)
}
func TestParsePointInvalidTimestamp(t *testing.T) {
examples := []string{
"cpu value=1 9223372036854775808",
"cpu value=1 -92233720368547758078",
"cpu value=1 -",
"cpu value=1 -/",
"cpu value=1 -1?",
"cpu value=1 1-",
"cpu value=1 9223372036854775807 12",
}
for i, example := range examples {
_, err := models.ParsePointsString(example)
if err == nil {
t.Fatalf("[Example %d] ParsePoints failed: %v", i, err)
}
}
}
func TestNewPointFloatWithoutDecimal(t *testing.T) {
test(t, `cpu value=1 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": 1.0,
},
time.Unix(1, 0)),
)
}
func TestNewPointNegativeFloat(t *testing.T) {
test(t, `cpu value=-0.64 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": -0.64,
},
time.Unix(1, 0)),
)
}
func TestNewPointFloatNoDecimal(t *testing.T) {
test(t, `cpu value=1. 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": 1.0,
},
time.Unix(1, 0)),
)
}
func TestNewPointFloatScientific(t *testing.T) {
test(t, `cpu value=6.632243e+06 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": float64(6632243),
},
time.Unix(1, 0)),
)
}
func TestNewPointLargeInteger(t *testing.T) {
test(t, `cpu value=6632243i 1000000000`,
NewTestPoint(
"cpu",
models.NewTags(map[string]string{}),
models.Fields{
"value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06
},
time.Unix(1, 0)),
)
}
func TestParsePointNaN(t *testing.T) {
_, err := models.ParsePointsString("cpu value=NaN 1000000000")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
_, err = models.ParsePointsString("cpu value=nAn 1000000000")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
_, err = models.ParsePointsString("cpu value=NaN")
if err == nil {
t.Fatalf("ParsePoints expected error, got nil")
}
}
func TestNewPointLargeNumberOfTags(t *testing.T) {
tags := ""
for i := 0; i < 255; i++ {
tags += fmt.Sprintf(",tag%d=value%d", i, i)
}
pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags))
if err != nil {
t.Fatalf("ParsePoints() with max tags failed: %v", err)
}
if len(pt[0].Tags()) != 255 {
t.Fatalf("expected %d tags, got %d", 255, len(pt[0].Tags()))
}
}
func TestParsePointIntsFloats(t *testing.T) {
pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`))
if err != nil {
t.Fatalf(`ParsePoints() failed. got %s`, err)
}
if exp := 1; len(pts) != exp {
t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
}
pt := pts[0]
fields, err := pt.Fields()
if err != nil {
t.Fatal(err)
}
if _, ok := fields["int"].(int64); !ok {
t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", fields["int"], int64(10))
}
if _, ok := fields["float"].(float64); !ok {
t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(11.0))
}
if _, ok := fields["float2"].(float64); !ok {
t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(12.1))
}
}
func TestParsePointKeyUnsorted(t *testing.T) {
pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i"))
if err != nil {
t.Fatalf(`ParsePoints() failed. got %s`, err)
}
if exp := 1; len(pts) != exp {
t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
}
pt := pts[0]
if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp {
t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp)
}
}
func TestParsePointToString(t *testing.T) {
line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
pts, err := models.ParsePoints([]byte(line))
if err != nil {
t.Fatalf(`ParsePoints() failed. got %s`, err)
}
if exp := 1; len(pts) != exp {
t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
}
pt := pts[0]
got := pt.String()
if line != got {
t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
}
pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}),
models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
time.Unix(1, 0))
got = pt.String()
if line != got {
t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
}
}
func TestParsePointsWithPrecision(t *testing.T) {
tests := []struct {
name string
line string
precision string
exp string
}{
{
name: "nanosecond by default",
line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
precision: "",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
},
{
name: "nanosecond",
line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
precision: "n",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
},
{
name: "microsecond",
line: `cpu,host=serverA,region=us-east value=1.0 946730096789012`,
precision: "u",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000",
},
{
name: "millisecond",
line: `cpu,host=serverA,region=us-east value=1.0 946730096789`,
precision: "ms",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000",
},
{
name: "second",
line: `cpu,host=serverA,region=us-east value=1.0 946730096`,
precision: "s",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000",
},
{
name: "minute",
line: `cpu,host=serverA,region=us-east value=1.0 15778834`,
precision: "m",
exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000",
},
{
name: "hour",
line: `cpu,host=serverA,region=us-east value=1.0 262980`,
precision: "h",
exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000",
},
}
for _, test := range tests {
pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision)
if err != nil {
t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)
}
if exp := 1; len(pts) != exp {
t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp)
}
pt := pts[0]
got := pt.String()
if got != test.exp {
t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp)
}
}
}
func TestParsePointsWithPrecisionNoTime(t *testing.T) {
line := `cpu,host=serverA,region=us-east value=1.0`
tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
tests := []struct {
name string
precision string
exp string
}{
{
name: "no precision",
precision: "",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
},
{
name: "nanosecond precision",
precision: "n",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
},
{
name: "microsecond precision",
precision: "u",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000",
},
{
name: "millisecond precision",
precision: "ms",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000",
},
{
name: "second precision",
precision: "s",
exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000",
},
{
name: "minute precision",
precision: "m",
exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000",
},
{
name: "hour precision",
precision: "h",
exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000",
},
}
for _, test := range tests {
pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision)
if err != nil {
t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)
}
if exp := 1; len(pts) != exp {
t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp)
}
pt := pts[0]
got := pt.String()
if got != test.exp {
t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp)
}
}
}
func TestParsePointsWithPrecisionComments(t *testing.T) {
tests := []struct {
name string
batch string
exp string
lenPoints int
}{
{
name: "comment only",
batch: `# comment only`,
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
lenPoints: 0,
},
{
name: "point with comment above",
batch: `# a point is below
cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
lenPoints: 1,
},
{
name: "point with comment below",
batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345
# end of points`,
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
lenPoints: 1,
},
{
name: "indented comment",
batch: ` # a point is below
cpu,host=serverA,region=us-east value=1.0 946730096789012345`,
exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345",
lenPoints: 1,
},
}
for _, test := range tests {
pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "")
if err != nil {
t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)
}
pointsLength := len(pts)
if exp := test.lenPoints; pointsLength != exp {
t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp)
}
if pointsLength > 0 {
pt := pts[0]
got := pt.String()
if got != test.exp {
t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp)
}
}
}
}
func TestNewPointEscaped(t *testing.T) {
// commas
pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0))
if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// spaces
pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0))
if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// equals
pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0))
if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
}
func TestNewPointWithoutField(t *testing.T) {
_, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0))
if err == nil {
t.Fatalf(`NewPoint() expected error. got nil`)
}
}
func TestNewPointUnhandledType(t *testing.T) {
// nil value
pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0))
if exp := `cpu value= 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
// unsupported type gets stored as string
now := time.Unix(0, 0).UTC()
pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0))
if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
fields, err := pt.Fields()
if err != nil {
t.Fatal(err)
}
if exp := "1970-01-01 00:00:00 +0000 UTC"; fields["value"] != exp {
t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
}
}
func TestMakeKeyEscaped(t *testing.T) {
if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp {
t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp)
}
if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp {
t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp)
}
if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp {
t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp)
}
if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp {
t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp)
}
}
func TestPrecisionString(t *testing.T) {
fields := map[string]interface{}{"value": float64(1)}
tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
tests := []struct {
name string
precision string
exp string
}{
{
name: "no precision",
precision: "",
exp: "cpu value=1 946730096789012345",
},
{
name: "nanosecond precision",
precision: "ns",
exp: "cpu value=1 946730096789012345",
},
{
name: "microsecond precision",
precision: "u",
exp: "cpu value=1 946730096789012",
},
{
name: "millisecond precision",
precision: "ms",
exp: "cpu value=1 946730096789",
},
{
name: "second precision",
precision: "s",
exp: "cpu value=1 946730096",
},
{
name: "minute precision",
precision: "m",
exp: "cpu value=1 15778834",
},
{
name: "hour precision",
precision: "h",
exp: "cpu value=1 262980",
},
}
for _, test := range tests {
pt := models.MustNewPoint("cpu", nil, tags, tm)
act := pt.PrecisionString(test.precision)
if act != test.exp {
t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v",
test.name, act, test.exp)
}
}
}
func TestRoundedString(t *testing.T) {
fields := map[string]interface{}{"value": float64(1)}
tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
tests := []struct {
name string
precision time.Duration
exp string
}{
{
name: "no precision",
precision: time.Duration(0),
exp: "cpu value=1 946730096789012345",
},
{
name: "nanosecond precision",
precision: time.Nanosecond,
exp: "cpu value=1 946730096789012345",
},
{
name: "microsecond precision",
precision: time.Microsecond,
exp: "cpu value=1 946730096789012000",
},
{
name: "millisecond precision",
precision: time.Millisecond,
exp: "cpu value=1 946730096789000000",
},
{
name: "second precision",
precision: time.Second,
exp: "cpu value=1 946730097000000000",
},
{
name: "minute precision",
precision: time.Minute,
exp: "cpu value=1 946730100000000000",
},
{
name: "hour precision",
precision: time.Hour,
exp: "cpu value=1 946731600000000000",
},
}
for _, test := range tests {
pt := models.MustNewPoint("cpu", nil, fields, tm)
act := pt.RoundedString(test.precision)
if act != test.exp {
t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v",
test.name, act, test.exp)
}
}
}
func TestParsePointsStringWithExtraBuffer(t *testing.T) {
b := make([]byte, 70*5000)
buf := bytes.NewBuffer(b)
key := "cpu,host=A,region=uswest"
buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64()))
points, err := models.ParsePointsString(buf.String())
if err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
pointKey := string(points[0].Key())
if len(key) != len(pointKey) {
t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey))
}
if key != pointKey {
t.Fatalf("expected both keys are same but got %s and %s", key, pointKey)
}
}
func TestParsePointsQuotesInFieldKey(t *testing.T) {
buf := `cpu "a=1
cpu value=2 1`
points, err := models.ParsePointsString(buf)
if err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
fields, err := points[0].Fields()
if err != nil {
t.Fatal(err)
}
value, ok := fields["\"a"]
if !ok {
t.Fatalf("expected to parse field '\"a'")
}
if value != float64(1) {
t.Fatalf("expected field value to be 1, got %v", value)
}
// The following input should not parse
buf = `cpu "\, '= "\ v=1.0`
_, err = models.ParsePointsString(buf)
if err == nil {
t.Fatalf("expected parsing failure but got no error")
}
}
func TestParsePointsQuotesInTags(t *testing.T) {
buf := `t159,label=hey\ "ya a=1i,value=0i
t159,label=another a=2i,value=1i 1`
points, err := models.ParsePointsString(buf)
if err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if len(points) != 2 {
t.Fatalf("expected 2 points, got %d", len(points))
}
}
func TestParsePointsBlankLine(t *testing.T) {
buf := `cpu value=1i 1000000000

cpu value=2i 2000000000`
points, err := models.ParsePointsString(buf)
if err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if len(points) != 2 {
t.Fatalf("expected 2 points, got %d", len(points))
}
}
func TestNewPointsWithBytesWithCorruptData(t *testing.T) {
corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148}
p, err := models.NewPointFromBytes(corrupted)
if p != nil || err == nil {
t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err)
}
}
func TestNewPointsWithShortBuffer(t *testing.T) {
p, err := models.NewPointFromBytes([]byte{0, 0, 0, 3, 4})
if err != io.ErrShortBuffer {
t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, io.ErrShortBuffer)", p, err)
}
}
func TestNewPointsRejectsEmptyFieldNames(t *testing.T) {
if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil {
t.Fatalf("new point with empty field name. got: nil, expected: error")
}
}
func TestNewPointsRejectsMaxKey(t *testing.T) {
var key string
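// Build a key longer than the maximum allowed key length (assumed to be
// 65535 bytes in this version of the models package) so that both the
// constructor and the parser below must reject it.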
for i := 0; i < 65536; i++ {
key += "a"
}
if _, err := models.NewPoint(key, nil, models.Fields{"value": 1}, time.Now()); err == nil {
t.Fatalf("new point with max key. got: nil, expected: error")
}
if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1", key)); err == nil {
t.Fatalf("parse point with max key. got: nil, expected: error")
}
}
func TestParseKeyEmpty(t *testing.T) {
if _, _, err := models.ParseKey(nil); err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
func TestParseKeyMissingValue(t *testing.T) {
if _, _, err := models.ParseKey([]byte("cpu,foo ")); err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
func TestPoint_FieldIterator_Simple(t *testing.T) {
p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`))
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1)
}
fi := p[0].FieldIterator()
if !fi.Next() {
t.Fatal("field iterator terminated before first field")
}
if fi.Type() != models.Integer {
t.Fatalf("'42i' should be an Integer, got %v", fi.Type())
}
iv, err := fi.IntegerValue()
if err != nil {
t.Fatal(err)
}
if exp, got := int64(42), iv; exp != got {
t.Fatalf("'42i' should be %d, got %d", exp, got)
}
if !fi.Next() {
t.Fatalf("field iterator terminated before second field")
}
if fi.Type() != models.Float {
t.Fatalf("'42' should be a Float, got %v", fi.Type())
}
fv, err := fi.FloatValue()
if err != nil {
t.Fatal(err)
}
if exp, got := 42.0, fv; exp != got {
t.Fatalf("'42' should be %f, got %f", exp, got)
}
if fi.Next() {
t.Fatal("field iterator didn't terminate")
}
}
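// toFields drains a models.FieldIterator into a Fields map, panicking on an
// unknown field type or a decode error; it is a helper for the FieldIterator
// tests below.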
func toFields(fi models.FieldIterator) models.Fields {
m := make(models.Fields)
for fi.Next() {
var v interface{}
var err error
switch fi.Type() {
case models.Float:
v, err = fi.FloatValue()
case models.Integer:
v, err = fi.IntegerValue()
case models.String:
v = fi.StringValue()
case models.Boolean:
v, err = fi.BooleanValue()
case models.Empty:
v = nil
default:
panic("unknown type")
}
if err != nil {
panic(err)
}
m[string(fi.FieldKey())] = v
}
return m
}
func TestPoint_FieldIterator_FieldMap(t *testing.T) {
points, err := models.ParsePointsString(`
m v=42
m v=42i
m v="string"
m v=true
m v="string\"with\"escapes"
m v=42i,f=42,g=42.314
m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456
`)
if err != nil {
t.Fatal("failed to parse test points:", err)
}
for _, p := range points {
exp, err := p.Fields()
if err != nil {
t.Fatal(err)
}
got := toFields(p.FieldIterator())
if !reflect.DeepEqual(got, exp) {
t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp)
}
}
}
func TestPoint_FieldIterator_Delete_Begin(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"b": float64(2), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
}
func TestPoint_FieldIterator_Delete_Middle(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
}
func TestPoint_FieldIterator_Delete_End(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Next() // c
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "b": float64(2)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
}
func TestPoint_FieldIterator_Delete_Nothing(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "b": float64(2), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
}
func TestPoint_FieldIterator_Delete_Twice(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Delete()
fi.Delete() // no-op
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
}
func TestEscapeStringField(t *testing.T) {
cases := []struct {
in string
expOut string
}{
{in: "abcdefg", expOut: "abcdefg"},
{in: `one double quote " .`, expOut: `one double quote \" .`},
{in: `quote " then backslash \ .`, expOut: `quote \" then backslash \\ .`},
{in: `backslash \ then quote " .`, expOut: `backslash \\ then quote \" .`},
}
for _, c := range cases {
// Escapes as expected.
got := models.EscapeStringField(c.in)
if got != c.expOut {
t.Errorf("unexpected result from EscapeStringField(%s)\ngot [%s]\nexp [%s]\n", c.in, got, c.expOut)
continue
}
pointLine := fmt.Sprintf(`t s="%s"`, got)
test(t, pointLine, NewTestPoint(
"t",
models.NewTags(nil),
models.Fields{"s": c.in},
time.Unix(0, 0),
))
}
}
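// The benchmarks below assign their results to sink, assumed to be a
// package-level variable (e.g. var sink interface{}) declared earlier in
// this file, so the compiler cannot optimize the EscapeStringField calls away.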
func BenchmarkEscapeStringField_Plain(b *testing.B) {
s := "nothing special"
for i := 0; i < b.N; i++ {
sink = models.EscapeStringField(s)
}
}
func BenchmarkEscapeString_Quotes(b *testing.B) {
s := `Hello, "world"`
for i := 0; i < b.N; i++ {
sink = models.EscapeStringField(s)
}
}
func BenchmarkEscapeString_Backslashes(b *testing.B) {
s := `C:\windows\system32`
for i := 0; i < b.N; i++ {
sink = models.EscapeStringField(s)
}
}
func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) {
s1 := `a quote " then backslash \ .`
s2 := `a backslash \ then quote " .`
for i := 0; i < b.N; i++ {
sink = [...]string{models.EscapeStringField(s1), models.EscapeStringField(s2)}
}
}
| {
"pile_set_name": "Github"
} |
/********************************************************************
* Copyright (c) 2007 Contributors. All rights reserved.
* This program and the accompanying materials are made available
* under the terms of the Eclipse Public License v1.0
* which accompanies this distribution and is available at
* http://eclipse.org/legal/epl-v10.html
*
* Contributors: IBM Corporation - initial API and implementation
* Helen Hawkins - initial version (bug 148190)
*******************************************************************/
package org.aspectj.ajde.ui.utils;
import java.io.IOException;
import java.util.List;
import org.aspectj.ajde.EditorAdapter;
import org.aspectj.bridge.ISourceLocation;
/**
* EditorAdapter with empty implementation
*/
public class TestEditorAdapter implements EditorAdapter {
public String getCurrFile() {
return null;
}
public void pasteToCaretPos(String text) {
}
public void saveContents() throws IOException {
}
public void showSourceLine(String filePath, int lineNumber,
boolean highlight) {
}
public void showSourceLine(ISourceLocation sourceLocation, boolean highlight) {
}
public void showSourceLine(int lineNumber, boolean highlight) {
}
public void showSourcelineAnnotation(String filePath, int lineNumber,
List items) {
}
}
| {
"pile_set_name": "Github"
} |
[theme]
inherit = basic
stylesheet = nature.css
pygments_style = tango
[options]
oldversion = False
collapsiblesidebar = True
| {
"pile_set_name": "Github"
} |
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st:-1 flags:0 ts:-1.000000
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st:-1 flags:1 ts: 1.894167
ret: 0 st: 0 flags:1 dts: 1.894172 pts: 1.894172 pos: 334178 size: 4096
ret: 0 st: 0 flags:0 ts: 0.788345
ret: 0 st: 0 flags:1 dts: 0.788345 pts: 0.788345 pos: 139110 size: 4096
ret: 0 st: 0 flags:1 ts:-0.317506
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st:-1 flags:0 ts: 2.576668
ret: 0 st: 0 flags:1 dts: 2.576667 pts: 2.576667 pos: 454570 size: 4096
ret: 0 st:-1 flags:1 ts: 1.470835
ret: 0 st: 0 flags:1 dts: 1.470839 pts: 1.470839 pos: 259502 size: 4096
ret: 0 st: 0 flags:0 ts: 0.365011
ret: 0 st: 0 flags:1 dts: 0.365011 pts: 0.365011 pos: 64434 size: 4096
ret: 0 st: 0 flags:1 ts:-0.740839
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st:-1 flags:0 ts: 2.153336
ret: 0 st: 0 flags:1 dts: 2.153333 pts: 2.153333 pos: 379894 size: 4096
ret: 0 st:-1 flags:1 ts: 1.047503
ret: 0 st: 0 flags:1 dts: 1.047506 pts: 1.047506 pos: 184826 size: 4096
ret: 0 st: 0 flags:0 ts:-0.058322
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st: 0 flags:1 ts: 2.835828
ret: 0 st: 0 flags:1 dts: 2.835828 pts: 2.835828 pos: 500286 size: 4096
ret: 0 st:-1 flags:0 ts: 1.730004
ret: 0 st: 0 flags:1 dts: 1.730000 pts: 1.730000 pos: 305218 size: 4096
ret: 0 st:-1 flags:1 ts: 0.624171
ret: 0 st: 0 flags:1 dts: 0.624172 pts: 0.624172 pos: 110150 size: 4096
ret: 0 st: 0 flags:0 ts:-0.481655
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st: 0 flags:1 ts: 2.412494
ret: 0 st: 0 flags:1 dts: 2.412494 pts: 2.412494 pos: 425610 size: 4096
ret: 0 st:-1 flags:0 ts: 1.306672
ret: 0 st: 0 flags:1 dts: 1.306667 pts: 1.306667 pos: 230542 size: 4096
ret: 0 st:-1 flags:1 ts: 0.200839
ret: 0 st: 0 flags:1 dts: 0.200839 pts: 0.200839 pos: 35474 size: 4096
ret: 0 st: 0 flags:0 ts:-0.904989
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st: 0 flags:1 ts: 1.989184
ret: 0 st: 0 flags:1 dts: 1.989184 pts: 1.989184 pos: 350938 size: 4096
ret: 0 st:-1 flags:0 ts: 0.883340
ret: 0 st: 0 flags:1 dts: 0.883333 pts: 0.883333 pos: 155866 size: 4096
ret: 0 st:-1 flags:1 ts:-0.222493
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
ret: 0 st: 0 flags:0 ts: 2.671678
ret: 0 st: 0 flags:1 dts: 2.671678 pts: 2.671678 pos: 471330 size: 4096
ret: 0 st: 0 flags:1 ts: 1.565850
ret: 0 st: 0 flags:1 dts: 1.565850 pts: 1.565850 pos: 276262 size: 4096
ret: 0 st:-1 flags:0 ts: 0.460008
ret: 0 st: 0 flags:1 dts: 0.460000 pts: 0.460000 pos: 81190 size: 4096
ret: 0 st:-1 flags:1 ts:-0.645825
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 46 size: 4096
| {
"pile_set_name": "Github"
} |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the experimental API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
resourcequotav1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1"
)
// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
if err := announced.NewGroupMetaFactory(
&announced.GroupMetaFactoryArgs{
GroupName: resourcequotaapi.GroupName,
VersionPreferenceOrder: []string{resourcequotav1alpha1.SchemeGroupVersion.Version},
ImportPrefix: "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota",
AddInternalObjectsToScheme: resourcequotaapi.AddToScheme,
},
announced.VersionToSchemeFunc{
resourcequotav1alpha1.SchemeGroupVersion.Version: resourcequotav1alpha1.AddToScheme,
},
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
panic(err)
}
}
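// A minimal usage sketch (hypothetical wiring, not part of this package):
// callers invoke Install once while assembling their scheme, for example:
//
//	groupFactoryRegistry := make(announced.APIGroupFactoryRegistry)
//	registry := registered.NewOrDie("")
//	scheme := runtime.NewScheme()
//	Install(groupFactoryRegistry, registry, scheme)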
| {
"pile_set_name": "Github"
} |