/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2.0,
* as published by the Free Software Foundation.
*
* This program is also distributed with certain software (including
* but not limited to OpenSSL) that is licensed under separate terms,
* as designated in a particular file or component or in included license
* documentation. The authors of MySQL hereby grant you an additional
* permission to link the program and your derivative works with the
* separately licensed software that they have included with MySQL.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License, version 2.0, for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <my_byteorder.h>
#include "plugin/x/src/prepare_param_handler.h"
#include "plugin/x/src/xpl_error.h"
#include "unittest/gunit/xplugin/xpl/assert_error_code.h"
#include "unittest/gunit/xplugin/xpl/mock/session.h"
#include "unittest/gunit/xplugin/xpl/mysqlx_pb_wrapper.h"
namespace xpl {
namespace test {
using Arg_list = Prepare_param_handler::Arg_list;
using Param_list = Prepare_param_handler::Param_list;
using Placeholder_list = Prepare_param_handler::Placeholder_list;
using Param_value_list = Prepare_param_handler::Param_value_list;
using Param_svalue_list = Prepare_param_handler::Param_svalue_list;
class Prepare_param_handler_base_test : public testing::Test {
public:
Placeholder_list m_placeholders;
Prepare_param_handler m_handler{m_placeholders};
};
struct Param_check_argument_placeholder_consistency {
int expect_error_code;
std::size_t args_size;
Placeholder_list phs;
};
Param_check_argument_placeholder_consistency
check_argument_placeholder_consistency_param[] = {
{ER_X_SUCCESS, 0, {}},
{ER_X_SUCCESS, 1, {0}},
{ER_X_SUCCESS, 1, {0, 0, 0}},
{ER_X_SUCCESS, 2, {1, 0, 1}},
{ER_X_SUCCESS, 3, {0, 1, 2}},
{ER_X_SUCCESS, 3, {1, 2, 0}},
{ER_X_SUCCESS, 3, {2, 1, 0}},
{ER_X_SUCCESS, 3, {2, 0, 1}},
{ER_X_SUCCESS, 3, {1, 0, 2}},
{ER_X_SUCCESS, 3, {0, 2, 1}},
{ER_X_PREPARED_EXECUTE_ARGUMENT_CONSISTENCY, 0, {0}},
{ER_X_PREPARED_EXECUTE_ARGUMENT_CONSISTENCY, 1, {2}},
};
class Prepare_param_handler_argument_consistency_test
: public Prepare_param_handler_base_test,
public testing::WithParamInterface<
Param_check_argument_placeholder_consistency> {};
TEST_P(Prepare_param_handler_argument_consistency_test,
check_argument_placeholder_consistency) {
const Param_check_argument_placeholder_consistency &param = GetParam();
m_placeholders = param.phs;
ASSERT_ERROR_CODE(
param.expect_error_code,
m_handler.check_argument_placeholder_consistency(param.args_size, 0));
}
INSTANTIATE_TEST_CASE_P(
Prepare_command_handler, Prepare_param_handler_argument_consistency_test,
testing::ValuesIn(check_argument_placeholder_consistency_param));
namespace {
class Value {
public:
Value() {}
explicit Value(const unsigned v) {
int8store(buf.data(), static_cast<uint64_t>(v));
}
explicit Value(const int v) {
int8store(buf.data(), static_cast<int64_t>(v));
}
explicit Value(const double v) { float8store(buf.data(), v); }
explicit Value(const float v) { float4store(buf.data(), v); }
explicit Value(const bool v) { buf[0] = v ? 1 : 0; }
operator const Param_value_list::value_type &() const { return buf; }
private:
Param_value_list::value_type buf{{0}};
};
} // namespace
struct Param_prepare_parameters {
int expect_error_code;
Param_list expect_params;
Param_value_list expect_param_values;
Param_svalue_list expect_param_svalues;
Arg_list args;
Placeholder_list phs;
};
#define NLL \
{ true, MYSQL_TYPE_NULL, false, nullptr, 0ul }
#define SIN \
{ false, MYSQL_TYPE_LONGLONG, false, nullptr, sizeof(int64_t) }
#define UIN \
{ false, MYSQL_TYPE_LONGLONG, true, nullptr, sizeof(uint64_t) }
#define STR(len) \
{ false, MYSQL_TYPE_STRING, false, nullptr, len }
#define DBL \
{ false, MYSQL_TYPE_DOUBLE, false, nullptr, sizeof(double) }
#define FLT \
{ false, MYSQL_TYPE_FLOAT, false, nullptr, sizeof(float) }
#define BOL \
{ false, MYSQL_TYPE_TINY, false, nullptr, 1ul }
#define RAW(id) \
{ id, Placeholder_info::Type::k_raw }
#define JSN(id) \
{ id, Placeholder_info::Type::k_json }
using Octets = Scalar::Octets;
using String = Scalar::String;
Param_prepare_parameters prepare_parameters_param[] = {
{ER_X_SUCCESS, {}, {}, {}, {}, {}},
{ER_X_SUCCESS, {NLL}, {}, {}, Any_list{Scalar::Null()}, {RAW(0)}},
{ER_X_SUCCESS, {STR(4)}, {}, {"null"}, Any_list{Scalar::Null()}, {JSN(0)}},
{ER_X_SUCCESS, {SIN}, {Value(-1)}, {}, Any_list{Scalar(-1)}, {RAW(0)}},
{ER_X_SUCCESS, {SIN}, {Value(-1)}, {}, Any_list{Scalar(-1)}, {JSN(0)}},
{ER_X_SUCCESS, {UIN}, {Value(1u)}, {}, Any_list{Scalar(1u)}, {RAW(0)}},
{ER_X_SUCCESS, {UIN}, {Value(1u)}, {}, Any_list{Scalar(1u)}, {JSN(0)}},
{ER_X_SUCCESS, {STR(3)}, {}, {}, Any_list{String("abc")}, {RAW(0)}},
{ER_X_SUCCESS,
{STR(5)},
{},
{"\"abc\""},
Any_list{String("abc")},
{JSN(0)}},
{ER_X_SUCCESS, {STR(3)}, {}, {}, Any_list{Octets("abc")}, {RAW(0)}},
{ER_X_SUCCESS,
{STR(3)},
{},
{},
Any_list{Octets("abc", Octets::Content_type::k_json)},
{RAW(0)}},
{ER_X_SUCCESS,
{STR(5)},
{},
{"\"abc\""},
Any_list{Octets("abc")},
{JSN(0)}},
{ER_X_SUCCESS,
{STR(3)},
{},
{"abc"},
Any_list{Octets("abc", Octets::Content_type::k_json)},
{JSN(0)}},
{ER_X_SUCCESS, {DBL}, {Value(1.1)}, {}, Any_list{Scalar(1.1)}, {RAW(0)}},
{ER_X_SUCCESS, {DBL}, {Value(1.1)}, {}, Any_list{Scalar(1.1)}, {JSN(0)}},
{ER_X_SUCCESS, {FLT}, {Value(1.1f)}, {}, Any_list{Scalar(1.1f)}, {RAW(0)}},
{ER_X_SUCCESS, {FLT}, {Value(1.1f)}, {}, Any_list{Scalar(1.1f)}, {JSN(0)}},
{ER_X_SUCCESS, {BOL}, {Value(true)}, {}, Any_list{Scalar(true)}, {RAW(0)}},
{ER_X_SUCCESS,
{BOL},
{Value(false)},
{},
Any_list{Scalar(false)},
{RAW(0)}},
{ER_X_SUCCESS, {STR(4)}, {}, {"true"}, Any_list{Scalar(true)}, {JSN(0)}},
{ER_X_SUCCESS, {STR(5)}, {}, {"false"}, Any_list{Scalar(false)}, {JSN(0)}},
{ER_X_SUCCESS,
{UIN, SIN},
{Value(2u), Value(1)},
{},
Any_list{Scalar(2u), Scalar(1)},
{RAW(0), RAW(1)}},
{ER_X_SUCCESS,
{SIN, UIN},
{Value(1), Value(2u)},
{},
Any_list{Scalar(2u), Scalar(1)},
{RAW(1), RAW(0)}},
{ER_X_SUCCESS,
{SIN, SIN, SIN},
{Value(1), Value(1), Value(1)},
{},
Any_list{Scalar(1)},
{RAW(0), RAW(0), RAW(0)}},
{ER_X_SUCCESS,
{NLL, SIN, NLL},
{Value(1)},
{},
Any_list{Scalar::Null(), Scalar(1)},
{RAW(0), RAW(1), RAW(0)}},
{ER_X_SUCCESS,
{NLL, STR(2), STR(3)},
{},
{},
Any_list{Scalar::String("ab"), Scalar::Octets("abc"), Scalar::Null()},
{RAW(2), RAW(0), RAW(1)}},
{ER_X_PREPARED_EXECUTE_ARGUMENT_NOT_SUPPORTED,
{},
{},
{},
Any_list{Any::Object()},
{RAW(0)}},
{ER_X_PREPARED_EXECUTE_ARGUMENT_NOT_SUPPORTED,
{},
{},
{},
Any_list{Any::Array()},
{RAW(0)}},
{ER_X_SUCCESS,
{SIN},
{Value(1)},
{},
Any_list{Scalar(1), Any::Array()},
{RAW(0)}},
{ER_X_SUCCESS,
{BOL, STR(4)},
{Value(true)},
{"true"},
Any_list{Scalar(true)},
{RAW(0), JSN(0)}},
};
class Prepare_command_handler_prepare_parameters_test
: public Prepare_param_handler_base_test,
public testing::WithParamInterface<Param_prepare_parameters> {};
MATCHER(Eq_param, "") {
using ::testing::get;
// intentionally skip comparison of "value" value;
return get<0>(arg).null_bit == get<1>(arg).null_bit &&
get<0>(arg).type == get<1>(arg).type &&
get<0>(arg).unsigned_type == get<1>(arg).unsigned_type &&
get<0>(arg).length == get<1>(arg).length;
}
TEST_P(Prepare_command_handler_prepare_parameters_test, prepare_parameters) {
const auto &param = GetParam();
m_placeholders = param.phs;
ASSERT_ERROR_CODE(param.expect_error_code,
m_handler.prepare_parameters(param.args));
EXPECT_THAT(m_handler.get_params(),
testing::Pointwise(Eq_param(), param.expect_params));
EXPECT_EQ(param.expect_param_svalues, m_handler.get_string_values());
EXPECT_EQ(param.expect_param_values, m_handler.get_values());
}
INSTANTIATE_TEST_CASE_P(Prepare_command_handler,
Prepare_command_handler_prepare_parameters_test,
testing::ValuesIn(prepare_parameters_param));
} // namespace test
} // namespace xpl
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
listenerComplete = false;
exception = false;
// The base part of all evaluate requests.
var base_request = '"seq":0,"type":"request","command":"evaluate"'
function safeEval(code) {
try {
return eval('(' + code + ')');
} catch (e) {
assertEquals(void 0, e);
return undefined;
}
}
function testRequest(dcp, arguments, success, result) {
// Generate request with the supplied arguments.
var request;
if (arguments) {
request = '{' + base_request + ',"arguments":' + arguments + '}';
} else {
request = '{' + base_request + '}'
}
var response = safeEval(dcp.processDebugJSONRequest(request));
if (success) {
assertTrue(response.success, request + ' -> ' + response.message);
assertEquals(result, response.body.value);
} else {
assertFalse(response.success, request + ' -> ' + response.message);
}
assertEquals(response.running, "unspecified_running_state",
request + ' -> expected not running');
}
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
// Test some illegal evaluate requests.
testRequest(dcp, void 0, false);
testRequest(dcp, '{"expression":"1","global"=true}', false);
testRequest(dcp, '{"expression":"a","frame":4}', false);
// Test some legal evaluate requests.
testRequest(dcp, '{"expression":"1+2"}', true, 3);
testRequest(dcp, '{"expression":"a+2"}', true, 5);
testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
// Test evaluation of a in the stack frames and the global context.
testRequest(dcp, '{"expression":"a"}', true, 3);
testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
testRequest(dcp, '{"expression":"a","global":true}', true, 1);
testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
// Test that the whole string text is returned if maxStringLength
// parameter is passed.
testRequest(
dcp,
'{"expression":"this.longString","global":true,"maxStringLength":-1}',
true,
longString);
testRequest(
dcp,
'{"expression":"this.longString","global":true,"maxStringLength":' +
longString.length + '}',
true,
longString);
var truncatedStringSuffix = '... (length: ' + longString.length + ')';
testRequest(
dcp,
'{"expression":"this.longString","global":true,"maxStringLength":0}',
true,
truncatedStringSuffix);
testRequest(
dcp,
'{"expression":"this.longString","global":true,"maxStringLength":1}',
true,
longString.charAt(0) + truncatedStringSuffix);
// Test that by default string is truncated to first 80 chars.
testRequest(
dcp,
'{"expression":"this.longString","global":true}',
true,
longString.substring(0, 80) + truncatedStringSuffix);
// Indicate that all was processed.
listenerComplete = true;
}
} catch (e) {
exception = e
};
};
// Add the debug event listener.
Debug.setListener(listener);
function f() {
var a = 3;
};
function g() {
var a = 2;
f();
};
a = 1;
// String which is longer than 80 chars.
var longString = "1234567890_";
for (var i = 0; i < 4; i++) {
longString += longString;
}
// Set a break point at return in f and invoke g to hit the breakpoint.
Debug.setBreakPoint(f, 2, 0);
g();
assertFalse(exception, "exception in listener")
// Make sure that the debug event listener was invoked.
assertTrue(listenerComplete, "listener did not run to completion");
<?php
/*
* This file is part of PhpSpec, A php toolset to drive emergent
* design by specification.
*
* (c) Marcello Duarte <[email protected]>
* (c) Konstantin Kudryashov <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace PhpSpec\Runner;
use PhpSpec\Event\SuiteEvent;
use PhpSpec\Exception\Example\StopOnFailureException;
use PhpSpec\Loader\Suite;
use Symfony\Component\EventDispatcher\EventDispatcher;
class SuiteRunner
{
/**
* @var EventDispatcher
*/
private $dispatcher;
/**
* @var SpecificationRunner
*/
private $specRunner;
/**
* @param EventDispatcher $dispatcher
* @param SpecificationRunner $specRunner
*/
public function __construct(EventDispatcher $dispatcher, SpecificationRunner $specRunner)
{
$this->dispatcher = $dispatcher;
$this->specRunner = $specRunner;
}
/**
* @param Suite $suite
*
* @return integer
*/
public function run(Suite $suite): int
{
$this->dispatcher->dispatch('beforeSuite', new SuiteEvent($suite));
$result = 0;
$startTime = microtime(true);
foreach ($suite->getSpecifications() as $specification) {
try {
$result = max($result, $this->specRunner->run($specification));
} catch (StopOnFailureException $e) {
$result = $e->getResult();
break;
}
}
$endTime = microtime(true);
$this->dispatcher->dispatch(
'afterSuite',
new SuiteEvent($suite, $endTime-$startTime, $result)
);
return $result;
}
}
<!doctype html public "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html> <head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title> Postfix manual - proxymap(8) </title>
</head> <body> <pre>
PROXYMAP(8) PROXYMAP(8)
<b>NAME</b>
proxymap - Postfix lookup table proxy server
<b>SYNOPSIS</b>
<b>proxymap</b> [generic Postfix daemon options]
<b>DESCRIPTION</b>
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server provides read-only or read-write table lookup
service to Postfix processes. These services are implemented with dis-
tinct service names: <b>proxymap</b> and <b>proxywrite</b>, respectively. The purpose
of these services is:
<b>o</b> To overcome chroot restrictions. For example, a chrooted SMTP
server needs access to the system passwd file in order to reject
mail for non-existent local addresses, but it is not practical
to maintain a copy of the passwd file in the chroot jail. The
solution:
<a href="postconf.5.html#local_recipient_maps">local_recipient_maps</a> =
<a href="proxymap.8.html">proxy</a>:unix:passwd.byname $<a href="postconf.5.html#alias_maps">alias_maps</a>
<b>o</b> To consolidate the number of open lookup tables by sharing one
open table among multiple processes. For example, making mysql
connections from every Postfix daemon process results in "too
many connections" errors. The solution:
<a href="postconf.5.html#virtual_alias_maps">virtual_alias_maps</a> =
<a href="proxymap.8.html">proxy</a>:<a href="mysql_table.5.html">mysql</a>:/etc/postfix/virtual_alias.cf
The total number of connections is limited by the number of
proxymap server processes.
<b>o</b> To provide single-updater functionality for lookup tables that
do not reliably support multiple writers (i.e. all file-based
tables).
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server implements the following requests:
<b>open</b> <i>maptype:mapname flags</i>
Open the table with type <i>maptype</i> and name <i>mapname</i>, as controlled
by <i>flags</i>. The reply includes the <i>maptype</i> dependent flags (to
distinguish a fixed string table from a regular expression ta-
ble).
<b>lookup</b> <i>maptype:mapname flags key</i>
Look up the data stored under the requested key. The reply is
the request completion status code and the lookup result value.
The <i>maptype:mapname</i> and <i>flags</i> are the same as with the <b>open</b>
request.
<b>update</b> <i>maptype:mapname flags key value</i>
Update the data stored under the requested key. The reply is
the request completion status code. The <i>maptype:mapname</i> and
<i>flags</i> are the same as with the <b>open</b> request.
To implement single-updater maps, specify a process limit of 1
in the <a href="master.5.html">master.cf</a> file entry for the <b>proxywrite</b> service.
This request is supported in Postfix 2.5 and later.
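As an illustration only (this example is not part of the original manual
page), such a single-updater configuration typically corresponds to a
<a href="master.5.html">master.cf</a> entry along the following lines; the
exact fields and defaults may vary between Postfix versions:
proxywrite unix  -       -       n       -       1       proxymap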
<b>delete</b> <i>maptype:mapname flags key</i>
Delete the data stored under the requested key. The reply is
the request completion status code. The <i>maptype:mapname</i> and
<i>flags</i> are the same as with the <b>open</b> request.
This request is supported in Postfix 2.5 and later.
<b>sequence</b> <i>maptype:mapname flags function</i>
Iterate over the specified database. The <i>function</i> is one of
DICT_SEQ_FUN_FIRST or DICT_SEQ_FUN_NEXT. The reply is the
request completion status code and a lookup key and result
value, if found.
This request is supported in Postfix 2.9 and later.
The request completion status is one of OK, RETRY, NOKEY (lookup failed
because the key was not found), BAD (malformed request) or DENY (the
table is not approved for proxy read or update access).
There is no <b>close</b> command, nor are tables implicitly closed when a
client disconnects. The purpose is to share tables among multiple
client processes.
<b>SERVER PROCESS MANAGEMENT</b>
<a href="proxymap.8.html"><b>proxymap</b>(8)</a> servers run under control by the Postfix <a href="master.8.html"><b>master</b>(8)</a> server.
Each server can handle multiple simultaneous connections. When all
servers are busy while a client connects, the <a href="master.8.html"><b>master</b>(8)</a> creates a new
<a href="proxymap.8.html"><b>proxymap</b>(8)</a> server process, provided that the process limit is not
exceeded. Each server terminates after serving at least <b>$<a href="postconf.5.html#max_use">max_use</a></b>
clients or after <b>$<a href="postconf.5.html#max_idle">max_idle</a></b> seconds of idle time.
<b>SECURITY</b>
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server opens only tables that are approved via the
<b><a href="postconf.5.html#proxy_read_maps">proxy_read_maps</a></b> or <b><a href="postconf.5.html#proxy_write_maps">proxy_write_maps</a></b> configuration parameters, does not
talk to users, and can run at fixed low privilege, chrooted or not.
However, running the proxymap server chrooted severely limits usabil-
ity, because it can open only chrooted tables.
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server is not a trusted daemon process, and must not be
used to look up sensitive information such as UNIX user or group IDs,
mailbox file/directory names or external commands.
In Postfix version 2.2 and later, the proxymap client recognizes
requests to access a table for security-sensitive purposes, and opens
the table directly. This allows the same <a href="postconf.5.html">main.cf</a> setting to be used by
sensitive and non-sensitive processes.
Postfix-writable data files should be stored under a dedicated direc-
tory that is writable only by the Postfix mail system, such as the
Postfix-owned <b><a href="postconf.5.html#data_directory">data_directory</a></b>.
In particular, Postfix-writable files should never exist in root-owned
directories. That would open up a particular type of security hole
where ownership of a file or directory does not match the provider of
its content.
<b>DIAGNOSTICS</b>
Problems and transactions are logged to <b>syslogd</b>(8) or <a href="postlogd.8.html"><b>postlogd</b>(8)</a>.
<b>BUGS</b>
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server provides service to multiple clients, and must
therefore not be used for tables that have high-latency lookups.
The <a href="proxymap.8.html"><b>proxymap</b>(8)</a> read-write service does not explicitly close lookup
tables (even if it did, this could not be relied on, because the
process may be terminated between table updates). The read-write ser-
vice should therefore not be used with tables that leave persistent
storage in an inconsistent state between updates (for example, CDB).
Tables that support "sync on update" should be safe (for example,
Berkeley DB) as should tables that are implemented by a real DBMS.
<b>CONFIGURATION PARAMETERS</b>
On busy mail systems a long time may pass before <a href="proxymap.8.html"><b>proxymap</b>(8)</a> relevant
changes to <a href="postconf.5.html"><b>main.cf</b></a> are picked up. Use the command "<b>postfix reload</b>" to
speed up a change.
The text below provides only a parameter summary. See <a href="postconf.5.html"><b>postconf</b>(5)</a> for
more details including examples.
<b><a href="postconf.5.html#config_directory">config_directory</a> (see 'postconf -d' output)</b>
The default location of the Postfix <a href="postconf.5.html">main.cf</a> and <a href="master.5.html">master.cf</a> con-
figuration files.
<b><a href="postconf.5.html#data_directory">data_directory</a> (see 'postconf -d' output)</b>
The directory with Postfix-writable data files (for example:
caches, pseudo-random numbers).
<b><a href="postconf.5.html#daemon_timeout">daemon_timeout</a> (18000s)</b>
How much time a Postfix daemon process may take to handle a
request before it is terminated by a built-in watchdog timer.
<b><a href="postconf.5.html#ipc_timeout">ipc_timeout</a> (3600s)</b>
The time limit for sending or receiving information over an
internal communication channel.
<b><a href="postconf.5.html#max_idle">max_idle</a> (100s)</b>
The maximum amount of time that an idle Postfix daemon process
waits for an incoming connection before terminating voluntarily.
<b><a href="postconf.5.html#max_use">max_use</a> (100)</b>
The maximal number of incoming connections that a Postfix daemon
process will service before terminating voluntarily.
<b><a href="postconf.5.html#process_id">process_id</a> (read-only)</b>
The process ID of a Postfix command or daemon process.
<b><a href="postconf.5.html#process_name">process_name</a> (read-only)</b>
The process name of a Postfix command or daemon process.
<b><a href="postconf.5.html#proxy_read_maps">proxy_read_maps</a> (see 'postconf -d' output)</b>
The lookup tables that the <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server is allowed to
access for the read-only service.
Available in Postfix 2.5 and later:
<b><a href="postconf.5.html#data_directory">data_directory</a> (see 'postconf -d' output)</b>
The directory with Postfix-writable data files (for example:
caches, pseudo-random numbers).
<b><a href="postconf.5.html#proxy_write_maps">proxy_write_maps</a> (see 'postconf -d' output)</b>
The lookup tables that the <a href="proxymap.8.html"><b>proxymap</b>(8)</a> server is allowed to
access for the read-write service.
Available in Postfix 3.3 and later:
<b><a href="postconf.5.html#service_name">service_name</a> (read-only)</b>
The <a href="master.5.html">master.cf</a> service name of a Postfix daemon process.
<b>SEE ALSO</b>
<a href="postconf.5.html">postconf(5)</a>, configuration parameters
<a href="master.5.html">master(5)</a>, generic daemon options
<b>README FILES</b>
<a href="DATABASE_README.html">DATABASE_README</a>, Postfix lookup table overview
<b>LICENSE</b>
The Secure Mailer license must be distributed with this software.
<b>HISTORY</b>
The proxymap service was introduced with Postfix 2.0.
<b>AUTHOR(S)</b>
Wietse Venema
IBM T.J. Watson Research
P.O. Box 704
Yorktown Heights, NY 10598, USA
Wietse Venema
Google, Inc.
111 8th Avenue
New York, NY 10011, USA
PROXYMAP(8)
</pre> </body> </html>
$NetBSD: patch-clang__delta_RemoveBaseClass.cpp,v 1.1 2016/11/18 18:20:05 joerg Exp $
--- clang_delta/RemoveBaseClass.cpp.orig 2016-11-17 01:45:06.000000000 +0000
+++ clang_delta/RemoveBaseClass.cpp
@@ -209,7 +209,7 @@ void RemoveBaseClass::copyBaseClassDecls
return;
SourceLocation StartLoc =
RewriteHelper->getLocationAfter(TheBaseClass->getLocation(), '{');
- SourceLocation EndLoc = TheBaseClass->getRBraceLoc();
+ SourceLocation EndLoc = TheBaseClass->getBraceRange().getEnd();
TransAssert(EndLoc.isValid() && "Invalid RBraceLoc!");
EndLoc = EndLoc.getLocWithOffset(-1);
@@ -217,7 +217,7 @@ void RemoveBaseClass::copyBaseClassDecls
TheRewriter.getRewrittenText(SourceRange(StartLoc, EndLoc));
TransAssert(!DeclsStr.empty() && "Empty DeclsStr!");
- SourceLocation InsertLoc = TheDerivedClass->getRBraceLoc();
+ SourceLocation InsertLoc = TheDerivedClass->getBraceRange().getEnd();
TheRewriter.InsertTextBefore(InsertLoc, DeclsStr);
}
/** Used to match template delimiters. */
var reEscape = /<%-([\s\S]+?)%>/g;
module.exports = reEscape;
// Scintilla source code edit control
/** @file AutoComplete.cxx
** Defines the auto completion list box.
**/
// Copyright 1998-2003 by Neil Hodgson <[email protected]>
// The License.txt file describes the conditions under which this software may be distributed.
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdexcept>
#include <string>
#include <vector>
#include <algorithm>
#include "Platform.h"
#include "Scintilla.h"
#include "CharacterSet.h"
#include "Position.h"
#include "AutoComplete.h"
#ifdef SCI_NAMESPACE
using namespace Scintilla;
#endif
AutoComplete::AutoComplete() :
active(false),
separator(' '),
typesep('?'),
ignoreCase(false),
chooseSingle(false),
lb(0),
posStart(0),
startLen(0),
cancelAtStartPos(true),
autoHide(true),
dropRestOfWord(false),
ignoreCaseBehaviour(SC_CASEINSENSITIVEBEHAVIOUR_RESPECTCASE),
widthLBDefault(100),
heightLBDefault(100),
autoSort(SC_ORDER_PRESORTED) {
lb = ListBox::Allocate();
}
AutoComplete::~AutoComplete() {
if (lb) {
lb->Destroy();
delete lb;
lb = 0;
}
}
bool AutoComplete::Active() const {
return active;
}
void AutoComplete::Start(Window &parent, int ctrlID,
int position, Point location, int startLen_,
int lineHeight, bool unicodeMode, int technology) {
if (active) {
Cancel();
}
lb->Create(parent, ctrlID, location, lineHeight, unicodeMode, technology);
lb->Clear();
active = true;
startLen = startLen_;
posStart = position;
}
void AutoComplete::SetStopChars(const char *stopChars_) {
stopChars = stopChars_;
}
bool AutoComplete::IsStopChar(char ch) {
return ch && (stopChars.find(ch) != std::string::npos);
}
void AutoComplete::SetFillUpChars(const char *fillUpChars_) {
fillUpChars = fillUpChars_;
}
bool AutoComplete::IsFillUpChar(char ch) {
return ch && (fillUpChars.find(ch) != std::string::npos);
}
void AutoComplete::SetSeparator(char separator_) {
separator = separator_;
}
char AutoComplete::GetSeparator() const {
return separator;
}
void AutoComplete::SetTypesep(char separator_) {
typesep = separator_;
}
char AutoComplete::GetTypesep() const {
return typesep;
}
struct Sorter {
AutoComplete *ac;
const char *list;
std::vector<int> indices;
Sorter(AutoComplete *ac_, const char *list_) : ac(ac_), list(list_) {
int i = 0;
while (list[i]) {
indices.push_back(i); // word start
while (list[i] != ac->GetTypesep() && list[i] != ac->GetSeparator() && list[i])
++i;
indices.push_back(i); // word end
if (list[i] == ac->GetTypesep()) {
while (list[i] != ac->GetSeparator() && list[i])
++i;
}
if (list[i] == ac->GetSeparator()) {
++i;
// preserve trailing separator as blank entry
if (!list[i]) {
indices.push_back(i);
indices.push_back(i);
}
}
}
indices.push_back(i); // index of last position
}
bool operator()(int a, int b) {
int lenA = indices[a * 2 + 1] - indices[a * 2];
int lenB = indices[b * 2 + 1] - indices[b * 2];
int len = std::min(lenA, lenB);
int cmp;
if (ac->ignoreCase)
cmp = CompareNCaseInsensitive(list + indices[a * 2], list + indices[b * 2], len);
else
cmp = strncmp(list + indices[a * 2], list + indices[b * 2], len);
if (cmp == 0)
cmp = lenA - lenB;
return cmp < 0;
}
};
void AutoComplete::SetList(const char *list) {
if (autoSort == SC_ORDER_PRESORTED) {
lb->SetList(list, separator, typesep);
sortMatrix.clear();
for (int i = 0; i < lb->Length(); ++i)
sortMatrix.push_back(i);
return;
}
Sorter IndexSort(this, list);
sortMatrix.clear();
for (int i = 0; i < (int)IndexSort.indices.size() / 2; ++i)
sortMatrix.push_back(i);
std::sort(sortMatrix.begin(), sortMatrix.end(), IndexSort);
if (autoSort == SC_ORDER_CUSTOM || sortMatrix.size() < 2) {
lb->SetList(list, separator, typesep);
PLATFORM_ASSERT(lb->Length() == static_cast<int>(sortMatrix.size()));
return;
}
std::string sortedList;
char item[maxItemLen];
for (size_t i = 0; i < sortMatrix.size(); ++i) {
int wordLen = IndexSort.indices[sortMatrix[i] * 2 + 2] - IndexSort.indices[sortMatrix[i] * 2];
if (wordLen > maxItemLen-2)
wordLen = maxItemLen - 2;
memcpy(item, list + IndexSort.indices[sortMatrix[i] * 2], wordLen);
if ((i+1) == sortMatrix.size()) {
// Last item so remove separator if present
if ((wordLen > 0) && (item[wordLen-1] == separator))
wordLen--;
} else {
// Item before last needs a separator
if ((wordLen == 0) || (item[wordLen-1] != separator)) {
item[wordLen] = separator;
wordLen++;
}
}
item[wordLen] = '\0';
sortedList += item;
}
for (int i = 0; i < (int)sortMatrix.size(); ++i)
sortMatrix[i] = i;
lb->SetList(sortedList.c_str(), separator, typesep);
}
int AutoComplete::GetSelection() const {
return lb->GetSelection();
}
std::string AutoComplete::GetValue(int item) const {
char value[maxItemLen];
lb->GetValue(item, value, sizeof(value));
return std::string(value);
}
void AutoComplete::Show(bool show) {
lb->Show(show);
if (show)
lb->Select(0);
}
void AutoComplete::Cancel() {
if (lb->Created()) {
lb->Clear();
lb->Destroy();
active = false;
}
}
void AutoComplete::Move(int delta) {
int count = lb->Length();
int current = lb->GetSelection();
current += delta;
if (current >= count)
current = count - 1;
if (current < 0)
current = 0;
lb->Select(current);
}
void AutoComplete::Select(const char *word) {
size_t lenWord = strlen(word);
int location = -1;
int start = 0; // lower bound of the api array block to search
int end = lb->Length() - 1; // upper bound of the api array block to search
while ((start <= end) && (location == -1)) { // Binary searching loop
int pivot = (start + end) / 2;
char item[maxItemLen];
lb->GetValue(sortMatrix[pivot], item, maxItemLen);
int cond;
if (ignoreCase)
cond = CompareNCaseInsensitive(word, item, lenWord);
else
cond = strncmp(word, item, lenWord);
if (!cond) {
// Find first match
while (pivot > start) {
lb->GetValue(sortMatrix[pivot-1], item, maxItemLen);
if (ignoreCase)
cond = CompareNCaseInsensitive(word, item, lenWord);
else
cond = strncmp(word, item, lenWord);
if (0 != cond)
break;
--pivot;
}
location = pivot;
if (ignoreCase
&& ignoreCaseBehaviour == SC_CASEINSENSITIVEBEHAVIOUR_RESPECTCASE) {
// Check for exact-case match
for (; pivot <= end; pivot++) {
lb->GetValue(sortMatrix[pivot], item, maxItemLen);
if (!strncmp(word, item, lenWord)) {
location = pivot;
break;
}
if (CompareNCaseInsensitive(word, item, lenWord))
break;
}
}
} else if (cond < 0) {
end = pivot - 1;
} else if (cond > 0) {
start = pivot + 1;
}
}
if (location == -1) {
if (autoHide)
Cancel();
else
lb->Select(-1);
} else {
if (autoSort == SC_ORDER_CUSTOM) {
// Check for a logically earlier match
char item[maxItemLen];
for (int i = location + 1; i <= end; ++i) {
lb->GetValue(sortMatrix[i], item, maxItemLen);
if (CompareNCaseInsensitive(word, item, lenWord))
break;
if (sortMatrix[i] < sortMatrix[location] && !strncmp(word, item, lenWord))
location = i;
}
}
lb->Select(sortMatrix[location]);
}
}
[
{
"outputFile": "wwwroot/assets/css/style.css",
"inputFile": "assets/scss/style.scss"
}
]
/*****************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
****************************************************************/
package org.apache.cayenne.modeler.util;
import org.apache.cayenne.exp.Expression;
import org.apache.cayenne.exp.ExpressionException;
import org.apache.cayenne.exp.ExpressionFactory;
import org.apache.cayenne.exp.parser.ParseException;
import org.apache.cayenne.util.Util;
/**
 * A Scope convertor that allows expressions to be displayed in text fields.
*
* @since 1.1
*/
public class ExpressionConvertor {
public String valueAsString(Object value) throws IllegalArgumentException {
if (value == null) {
return null;
}
if (!(value instanceof Expression)) {
throw new IllegalArgumentException(
"Unsupported value class: " + value.getClass().getName());
}
return value.toString();
}
public Object stringAsValue(String string) throws IllegalArgumentException {
if (string == null || string.trim().length() == 0) {
return null;
}
try {
return ExpressionFactory.exp(string);
} catch (ExpressionException eex) {
// this is likely a parse exception... show detailed message
Throwable cause = Util.unwindException(eex);
String message =
(cause instanceof ParseException)
? cause.getMessage()
: "Invalid expression: " + string;
throw new IllegalArgumentException(message);
}
}
public boolean supportsStringAsValue() {
return true;
}
}
*********************************
Transaction Family Specifications
*********************************
Sawtooth includes several transaction families as examples for developing
your own transaction family. These transaction families are available in the
``sawtooth-core`` repository unless noted below.
* The :doc:`/transaction_family_specifications/blockinfo_transaction_family`
provides a way to store information about a configurable number of historic
blocks.
The family name is ``block_info``.
The transaction processor is ``block-info-tp``.
* The :doc:`/transaction_family_specifications/identity_transaction_family`
is an extensible role- and policy-based system for defining permissions in a
way that can be used by other Sawtooth components.
The family name is ``sawtooth_identity``;
the associated transaction processor is ``identity-tp`` (see
:doc:`/cli/identity-tp`).
* The :doc:`/transaction_family_specifications/integerkey_transaction_family`
(also called "intkey") simply sets, increments, and decrements the value of
entries stored in a state dictionary.
The :doc:`intkey command </cli/intkey>` provides an example CLI client (see the usage sketch after this list).
intkey is available in several languages, including Go, Java, and JavaScript
(Node.js); see the ``sawtooth-sdk-{language}`` repositories under
``examples``.
The family name is ``intkey``.
The transaction processor is ``intkey-tp-{language}``.
* The :doc:`PoET Validator Registry transaction
family </transaction_family_specifications/validator_registry_transaction_family>`
provides a way to add new validators to the network. It is used by the PoET
consensus algorithm implementation to keep track of other validators.
This transaction family is in the
`sawtooth-poet <https://github.com/hyperledger/sawtooth-poet>`__ repository.
The family name is ``sawtooth_validator_registry``.
The transaction processor is ``poet-validator-registry-tp``.
* The :doc:`/transaction_family_specifications/settings_transaction_family`
provides a methodology for storing on-chain configuration settings.
The :doc:`sawset command </cli/sawset>` provides an example CLI client.
The family name is ``sawtooth_settings``.
The transaction processor is :doc:`/cli/settings-tp`.
.. note::
In a production environment, you should always run a transaction processor
that supports the Settings transaction family.
* The :doc:`/transaction_family_specifications/smallbank_transaction_family`
provides a cross-platform workload for comparing the performance of
blockchain systems.
The family name is ``smallbank``.
The transaction processor is ``smallbank-tp-{language}``.
* The :doc:`/transaction_family_specifications/xo_transaction_family`
allows two users to play a simple game of tic-tac-toe (see
:doc:`/app_developers_guide/intro_xo_transaction_family`).
The :doc:`xo command </cli/xo>` provides an example CLI client.
XO is available in several languages. The various implementations
can be found in the ``sawtooth-sdk-{language}`` repositories under
``examples``.
The family name is ``xo``.
The transaction processor is ``xo-tp-{language}``.
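As a quick, illustrative sketch (not part of the family specifications), the
IntegerKey family listed above can be exercised from the command line with
the ``intkey`` client; the exact subcommand syntax should be confirmed
against the :doc:`intkey command </cli/intkey>` reference, and the key name
``fred`` is purely hypothetical:
.. code-block:: console
   $ intkey set fred 42
   $ intkey inc fred 1
   $ intkey dec fred 1
   $ intkey show fred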
The following transaction families run on top of the Sawtooth platform:
* `Sawtooth Sabre Transaction Family <https://sawtooth.hyperledger.org/docs/sabre/nightly/master/>`__:
Implements on-chain smart contracts that are executed in a WebAssembly (WASM)
virtual machine.
This transaction family is in the
`sawtooth-sabre <https://github.com/hyperledger/sawtooth-sabre>`__ repository.
* `Sawtooth Seth Transaction Family <https://sawtooth.hyperledger.org/docs/seth/nightly/master/>`__:
Supports running Ethereum Virtual Machine (EVM) smart contracts on Sawtooth.
This transaction family is in the
`sawtooth-seth <https://github.com/hyperledger/sawtooth-seth>`__ repository.
.. toctree::
:maxdepth: 1
:caption: Contents
transaction_family_specifications/settings_transaction_family.rst
transaction_family_specifications/identity_transaction_family.rst
transaction_family_specifications/blockinfo_transaction_family.rst
transaction_family_specifications/integerkey_transaction_family.rst
transaction_family_specifications/xo_transaction_family.rst
transaction_family_specifications/validator_registry_transaction_family.rst
transaction_family_specifications/smallbank_transaction_family.rst
Sawtooth Sabre Transaction Family <https://sawtooth.hyperledger.org/docs/sabre/nightly/master/>
Sawtooth Seth Transaction Family <https://sawtooth.hyperledger.org/docs/seth/nightly/master/>
.. Licensed under Creative Commons Attribution 4.0 International License
.. https://creativecommons.org/licenses/by/4.0/
// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Preprocessed version of "boost/mpl/greater_equal.hpp" header
// -- DO NOT modify by hand!
namespace boost { namespace mpl {
template<
typename Tag1
, typename Tag2
, BOOST_MPL_AUX_NTTP_DECL(int, tag1_) = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag1)::value
, BOOST_MPL_AUX_NTTP_DECL(int, tag2_) = BOOST_MPL_AUX_MSVC_VALUE_WKND(Tag2)::value
>
struct greater_equal_impl
: if_c<
( tag1_ > tag2_ )
, aux::cast2nd_impl< greater_equal_impl< Tag1,Tag1 >,Tag1, Tag2 >
, aux::cast1st_impl< greater_equal_impl< Tag2,Tag2 >,Tag1, Tag2 >
>::type
{
};
/// for Digital Mars C++/compilers with no CTPS/TTP support
template<> struct greater_equal_impl< na,na >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template<> struct greater_equal_impl< na,integral_c_tag >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template<> struct greater_equal_impl< integral_c_tag,na >
{
template< typename U1, typename U2 > struct apply
{
typedef apply type;
BOOST_STATIC_CONSTANT(int, value = 0);
};
};
template< typename T > struct greater_equal_tag
: tag< T,na >
{
};
template<
typename BOOST_MPL_AUX_NA_PARAM(N1)
, typename BOOST_MPL_AUX_NA_PARAM(N2)
>
struct greater_equal
: aux::msvc_eti_base< typename apply_wrap2<
greater_equal_impl<
typename greater_equal_tag<N1>::type
, typename greater_equal_tag<N2>::type
>
, N1
, N2
>::type >::type
{
BOOST_MPL_AUX_LAMBDA_SUPPORT(2, greater_equal, (N1, N2))
};
BOOST_MPL_AUX_NA_SPEC2(2, 2, greater_equal)
}}
namespace boost { namespace mpl {
template<>
struct greater_equal_impl< integral_c_tag,integral_c_tag >
{
template< typename N1, typename N2 > struct apply
{
BOOST_STATIC_CONSTANT(bool, value =
( BOOST_MPL_AUX_VALUE_WKND(N1)::value >=
BOOST_MPL_AUX_VALUE_WKND(N2)::value )
);
typedef bool_<value> type;
};
};
}}
<!DOCTYPE html>
<html lang="en" class="no-js">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Dialog Effects | Wilma</title>
<meta name="description" content="A Collection of Dialog Effects" />
<meta name="keywords" content="dialog, effect, modal, overlay, animation, web design" />
<meta name="author" content="Codrops" />
<link rel="shortcut icon" href="../favicon.ico">
<link rel="stylesheet" type="text/css" href="css/normalize.css" />
<link rel="stylesheet" type="text/css" href="css/demo.css" />
<!-- common styles -->
<link rel="stylesheet" type="text/css" href="css/dialog.css" />
<!-- individual effect -->
<link rel="stylesheet" type="text/css" href="css/dialog-wilma.css" />
<script src="js/modernizr.custom.js"></script>
</head>
<body>
<div class="container">
<div class="content">
<header class="codrops-header">
<h1><span>A collection of</span> Dialog Effects</h1>
<div class="codrops-links">
<a class="codrops-icon codrops-icon-prev" href="http://tympanus.net/Development/MockupSlideshow/" title="Previous Demo"><span>Previous Demo</span></a> /
<a class="codrops-icon codrops-icon-drop" href="http://tympanus.net/codrops/?p=21386" title="Back to the article"><span>Back to the Codrops Article</span></a>
</div>
<div class="button-wrap"><button data-dialog="somedialog" class="trigger">Open Dialog</button></div>
</header>
<nav id="menu" class="menu">
<div>
<h2>Simple</h2>
<ul>
<li><a href="index.html">Sandra</a></li>
<li><a href="sally.html">Sally</a></li>
</ul>
<h2>Body</h2>
<ul>
<li><a href="dean.html">Dean</a></li>
<li><a href="susan.html">Susan</a></li>
</ul>
</div>
<div>
<h2>Move/Slide</h2>
<ul>
<li><a href="cathy.html">Cathy</a></li>
<li><a href="annie.html">Annie</a></li>
<li><a href="val.html">Val</a></li>
<li><a href="ricky.html">Ricky</a></li>
<li><a href="donna.html">Donna</a></li>
</ul>
</div>
<div>
<h2>Sticky</h2>
<ul>
<li><a href="ken.html">Ken</a></li>
<li><a href="alex.html">Alex</a></li>
</ul>
<h2>Jelly</h2>
<ul>
<li><a href="don.html">Don</a></li>
</ul>
</div>
<div>
<h2>SVG</h2>
<ul>
<li><a href="laura.html">Laura</a></li>
<li><a href="jamie.html">Jamie</a></li>
<li><a href="henry.html">Henry</a></li>
<li><a href="jim.html">Jim</a></li>
<li><a class="current-demo" href="wilma.html">Wilma</a></li>
</ul>
</div>
</nav>
<div id="somedialog" class="dialog">
<div class="dialog__overlay"></div>
<div class="dialog__content">
<div class="morph-shape">
<svg xmlns="http://www.w3.org/2000/svg" width="100%" height="100%" viewBox="0 0 560 280" preserveAspectRatio="none">
<rect x="3" y="3" fill="none" width="556" height="276"/>
</svg>
</div>
<div class="dialog-inner">
<h2><strong>Howdy</strong>, I'm a dialog box</h2>
<div><button class="action" data-dialog-close>Close</button></div>
</div>
</div>
</div>
<!-- Related demos -->
<section class="related">
You might also like: <a href="http://tympanus.net/Development/FullscreenOverlayStyles/">Fullscreen Overlay Effects</a> and <a href="http://tympanus.net/Development/ModalWindowEffects/">Nifty Modal Window Effects</a>
</section>
</div><!-- /content -->
</div><!-- /container -->
<script src="js/classie.js"></script>
<script src="js/dialogFx.js"></script>
<script>
(function() {
var dlgtrigger = document.querySelector( '[data-dialog]' ),
somedialog = document.getElementById( dlgtrigger.getAttribute( 'data-dialog' ) ),
dlg = new DialogFx( somedialog );
dlgtrigger.addEventListener( 'click', dlg.toggle.bind(dlg) );
})();
</script>
</body>
</html>
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) 19yy <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) 19yy name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2020 Ubique Innovation AG <https://www.ubique.ch>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*
* SPDX-License-Identifier: MPL-2.0
*/
package ch.admin.bag.dp3t.networking;
import android.content.Context;
import android.content.SharedPreferences;
import okhttp3.CertificatePinner;
public class CertificatePinning {
private static final CertificatePinner CERTIFICATE_PINNER_LIVE = new CertificatePinner.Builder()
.add("www.pt-d.bfs.admin.ch", "sha256/xWdkLqfT40GnyHyZXt9IStltvrshlowMuGZHgp631Tw=") // leaf
.add("www.pt1-d.bfs.admin.ch", "sha256/Pr8nx8M3Oa8EYefVXYB3D4KJViREDy4ipA1oVyjGoss=") // leaf
.add("codegen-service-d.bag.admin.ch", "sha256/GLV5pALJpJb02GavguT3NTOmOL57H7K3KhJ59iH6A/Q=") //leaf
.add("www.pt-t.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("www.pt1-t.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("codegen-service-t.bag.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("www.pt-a.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("www.pt1-a.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("codegen-service-a.bag.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("www.pt.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("www.pt1.bfs.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.add("codegen-service.bag.admin.ch", "sha256/KM3iZPSceB+hgYuNI+cSg4LRgTiUxCeGjrfXRQAY6Rs=") // intermediate
.build();
private static final CertificatePinner CERTIFICATE_PINNER_DISABLED = new CertificatePinner.Builder().build();
private static final String PREF_NAME_DEBUG = "debug";
private static final String PREF_KEY_CERT_PINNING_ENABLED = "certificate_pinning_enabled";
private static boolean isEnabled = true;
public static CertificatePinner getCertificatePinner() {
return isEnabled ? CERTIFICATE_PINNER_LIVE : CERTIFICATE_PINNER_DISABLED;
}
public static boolean isEnabled() {
return isEnabled;
}
public static void setEnabled(boolean enabled, Context context) {
isEnabled = enabled;
getDebugPrefs(context).edit().putBoolean(PREF_KEY_CERT_PINNING_ENABLED, enabled).apply();
}
public static void initDebug(Context context) {
isEnabled = getDebugPrefs(context).getBoolean(PREF_KEY_CERT_PINNING_ENABLED, isEnabled);
}
private static SharedPreferences getDebugPrefs(Context context) {
return context.getSharedPreferences(PREF_NAME_DEBUG, Context.MODE_PRIVATE);
}
}
| {
"pile_set_name": "Github"
} |
### Install dependencies
Before getting started, you should <a href="https://go.microsoft.com/fwlink/?linkid=2016373" target="_blank">install .NET Core 2.1</a>. You should also <a href="https://go.microsoft.com/fwlink/?linkid=2016195" target="_blank">install Node.JS</a>, which includes NPM and is how you will obtain the Azure Functions Core Tools. If you prefer not to install Node, see the other installation options in the <a href="https://go.microsoft.com/fwlink/?linkid=2016192" target="_blank">Core Tools reference</a>.
Run the following command to install the Core Tools package:
<MarkdownHighlighter> npm install -g azure-functions-core-tools@3 --unsafe-perm true</MarkdownHighlighter>
<br/>
### Create an Azure Functions project
In the terminal window or from a command prompt, navigate to an empty folder for your project and run the following command:
<MarkdownHighlighter> func init</MarkdownHighlighter>
You will also be prompted to choose a runtime for the project. Select {workerRuntime}.
<br/>
### Create a function
To create a function, run the following command:
<MarkdownHighlighter> func new</MarkdownHighlighter>
You will be prompted to choose a template for your function. We recommend an HTTP trigger for getting started.
<br/>
### Run your function project locally
Run the following command to start your function app:
<MarkdownHighlighter> func start</MarkdownHighlighter>
The runtime will output a URL for any HTTP functions, which can be copied and opened in your browser's address bar.
To stop debugging, use **Ctrl-C** in the terminal.
<br/>
### Deploy your code to Azure
To publish your Functions project to Azure, enter the following command:
<MarkdownHighlighter> func azure functionapp publish {functionAppName}</MarkdownHighlighter>
You may be prompted to sign in to Azure. Follow the on-screen instructions.
| {
"pile_set_name": "Github"
} |
// Copyright (C) 2012-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <vector>
#include <stdexcept>
// 4.4.x only
struct c
{
void *m;
c(void* o = 0) : m(o) {}
c(const c &r) : m(r.m) {}
template<class T>
explicit c(T &o) : m((void*)0xdeadbeef) { }
};
int main()
{
std::vector<c> cbs;
const c cb((void*)0xcafebabe);
for (int fd = 62; fd < 67; ++fd)
{
cbs.resize(fd + 1);
cbs[fd] = cb;
}
for (int fd = 62; fd< 67; ++fd)
if (cb.m != cbs[fd].m)
throw std::runtime_error("wrong");
return 0;
}
| {
"pile_set_name": "Github"
} |
# 8.3.1 Viewing system memory
Either memstat.sh or ps_mem.py under script/ can be used to inspect the system's memory usage; both tools require root privileges.
| {
"pile_set_name": "Github"
} |
// ------------------------------------------------------------------------------
// <autogenerated>
// This code was generated by a tool.
// Mono Runtime Version: 4.0.30319.1
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </autogenerated>
// ------------------------------------------------------------------------------
namespace OWASP.WebGoat.NET {
public partial class HeaderInjection {
protected System.Web.UI.WebControls.Content Content1;
protected System.Web.UI.WebControls.Content Content2;
protected System.Web.UI.WebControls.Label lblHeaders;
protected System.Web.UI.WebControls.GridView gvCookies;
}
}
| {
"pile_set_name": "Github"
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
org.netbeans.modules.gsf.GsfOptions=
| {
"pile_set_name": "Github"
} |
message CssSelectorExpectedOpeningBracket do
title "Syntax Error"
opening_bracket "sub selector", got
snippet node
end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_CHARACTERMAPPINGREADER_HPP
#define TT2I_CHARACTERMAPPINGREADER_HPP
#include "characterMapping.h"
#include <string>
namespace tts
{
class CharacterMappingReader
{
public:
static CharacterMapping loadFromFile(const std::string& filename);
};
} // namespace tts
#endif
| {
"pile_set_name": "Github"
} |
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"time"
digest "github.com/opencontainers/go-digest"
)
// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
type ImageConfig struct {
// User defines the username or UID which the process in the container should run as.
User string `json:"User,omitempty"`
// ExposedPorts a set of ports to expose from a container running this image.
ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
// Env is a list of environment variables to be used in a container.
Env []string `json:"Env,omitempty"`
// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
Entrypoint []string `json:"Entrypoint,omitempty"`
// Cmd defines the default arguments to the entrypoint of the container.
Cmd []string `json:"Cmd,omitempty"`
// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
Volumes map[string]struct{} `json:"Volumes,omitempty"`
// WorkingDir sets the current working directory of the entrypoint process in the container.
WorkingDir string `json:"WorkingDir,omitempty"`
// Labels contains arbitrary metadata for the container.
Labels map[string]string `json:"Labels,omitempty"`
// StopSignal contains the system call signal that will be sent to the container to exit.
StopSignal string `json:"StopSignal,omitempty"`
}
// RootFS describes a layer content addresses
type RootFS struct {
// Type is the type of the rootfs.
Type string `json:"type"`
// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
DiffIDs []digest.Digest `json:"diff_ids"`
}
// History describes the history of a layer.
type History struct {
// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
Created *time.Time `json:"created,omitempty"`
// CreatedBy is the command which created the layer.
CreatedBy string `json:"created_by,omitempty"`
// Author is the author of the build point.
Author string `json:"author,omitempty"`
// Comment is a custom message set when creating the layer.
Comment string `json:"comment,omitempty"`
// EmptyLayer is used to mark if the history item created a filesystem diff.
EmptyLayer bool `json:"empty_layer,omitempty"`
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
// Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
Created *time.Time `json:"created,omitempty"`
// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
Author string `json:"author,omitempty"`
// Architecture is the CPU architecture which the binaries in this image are built to run on.
Architecture string `json:"architecture"`
// OS is the name of the operating system which the image is built to run on.
OS string `json:"os"`
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
// RootFS references the layer content addresses used by the image.
RootFS RootFS `json:"rootfs"`
// History describes the history of each layer.
History []History `json:"history,omitempty"`
}
| {
"pile_set_name": "Github"
} |
// LazyTransform is a special type of Transform stream that is lazily loaded.
// This is used for performance with bi-API-ship: when two APIs are available
// for the stream, one conventional and one non-conventional.
'use strict';
const stream = require('stream');
const util = require('util');
module.exports = LazyTransform;
function LazyTransform(options) {
this._options = options;
}
util.inherits(LazyTransform, stream.Transform);
[
'_readableState',
'_writableState',
'_transformState'
].forEach(function(prop, i, props) {
Object.defineProperty(LazyTransform.prototype, prop, {
get: function() {
stream.Transform.call(this, this._options);
this._writableState.decodeStrings = false;
this._writableState.defaultEncoding = 'latin1';
return this[prop];
},
set: function(val) {
Object.defineProperty(this, prop, {
value: val,
enumerable: true,
configurable: true,
writable: true
});
},
configurable: true,
enumerable: true
});
});
| {
"pile_set_name": "Github"
} |
# go-reuseport-transport
[](http://ipn.io)
[](http://webchat.freenode.net/?channels=%23ipfs)
[](http://ipfs.io/)
[](https://github.com/RichardLitt/standard-readme)
[](https://godoc.org/github.com/libp2p/go-reuseport-transport)
[](https://coveralls.io/github/libp2p/go-reuseport-transport?branch=master)
[](https://travis-ci.org/libp2p/go-reuseport-transport)
> Basic reuseport TCP transport
This package provides a basic transport for automatically (and intelligently) reusing TCP ports.
To use, construct a new `Transport` (the zero value is safe to use) and configure any listeners (`tr.Listen(...)`).
Then, when dialing (`tr.Dial(...)`), the transport will attempt to reuse the ports it's currently listening on, choosing the best one depending on the destination address.
NOTE: Currently, we don't make any attempts to prevent two reuseport transports from interfering with each other (reusing each other's ports). However, we reserve the right to fix this in the future.
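A minimal sketch of that flow in Go (the import alias, the multiaddr-style `Listen`/`Dial` signatures, and the addresses used below are assumptions for illustration, not taken from this README):
```go
package main

import (
	"fmt"

	tpt "github.com/libp2p/go-reuseport-transport" // import alias is illustrative
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// The zero value of Transport is safe to use, as noted above.
	var tr tpt.Transport

	// Configure a listener; the transport remembers this port so that
	// later dials can try to reuse it.
	laddr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/4001") // address is illustrative
	if err != nil {
		panic(err)
	}
	l, err := tr.Listen(laddr)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Dial out; the transport picks the best currently-listening port to
	// reuse based on the destination address.
	raddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
	if err != nil {
		panic(err)
	}
	c, err := tr.Dial(raddr)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	fmt.Println("dialed", raddr)
}
```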
## Install
`go-reuseport-transport` is a standard Go module which can be installed with:
```sh
go get github.com/libp2p/go-reuseport-transport
```
Note that `go-reuseport-transport` is packaged with Gx, so it is recommended to use Gx to install and use it (see the Usage section).
## Usage
This module is packaged with [Gx](https://github.com/whyrusleeping/gx). In order to use it in your own project it is recommended that you:
```sh
go get -u github.com/whyrusleeping/gx
go get -u github.com/whyrusleeping/gx-go
cd <your-project-repository>
gx init
gx import github.com/libp2p/go-reuseport-transport
gx install --global
gx-go --rewrite
```
Please check [Gx](https://github.com/whyrusleeping/gx) and [Gx-go](https://github.com/whyrusleeping/gx-go) documentation for more information.
This package is *currently* used by the [go-tcp-transport](https://github.com/libp2p/go-tcp-transport) libp2p transport and will likely be used by more libp2p transports in the future.
## Contribute
Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/go-reuseport-transport/issues)!
This repository falls under the IPFS [Code of Conduct](https://github.com/libp2p/community/blob/master/code-of-conduct.md).
### Want to hack on IPFS?
[](https://github.com/ipfs/community/blob/master/contributing.md)
## License
MIT
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
*/
package com.microsoft.azure.management.network.v2020_03_01.implementation;
import com.microsoft.azure.arm.resources.collection.implementation.GroupableResourcesCoreImpl;
import com.microsoft.azure.management.network.v2020_03_01.P2sVpnGateways;
import com.microsoft.azure.management.network.v2020_03_01.P2SVpnGateway;
import rx.Observable;
import rx.Completable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import com.microsoft.azure.arm.resources.ResourceUtilsCore;
import com.microsoft.azure.arm.utils.RXMapper;
import rx.functions.Func1;
import com.microsoft.azure.PagedList;
import com.microsoft.azure.Page;
import com.microsoft.azure.management.network.v2020_03_01.VpnProfileResponse;
import com.microsoft.azure.management.network.v2020_03_01.P2SVpnConnectionHealth;
import com.microsoft.azure.management.network.v2020_03_01.P2SVpnConnectionHealthRequest;
class P2sVpnGatewaysImpl extends GroupableResourcesCoreImpl<P2SVpnGateway, P2SVpnGatewayImpl, P2SVpnGatewayInner, P2sVpnGatewaysInner, NetworkManager> implements P2sVpnGateways {
protected P2sVpnGatewaysImpl(NetworkManager manager) {
super(manager.inner().p2sVpnGateways(), manager);
}
@Override
protected Observable<P2SVpnGatewayInner> getInnerAsync(String resourceGroupName, String name) {
P2sVpnGatewaysInner client = this.inner();
return client.getByResourceGroupAsync(resourceGroupName, name);
}
@Override
protected Completable deleteInnerAsync(String resourceGroupName, String name) {
P2sVpnGatewaysInner client = this.inner();
return client.deleteAsync(resourceGroupName, name).toCompletable();
}
@Override
public Observable<String> deleteByIdsAsync(Collection<String> ids) {
if (ids == null || ids.isEmpty()) {
return Observable.empty();
}
Collection<Observable<String>> observables = new ArrayList<>();
for (String id : ids) {
final String resourceGroupName = ResourceUtilsCore.groupFromResourceId(id);
final String name = ResourceUtilsCore.nameFromResourceId(id);
Observable<String> o = RXMapper.map(this.inner().deleteAsync(resourceGroupName, name), id);
observables.add(o);
}
return Observable.mergeDelayError(observables);
}
@Override
public Observable<String> deleteByIdsAsync(String...ids) {
return this.deleteByIdsAsync(new ArrayList<String>(Arrays.asList(ids)));
}
@Override
public void deleteByIds(Collection<String> ids) {
if (ids != null && !ids.isEmpty()) {
this.deleteByIdsAsync(ids).toBlocking().last();
}
}
@Override
public void deleteByIds(String...ids) {
this.deleteByIds(new ArrayList<String>(Arrays.asList(ids)));
}
@Override
public PagedList<P2SVpnGateway> listByResourceGroup(String resourceGroupName) {
P2sVpnGatewaysInner client = this.inner();
return this.wrapList(client.listByResourceGroup(resourceGroupName));
}
@Override
public Observable<P2SVpnGateway> listByResourceGroupAsync(String resourceGroupName) {
P2sVpnGatewaysInner client = this.inner();
return client.listByResourceGroupAsync(resourceGroupName)
.flatMapIterable(new Func1<Page<P2SVpnGatewayInner>, Iterable<P2SVpnGatewayInner>>() {
@Override
public Iterable<P2SVpnGatewayInner> call(Page<P2SVpnGatewayInner> page) {
return page.items();
}
})
.map(new Func1<P2SVpnGatewayInner, P2SVpnGateway>() {
@Override
public P2SVpnGateway call(P2SVpnGatewayInner inner) {
return wrapModel(inner);
}
});
}
@Override
public PagedList<P2SVpnGateway> list() {
P2sVpnGatewaysInner client = this.inner();
return this.wrapList(client.list());
}
@Override
public Observable<P2SVpnGateway> listAsync() {
P2sVpnGatewaysInner client = this.inner();
return client.listAsync()
.flatMapIterable(new Func1<Page<P2SVpnGatewayInner>, Iterable<P2SVpnGatewayInner>>() {
@Override
public Iterable<P2SVpnGatewayInner> call(Page<P2SVpnGatewayInner> page) {
return page.items();
}
})
.map(new Func1<P2SVpnGatewayInner, P2SVpnGateway>() {
@Override
public P2SVpnGateway call(P2SVpnGatewayInner inner) {
return wrapModel(inner);
}
});
}
@Override
public P2SVpnGatewayImpl define(String name) {
return wrapModel(name);
}
@Override
public Observable<VpnProfileResponse> generateVpnProfileAsync(String resourceGroupName, String gatewayName) {
P2sVpnGatewaysInner client = this.inner();
return client.generateVpnProfileAsync(resourceGroupName, gatewayName)
.map(new Func1<VpnProfileResponseInner, VpnProfileResponse>() {
@Override
public VpnProfileResponse call(VpnProfileResponseInner inner) {
return new VpnProfileResponseImpl(inner, manager());
}
});
}
@Override
public Observable<P2SVpnGateway> getP2sVpnConnectionHealthAsync(String resourceGroupName, String gatewayName) {
P2sVpnGatewaysInner client = this.inner();
return client.getP2sVpnConnectionHealthAsync(resourceGroupName, gatewayName)
.map(new Func1<P2SVpnGatewayInner, P2SVpnGateway>() {
@Override
public P2SVpnGateway call(P2SVpnGatewayInner inner) {
return new P2SVpnGatewayImpl(inner.name(), inner, manager());
}
});
}
@Override
public Observable<P2SVpnConnectionHealth> getP2sVpnConnectionHealthDetailedAsync(String resourceGroupName, String gatewayName, P2SVpnConnectionHealthRequest request) {
P2sVpnGatewaysInner client = this.inner();
return client.getP2sVpnConnectionHealthDetailedAsync(resourceGroupName, gatewayName, request)
.map(new Func1<P2SVpnConnectionHealthInner, P2SVpnConnectionHealth>() {
@Override
public P2SVpnConnectionHealth call(P2SVpnConnectionHealthInner inner) {
return new P2SVpnConnectionHealthImpl(inner, manager());
}
});
}
@Override
public Completable disconnectP2sVpnConnectionsAsync(String resourceGroupName, String p2sVpnGatewayName) {
P2sVpnGatewaysInner client = this.inner();
return client.disconnectP2sVpnConnectionsAsync(resourceGroupName, p2sVpnGatewayName).toCompletable();
}
@Override
protected P2SVpnGatewayImpl wrapModel(P2SVpnGatewayInner inner) {
return new P2SVpnGatewayImpl(inner.name(), inner, manager());
}
@Override
protected P2SVpnGatewayImpl wrapModel(String name) {
return new P2SVpnGatewayImpl(name, new P2SVpnGatewayInner(), this.manager());
}
}
| {
"pile_set_name": "Github"
} |
---
title: "Installing Boot9strap (Frogtool)"
---
{% include toc title="Table of Contents" %}
### Required Reading
We will now use our Homebrew Launcher access to run the Frogtool utility in order to inject the exploitable Japanese version of the "Flipnote Studio" title, which we then use to run b9sTool and install boot9strap.
This is a currently working implementation of the "FIRM partitions known-plaintext" exploit detailed [here](https://www.3dbrew.org/wiki/3DS_System_Flaws).
To use the [magnet](https://wikipedia.org/wiki/Magnet_URI_scheme) links on this page, you will need a torrent client like [Deluge](http://dev.deluge-torrent.org/wiki/Download).
### What You Need
* Your `movable.sed` file from completing [Seedminer](seedminer)
* <i class="fa fa-magnet" aria-hidden="true" title="This is a magnet link. Use a torrent client to download the file."></i> - [frogcert.bin](magnet:?xt=urn:btih:d12278ea50bb3574f1fbd327f3d0e2292c70941f&dn=frogcert.bin&tr=https%3a%2f%2ftracker.fastdownload.xyz%3a443%2fannounce&tr=https%3a%2f%2fopentracker.xyz%3a443%2fannounce&tr=http%3a%2f%2fopen.trackerlist.xyz%3a80%2fannounce&tr=http%3a%2f%2ft.nyaatracker.com%3a80%2fannounce&tr=udp%3a%2f%2ftracker.tiny-vps.com%3a6969%2fannounce&tr=udp%3a%2f%2fopen.demonii.si%3a1337%2fannounce&tr=udp%3a%2f%2ftracker.port443.xyz%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.vanitycore.co%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.torrent.eu.org%3a451%2fannounce&tr=udp%3a%2f%2fretracker.lanta-net.ru%3a2710%2fannounce&tr=udp%3a%2f%2fthetracker.org%3a80%2fannounce&tr=http%3a%2f%2ftorrent.nwps.ws%3a80%2fannounce&tr=udp%3a%2f%2ftracker.coppersurfer.tk%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.iamhansen.xyz%3a2000%2fannounce&tr=udp%3a%2f%2fbt.xxx-tracker.com%3a2710%2fannounce&tr=http%3a%2f%2f0d.kebhana.mx%3a443%2fannounce&tr=udp%3a%2f%2fexodus.desync.com%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.opentrackr.org%3a1337%2fannounce&tr=udp%3a%2f%2ftracker4.itzmx.com%3a2710%2fannounce&tr=udp%3a%2f%2ftracker.justseed.it%3a1337%2fannounce&tr=http%3a%2f%2ftherightsize.net%3a1337%2fannounce&tr=udp%3a%2f%2fretracker.hotplug.ru%3a2710%2fannounce&tr=udp%3a%2f%2ftracker.internetwarriors.net%3a1337%2fannounce&tr=udp%3a%2f%2f9.rarbg.com%3a2800%2fannounce&tr=https%3a%2f%2f2.track.ga%3a443%2fannounce&tr=udp%3a%2f%2fbigfoot1942.sektori.org%3a6969%2fannounce)
* The latest release of [Frogtool](https://github.com/zoogie/Frogtool/releases/latest)
* The latest release of [b9sTool](https://github.com/zoogie/b9sTool/releases/latest)
* The latest release of [Luma3DS](https://github.com/AuroraWright/Luma3DS/releases/latest)
#### Section I - Prep Work
1. Insert your SD card into your computer
1. Copy your `movable.sed` file to the root of your SD card
1. Copy `boot.firm` and `boot.3dsx` from the Luma3DS `.zip` to the root of your SD card
1. Copy `boot.nds` (B9STool) from the b9sTool release `.zip` to the root of your SD card
1. Copy `Frogtool.3dsx` to the `/3ds/` folder on your SD card
1. Copy `frogcert.bin` to the root of your SD card
1. Reinsert your SD card into your device
1. Power on your device
#### Section II - Patching DS Download Play
1. Open the Homebrew Launcher using any method
1. Launch Frogtool from the list of homebrew
1. Select the "INJECT patched DS Download Play" option
1. Frogtool will automatically run and inject the JPN version of Flipnote Studio into your DS Download Play
1. Once this operation has finished, read the screens and check if the process was successful
+ If there are any errors or missing files, correct the problem and try again
1. If the process was successful, tap the touch screen, then select "BOOT patched DS Download Play"
1. If the exploit was successful, your device will have loaded the JPN version of Flipnote Studio
#### Section III - Flipnote Exploit
If you would prefer a visual guide to this section, one is available [here](https://zoogie.github.io/web/flipnote_directions/).
{: .notice--info}
1. Complete the initial setup process for the launched game until you reach the main menu
+ Select the left option whenever prompted during the setup process
1. Using the touch-screen, select the large left box, then select the box with an SD card icon
1. Once the menu loads, select the face icon, then the bottom right icon to continue
1. Select the frog icon at the bottom left
+ Alternatively, press (X) or (UP) on the D-Pad depending on which is shown on the top screen
1. Select the second button along the top with a film-reel icon
1. Scroll right until reel "3/3" is selected
1. Tap the third box with the letter "A" in it
1. Scroll left until reel "1/3" is selected
1. Tap the fourth box with the letter "A" in it
1. If the exploit was successful, your device will have loaded b9sTool
1. Using the D-Pad, move to "Install boot9strap"
1. Press (A), then press START and SELECT at the same time to begin the process
1. Once complete and the bottom screen says "done.", exit b9sTool, then power off your device
+ You may have to force power off by holding the power button
+ If you see the Luma configuration menu, continue without powering off
#### Section IV - Configuring Luma3DS
1. Boot your device while holding (Select) to launch the Luma configuration menu
+ If you encounter issues launching the Luma configuration menu, [follow this troubleshooting guide](https://github.com/zoogie/b9sTool/blob/master/TROUBLESHOOTING.md)
1. Use the (A) button and the D-Pad to turn on the following:
+ **"Show NAND or user string in System Settings"**
1. Press (Start) to save and reboot
+ Your device should load the Home Menu after a short delay. If you get a black screen lasting longer than 5 minutes, [follow this troubleshooting guide](troubleshooting#black-screen-on-sysnand-boot-after-installing-boot9strap)
___
### Continue to [Finalizing Setup](finalizing-setup)
{: .notice--primary}
| {
"pile_set_name": "Github"
} |
# CGroup Input Plugin
This input plugin will capture specific statistics per cgroup.
Consider restricting paths to the set of cgroups you really
want to monitor if you have a large number of cgroups, to avoid
any cardinality issues.
Following file formats are supported:
* Single value
```
VAL\n
```
* New line separated values
```
VAL0\n
VAL1\n
```
* Space separated values
```
VAL0 VAL1 ...\n
```
* New line separated key-space-value's
```
KEY0 VAL0\n
KEY1 VAL1\n
```
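As an illustration (the file name and values here are hypothetical), a key-space-value file such as `memory.stat` might contain:
```
cache 104857600
rss 52428800
```
Each key/value pair in such a file is reported as a separate field on the resulting measurement.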
### Tags:
All measurements have the following tags:
- path
### Configuration:
```toml
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/memory", # root cgroup
# "/sys/fs/cgroup/memory/child1", # container cgroup
# "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
# ]
# files = ["memory.*usage*", "memory.limit_in_bytes"]
```
### usage examples:
```toml
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/cpu", # root cgroup
# "/sys/fs/cgroup/cpu/*", # all container cgroups
# "/sys/fs/cgroup/cpu/*/*", # all children cgroups under each container cgroup
# ]
# files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/unified/*", # root cgroup
# ]
# files = ["*"]
```
| {
"pile_set_name": "Github"
} |
import * as React from 'react';
import createSvgIcon from './utils/createSvgIcon';
export default createSvgIcon(
<path d="M12 11c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm6 2c0-3.31-2.69-6-6-6s-6 2.69-6 6c0 2.22 1.21 4.15 3 5.19l1-1.74c-1.19-.7-2-1.97-2-3.45 0-2.21 1.79-4 4-4s4 1.79 4 4c0 1.48-.81 2.75-2 3.45l1 1.74c1.79-1.04 3-2.97 3-5.19zM12 3C6.48 3 2 7.48 2 13c0 3.7 2.01 6.92 4.99 8.65l1-1.73C5.61 18.53 4 15.96 4 13c0-4.42 3.58-8 8-8s8 3.58 8 8c0 2.96-1.61 5.53-4 6.92l1 1.73c2.99-1.73 5-4.95 5-8.65 0-5.52-4.48-10-10-10z" />
, 'WifiTetheringTwoTone');
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright © Magento, Inc. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\Signifyd\Test\Unit\Model\MessageGenerators;
use Magento\Framework\TestFramework\Unit\Helper\ObjectManager;
use Magento\Signifyd\Api\CaseRepositoryInterface;
use Magento\Signifyd\Api\Data\CaseInterface;
use Magento\Signifyd\Model\MessageGenerators\CaseRescore;
use PHPUnit_Framework_MockObject_MockObject as MockObject;
/**
* Tests for Signifyd CaseRescore message generator.
*
* Class CaseRescoreTest
*/
class CaseRescoreTest extends \PHPUnit\Framework\TestCase
{
private static $data = [
'caseId' => 100,
'score' => 200
];
/**
* @var ObjectManager
*/
private $objectManager;
/**
* @var CaseRepositoryInterface|MockObject
*/
private $caseRepository;
/**
* @var CaseRescore|MockObject
*/
private $caseRescore;
/**
* @var CaseInterface|MockObject
*/
private $case;
/**
* @inheritdoc
*/
protected function setUp()
{
$this->case = $this->getMockBuilder(CaseInterface::class)
->disableOriginalConstructor()
->getMock();
$this->objectManager = new ObjectManager($this);
$this->caseRepository = $this->getMockBuilder(CaseRepositoryInterface::class)
->disableOriginalConstructor()
->getMock();
$this->caseRescore = $this->objectManager->getObject(CaseRescore::class, [
'caseRepository' => $this->caseRepository
]);
}
/**
* Data array without required attribute caseId.
*
* @expectedException \Magento\Signifyd\Model\MessageGenerators\GeneratorException
* @expectedExceptionMessage The "caseId" should not be empty
*/
public function testGenerateEmptyCaseIdException()
{
$this->caseRescore->generate([]);
}
/**
* Case entity was not found in DB.
*
* @expectedException \Magento\Signifyd\Model\MessageGenerators\GeneratorException
* @expectedExceptionMessage Case entity not found.
*/
public function testGenerateNotFoundException()
{
$this->caseRepository->expects($this->once())
->method('getByCaseId')
->with(self::$data['caseId'])
->willReturn(null);
$this->caseRescore = $this->objectManager->getObject(CaseRescore::class, [
'caseRepository' => $this->caseRepository
]);
$this->caseRescore->generate(self::$data);
}
/**
* Generate case message with not empty previous score.
*/
public function testGenerateWithPreviousScore()
{
$this->case->expects($this->once())
->method('getScore')
->willReturn(self::$data['score']);
$this->caseRepository->expects($this->once())
->method('getByCaseId')
->with(self::$data['caseId'])
->willReturn($this->case);
$this->caseRescore = $this->objectManager->getObject(CaseRescore::class, [
'caseRepository' => $this->caseRepository
]);
$phrase = __(
'Case Update: New score for the order is %1. Previous score was %2.',
self::$data['score'],
self::$data['score']
);
$message = $this->caseRescore->generate(self::$data);
$this->assertEquals($phrase, $message);
}
/**
* Generate case message with empty previous score.
*/
public function testGenerateWithoutPreviousScore()
{
$this->caseRepository->expects($this->once())
->method('getByCaseId')
->with(self::$data['caseId'])
->willReturn($this->case);
$this->caseRescore = $this->objectManager->getObject(CaseRescore::class, [
'caseRepository' => $this->caseRepository
]);
$phrase = __(
'Case Update: New score for the order is %1. Previous score was %2.',
self::$data['score'],
null
);
$message = $this->caseRescore->generate(self::$data);
$this->assertEquals($phrase, $message);
}
}
| {
"pile_set_name": "Github"
} |
/*
MDAL - Mesh Data Abstraction Library (MIT License)
Copyright (C) 2019 Peter Petrik (zilolv at gmail dot com)
*/
#ifndef MDAL_TUFLOWFV_HPP
#define MDAL_TUFLOWFV_HPP
#include <string>
#include <memory>
#include <map>
#include <iostream>
#include <fstream>
#include "mdal_data_model.hpp"
#include "mdal_memory_data_model.hpp"
#include "mdal.h"
#include "mdal_driver.hpp"
#include "mdal_cf.hpp"
namespace MDAL
{
namespace TuflowFVActiveFlag
{
size_t activeData( std::shared_ptr<NetCDFFile> ncFile,
size_t timestep,
size_t timestepsCount,
size_t facesCount,
int ncidActive,
size_t indexStart,
size_t count,
int *buffer );
}
class TuflowFVDataset2D: public CFDataset2D
{
public:
TuflowFVDataset2D( DatasetGroup *parent,
double fillValX,
double fillValY,
int ncidX,
int ncidY,
Classification classificationX,
Classification classificationY,
int ncidActive,
CFDatasetGroupInfo::TimeLocation timeLocation,
size_t timesteps,
size_t values,
size_t ts,
std::shared_ptr<NetCDFFile> ncFile
);
size_t activeData( size_t indexStart, size_t count, int *buffer ) override;
private:
int mNcidActive; //!< NetCDF variable id for active flag
};
class TuflowFVDataset3D: public Dataset3D
{
public:
TuflowFVDataset3D( DatasetGroup *parent,
int ncidX,
int ncidY,
int ncidActive,
CFDatasetGroupInfo::TimeLocation timeLocation,
size_t timesteps,
size_t volumesCount,
size_t facesCount,
size_t levelFacesCount,
size_t ts,
size_t maximumLevelsCount,
std::shared_ptr<NetCDFFile> ncFile
);
virtual ~TuflowFVDataset3D() override;
size_t activeData( size_t indexStart, size_t count, int *buffer ) override;
size_t verticalLevelCountData( size_t indexStart, size_t count, int *buffer ) override;
size_t verticalLevelData( size_t indexStart, size_t count, double *buffer ) override;
size_t faceToVolumeData( size_t indexStart, size_t count, int *buffer ) override;
size_t scalarVolumesData( size_t indexStart, size_t count, double *buffer ) override;
size_t vectorVolumesData( size_t indexStart, size_t count, double *buffer ) override;
private:
int mNcidX; //!< NetCDF variable id for x
int mNcidY; //!< NetCDF variable id for y
int mNcidActive; //!< NetCDF variable id for active flag
size_t mTimesteps;
size_t mFacesCount;
size_t mLevelFacesCount;
CFDatasetGroupInfo::TimeLocation mTimeLocation;
size_t mTs;
std::shared_ptr<NetCDFFile> mNcFile;
int mNcidVerticalLevels = -1; //! variable id of int NL(NumCells2D) ;
int mNcidVerticalLevelsZ = -1; //! variable id of float layerface_Z(Time, NumLayerFaces3D) ;
int mNcidActive2D = -1; //! variable id of int stat(Time, NumCells2D) ;
int mNcid3DTo2D = -1; //! variable id of int idx2(NumCells3D) ;
int mNcid2DTo3D = -1; //! variable id of int idx3(NumCells2D) ;
};
/**
* TUFLOW FV format
*
* Binary NetCDF format with structure similar to UGRID stored as
* 3D Layered Mesh (https://github.com/qgis/QGIS-Enhancement-Proposals/issues/158)
*
* Supports active flag.
*
* There are special datasets for maximum/minimum/time of maximum/time of minimum.
* These datasets are not 1-1 with calculated max/min of time-datasets, since
* they are calculated with more fine methods directly in TUFLOW solvers
*
* Both mesh and dataset is stored in single file.
*/
class DriverTuflowFV: public DriverCF
{
public:
DriverTuflowFV();
~DriverTuflowFV() override;
DriverTuflowFV *create() override;
private:
CFDimensions populateDimensions( ) override;
void populateElements( Vertices &vertices, Edges &, Faces &faces ) override;
void addBedElevation( MemoryMesh *mesh ) override;
std::string getCoordinateSystemVariableName() override;
std::set<std::string> ignoreNetCDFVariables() override;
void parseNetCDFVariableMetadata( int varid,
std::string &variableName,
std::string &name,
bool *is_vector,
bool *isPolar,
bool *invertedDirection,
bool *is_x ) override;
std::vector<std::pair<double, double>> parseClassification( int varid ) const override;
std::string getTimeVariableName() const override;
std::shared_ptr<MDAL::Dataset> create2DDataset(
std::shared_ptr<MDAL::DatasetGroup> group,
size_t ts,
const MDAL::CFDatasetGroupInfo &dsi,
double fill_val_x, double fill_val_y ) override;
std::shared_ptr<MDAL::Dataset> create3DDataset(
std::shared_ptr<MDAL::DatasetGroup> group,
size_t ts,
const MDAL::CFDatasetGroupInfo &dsi,
double fill_val_x, double fill_val_y ) override;
void addBedElevationDatasetOnFaces();
void populateVertices( MDAL::Vertices &vertices );
void populateFaces( MDAL::Faces &faces );
virtual DateTime defaultReferenceTime() const override;
void calculateMaximumLevelCount();
int mMaximumLevelsCount = -1;
};
} // namespace MDAL
#endif //MDAL_TUFLOWFV_HPP
| {
"pile_set_name": "Github"
} |
// <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// </auto-generated>
namespace Microsoft.Azure.Management.Sql.Fluent.Models
{
using Microsoft.Rest;
using Microsoft.Rest.Serialization;
using Newtonsoft.Json;
using System.Linq;
/// <summary>
/// The server encryption protector.
/// </summary>
[Rest.Serialization.JsonTransformation]
public partial class EncryptionProtectorInner : ProxyResourceInner
{
/// <summary>
/// Initializes a new instance of the EncryptionProtectorInner class.
/// </summary>
public EncryptionProtectorInner()
{
CustomInit();
}
/// <summary>
/// Initializes a new instance of the EncryptionProtectorInner class.
/// </summary>
/// <param name="serverKeyType">The encryption protector type like
/// 'ServiceManaged', 'AzureKeyVault'. Possible values include:
/// 'ServiceManaged', 'AzureKeyVault'</param>
/// <param name="kind">Kind of encryption protector. This is metadata
/// used for the Azure portal experience.</param>
/// <param name="location">Resource location.</param>
/// <param name="subregion">Subregion of the encryption
/// protector.</param>
/// <param name="serverKeyName">The name of the server key.</param>
/// <param name="uri">The URI of the server key.</param>
/// <param name="thumbprint">Thumbprint of the server key.</param>
public EncryptionProtectorInner(ServerKeyType serverKeyType, string id = default(string), string name = default(string), string type = default(string), string kind = default(string), string location = default(string), string subregion = default(string), string serverKeyName = default(string), string uri = default(string), string thumbprint = default(string))
: base(id, name, type)
{
Kind = kind;
Location = location;
Subregion = subregion;
ServerKeyName = serverKeyName;
ServerKeyType = serverKeyType;
Uri = uri;
Thumbprint = thumbprint;
CustomInit();
}
/// <summary>
/// An initialization method that performs custom operations like setting defaults
/// </summary>
partial void CustomInit();
/// <summary>
/// Gets or sets kind of encryption protector. This is metadata used
/// for the Azure portal experience.
/// </summary>
[JsonProperty(PropertyName = "kind")]
public string Kind { get; set; }
/// <summary>
/// Gets resource location.
/// </summary>
[JsonProperty(PropertyName = "location")]
public string Location { get; private set; }
/// <summary>
/// Gets subregion of the encryption protector.
/// </summary>
[JsonProperty(PropertyName = "properties.subregion")]
public string Subregion { get; private set; }
/// <summary>
/// Gets or sets the name of the server key.
/// </summary>
[JsonProperty(PropertyName = "properties.serverKeyName")]
public string ServerKeyName { get; set; }
/// <summary>
/// Gets or sets the encryption protector type like 'ServiceManaged',
/// 'AzureKeyVault'. Possible values include: 'ServiceManaged',
/// 'AzureKeyVault'
/// </summary>
[JsonProperty(PropertyName = "properties.serverKeyType")]
public ServerKeyType ServerKeyType { get; set; }
/// <summary>
/// Gets the URI of the server key.
/// </summary>
[JsonProperty(PropertyName = "properties.uri")]
public string Uri { get; private set; }
/// <summary>
/// Gets thumbprint of the server key.
/// </summary>
[JsonProperty(PropertyName = "properties.thumbprint")]
public string Thumbprint { get; private set; }
/// <summary>
/// Validate the object.
/// </summary>
/// <exception cref="ValidationException">
/// Thrown if validation fails
/// </exception>
public virtual void Validate()
{
if (ServerKeyType == null)
{
throw new ValidationException(ValidationRules.CannotBeNull, "ServerKeyType");
}
}
}
}
| {
"pile_set_name": "Github"
} |
process.env.NODE_ENV = process.env.NODE_ENV || 'development'
const environment = require('./environment')
module.exports = environment.toWebpackConfig()
| {
"pile_set_name": "Github"
} |
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
goto execute
:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
| {
"pile_set_name": "Github"
} |
.gallery-toggle {
position: absolute;
display: flex;
flex-flow: column;
justify-content: center;
align-items: center;
height: 100%;
width: 2rem;
transition: 0.25s;
color: rgba(#ecf0f1, 0.5);
background-color: rgba(#2c3e50, 0.5);
opacity: 0;
cursor: pointer;
&:hover {
opacity: 1;
}
}
| {
"pile_set_name": "Github"
} |
.\" **************************************************************************
.\" * _ _ ____ _
.\" * Project ___| | | | _ \| |
.\" * / __| | | | |_) | |
.\" * | (__| |_| | _ <| |___
.\" * \___|\___/|_| \_\_____|
.\" *
.\" * Copyright (C) 1998 - 2010, Daniel Stenberg, <[email protected]>, et al.
.\" *
.\" * This software is licensed as described in the file COPYING, which
.\" * you should have received as part of this distribution. The terms
.\" * are also available at http://curl.haxx.se/docs/copyright.html.
.\" *
.\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell
.\" * copies of the Software, and permit persons to whom the Software is
.\" * furnished to do so, under the terms of the COPYING file.
.\" *
.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
.\" * KIND, either express or implied.
.\" *
.\" * $Id$
.\" **************************************************************************
.\"
.TH curl 1 "28 November 2009" "Curl 7.20.0" "Curl Manual"
.SH NAME
curl \- transfer a URL
.SH SYNOPSIS
.B curl [options]
.I [URL...]
.SH DESCRIPTION
.B curl
is a tool to transfer data from or to a server, using one of the supported
protocols (HTTP, HTTPS, FTP, FTPS, SCP, SFTP, TFTP, DICT, TELNET, LDAP or
FILE). The command is designed to work without user interaction.
curl offers a busload of useful tricks like proxy support, user
authentication, FTP upload, HTTP post, SSL connections, cookies, file transfer
resume and more. As you will see below, the number of features will make your
head spin!
curl is powered by libcurl for all transfer-related features. See
.BR libcurl (3)
for details.
.SH URL
The URL syntax is protocol-dependent. You'll find a detailed description in
RFC 3986.
You can specify multiple URLs or parts of URLs by writing part sets within
braces as in:
http://site.{one,two,three}.com
or you can get sequences of alphanumeric series by using [] as in:
ftp://ftp.numericals.com/file[1-100].txt
ftp://ftp.numericals.com/file[001-100].txt (with leading zeros)
ftp://ftp.letters.com/file[a-z].txt
No nesting of the sequences is supported at the moment, but you can use
several ones next to each other:
http://any.org/archive[1996-1999]/vol[1-4]/part{a,b,c}.html
You can specify any amount of URLs on the command line. They will be fetched
in a sequential manner in the specified order.
Since curl 7.15.1 you can also specify a step counter for the ranges, so that
you can get every Nth number or letter:
http://www.numericals.com/file[1-100:10].txt
http://www.letters.com/file[a-z:2].txt
If you specify URL without protocol:// prefix, curl will attempt to guess what
protocol you might want. It will then default to HTTP but try other protocols
based on often-used host name prefixes. For example, for host names starting
with "ftp." curl will assume you want to speak FTP.
curl will do its best to use what you pass to it as a URL. It is not trying to
validate it as a syntactically correct URL by any means but is instead
\fBvery\fP liberal with what it accepts.
Curl will attempt to re-use connections for multiple file transfers, so that
getting many files from the same server will not do multiple connects /
handshakes. This improves speed. Of course this is only done on files
specified on a single command line and cannot be used between separate curl
invokes.
.SH "PROGRESS METER"
curl normally displays a progress meter during operations, indicating the amount
of transferred data, transfer speeds and estimated time left, etc.
However, since curl displays this data to the terminal by default, if you invoke
curl to do an operation and it is about to write data to the terminal, it
\fIdisables\fP the progress meter as otherwise it would mess up the output
mixing progress meter and response data.
If you want a progress meter for HTTP POST or PUT requests, you need to
redirect the response output to a file, using shell redirect (>), -o [file] or
similar.
It is not the same case for FTP upload as that operation does not spit out
any response data to the terminal.
If you prefer a progress "bar" instead of the regular meter, \fI-#\fP is your
friend.
.SH OPTIONS
In general, all boolean options are enabled with --option and yet again
disabled with --\fBno-\fPoption. That is, you use the exact same option name
but prefix it with "no-". However, in this list we mostly only list and show
the --option version of them. (This concept with --no options was added in
7.19.0. Previously most options were toggled on/off on repeated use of the
same command line option.)
.IP "-a/--append"
(FTP/SFTP) When used in an upload, this will tell curl to append to the target
file instead of overwriting it. If the file doesn't exist, it will be created.
Note that this flag is ignored by some SSH servers (including OpenSSH).
.IP "-A/--user-agent <agent string>"
(HTTP) Specify the User-Agent string to send to the HTTP server. Some badly
done CGIs fail if this field isn't set to "Mozilla/4.0". To encode blanks in
the string, surround the string with single quote marks. This can also be set
with the \fI-H/--header\fP option of course.
If this option is set more than once, the last one will be the one that's
used.
.IP "--anyauth"
(HTTP) Tells curl to figure out authentication method by itself, and use the
most secure one the remote site claims to support. This is done by first
doing a request and checking the response-headers, thus possibly inducing an
extra network round-trip. This is used instead of setting a specific
authentication method, which you can do with \fI--basic\fP, \fI--digest\fP,
\fI--ntlm\fP, and \fI--negotiate\fP.
Note that using --anyauth is not recommended if you do uploads from stdin,
since it may require data to be sent twice and then the client must be able to
rewind. If the need should arise when uploading from stdin, the upload
operation will fail.
.IP "-b/--cookie <name=data>"
(HTTP)
Pass the data to the HTTP server as a cookie. It is supposedly the
data previously received from the server in a "Set-Cookie:" line.
The data should be in the format "NAME1=VALUE1; NAME2=VALUE2".
If no '=' symbol is used in the line, it is treated as a filename to use to
read previously stored cookie lines from, which should be used in this session
if they match. Using this method also activates the "cookie parser" which will
make curl record incoming cookies too, which may be handy if you're using this
in combination with the \fI-L/--location\fP option. The file format of the
file to read cookies from should be plain HTTP headers or the Netscape/Mozilla
cookie file format.
\fBNOTE\fP that the file specified with \fI-b/--cookie\fP is only used as
input. No cookies will be stored in the file. To store cookies, use the
\fI-c/--cookie-jar\fP option or you could even save the HTTP headers to a file
using \fI-D/--dump-header\fP!
If this option is set more than once, the last one will be the one that's
used.
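For example (host and file names below are placeholders), cookies can be given
directly on the command line or read from a previously saved file:
.nf
# send two cookies given on the command line
curl -b "NAME1=VALUE1; NAME2=VALUE2" http://example.com/
# read previously stored cookies from a file and follow redirects
curl -b cookies.txt -L http://example.com/
.fi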
.IP "-B/--use-ascii"
Enable ASCII transfer when using FTP or LDAP. For FTP, this can also be
enforced by using a URL that ends with ";type=A". This option causes data
sent to stdout to be in text mode for win32 systems.
.IP "--basic"
(HTTP) Tells curl to use HTTP Basic authentication. This is the default and
this option is usually pointless, unless you use it to override a previously
set option that sets a different authentication method (such as \fI--ntlm\fP,
\fI--digest\fP, or \fI--negotiate\fP).
.IP "--ciphers <list of ciphers>"
(SSL) Specifies which ciphers to use in the connection. The list of ciphers
must specify valid ciphers. Read up on SSL cipher list details on this URL:
\fIhttp://www.openssl.org/docs/apps/ciphers.html\fP
NSS ciphers are done differently than OpenSSL and GnuTLS. The full list of
NSS ciphers is in the NSSCipherSuite entry at this URL:
\fIhttp://directory.fedora.redhat.com/docs/mod_nss.html#Directives\fP
If this option is used several times, the last one will override the others.
.IP "--compressed"
(HTTP) Request a compressed response using one of the algorithms libcurl
supports, and return the uncompressed document. If this option is used and
the server sends an unsupported encoding, curl will report an error.
.IP "--connect-timeout <seconds>"
Maximum time in seconds that you allow the connection to the server to take.
This only limits the connection phase, once curl has connected this option is
of no more use. See also the \fI-m/--max-time\fP option.
If this option is used several times, the last one will be used.
.IP "-c/--cookie-jar <file name>"
Specify to which file you want curl to write all cookies after a completed
operation. Curl writes all cookies previously read from a specified file as
well as all cookies received from remote server(s). If no cookies are known,
no file will be written. The file will be written using the Netscape cookie
file format. If you set the file name to a single dash, "-", the cookies will
be written to stdout.
.B NOTE
If the cookie jar can't be created or written to, the whole curl operation
won't fail or even report an error clearly. Using -v will get a warning
displayed, but that is the only visible feedback you get about this possibly
lethal situation.
If this option is used several times, the last specified file name will be
used.
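For example (host and file names are placeholders), read cookies from one file
and save the final set of cookies to another after the operation:
.nf
curl -b cookies.txt -c newcookies.txt http://example.com/
.fi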
.IP "-C/--continue-at <offset>"
Continue/Resume a previous file transfer at the given offset. The given offset
is the exact number of bytes that will be skipped, counting from the beginning
of the source file before it is transferred to the destination. If used with
uploads, the FTP server command SIZE will not be used by curl.
Use "-C -" to tell curl to automatically find out where/how to resume the
transfer. It then uses the given output/input files to figure that out.
If this option is used several times, the last one will be used.
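For example (host and file names are placeholders):
.nf
# let curl work out where to resume the earlier, interrupted download
curl -C - -o bigfile.zip http://example.com/bigfile.zip
# skip exactly 1000 bytes of the source file before transferring the rest
curl -C 1000 -o rest.bin http://example.com/file.bin
.fi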
.IP "--create-dirs"
When used in conjunction with the -o option, curl will create the necessary
local directory hierarchy as needed. This option creates the dirs mentioned
with the -o option, nothing else. If the -o file name uses no dir or if the
dirs it mentions already exist, no dir will be created.
To create remote directories when using FTP or SFTP, try
\fI--ftp-create-dirs\fP.
.IP "--crlf"
(FTP) Convert LF to CRLF in upload. Useful for MVS (OS/390).
.IP "--crlfile <file>"
(HTTPS/FTPS) Provide a file using PEM format with a Certificate Revocation
List that may specify peer certificates that are to be considered revoked.
If this option is used several times, the last one will be used.
(Added in 7.19.7)
.IP "-d/--data <data>"
(HTTP) Sends the specified data in a POST request to the HTTP server, in the
same way that a browser does when a user has filled in an HTML form and
presses the submit button. This will cause curl to pass the data to the server
using the content-type application/x-www-form-urlencoded. Compare to
\fI-F/--form\fP.
\fI-d/--data\fP is the same as \fI--data-ascii\fP. To post data purely binary,
you should instead use the \fI--data-binary\fP option. To URL-encode the value
of a form field you may use \fI--data-urlencode\fP.
If any of these options is used more than once on the same command line, the
data pieces specified will be merged together with a separating
&-symbol. Thus, using '-d name=daniel -d skill=lousy' would generate a post
chunk that looks like \&'name=daniel&skill=lousy'.
If you start the data with the letter @, the rest should be a file name to
read the data from, or - if you want curl to read the data from stdin. The
contents of the file must already be URL-encoded. Multiple files can also be
specified. Posting data from a file named 'foobar' would thus be done with
\fI--data @foobar\fP.
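For example (host and file names are placeholders):
.nf
# the two -d pieces are merged into name=daniel&skill=lousy
curl -d name=daniel -d skill=lousy http://example.com/form.cgi
# read already URL-encoded data from the local file 'foobar'
curl -d @foobar http://example.com/form.cgi
.fi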
.IP "--data-binary <data>"
(HTTP) This posts data exactly as specified with no extra processing
whatsoever.
If you start the data with the letter @, the rest should be a filename. Data
is posted in a similar manner as \fI--data-ascii\fP does, except that newlines
are preserved and conversions are never done.
If this option is used several times, the ones following the first will append
data as described in \fI-d/--data\fP.
.IP "--data-urlencode <data>"
(HTTP) This posts data, similar to the other --data options with the exception
that this performs URL-encoding. (Added in 7.18.0)
To be CGI-compliant, the <data> part should begin with a \fIname\fP followed
by a separator and a content specification. The <data> part can be passed to
curl using one of the following syntaxes:
.RS
.IP "content"
This will make curl URL-encode the content and pass that on. Just be careful
so that the content doesn't contain any = or @ symbols, as that will then make
the syntax match one of the other cases below!
.IP "=content"
This will make curl URL-encode the content and pass that on. The preceding =
symbol is not included in the data.
.IP "name=content"
This will make curl URL-encode the content part and pass that on. Note that
the name part is expected to be URL-encoded already.
.IP "@filename"
This will make curl load data from the given file (including any newlines),
URL-encode that data and pass it on in the POST.
.IP "name@filename"
This will make curl load data from the given file (including any newlines),
URL-encode that data and pass it on in the POST. The name part gets an equal
sign appended, resulting in \fIname=urlencoded-file-content\fP. Note that the
name is expected to be URL-encoded already.
.RE
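For example (host and file names are placeholders):
.nf
# the part left of the = is sent as-is, the part right of it gets URL-encoded
curl --data-urlencode "comment=spaces & symbols get encoded" http://example.com/
# URL-encode the contents of a local file and send it as the field 'text'
curl --data-urlencode text@localfile.txt http://example.com/
.fi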
.IP "--digest"
(HTTP) Enables HTTP Digest authentication. This is an authentication scheme that
prevents the password from being sent over the wire in clear text. Use this in
combination with the normal \fI-u/--user\fP option to set user name and
password. See also \fI--ntlm\fP, \fI--negotiate\fP and \fI--anyauth\fP for
related options.
If this option is used several times, the following occurrences make no
difference.
.IP "--disable-eprt"
(FTP) Tell curl to disable the use of the EPRT and LPRT commands when doing
active FTP transfers. Curl will normally always first attempt to use EPRT,
then LPRT before using PORT, but with this option, it will use PORT right
away. EPRT and LPRT are extensions to the original FTP protocol, and may not work
on all servers, but they enable more functionality in a better way than the
traditional PORT command.
Since curl 7.19.0, \fB--eprt\fP can be used to explicitly enable EPRT again
and \fB--no-eprt\fP is an alias for \fB--disable-eprt\fP.
Disabling EPRT only changes the active behavior. If you want to switch to
passive mode you need to not use \fI-P/--ftp-port\fP or force it with
\fI--ftp-pasv\fP.
.IP "--disable-epsv"
(FTP) Tell curl to disable the use of the EPSV command when doing passive FTP
transfers. Curl will normally always first attempt to use EPSV before PASV,
but with this option, it will not try using EPSV.
Since curl 7.19.0, \fB--epsv\fP can be used to explicitly enable EPSV again
and \fB--no-epsv\fP is an alias for \fB--disable-epsv\fP.
Disabling EPSV only changes the passive behavior. If you want to switch to
active mode you need to use \fI-P/--ftp-port\fP.
.IP "-D/--dump-header <file>"
Write the protocol headers to the specified file.
This option is handy to use when you want to store the headers that a HTTP
site sends to you. Cookies from the headers could then be read in a second
curl invocation by using the \fI-b/--cookie\fP option! The \fI-c/--cookie-jar\fP
option is however a better way to store cookies.
When used in FTP, the FTP server response lines are considered being "headers"
and thus are saved there.
If this option is used several times, the last one will be used.
.IP "-e/--referer <URL>"
(HTTP) Sends the "Referer Page" information to the HTTP server. This can also
be set with the \fI-H/--header\fP flag of course. When used with
\fI-L/--location\fP you can append ";auto" to the --referer URL to make curl
automatically set the previous URL when it follows a Location: header. The
\&";auto" string can be used alone, even if you don't set an initial --referer.
If this option is used several times, the last one will be used.
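For example (URLs are placeholders):
.nf
# send a fixed Referer header along with the request
curl -e http://example.com/start.html http://example.com/page.html
# let curl set the Referer automatically while following redirects
curl -e ";auto" -L http://example.com/
.fi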
.IP "--engine <name>"
Select the OpenSSL crypto engine to use for cipher
operations. Use \fI--engine list\fP to print a list of build-time supported
engines. Note that not all (and possibly none) of the engines may be available
at run-time.
.IP "--environment"
(RISC OS ONLY) Sets a range of environment variables, using the names the -w
option supports, to allow easier extraction of useful information after having
run curl.
.IP "--egd-file <file>"
(SSL) Specify the path name to the Entropy Gathering Daemon socket. The socket
is used to seed the random engine for SSL connections. See also the
\fI--random-file\fP option.
.IP "-E/--cert <certificate[:password]>"
(SSL) Tells curl to use the specified certificate file when getting a file
with HTTPS or FTPS. The certificate must be in PEM format. If the optional
password isn't specified, it will be queried for on the terminal. Note that
this option assumes a \&"certificate" file that is the private key and the
private certificate concatenated! See \fI--cert\fP and \fI--key\fP to specify
them independently.
If curl is built against the NSS SSL library then this option tells
curl the nickname of the certificate to use within the NSS database defined
by the environment variable SSL_DIR (or by default /etc/pki/nssdb). If the
NSS PEM PKCS#11 module (libnsspem.so) is available then PEM files may be
loaded.
If this option is used several times, the last one will be used.
.IP "--cert-type <type>"
(SSL) Tells curl what certificate type the provided certificate is in. PEM,
DER and ENG are recognized types. If not specified, PEM is assumed.
If this option is used several times, the last one will be used.
.IP "--cacert <CA certificate>"
(SSL) Tells curl to use the specified certificate file to verify the peer. The
file may contain multiple CA certificates. The certificate(s) must be in PEM
format. Normally curl is built to use a default file for this, so this option
is typically used to alter that default file.
curl recognizes the environment variable named 'CURL_CA_BUNDLE' if it is
set, and uses the given path as a path to a CA cert bundle. This option
overrides that variable.
The Windows version of curl will automatically look for a CA certs file named
\'curl-ca-bundle.crt\', either in the same directory as curl.exe, or in the
Current Working Directory, or in any folder along your PATH.
If curl is built against the NSS SSL library then this option tells
curl the nickname of the CA certificate to use within the NSS database
defined by the environment variable SSL_DIR (or by default /etc/pki/nssdb).
If the NSS PEM PKCS#11 module (libnsspem.so) is available then PEM files
may be loaded.
If this option is used several times, the last one will be used.
.IP "--capath <CA certificate directory>"
(SSL) Tells curl to use the specified certificate directory to verify the
peer. The certificates must be in PEM format, and the directory must have been
processed using the c_rehash utility supplied with openssl. Using
\fI--capath\fP can allow curl to make SSL-connections much more efficiently
than using \fI--cacert\fP if the \fI--cacert\fP file contains many CA
certificates.
If this option is used several times, the last one will be used.
.IP "-f/--fail"
(HTTP) Fail silently (no output at all) on server errors. This is mostly done
to enable scripts etc. to better deal with failed attempts. In
normal cases when a HTTP server fails to deliver a document, it returns an
HTML document stating so (which often also describes why and more). This flag
will prevent curl from outputting that and return error 22.
This method is not fail-safe and there are occasions where non-successful
response codes will slip through, especially when authentication is involved
(response codes 401 and 407).
.IP "--ftp-account [data]"
(FTP) When an FTP server asks for "account data" after user name and password
has been provided, this data is sent off using the ACCT command. (Added in
7.13.0)
If this option is used twice, the second will override the previous use.
.IP "--ftp-create-dirs"
(FTP/SFTP) When an FTP or SFTP URL/operation uses a path that doesn't
currently exist on the server, the standard behavior of curl is to
fail. Using this option, curl will instead attempt to create missing
directories.
.IP "--ftp-method [method]"
(FTP) Control what method curl should use to reach a file on a FTP(S)
server. The method argument should be one of the following alternatives:
.RS
.IP multicwd
curl does a single CWD operation for each path part in the given URL. For deep
hierarchies this means very many commands. This is how RFC1738 says it should
be done. This is the default but the slowest behavior.
.IP nocwd
curl does no CWD at all. curl will do SIZE, RETR, STOR etc and give a full
path to the server for all these commands. This is the fastest behavior.
.IP singlecwd
curl does one CWD with the full target directory and then operates on the file
\&"normally" (like in the multicwd case). This is somewhat more standards
compliant than 'nocwd' but without the full penalty of 'multicwd'.
.RE
(Added in 7.15.1)
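For example (host and path are placeholders), a deep path can be fetched
without any CWD commands at all:
.nf
curl --ftp-method nocwd -O ftp://example.com/dir1/dir2/dir3/file.txt
.fi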
.IP "--ftp-pasv"
(FTP) Use passive mode for the data connection. Passive is the internal default
behavior, but this option can be used to override a previous
\fI-P/--ftp-port\fP option. (Added in 7.11.0)
If this option is used several times, the following occurrences make no
difference. An enforced passive mode cannot simply be undone; instead you must
enforce the correct \fI-P/--ftp-port\fP again.
Passive mode means that curl will try the EPSV command first and then PASV,
unless \fI--disable-epsv\fP is used.
.IP "--ftp-alternative-to-user <command>"
(FTP) If authenticating with the USER and PASS commands fails, send this
command. When connecting to Tumbleweed's Secure Transport server over FTPS
using a client certificate, using "SITE AUTH" will tell the server to retrieve
the username from the certificate. (Added in 7.15.5)
.IP "--ftp-skip-pasv-ip"
(FTP) Tell curl to not use the IP address the server suggests in its response
to curl's PASV command when curl connects the data connection. Instead curl
will re-use the same IP address it already uses for the control
connection. (Added in 7.14.2)
This option has no effect if PORT, EPRT or EPSV is used instead of PASV.
.IP "--ftp-pret"
(FTP) Tell curl to send a PRET command before PASV (and EPSV). Certain
FTP servers, mainly drftpd, require this non-standard command for
directory listings as well as up and downloads in PASV mode.
(Added in 7.20.x)
.IP "--ssl"
(FTP, POP3, IMAP, SMTP) Try to use SSL/TLS for the connection. Reverts to a
non-secure connection if the server doesn't support SSL/TLS. See also
\fI--ftp-ssl-control\fP and \fI--ssl-reqd\fP for different levels of
encryption required. (Added in 7.20.0)
This option was formerly known as \fI--ftp-ssl\fP (Added in 7.11.0) and that
can still be used but will be removed in a future version.
.IP "--ftp-ssl-control"
(FTP) Require SSL/TLS for the FTP login, clear for transfer. Allows secure
authentication, but non-encrypted data transfers for efficiency. Fails the
transfer if the server doesn't support SSL/TLS. (Added in 7.16.0)
.IP "--ssl-reqd"
(FTP, POP3, IMAP, SMTP) Require SSL/TLS for the connection. Terminates the
connection if the server doesn't support SSL/TLS. (Added in 7.20.0)
This option was formerly known as \fI--ftp-ssl-reqd\fP (added in 7.15.5) and
that can still be used but will be removed in a future version.
.IP "--ftp-ssl-ccc"
(FTP) Use CCC (Clear Command Channel)
Shuts down the SSL/TLS layer after authenticating. The rest of the
control channel communication will be unencrypted. This allows
NAT routers to follow the FTP transaction. The default mode is
passive. See --ftp-ssl-ccc-mode for other modes.
(Added in 7.16.1)
.IP "--ftp-ssl-ccc-mode [active/passive]"
(FTP) Use CCC (Clear Command Channel)
Sets the CCC mode. The passive mode will not initiate the shutdown, but
instead wait for the server to do it, and will not reply to the
shutdown from the server. The active mode initiates the shutdown and
waits for a reply from the server.
(Added in 7.16.2)
.IP "-F/--form <name=content>"
(HTTP) This lets curl emulate a filled-in form in which a user has pressed the
submit button. This causes curl to POST data using the Content-Type
multipart/form-data according to RFC2388. This enables uploading of binary
files etc. To force the 'content' part to be a file, prefix the file name
with an @ sign. To just get the content part from a file, prefix the file name
with the symbol <. The difference between @ and < is then that @ makes a file
get attached in the post as a file upload, while the < makes a text field and
just get the contents for that text field from a file.
Example, to send your password file to the server, where
\&'password' is the name of the form-field to which /etc/passwd will be the
input:
\fBcurl\fP -F password=@/etc/passwd www.mypasswords.com
To read the file's content from stdin instead of a file, use - where the file
name should've been. This goes for both @ and < constructs.
You can also tell curl what Content-Type to use by using 'type=', in a manner
similar to:
\fBcurl\fP -F "[email protected];type=text/html" url.com
or
\fBcurl\fP -F "name=daniel;type=text/foo" url.com
You can also explicitly change the name field of a file upload part by
setting filename=, like this:
\fBcurl\fP -F "file=@localfile;filename=nameinpost" url.com
See further examples and details in the MANUAL.
This option can be used multiple times.
.IP "--form-string <name=string>"
(HTTP) Similar to \fI--form\fP except that the value string for the named
parameter is used literally. Leading \&'@' and \&'<' characters, and the
\&';type=' string in the value have no special meaning. Use this in preference
to \fI--form\fP if there's any possibility that the string value may
accidentally trigger the \&'@' or \&'<' features of \fI--form\fP.
.IP "-g/--globoff"
This option switches off the "URL globbing parser". When you set this option,
you can specify URLs that contain the letters {}[] without having them be
interpreted by curl itself. Note that these letters are not normal legal URL
contents but they should be encoded according to the URI standard.
.IP "-G/--get"
When used, this option will make all data specified with \fI-d/--data\fP or
\fI--data-binary\fP to be used in a HTTP GET request instead of the POST
request that otherwise would be used. The data will be appended to the URL
with a '?' separator.
If used in combination with -I, the POST data will instead be appended to the
URL with a HEAD request.
If this option is used several times, the following occurrences make no
difference. This is because undoing a GET doesn't make sense, but you should
then instead enforce the alternative method you prefer.
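For example (the URL is a placeholder), the data pieces end up in the query
string of a GET request:
.nf
# requests /search?q=kitties&count=20 with GET instead of POST
curl -G -d q=kitties -d count=20 http://example.com/search
.fi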
.IP "-h/--help"
Usage help.
.IP "-H/--header <header>"
(HTTP) Extra header to use when getting a web page. You may specify any number
of extra headers. Note that if you should add a custom header that has the
same name as one of the internal ones curl would use, your externally set
header will be used instead of the internal one. This allows you to make even
trickier stuff than curl would normally do. You should not replace internally
set headers without knowing perfectly well what you're doing. Remove an
internal header by giving a replacement without content on the right side of
the colon, as in: -H \&"Host:".
curl will make sure that each header you add/replace is sent with the proper
end-of-line marker, you should thus \fBnot\fP add that as a part of the header
content: do not add newlines or carriage returns, they will only mess things up
for you.
See also the \fI-A/--user-agent\fP and \fI-e/--referer\fP options.
This option can be used multiple times to add/replace/remove multiple headers.
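For example (host and header values are placeholders):
.nf
# add a custom header and replace the internally generated User-Agent header
curl -H "X-Custom: yes" -H "User-Agent: my-agent/1.0" http://example.com/
# remove the internal Host header entirely
curl -H "Host:" http://example.com/
.fi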
.IP "--hostpubmd5 <md5>"
Pass a string containing 32 hexadecimal digits. The string should be the 128
bit MD5 checksum of the remote host's public key, curl will refuse the
connection with the host unless the md5sums match. This option is only for SCP
and SFTP transfers. (Added in 7.17.1)
.IP "--ignore-content-length"
(HTTP)
Ignore the Content-Length header. This is particularly useful for servers
running Apache 1.x, which will report incorrect Content-Length for files
larger than 2 gigabytes.
.IP "-i/--include"
(HTTP) Include the HTTP-header in the output. The HTTP-header includes things
like server-name, date of the document, HTTP-version and more...
.IP "--interface <name>"
Perform an operation using a specified interface. You can enter interface
name, IP address or host name. An example could look like:
curl --interface eth0:1 http://www.netscape.com/
If this option is used several times, the last one will be used.
.IP "-I/--head"
(HTTP/FTP/FILE)
Fetch the HTTP-header only! HTTP-servers feature the command HEAD
which this uses to get nothing but the header of a document. When used
on a FTP or FILE file, curl displays the file size and last modification
time only.
.IP "-j/--junk-session-cookies"
(HTTP) When curl is told to read cookies from a given file, this option will
make it discard all "session cookies". This will basically have the same effect
as if a new session is started. Typical browsers always discard session
cookies when they're closed down.
.IP "-J/--remote-header-name"
(HTTP) This option tells the -O/--remote-name option to use the server-specified
Content-Disposition filename instead of extracting a filename from the URL.
.IP "-k/--insecure"
(SSL) This option explicitly allows curl to perform "insecure" SSL connections
and transfers. curl attempts to make all SSL connections secure by using
the CA certificate bundle installed by default. This makes all connections
considered "insecure" fail unless \fI-k/--insecure\fP is used.
See this online resource for further details:
\fBhttp://curl.haxx.se/docs/sslcerts.html\fP
.IP "--keepalive-time <seconds>"
This option sets the time a connection needs to remain idle before sending
keepalive probes and the time between individual keepalive probes. It is
currently effective on operating systems offering the TCP_KEEPIDLE and
TCP_KEEPINTVL socket options (meaning Linux, recent AIX, HP-UX and more). This
option has no effect if \fI--no-keepalive\fP is used. (Added in 7.18.0)
If this option is used multiple times, the last occurrence sets the amount.
.IP "--key <key>"
(SSL/SSH) Private key file name. Allows you to provide your private key in this
separate file.
If this option is used several times, the last one will be used.
.IP "--key-type <type>"
(SSL) Private key file type. Specify which type your \fI--key\fP provided
private key is. DER, PEM, and ENG are supported. If not specified, PEM is
assumed.
If this option is used several times, the last one will be used.
.IP "--krb <level>"
(FTP) Enable Kerberos authentication and use. The level must be entered and
should be one of 'clear', 'safe', 'confidential', or 'private'. Should you use
a level that is not one of these, 'private' will instead be used.
This option requires a library built with kerberos4 or GSSAPI
(GSS-Negotiate) support. This is not very common. Use \fI-V/--version\fP to
see if your curl supports it.
If this option is used several times, the last one will be used.
.IP "-K/--config <config file>"
Specify which config file to read curl arguments from. The config file is a
text file in which command line arguments can be written which then will be
used as if they were written on the actual command line. Options and their
parameters must be specified on the same config file line, separated by
whitespace, colon, the equals sign or any combination thereof (however,
the preferred separator is the equals sign). If the parameter is to contain
whitespace, the parameter must be enclosed within quotes. Within double
quotes, the following escape sequences are available: \\\\, \\", \\t, \\n,
\\r and \\v. A backslash preceding any other letter is ignored. If the
first column of a config line is a '#' character, the rest of the line will be
treated as a comment. Only write one option per physical line in the config
file.
Specify the filename to -K/--config as '-' to make curl read the file from
stdin.
Note that to be able to specify a URL in the config file, you need to specify
it using the \fI--url\fP option, and not by simply writing the URL on its own
line. So, it could look similar to this:
url = "http://curl.haxx.se/docs/"
Long option names can optionally be given in the config file without the
initial double dashes.
When curl is invoked, it always (unless \fI-q\fP is used) checks for a default
config file and uses it if found. The default config file is checked for in
the following places in this order:
1) curl tries to find the "home dir": It first checks for the CURL_HOME and
then the HOME environment variables. Failing that, it uses getpwuid() on
UNIX-like systems (which returns the home dir given the current user in your
system). On Windows, it then checks for the APPDATA variable, or as a last
resort the '%USERPROFILE%\\Application Data'.
2) On Windows, if there is no _curlrc file in the home dir, it checks for one
in the same dir the curl executable is placed. On UNIX-like systems, it will
simply try to load .curlrc from the determined home dir.
.nf
# --- Example file ---
# this is a comment
url = "curl.haxx.se"
output = "curlhere.html"
user-agent = "superagent/1.0"
# and fetch another URL too
url = "curl.haxx.se/docs/manpage.html"
-O
referer = "http://nowhereatall.com/"
# --- End of example file ---
.fi
This option can be used multiple times to load multiple config files.
.IP "--libcurl <file>"
Append this option to any ordinary curl command line, and you will get a
libcurl-using source code written to the file that does the equivalent
of what your command-line operation does!
NOTE: this does not properly support -F and the sending of multipart
formposts, so in those cases the output program will be missing necessary
calls to \fIcurl_formadd(3)\fP, and possibly more.
If this option is used several times, the last given file name will be
used. (Added in 7.16.1)
.IP "--limit-rate <speed>"
Specify the maximum transfer rate you want curl to use. This feature is useful
if you have a limited pipe and you'd like your transfer not to use your entire
bandwidth.
The given speed is measured in bytes/second, unless a suffix is appended.
Appending 'k' or 'K' will count the number as kilobytes, 'm' or 'M' makes it
megabytes, while 'g' or 'G' makes it gigabytes. Examples: 200K, 3m and 1G.
The given rate is the average speed counted during the entire transfer. It
means that curl might use higher transfer speeds in short bursts, but over
time it uses no more than the given rate.
If you also use the \fI-Y/--speed-limit\fP option, that option will take
precedence and might cripple the rate-limiting slightly, to help keeping the
speed-limit logic working.
If this option is used several times, the last one will be used.
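For example (host and file names are placeholders), keep the average transfer
speed at roughly 100 kilobytes per second:
.nf
curl --limit-rate 100K -O http://example.com/big.iso
.fi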
.IP "-l/--list-only"
(FTP)
When listing an FTP directory, this switch forces a name-only view.
Especially useful if you want to machine-parse the contents of an FTP
directory since the normal directory view doesn't use a standard look
or format.
This option causes an FTP NLST command to be sent. Some FTP servers
list only files in their response to NLST; they do not include
subdirectories and symbolic links.
.IP "--local-port <num>[-num]"
Set a preferred number or range of local port numbers to use for the
connection(s). Note that port numbers by nature are a scarce resource that
will be busy at times so setting this range to something too narrow might
cause unnecessary connection setup failures. (Added in 7.15.2)
.IP "-L/--location"
(HTTP/HTTPS) If the server reports that the requested page has moved to a
different location (indicated with a Location: header and a 3XX response code),
this option will make curl redo the request on the new place. If used together
with \fI-i/--include\fP or \fI-I/--head\fP, headers from all requested pages
will be shown. When authentication is used, curl only sends its credentials to
the initial host. If a redirect takes curl to a different host, it won't be
able to intercept the user+password. See also \fI--location-trusted\fP on how
to change this. You can limit the amount of redirects to follow by using the
\fI--max-redirs\fP option.
When curl follows a redirect and the request is not a plain GET (for example
POST or PUT), it will do the next request with a GET if the HTTP response
was 301, 302, or 303. If the response code was any other 3xx code, curl will
re-send the next request using the same unmodified method.
.IP "--location-trusted"
(HTTP/HTTPS) Like \fI-L/--location\fP, but will allow sending the name +
password to all hosts that the site may redirect to. This may or may not
introduce a security breach if the site redirects you to a site to which
you'll send your authentication info (which is plaintext in the case of HTTP
Basic authentication).
.IP "--mail-rcpt <address>"
(SMTP) Specify a single address that the given mail should get sent to. This
option can be used multiple times to specify many recipients.
(Added in 7.20.0)
.IP "--mail-from <address>"
(SMTP) Specify a single address that the given mail should get sent from.
(Added in 7.20.0)
.IP "--max-filesize <bytes>"
Specify the maximum size (in bytes) of a file to download. If the file
requested is larger than this value, the transfer will not start and curl will
return with exit code 63.
\fBNOTE:\fP The file size is not always known prior to download, and for such files
this option has no effect even if the file transfer ends up being larger than
this given limit. This concerns both FTP and HTTP transfers.
.IP "-m/--max-time <seconds>"
Maximum time in seconds that you allow the whole operation to take. This is
useful for preventing your batch jobs from hanging for hours due to slow
networks or links going down. See also the \fI--connect-timeout\fP option.
If this option is used several times, the last one will be used.
.IP "-M/--manual"
Manual. Display the huge help text.
.IP "-n/--netrc"
Makes curl scan the \fI.netrc\fP (\fI_netrc\fP on Windows) file in the user's
home directory for login name and password. This is typically used for FTP on
UNIX. If used with HTTP, curl will enable user authentication. See
.BR netrc(4)
or
.BR ftp(1)
for details on the file format. Curl will not complain if that file
doesn't have the right permissions (it should not be either world- or
group-readable). The environment variable "HOME" is used to find the home
directory.
A quick and very simple example of how to setup a \fI.netrc\fP to allow curl
to FTP to the machine host.domain.com with user name \&'myself' and password
\&'secret' should look similar to:
.B "machine host.domain.com login myself password secret"
.IP "--netrc-optional"
Very similar to \fI--netrc\fP, but this option makes the .netrc usage
\fBoptional\fP and not mandatory as the \fI--netrc\fP option does.
.IP "--negotiate"
(HTTP) Enables GSS-Negotiate authentication. The GSS-Negotiate method was
designed by Microsoft and is used in their web applications. It is primarily
meant as a support for Kerberos5 authentication but may be also used along
with another authentication method. For more information see IETF draft
draft-brezak-spnego-http-04.txt.
If you want to enable Negotiate for your proxy authentication, then use
\fI--proxy-negotiate\fP.
This option requires a library built with GSSAPI support. This is
not very common. Use \fI-V/--version\fP to see if your version supports
GSS-Negotiate.
When using this option, you must also provide a fake -u/--user option to
activate the authentication code properly. Sending a '-u :' is enough as the
user name and password from the -u option aren't actually used.
If this option is used several times, the following occurrences make no
difference.
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it
will output the data in chunks, not necessarily exactly when the data arrives.
Using this option will disable that buffering.
Note that this is the negated option name documented. You can thus use
\fI--buffer\fP to enforce the buffering.
.IP "--no-keepalive"
Disables the use of keepalive messages on the TCP connection, as by default
curl enables them.
Note that this is the negated option name documented. You can thus use
\fI--keepalive\fP to enforce keepalive.
.IP "--no-sessionid"
(SSL) Disable curl's use of SSL session-ID caching. By default all transfers
are done using the cache. Note that while nothing should ever get hurt by
attempting to reuse SSL session-IDs, there seem to be broken SSL
implementations in the wild that may require you to disable this in order for
you to succeed. (Added in 7.16.0)
Note that this is the negated option name documented. You can thus use
\fI--sessionid\fP to enforce session-ID caching.
.IP "--noproxy <no-proxy-list>"
Comma-separated list of hosts which do not use a proxy, if one is specified.
The only wildcard is a single * character, which matches all hosts, and
effectively disables the proxy. Each name in this list is matched as either
a domain which contains the hostname, or the hostname itself. For example,
local.com would match local.com, local.com:80, and www.local.com, but not
www.notlocal.com. (Added in 7.19.4).
.IP "--ntlm"
(HTTP) Enables NTLM authentication. The NTLM authentication method was
designed by Microsoft and is used by IIS web servers. It is a proprietary
protocol, reverse-engineered by clever people and implemented in curl based
on their efforts. This kind of behavior should not be endorsed, you should
encourage everyone who uses NTLM to switch to a public and documented
authentication method instead, such as Digest.
If you want to enable NTLM for your proxy authentication, then use
\fI--proxy-ntlm\fP.
This option requires a library built with SSL support. Use
\fI-V/--version\fP to see if your curl supports NTLM.
If this option is used several times, the following occurrences make no
difference.
.IP "-o/--output <file>"
Write output to <file> instead of stdout. If you are using {} or [] to fetch
multiple documents, you can use '#' followed by a number in the <file>
specifier. That variable will be replaced with the current string for the URL
being fetched. Like in:
curl http://{one,two}.site.com -o "file_#1.txt"
or use several variables like:
curl http://{site,host}.host[1-5].com -o "#1_#2"
You may use this option as many times as the number of URLs you have.
See also the \fI--create-dirs\fP option to create the local directories
dynamically. Specifying the output as '-' (a single dash) will force the
output to be done to stdout.
.IP "-O/--remote-name"
Write output to a local file named like the remote file we get. (Only the file
part of the remote file is used, the path is cut off.)
The remote file name to use for saving is extracted from the given URL,
nothing else.
You may use this option as many times as the number of URLs you have.
.IP "--remote-name-all"
This option changes the default action for all given URLs to be dealt with as
if \fI-O/--remote-name\fP were used for each one. So if you want to disable
that for a specific URL after \fI--remote-name-all\fP has been used, you must
use "-o -" or \fI--no-remote-name\fP. (Added in 7.19.0)
.IP "--pass <phrase>"
(SSL/SSH) Passphrase for the private key.
If this option is used several times, the last one will be used.
.IP "--post301"
Tells curl to respect RFC 2616/10.3.2 and not convert POST requests into GET
requests when following a 301 redirection. The non-RFC behaviour is ubiquitous
in web browsers, so curl does the conversion by default to maintain
consistency. However, a server may require a POST to remain a POST after such
a redirection. This option is meaningful only when using \fI-L/--location\fP.
(Added in 7.17.1)
.IP "--post302"
Tells curl to respect RFC 2616/10.3.2 and not convert POST requests into GET
requests when following a 302 redirection. The non-RFC behaviour is ubiquitous
in web browsers, so curl does the conversion by default to maintain
consistency. However, a server may require a POST to remain a POST after such
a redirection. This option is meaningful only when using \fI-L/--location\fP.
(Added in 7.19.1)
.IP "--proxy-anyauth"
Tells curl to pick a suitable authentication method when communicating with
the given proxy. This might cause an extra request/response round-trip. (Added
in 7.13.2)
.IP "--proxy-basic"
Tells curl to use HTTP Basic authentication when communicating with the given
proxy. Use \fI--basic\fP for enabling HTTP Basic with a remote host. Basic is
the default authentication method curl uses with proxies.
.IP "--proxy-digest"
Tells curl to use HTTP Digest authentication when communicating with the given
proxy. Use \fI--digest\fP for enabling HTTP Digest with a remote host.
.IP "--proxy-negotiate"
Tells curl to use HTTP Negotiate authentication when communicating
with the given proxy. Use \fI--negotiate\fP for enabling HTTP Negotiate
with a remote host. (Added in 7.17.1)
.IP "--proxy-ntlm"
Tells curl to use HTTP NTLM authentication when communicating with the given
proxy. Use \fI--ntlm\fP for enabling NTLM with a remote host.
.IP "--proxy1.0 <proxyhost[:port]>"
Use the specified HTTP 1.0 proxy. If the port number is not specified, it is
assumed at port 1080.
The only difference between this and the HTTP proxy option (\fI-x/--proxy\fP),
is that attempts to use CONNECT through the proxy will specify an HTTP 1.0
protocol instead of the default HTTP 1.1.
.IP "-p/--proxytunnel"
When an HTTP proxy is used (\fI-x/--proxy\fP), this option will cause non-HTTP
protocols to attempt to tunnel through the proxy instead of merely using it to
do HTTP-like operations. The tunnel approach is made with the HTTP proxy
CONNECT request and requires that the proxy allows direct connect to the
remote port number curl wants to tunnel through to.
.IP "--pubkey <key>"
(SSH) Public key file name. Allows you to provide your public key in this
separate file.
If this option is used several times, the last one will be used.
.IP "-P/--ftp-port <address>"
(FTP) Reverses the default initiator/listener roles when connecting with
FTP. This switch makes curl use active mode. In practice, curl then tells the
server to connect back to the client's specified address and port, while
passive mode asks the server to set up an IP address and port for it to connect
to. <address> should be one of:
.RS
.IP interface
e.g. "eth0" to specify which interface's IP address you want to use (Unix only)
.IP "IP address"
e.g. "192.168.10.1" to specify the exact IP address
.IP "host name"
e.g. "my.host.domain" to specify the machine
.IP "-"
make curl pick the same IP address that is already used for the control
connection
.RE
If this option is used several times, the last one will be used. Disable the
use of PORT with \fI--ftp-pasv\fP. Disable the attempt to use the EPRT command
instead of PORT by using \fI--disable-eprt\fP. EPRT is really PORT++.
Starting in 7.19.5, you can append \&":[start]-[end]\&" to the right of the
address, to tell curl what TCP port range to use. That means you specify a
port range, from a lower to a higher number. A single number works as well,
but do note that it increases the risk of failure since the port may not be
available.
.IP "-q"
If used as the first parameter on the command line, the \fIcurlrc\fP config
file will not be read and used. See the \fI-K/--config\fP for details on the
default config file search path.
.IP "-Q/--quote <command>"
(FTP/SFTP) Send an arbitrary command to the remote FTP or SFTP server. Quote
commands are sent BEFORE the transfer takes place (just after the
initial PWD command in an FTP transfer, to be exact). To make commands
take place after a successful transfer, prefix them with a dash '-'.
To make commands be sent after libcurl has changed the working directory,
just before the transfer command(s), prefix the command with a '+' (this
is only supported for FTP). You may specify any number of commands. If
the server returns failure for one of the commands, the entire operation
will be aborted. You must send syntactically correct FTP commands as
RFC959 defines to FTP servers, or one of the commands listed below to
SFTP servers. This option can be used multiple times.
SFTP is a binary protocol. Unlike for FTP, libcurl interprets SFTP quote
commands before sending them to the server. Following is the list of
all supported SFTP quote commands:
.RS
.IP "chgrp group file"
The chgrp command sets the group ID of the file named by the file operand to the
group ID specified by the group operand. The group operand is a decimal
integer group ID.
.IP "chmod mode file"
The chmod command modifies the file mode bits of the specified file. The
mode operand is an octal integer mode number.
.IP "chown user file"
The chown command sets the owner of the file named by the file operand to the
user ID specified by the user operand. The user operand is a decimal
integer user ID.
.IP "ln source_file target_file"
The ln and symlink commands create a symbolic link at the target_file location
pointing to the source_file location.
.IP "mkdir directory_name"
The mkdir command creates the directory named by the directory_name operand.
.IP "pwd"
The pwd command returns the absolute pathname of the current working directory.
.IP "rename source target"
The rename command renames the file or directory named by the source
operand to the destination path named by the target operand.
.IP "rm file"
The rm command removes the file specified by the file operand.
.IP "rmdir directory"
The rmdir command removes the directory entry specified by the directory
operand, provided it is empty.
.IP "symlink source_file target_file"
See ln.
.RE
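For example (hosts, paths and file names are placeholders):
.nf
# delete the remote file once it has been downloaded; the leading dash
# makes the command run after the transfer
curl -Q "-DELE file.txt" -O ftp://example.com/file.txt
# create a remote directory over SFTP before the upload takes place
curl -Q "mkdir /upload/archive" -T local.txt sftp://example.com/upload/archive/local.txt
.fi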
.IP "--random-file <file>"
(SSL) Specify the path name to file containing what will be considered as
random data. The data is used to seed the random engine for SSL connections.
See also the \fI--egd-file\fP option.
.IP "-r/--range <range>"
(HTTP/FTP/SFTP/FILE) Retrieve a byte range (i.e a partial document) from a
HTTP/1.1, FTP or SFTP server or a local FILE. Ranges can be specified
in a number of ways.
.RS
.TP 10
.B 0-499
specifies the first 500 bytes
.TP
.B 500-999
specifies the second 500 bytes
.TP
.B -500
specifies the last 500 bytes
.TP
.B 9500-
specifies the bytes from offset 9500 and forward
.TP
.B 0-0,-1
specifies the first and last byte only(*)(H)
.TP
.B 500-700,600-799
specifies 300 bytes from offset 500(H)
.TP
.B 100-199,500-599
specifies two separate 100-byte ranges(*)(H)
.RE
(*) = NOTE that this will cause the server to reply with a multipart
response!
Only digit characters (0-9) are valid in the 'start' and 'stop' fields of
the \&'start-stop' range syntax. If a non-digit character is given in the range, the server's
response will be unspecified, depending on the server's configuration.
You should also be aware that many HTTP/1.1 servers do not have this feature
enabled, so that when you attempt to get a range, you'll instead get the whole
document.
FTP and SFTP range downloads only support the simple 'start-stop' syntax
(optionally with one of the numbers omitted). FTP use depends on the extended
FTP command SIZE.
If this option is used several times, the last one will be used.
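For example (host and file names are placeholders):
.nf
# fetch only the first 500 bytes of the document
curl -r 0-499 -o part1.bin http://example.com/file.bin
# fetch everything from byte offset 9500 to the end
curl -r 9500- -o tail.bin http://example.com/file.bin
.fi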
.IP "--raw"
When used, it disables all internal HTTP decoding of content or transfer
encodings and instead makes them passed on unaltered, raw. (Added in 7.16.2)
.IP "-R/--remote-time"
When used, this will make libcurl attempt to figure out the timestamp of the
remote file, and if that is available make the local file get that same
timestamp.
.IP "--retry <num>"
If a transient error is returned when curl tries to perform a transfer, it
will retry this number of times before giving up. Setting the number to 0
makes curl do no retries (which is the default). Transient error means either:
a timeout, an FTP 4xx response code or an HTTP 5xx response code.
When curl is about to retry a transfer, it will first wait one second and then
for all forthcoming retries it will double the waiting time until it reaches
10 minutes which then will be the delay between the rest of the retries. By
using \fI--retry-delay\fP you disable this exponential backoff algorithm. See
also \fI--retry-max-time\fP to limit the total time allowed for
retries. (Added in 7.12.3)
If this option is used multiple times, the last occurrence decides the amount.
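For example (host and file names are placeholders), retry up to 5 times but
give up once 120 seconds have passed in total:
.nf
curl --retry 5 --retry-max-time 120 -O http://example.com/file.tar.gz
.fi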
.IP "--retry-delay <seconds>"
Make curl sleep this amount of time before each retry when a transfer has
failed with a transient error (it changes the default backoff time algorithm
between retries). This option is only interesting if \fI--retry\fP is also
used. Setting this delay to zero will make curl use the default backoff time.
(Added in 7.12.3)
If this option is used multiple times, the last occurrence determines the amount.
.IP "--retry-max-time <seconds>"
The retry timer is reset before the first transfer attempt. Retries will be
done as usual (see \fI--retry\fP) as long as the timer hasn't reached this
given limit. Notice that if the timer hasn't reached the limit, the request
will be made and while performing, it may take longer than this given time
period. To limit a single request\'s maximum time, use \fI-m/--max-time\fP.
Set this option to zero to not timeout retries. (Added in 7.12.3)
If this option is used multiple times, the last occurrence determines the
amount.
.IP "-s/--silent"
Silent or quiet mode. Don't show progress meter or error messages. Makes
Curl mute.
.IP "-S/--show-error"
When used with -s it makes curl show an error message if it fails.
.IP "--socks4 <host[:port]>"
Use the specified SOCKS4 proxy. If the port number is not specified, it is
assumed at port 1080. (Added in 7.15.2)
This option overrides any previous use of \fI-x/--proxy\fP, as they are
mutually exclusive.
If this option is used several times, the last one will be used.
.IP "--socks4a <host[:port]>"
Use the specified SOCKS4a proxy. If the port number is not specified, it is
assumed at port 1080. (Added in 7.18.0)
This option overrides any previous use of \fI-x/--proxy\fP, as they are
mutually exclusive.
If this option is used several times, the last one will be used.
.IP "--socks5-hostname <host[:port]>"
Use the specified SOCKS5 proxy (and let the proxy resolve the host name). If
the port number is not specified, it is assumed at port 1080. (Added in
7.18.0)
This option overrides any previous use of \fI-x/--proxy\fP, as they are
mutually exclusive.
If this option is used several times, the last one will be used. (This option
was previously wrongly documented and used as --socks without the number
appended.)
.IP "--socks5 <host[:port]>"
Use the specified SOCKS5 proxy - but resolve the host name locally. If the
port number is not specified, it is assumed at port 1080.
This option overrides any previous use of \fI-x/--proxy\fP, as they are
mutually exclusive.
If this option is used several times, the last one will be used. (This option
was previously wrongly documented and used as --socks without the number
appended.)
This option (as well as \fI--socks4\fP) does not work with IPv6, FTPS or LDAP.
.IP "--socks5-gssapi-service <servicename>"
The default service name for a socks server is rcmd/server-fqdn. This option
allows you to change it.
Examples:
--socks5 proxy-name \fI--socks5-gssapi-service\fP sockd would use
sockd/proxy-name
--socks5 proxy-name \fI--socks5-gssapi-service\fP sockd/real-name would use
sockd/real-name for cases where the proxy-name does not match the principal name.
(Added in 7.19.4).
.IP "--socks5-gssapi-nec"
As part of the GSS-API negotiation a protection mode is negotiated. RFC 1961
says in section 4.3/4.4 it should be protected, but the NEC reference
implementation does not. The option \fI--socks5-gssapi-nec\fP allows the
unprotected exchange of the protection mode negotiation. (Added in 7.19.4).
.IP "--stderr <file>"
Redirect all writes to stderr to the specified file instead. If the file name
is a plain '-', it is instead written to stdout. This option has no point when
you're using a shell with decent redirecting capabilities.
If this option is used several times, the last one will be used.
.IP "--tcp-nodelay"
Turn on the TCP_NODELAY option. See the \fIcurl_easy_setopt(3)\fP man page for
details about this option. (Added in 7.11.2)
.IP "-t/--telnet-option <OPT=val>"
Pass options to the telnet protocol. Supported options are:
TTYPE=<term> Sets the terminal type.
XDISPLOC=<X display> Sets the X display location.
NEW_ENV=<var,val> Sets an environment variable.
.IP "--tftp-blksize <value>"
(TFTP) Set TFTP BLKSIZE option (must be >512). This is the block size that
curl will try to use when transferring data to or from a TFTP server. By
default 512 bytes will be used.
If this option is used several times, the last one will be used.
(Added in 7.20.0)
.IP "-T/--upload-file <file>"
This transfers the specified local file to the remote URL. If there is no file
part in the specified URL, Curl will append the local file name. NOTE that you
must use a trailing / on the last directory to really prove to Curl that there
is no file name or curl will think that your last directory name is the remote
file name to use. That will most likely cause the upload operation to fail. If
this is used on a HTTP(S) server, the PUT command will be used.
Use the file name "-" (a single dash) to use stdin instead of a given file.
Alternately, the file name "." (a single period) may be specified instead
of "-" to use stdin in non-blocking mode to allow reading server output
while stdin is being uploaded.
You can specify one -T for each URL on the command line. Each -T + URL pair
specifies what to upload and to where. curl also supports "globbing" of the -T
argument, meaning that you can upload multiple files to a single URL by using
the same URL globbing style supported in the URL, like this:
curl -T "{file1,file2}" http://www.uploadtothissite.com
or even
curl -T "img[1-1000].png" ftp://ftp.picturemania.com/upload/
.IP "--trace <file>"
Enables a full trace dump of all incoming and outgoing data, including
descriptive information, to the given output file. Use "-" as filename to have
the output sent to stdout.
This option overrides previous uses of \fI-v/--verbose\fP or
\fI--trace-ascii\fP.
If this option is used several times, the last one will be used.
.IP "--trace-ascii <file>"
Enables a full trace dump of all incoming and outgoing data, including
descriptive information, to the given output file. Use "-" as filename to have
the output sent to stdout.
This is very similar to \fI--trace\fP, but leaves out the hex part and only
shows the ASCII part of the dump. It makes smaller output that might be easier
to read for untrained humans.
This option overrides previous uses of \fI-v/--verbose\fP or \fI--trace\fP.
If this option is used several times, the last one will be used.
.IP "--trace-time"
Prepends a time stamp to each trace or verbose line that curl displays.
(Added in 7.14.0)
.IP "-u/--user <user:password>"
Specify the user name and password to use for server authentication. Overrides
\fI-n/--netrc\fP and \fI--netrc-optional\fP.
If you just give the user name (without entering a colon) curl will prompt for
a password.
If you use an SSPI-enabled curl binary and do NTLM authentication, you can
force curl to pick up the user name and password from your environment by
simply specifying a single colon with this option: "-u :".
If this option is used several times, the last one will be used.
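For example (host, user name and password are placeholders):
.nf
# give user name and password on the command line
curl -u myself:secret http://example.com/protected/
# give only the user name and let curl prompt for the password
curl -u myself http://example.com/protected/
.fi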
.IP "-U/--proxy-user <user:password>"
Specify the user name and password to use for proxy authentication.
If you use an SSPI-enabled curl binary and do NTLM authentication, you can
force curl to pick up the user name and password from your environment by
simply specifying a single colon with this option: "-U :".
If this option is used several times, the last one will be used.
.IP "--url <URL>"
Specify a URL to fetch. This option is mostly handy when you want to specify
URL(s) in a config file.
This option may be used any number of times. To control where this URL is
written, use the \fI-o/--output\fP or the \fI-O/--remote-name\fP options.
.IP "-v/--verbose"
Makes the fetching more verbose/talkative. Mostly useful for debugging. A line
starting with '>' means "header data" sent by curl, '<' means "header data"
received by curl that is hidden in normal cases, and a line starting with '*'
means additional info provided by curl.
Note that if you only want HTTP headers in the output, \fI-i/--include\fP
might be the option you're looking for.
If you think this option still doesn't give you enough details, consider using
\fI--trace\fP or \fI--trace-ascii\fP instead.
This option overrides previous uses of \fI--trace-ascii\fP or \fI--trace\fP.
Use \fI-S/--silent\fP to make curl quiet.
.IP "-V/--version"
Displays information about curl and the libcurl version it uses.
The first line includes the full version of curl, libcurl and other 3rd party
libraries linked with the executable.
The second line (starts with "Protocols:") shows all protocols that libcurl
reports to support.
The third line (starts with "Features:") shows specific features libcurl
reports to offer. Available features include:
.RS
.IP "IPv6"
You can use IPv6 with this.
.IP "krb4"
Krb4 for FTP is supported.
.IP "SSL"
HTTPS and FTPS are supported.
.IP "libz"
Automatic decompression of compressed files over HTTP is supported.
.IP "NTLM"
NTLM authentication is supported.
.IP "GSS-Negotiate"
Negotiate authentication and krb5 for FTP is supported.
.IP "Debug"
This curl uses a libcurl built with Debug. This enables more error-tracking
and memory debugging etc. For curl-developers only!
.IP "AsynchDNS"
This curl uses asynchronous name resolves.
.IP "SPNEGO"
SPNEGO Negotiate authentication is supported.
.IP "Largefile"
This curl supports transfers of large files, files larger than 2GB.
.IP "IDN"
This curl supports IDN - international domain names.
.IP "SSPI"
SSPI is supported. If you use NTLM and set a blank user name, curl will
authenticate with your current user and password.
.RE
.IP "-w/--write-out <format>"
Defines what to display on stdout after a completed and successful
operation. The format is a string that may contain plain text mixed with any
number of variables. The string can be specified as a plain "string", as
\&"@filename" to have the format read from a particular file, or as "@-" to tell
curl to read the format from stdin.
The variables present in the output format will be substituted by the value or
text that curl thinks fit, as described below. All variables are specified
as %{variable_name} and to output a literal % you just write it as
%%. You can output a newline by using \\n, a carriage return with \\r and a tab
with \\t.
.B NOTE:
The %-symbol is a special symbol in the win32-environment, where all
occurrences of % must be doubled when using this option.
The variables available at this point are:
.RS
.TP 15
.B url_effective
The URL that was fetched last. This is most meaningful if you've told curl
to follow location: headers.
.TP
.B http_code
The numerical response code that was found in the last retrieved HTTP(S) or
FTP(S) transfer. In 7.18.2 the alias \fBresponse_code\fP was added to show the
same info.
.TP
.B http_connect
The numerical code that was found in the last response (from a proxy) to a
curl CONNECT request. (Added in 7.12.4)
.TP
.B time_total
The total time, in seconds, that the full operation lasted. The time will be
displayed with millisecond resolution.
.TP
.B time_namelookup
The time, in seconds, it took from the start until the name resolving was
completed.
.TP
.B time_connect
The time, in seconds, it took from the start until the TCP connect to the
remote host (or proxy) was completed.
.TP
.B time_appconnect
The time, in seconds, it took from the start until the SSL/SSH/etc
connect/handshake to the remote host was completed. (Added in 7.19.0)
.TP
.B time_pretransfer
The time, in seconds, it took from the start until the file transfer was just
about to begin. This includes all pre-transfer commands and negotiations that
are specific to the particular protocol(s) involved.
.TP
.B time_redirect
The time, in seconds, it took for all redirection steps including name lookup,
connect, pretransfer and transfer, before the final transaction was
started. time_redirect shows the complete execution time for multiple
redirections. (Added in 7.12.3)
.TP
.B time_starttransfer
The time, in seconds, it took from the start until the first byte was just about
to be transferred. This includes time_pretransfer and also the time the
server needed to calculate the result.
.TP
.B size_download
The total amount of bytes that were downloaded.
.TP
.B size_upload
The total amount of bytes that were uploaded.
.TP
.B size_header
The total amount of bytes of the downloaded headers.
.TP
.B size_request
The total amount of bytes that were sent in the HTTP request.
.TP
.B speed_download
The average download speed that curl measured for the complete download.
.TP
.B speed_upload
The average upload speed that curl measured for the complete upload.
.TP
.B content_type
The Content-Type of the requested document, if there was any.
.TP
.B num_connects
Number of new connects made in the recent transfer. (Added in 7.12.3)
.TP
.B num_redirects
Number of redirects that were followed in the request. (Added in 7.12.3)
.TP
.B redirect_url
When an HTTP request was made without -L to follow redirects, this variable
will show the actual URL a redirect \fIwould\fP take you to. (Added in 7.18.2)
.TP
.B ftp_entry_path
The initial path libcurl ended up in when logging on to the remote FTP
server. (Added in 7.15.4)
.TP
.B ssl_verify_result
The result of the SSL peer certificate verification that was requested. 0
means the verification was successful. (Added in 7.19.0)
.RE
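As a purely illustrative example (the URL is just a placeholder), a command
line such as
curl -s -o /dev/null -w "%{http_code} %{time_total}\\n" http://www.example.com/
writes nothing but the response code and the total transfer time to stdout.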
If this option is used several times, the last one will be used.
.IP "-x/--proxy <proxyhost[:port]>"
Use the specified HTTP proxy. If the port number is not specified, it is
assumed to be 1080.
This option overrides existing environment variables that set the proxy to
use. If there's an environment variable setting a proxy, you can set proxy to
\&"" to override it.
\fBNote\fP that all operations that are performed over an HTTP proxy will
transparently be converted to HTTP. It means that certain protocol specific
operations might not be available. This is not the case if you can tunnel
through the proxy, as done with the \fI-p/--proxytunnel\fP option.
Starting with 7.14.1, the proxy host can be specified the exact same way as
the proxy environment variables, including the protocol prefix (http://) and
the embedded user + password.
If this option is used several times, the last one will be used.
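As a purely illustrative example (host name and port number are placeholders),
a command line such as
curl -x http://proxy.example.com:3128 http://www.example.com/
makes curl fetch the page through the given HTTP proxy.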
.IP "-X/--request <command>"
(HTTP) Specifies a custom request method to use when communicating with the
HTTP server. The specified request will be used instead of the method
otherwise used (which defaults to GET). Read the HTTP 1.1 specification for
details and explanations. Common additional HTTP requests include PUT and
DELETE, but related technologies like WebDAV offer PROPFIND, COPY, MOVE and
more.
(FTP)
Specifies a custom FTP command to use instead of LIST when doing file lists
with FTP.
If this option is used several times, the last one will be used.
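A purely illustrative example (the URL is a placeholder):
curl -X DELETE http://www.example.com/resource
makes curl send a DELETE request instead of the default GET.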
.IP "-y/--speed-time <time>"
If a download is slower than speed-limit bytes per second during a speed-time
period, the download gets aborted. If speed-time is used, the default
speed-limit will be 1 unless set with -Y.
This option controls transfers and thus will not affect slow connects etc. If
this is a concern for you, try the \fI--connect-timeout\fP option.
If this option is used several times, the last one will be used.
.IP "-Y/--speed-limit <speed>"
If a download is slower than this given speed (in bytes per second) for
speed-time seconds it gets aborted. speed-time is set with -y and is 30 if
not set.
If this option is used several times, the last one will be used.
.IP "-z/--time-cond <date expression>"
(HTTP/FTP) Request a file that has been modified later than the given time and
date, or one that has been modified before that time. The date expression can
be all sorts of date strings or if it doesn't match any internal ones, it
tries to get the time from a given file name instead! See the
\fIcurl_getdate(3)\fP man page for date expression details.
Start the date expression with a dash (-) to make it request a document
that is older than the given date/time; the default is to request a document
that is newer than the specified date/time.
If this option is used several times, the last one will be used.
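Two purely illustrative examples (the URL is a placeholder; the exact date
formats accepted are those described in \fIcurl_getdate(3)\fP):
curl -z "12 Jan 2012" http://www.example.com/
fetches the document only if it is newer than the given date, while
curl -z "-12 Jan 2012" http://www.example.com/
fetches it only if it is older than that date.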
.IP "--max-redirs <num>"
Set maximum number of redirection-followings allowed. If \fI-L/--location\fP
is used, this option can be used to prevent curl from following redirections
\&"in absurdum". By default, the limit is set to 50 redirections. Set this
option to -1 to make it limitless.
If this option is used several times, the last one will be used.
.IP "-0/--http1.0"
(HTTP) Forces curl to issue its requests using HTTP 1.0 instead of its
internally preferred version, HTTP 1.1.
.IP "-1/--tlsv1"
(SSL)
Forces curl to use TLS version 1 when negotiating with a remote TLS server.
.IP "-2/--sslv2"
(SSL)
Forces curl to use SSL version 2 when negotiating with a remote SSL server.
.IP "-3/--sslv3"
(SSL)
Forces curl to use SSL version 3 when negotiating with a remote SSL server.
.IP "-4/--ipv4"
If libcurl is capable of resolving an address to multiple IP versions (which
it is if it is IPv6-capable), this option tells libcurl to resolve names to
IPv4 addresses only.
.IP "-6/--ipv6"
If libcurl is capable of resolving an address to multiple IP versions (which
it is if it is IPv6-capable), this option tells libcurl to resolve names to
IPv6 addresses only.
.IP "-#/--progress-bar"
Make curl display progress information as a progress bar instead of the
default statistics.
.SH FILES
.I ~/.curlrc
.RS
Default config file, see \fI-K/--config\fP for details.
.SH ENVIRONMENT
The environment variables can be specified in lower case or upper case. The
lower case version has precedence. http_proxy is an exception as it is only
available in lower case.
.IP "http_proxy [protocol://]<host>[:port]"
Sets the proxy server to use for HTTP.
.IP "HTTPS_PROXY [protocol://]<host>[:port]"
Sets the proxy server to use for HTTPS.
.IP "FTP_PROXY [protocol://]<host>[:port]"
Sets the proxy server to use for FTP.
.IP "ALL_PROXY [protocol://]<host>[:port]"
Sets the proxy server to use if no protocol-specific proxy is set.
.IP "NO_PROXY <comma-separated list of hosts>"
A list of host names that shouldn't go through any proxy. If set to an asterisk
\&'*' only, it matches all hosts.
.SH EXIT CODES
There are a bunch of different error codes and their corresponding error
messages that may appear during bad conditions. At the time of this writing,
the exit codes are:
.IP 1
Unsupported protocol. This build of curl has no support for this protocol.
.IP 2
Failed to initialize.
.IP 3
URL malformed. The syntax was not correct.
.IP 5
Couldn't resolve proxy. The given proxy host could not be resolved.
.IP 6
Couldn't resolve host. The given remote host was not resolved.
.IP 7
Failed to connect to host.
.IP 8
FTP weird server reply. The server sent data curl couldn't parse.
.IP 9
FTP access denied. The server denied login or denied access to the particular
resource or directory you wanted to reach. Most often you tried to change to a
directory that doesn't exist on the server.
.IP 11
FTP weird PASS reply. Curl couldn't parse the reply sent to the PASS request.
.IP 13
FTP weird PASV reply. Curl couldn't parse the reply sent to the PASV request.
.IP 14
FTP weird 227 format. Curl couldn't parse the 227-line the server sent.
.IP 15
FTP can't get host. Couldn't resolve the host IP we got in the 227-line.
.IP 17
FTP couldn't set binary. Couldn't change transfer method to binary.
.IP 18
Partial file. Only a part of the file was transferred.
.IP 19
FTP couldn't download/access the given file, the RETR (or similar) command
failed.
.IP 21
FTP quote error. A quote command returned error from the server.
.IP 22
HTTP page not retrieved. The requested URL was not found or returned another
error with the HTTP error code being 400 or above. This return code only
appears if \fI-f/--fail\fP is used.
.IP 23
Write error. Curl couldn't write data to a local filesystem or similar.
.IP 25
FTP couldn't STOR file. The server denied the STOR operation, used for FTP
uploading.
.IP 26
Read error. Various reading problems.
.IP 27
Out of memory. A memory allocation request failed.
.IP 28
Operation timeout. The specified time-out period was reached according to the
conditions.
.IP 30
FTP PORT failed. The PORT command failed. Not all FTP servers support the PORT
command, try doing a transfer using PASV instead!
.IP 31
FTP couldn't use REST. The REST command failed. This command is used for
resumed FTP transfers.
.IP 33
HTTP range error. The range "command" didn't work.
.IP 34
HTTP post error. Internal post-request generation error.
.IP 35
SSL connect error. The SSL handshaking failed.
.IP 36
FTP bad download resume. Couldn't continue an earlier aborted download.
.IP 37
FILE couldn't read file. Failed to open the file. Permissions?
.IP 38
LDAP cannot bind. LDAP bind operation failed.
.IP 39
LDAP search failed.
.IP 41
Function not found. A required LDAP function was not found.
.IP 42
Aborted by callback. An application told curl to abort the operation.
.IP 43
Internal error. A function was called with a bad parameter.
.IP 45
Interface error. A specified outgoing interface could not be used.
.IP 47
Too many redirects. When following redirects, curl hit the maximum amount.
.IP 48
Unknown TELNET option specified.
.IP 49
Malformed telnet option.
.IP 51
The peer's SSL certificate or SSH MD5 fingerprint was not ok.
.IP 52
The server didn't reply anything, which here is considered an error.
.IP 53
SSL crypto engine not found.
.IP 54
Cannot set SSL crypto engine as default.
.IP 55
Failed sending network data.
.IP 56
Failure in receiving network data.
.IP 58
Problem with the local certificate.
.IP 59
Couldn't use specified SSL cipher.
.IP 60
Peer certificate cannot be authenticated with known CA certificates.
.IP 61
Unrecognized transfer encoding.
.IP 62
Invalid LDAP URL.
.IP 63
Maximum file size exceeded.
.IP 64
Requested FTP SSL level failed.
.IP 65
Sending the data requires a rewind that failed.
.IP 66
Failed to initialise SSL Engine.
.IP 67
The user name, password, or similar was not accepted and curl failed to log in.
.IP 68
File not found on TFTP server.
.IP 69
Permission problem on TFTP server.
.IP 70
Out of disk space on TFTP server.
.IP 71
Illegal TFTP operation.
.IP 72
Unknown TFTP transfer ID.
.IP 73
File already exists (TFTP).
.IP 74
No such user (TFTP).
.IP 75
Character conversion failed.
.IP 76
Character conversion functions required.
.IP 77
Problem with reading the SSL CA cert (path? access rights?).
.IP 78
The resource referenced in the URL does not exist.
.IP 79
An unspecified error occurred during the SSH session.
.IP 80
Failed to shut down the SSL connection.
.IP 82
Could not load CRL file, missing or wrong format (added in 7.19.0).
.IP 83
Issuer check failed (added in 7.19.0).
.IP XX
More error codes will appear here in future releases. The existing ones
are meant to never change.
.SH AUTHORS / CONTRIBUTORS
Daniel Stenberg is the main author, but the whole list of contributors is
found in the separate THANKS file.
.SH WWW
http://curl.haxx.se
.SH FTP
ftp://ftp.sunet.se/pub/www/utilities/curl/
.SH "SEE ALSO"
.BR ftp (1),
.BR wget (1)
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Help</title>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>
<script type="text/javascript" src="../internal.js"></script>
<link rel="stylesheet" type="text/css" href="help.css">
</head>
<body>
<div class="wrapper" id="helptab">
<div id="tabHeads" class="tabhead">
<span class="focus" tabsrc="about"><var id="lang_input_about"></var></span>
<span tabsrc="shortcuts"><var id="lang_input_shortcuts"></var></span>
</div>
<div id="tabBodys" class="tabbody">
<div id="about" class="panel">
<h1>UEditor</h1>
<p id="version"></p>
<p><var id="lang_input_introduction"></var></p>
</div>
<div id="shortcuts" class="panel">
<table>
<thead>
<tr>
<td><var id="lang_Txt_shortcuts"></var></td>
<td><var id="lang_Txt_func"></var></td>
</tr>
</thead>
<tbody>
<tr>
<td>ctrl+b</td>
<td><var id="lang_Txt_bold"></var></td>
</tr>
<tr>
<td>ctrl+c</td>
<td><var id="lang_Txt_copy"></var></td>
</tr>
<tr>
<td>ctrl+x</td>
<td><var id="lang_Txt_cut"></var></td>
</tr>
<tr>
<td>ctrl+v</td>
<td><var id="lang_Txt_Paste"></var></td>
</tr>
<tr>
<td>ctrl+y</td>
<td><var id="lang_Txt_undo"></var></td>
</tr>
<tr>
<td>ctrl+z</td>
<td><var id="lang_Txt_redo"></var></td>
</tr>
<tr>
<td>ctrl+i</td>
<td><var id="lang_Txt_italic"></var></td>
</tr>
<tr>
<td>ctrl+u</td>
<td><var id="lang_Txt_underline"></var></td>
</tr>
<tr>
<td>ctrl+a</td>
<td><var id="lang_Txt_selectAll"></var></td>
</tr>
<tr>
<td>shift+enter</td>
<td><var id="lang_Txt_visualEnter"></var></td>
</tr>
<tr>
<td>alt+z</td>
<td><var id="lang_Txt_fullscreen"></var></td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<script type="text/javascript" src="help.js"></script>
</body>
</html> | {
"pile_set_name": "Github"
} |
/* A Bison parser, made by GNU Bison 2.7.12-4996. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
#ifndef YY_YY_GENERATED_BISON_BPREDICATE_H_INCLUDED
# define YY_YY_GENERATED_BISON_BPREDICATE_H_INCLUDED
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
#if YYDEBUG
extern int yydebug;
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
STRING = 258,
NAME = 259,
PEER1_NAME = 260,
PEER2_NAME = 261,
AND = 262,
OR = 263,
NOT = 264,
SPAR = 265,
EPAR = 266,
CONSTANT_TRUE = 267,
CONSTANT_FALSE = 268,
COMMA = 269
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 2053 of yacc.c */
#line 227 "predicate/BPredicate.y"
char *text;
struct predicate_node *node;
struct arguments_node *arg_node;
struct predicate_node nfaw;
struct arguments_arg arg_arg;
/* Line 2053 of yacc.c */
#line 80 "generated//bison_BPredicate.h"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
typedef struct YYLTYPE
{
int first_line;
int first_column;
int last_line;
int last_column;
} YYLTYPE;
# define yyltype YYLTYPE /* obsolescent; will be withdrawn */
# define YYLTYPE_IS_DECLARED 1
# define YYLTYPE_IS_TRIVIAL 1
#endif
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void *scanner, struct predicate_node **result);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
#endif /* !YY_YY_GENERATED_BISON_BPREDICATE_H_INCLUDED */
| {
"pile_set_name": "Github"
} |
Jekyll Themes
=============
A directory of the best-looking themes for Jekyll blogs
Submitting a Theme
------------------
Have a theme you want to share?
* Fork the site on GitHub
* Create a new post in the `_posts` directory and fill out the relevant YAML fields (a rough sketch of typical front matter appears after this list)
* Make a 250x200 thumbnail and drop it in the thumbnails directory. List its filename in the post's markdown file.
* Test it out, then push your changes up and open a pull request.
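
A rough sketch of what a theme post's front matter might look like (the field names below are illustrative rather than authoritative; check the existing posts in `_posts` for the exact fields this site expects):

    ---
    layout: default
    title: My Theme
    date: 2013-01-01 12:00:00
    thumbnail: my-theme.png
    ---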
License
-------
The contents of this repository are licensed under the [GNU Public License.](http://www.gnu.org/licenses/gpl-3.0.html)
| {
"pile_set_name": "Github"
} |
<Page
x:Class="ListViewSample.MainPage"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:local="using:ListViewSample"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
mc:Ignorable="d">
<Grid Background="{ThemeResource ApplicationPageBackgroundThemeBrush}">
<Grid.RowDefinitions>
<RowDefinition Height="Auto"/>
<RowDefinition Height="*"/>
</Grid.RowDefinitions>
<Grid.ColumnDefinitions>
<ColumnDefinition Width="Auto"/>
<ColumnDefinition Width="*"/>
</Grid.ColumnDefinitions>
<Button Style="{StaticResource SplitViewTogglePaneButtonStyle}"
Click="ShowSplitView"/>
<TextBlock Text="ListView/GridView Samples"
VerticalAlignment="Center"
Style="{ThemeResource TitleTextBlockStyle}"
Grid.Column="1"/>
<StackPanel Orientation="Vertical" Grid.ColumnSpan="2" Grid.Row="1">
<RichTextBlock FontSize="14" Padding="32" >
<Paragraph>This app demonstrates the usage of ListView and GridView in basic scenarios, the real use of the controls and layout techniques in Master/Detail, and how to implement the Tap on left Edge of the item behavior.
</Paragraph>
<Paragraph>
<LineBreak/>To get the most out of it, browse the full code in Visual Studio and see how the app works.
</Paragraph>
<Paragraph>
<LineBreak/>Use the hamburger button at the left top of the page to navigate across the samples.
</Paragraph>
</RichTextBlock>
<RichTextBlock FontSize="10" Padding="32,60,32,20">
<Paragraph>THIS CODE AND INFORMATION IS PROVIDED 'AS IS' WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
</Paragraph>
<Paragraph>
<LineBreak/>
<LineBreak/>Copyright (c) Microsoft Corporation. All rights reserved.
</Paragraph>
</RichTextBlock>
</StackPanel>
<local:SamplesPane x:Name="MySamplesPane" Grid.ColumnSpan="2" Grid.RowSpan="2" />
</Grid>
</Page>
| {
"pile_set_name": "Github"
} |
set +u
if [ -z "$MC_PROXY_CERTBOT_DOMAIN" ]; then
echo "MC_PROXY_CERTBOT_DOMAIN (top-level domain to issue the certificate for) is not set."
exit 1
fi
if [ -z "$MC_PROXY_CERTBOT_LETSENCRYPT_EMAIL" ]; then
echo "MC_PROXY_CERTBOT_LETSENCRYPT_EMAIL (email for Let's Encrypt's notifications) is not set."
exit 1
fi
if [ -z "$MC_PROXY_CERTBOT_CLOUDFLARE_EMAIL" ]; then
echo "MC_PROXY_CERTBOT_CLOUDFLARE_EMAIL (CloudFlare account email) is not set."
exit 1
fi
if [ -z "$MC_PROXY_CERTBOT_CLOUDFLARE_GLOBAL_API_KEY" ]; then
echo "MC_PROXY_CERTBOT_CLOUDFLARE_GLOBAL_API_KEY (CloudFlare account global API key) is not set."
exit 1
fi
set -u
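
# Illustrative usage only (every value below is a placeholder): these variables
# are expected to be exported by the environment or a wrapper script before
# this check runs, for example:
#
#   export MC_PROXY_CERTBOT_DOMAIN=example.com
#   export MC_PROXY_CERTBOT_LETSENCRYPT_EMAIL=admin@example.com
#   export MC_PROXY_CERTBOT_CLOUDFLARE_EMAIL=cf-account@example.com
#   export MC_PROXY_CERTBOT_CLOUDFLARE_GLOBAL_API_KEY=0123456789abcdef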
| {
"pile_set_name": "Github"
} |
var searchData=
[
['ble_5ftcs_5finit',['ble_tcs_init',['../group__ble__sdk__srv__tcs.html#gad33571ba0d7ba51659c3aa09618b3cc9',1,'ble_tcs.h']]],
['ble_5ftcs_5fmtu_5fset',['ble_tcs_mtu_set',['../group__ble__sdk__srv__tcs.html#ga8a6b5889d593500659b05c563740c51a',1,'ble_tcs.h']]],
['ble_5ftcs_5fnfc_5fset',['ble_tcs_nfc_set',['../group__ble__sdk__srv__tcs.html#gaa10dbf0cf3e90ef0a03a8244eb88532c',1,'ble_tcs.h']]],
['ble_5ftcs_5fon_5fble_5fevt',['ble_tcs_on_ble_evt',['../group__ble__sdk__srv__tcs.html#ga10835bbd06a099abc38cdbee47c382f6',1,'ble_tcs.h']]],
['ble_5ftes_5fcolor_5fset',['ble_tes_color_set',['../group__ble__sdk__srv__tes.html#ga96aa8f21018b192f8bb854859530bafe',1,'ble_tes.h']]],
['ble_5ftes_5fgas_5fset',['ble_tes_gas_set',['../group__ble__sdk__srv__tes.html#ga3d1629990a842eca0c6c96872eca7231',1,'ble_tes.h']]],
['ble_5ftes_5fhumidity_5fset',['ble_tes_humidity_set',['../group__ble__sdk__srv__tes.html#ga214c30d7b0f07f264ef3e2ceb0b10f47',1,'ble_tes.h']]],
['ble_5ftes_5finit',['ble_tes_init',['../group__ble__sdk__srv__tes.html#ga6091b836154596e1478a21404f13ff2c',1,'ble_tes.h']]],
['ble_5ftes_5fon_5fble_5fevt',['ble_tes_on_ble_evt',['../group__ble__sdk__srv__tes.html#ga144e7065b310a35fcab8c3f2d94375d2',1,'ble_tes.h']]],
['ble_5ftes_5fpressure_5fset',['ble_tes_pressure_set',['../group__ble__sdk__srv__tes.html#ga38d41753614fed2e730169d00d6edbb3',1,'ble_tes.h']]],
['ble_5ftes_5ftemperature_5fset',['ble_tes_temperature_set',['../group__ble__sdk__srv__tes.html#ga80bf34aaf088ad2437ba5b1fbea368b9',1,'ble_tes.h']]],
['ble_5ftms_5feuler_5fset',['ble_tms_euler_set',['../group__ble__sdk__srv__wss.html#gae144dcf1529ef6afe7a25bccf62506bf',1,'ble_tms.h']]],
['ble_5ftms_5fgravity_5fset',['ble_tms_gravity_set',['../group__ble__sdk__srv__wss.html#gaee53ea24ac784f958db56d334763a306',1,'ble_tms.h']]],
['ble_5ftms_5fheading_5fset',['ble_tms_heading_set',['../group__ble__sdk__srv__wss.html#gad8a6720b9f28cb13237ac0d5379a7ef0',1,'ble_tms.h']]],
['ble_5ftms_5finit',['ble_tms_init',['../group__ble__sdk__srv__wss.html#gaac5bc26cb9a4927d0e774fc22c8fe3ab',1,'ble_tms.h']]],
['ble_5ftms_5fon_5fble_5fevt',['ble_tms_on_ble_evt',['../group__ble__sdk__srv__wss.html#ga619888103f7c089c7622cb841bc1f152',1,'ble_tms.h']]],
['ble_5ftms_5forientation_5fset',['ble_tms_orientation_set',['../group__ble__sdk__srv__wss.html#ga1882d22b71460844a49ef232b3b429a9',1,'ble_tms.h']]],
['ble_5ftms_5fpedo_5fset',['ble_tms_pedo_set',['../group__ble__sdk__srv__wss.html#ga69135fb15c189f352d3bf60fdd786102',1,'ble_tms.h']]],
['ble_5ftms_5fquat_5fset',['ble_tms_quat_set',['../group__ble__sdk__srv__wss.html#ga430db44a4ab326a0fa74968b46c086f8',1,'ble_tms.h']]],
['ble_5ftms_5fraw_5fset',['ble_tms_raw_set',['../group__ble__sdk__srv__wss.html#gae724422f00957f8a17c6e179f56a16c0',1,'ble_tms.h']]],
['ble_5ftms_5frot_5fmat_5fset',['ble_tms_rot_mat_set',['../group__ble__sdk__srv__wss.html#gadc3e8340c810646804ecbb7980c4456d',1,'ble_tms.h']]],
['ble_5ftms_5ftap_5fset',['ble_tms_tap_set',['../group__ble__sdk__srv__wss.html#gabc6b82dd6f4daad7774379fcabef416c',1,'ble_tms.h']]],
['ble_5ftss_5finit',['ble_tss_init',['../group__ble__srv__tss.html#ga3cad4a25955b00dc99b931dd37f91d77',1,'ble_tss.h']]],
['ble_5ftss_5fmic_5fset',['ble_tss_mic_set',['../group__ble__srv__tss.html#ga01ca54da607d250e6f113c35f3c66614',1,'ble_tss.h']]],
['ble_5ftss_5fon_5fble_5fevt',['ble_tss_on_ble_evt',['../group__ble__srv__tss.html#gaeab153e0eed3564e841c3bd9fda47962',1,'ble_tss.h']]],
['ble_5ftss_5fspkr_5fstat_5fset',['ble_tss_spkr_stat_set',['../group__ble__srv__tss.html#gafcc18b076ebbd36ae893ae8291e9ca8f',1,'ble_tss.h']]],
['ble_5fuis_5finit',['ble_uis_init',['../group__ble__sdk__srv__uis.html#ga826b313e4e80c5eccb8c76c38fa56d3d',1,'ble_uis.h']]],
['ble_5fuis_5fon_5fble_5fevt',['ble_uis_on_ble_evt',['../group__ble__sdk__srv__uis.html#ga31637b55d11194bab31a00ebb253e733',1,'ble_uis.h']]],
['ble_5fuis_5fon_5fbutton_5fchange',['ble_uis_on_button_change',['../group__ble__sdk__srv__uis.html#ga3b4636fcf3ec049e322d1eb0f85dad82',1,'ble_uis.h']]]
];
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env node
(function() {
var fs = require('fs');
var cssesc = require('../cssesc.js');
var strings = process.argv.splice(2);
var stdin = process.stdin;
var data;
var timeout;
var isObject = false;
var options = {};
var log = console.log;
var main = function() {
var option = strings[0];
if (/^(?:-h|--help|undefined)$/.test(option)) {
log(
'cssesc v%s - http://mths.be/cssesc',
cssesc.version
);
log([
'\nUsage:\n',
'\tcssesc [string]',
'\tcssesc [-i | --identifier] [string]',
'\tcssesc [-s | --single-quotes] [string]',
'\tcssesc [-d | --double-quotes] [string]',
'\tcssesc [-w | --wrap] [string]',
'\tcssesc [-e | --escape-everything] [string]',
'\tcssesc [-v | --version]',
'\tcssesc [-h | --help]',
'\nExamples:\n',
'\tcssesc \'f\xF6o \u2665 b\xE5r \uD834\uDF06 baz\'',
'\tcssesc --identifier \'f\xF6o \u2665 b\xE5r \uD834\uDF06 baz\'',
'\tcssesc --escape-everything \'f\xF6o \u2665 b\xE5r \uD834\uDF06 baz\'',
'\tcssesc --double-quotes --wrap \'f\xF6o \u2665 b\xE5r \uD834\uDF06 baz\'',
'\techo \'f\xF6o \u2665 b\xE5r \uD834\uDF06 baz\' | cssesc'
].join('\n'));
return process.exit(1);
}
if (/^(?:-v|--version)$/.test(option)) {
log('v%s', cssesc.version);
return process.exit(1);
}
strings.forEach(function(string) {
// Process options
if (/^(?:-i|--identifier)$/.test(string)) {
options.isIdentifier = true;
return;
}
if (/^(?:-s|--single-quotes)$/.test(string)) {
options.quotes = 'single';
return;
}
if (/^(?:-d|--double-quotes)$/.test(string)) {
options.quotes = 'double';
return;
}
if (/^(?:-w|--wrap)$/.test(string)) {
options.wrap = true;
return;
}
if (/^(?:-e|--escape-everything)$/.test(string)) {
options.escapeEverything = true;
return;
}
// Process string(s)
var result;
try {
result = cssesc(string, options);
log(result);
} catch(error) {
log(error.message + '\n');
log('Error: failed to escape.');
log('If you think this is a bug in cssesc, please report it:');
log('https://github.com/mathiasbynens/cssesc/issues/new');
log(
'\nStack trace using cssesc@%s:\n',
cssesc.version
);
log(error.stack);
return process.exit(1);
}
});
// Return with exit status 0 outside of the `forEach` loop, in case
// multiple strings were passed in.
return process.exit(0);
};
if (stdin.isTTY) {
// handle shell arguments
main();
} else {
// Either the script is called from within a non-TTY context, or `stdin`
// content is being piped in.
if (!process.stdout.isTTY) {
// The script was called from a non-TTY context. This is a rather uncommon
// use case we don’t actively support. However, we don’t want the script
// to wait forever in such cases, so…
timeout = setTimeout(function() {
// …if no piped data arrived after a whole minute, handle shell
// arguments instead.
main();
}, 60000);
}
data = '';
stdin.on('data', function(chunk) {
clearTimeout(timeout);
data += chunk;
});
stdin.on('end', function() {
strings.push(data.trim());
main();
});
stdin.resume();
}
}());
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_79) on Mon Nov 23 19:34:23 PST 2015 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class com.fasterxml.aalto.evt.EventReaderImpl (aalto-xml 1.0.0 API)</title>
<meta name="date" content="2015-11-23">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class com.fasterxml.aalto.evt.EventReaderImpl (aalto-xml 1.0.0 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../com/fasterxml/aalto/evt/EventReaderImpl.html" title="class in com.fasterxml.aalto.evt">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/fasterxml/aalto/evt/class-use/EventReaderImpl.html" target="_top">Frames</a></li>
<li><a href="EventReaderImpl.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class com.fasterxml.aalto.evt.EventReaderImpl" class="title">Uses of Class<br>com.fasterxml.aalto.evt.EventReaderImpl</h2>
</div>
<div class="classUseContainer">No usage of com.fasterxml.aalto.evt.EventReaderImpl</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../com/fasterxml/aalto/evt/EventReaderImpl.html" title="class in com.fasterxml.aalto.evt">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?com/fasterxml/aalto/evt/class-use/EventReaderImpl.html" target="_top">Frames</a></li>
<li><a href="EventReaderImpl.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2012-2015 <a href="http://fasterxml.com/">FasterXML</a>. All Rights Reserved.</small></p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/**
*
* WARNING! This file was autogenerated by:
* _ _ _ _ __ __
* | | | | | | |\ \ / /
* | | | | |_| | \ V /
* | | | | _ | / \
* | |_| | | | |/ /^\ \
* \___/\_| |_/\/ \/
*
* This file was autogenerated by UnrealHxGenerator using UHT definitions.
* It only includes UPROPERTYs and UFUNCTIONs. Do not modify it!
* In order to add more definitions, create or edit a type with the same name/package, but with an `_Extra` suffix
**/
package unreal.blueprintgraph;
/**
WARNING: This type was defined as MinimalAPI on its declaration. Because of that, its properties/methods are inaccessible
**/
@:umodule("BlueprintGraph")
@:glueCppIncludes("K2Node_DoOnceMultiInput.h")
@:uextern @:uclass extern class UK2Node_DoOnceMultiInput extends unreal.blueprintgraph.UK2Node implements unreal.blueprintgraph.IK2Node_AddPinInterface {
/**
Reference to the integer that contains
**/
@:uproperty public var DataNode : unreal.blueprintgraph.UK2Node_TemporaryVariable;
/**
The number of additional input pins to generate for this node (2 base pins are not included)
**/
@:uproperty public var NumAdditionalInputs : unreal.Int32;
// K2Node_AddPinInterface interface implementation
}
| {
"pile_set_name": "Github"
} |
# Event 6106 - DocPerf_Task_Setup_SplInstallPrinterDriverFromPackage
###### Version: 0
## Description
None
## Data Dictionary
|Standard Name|Field Name|Type|Description|Sample Value|
|---|---|---|---|---|
|TBD|InfNameOrPath|UnicodeString|None|`None`|
|TBD|ServerName|UnicodeString|None|`None`|
|TBD|ModelOrDriverName|UnicodeString|None|`None`|
## Tags
* etw_level_Informational
* etw_keywords_DocPerf
* etw_opcode_Stop
* etw_task_DocPerf_Task_Setup_SplInstallPrinterDriverFromPackage | {
"pile_set_name": "Github"
} |
table.datatable {
@extend .table, .table-striped, .table-bordered, .table-hover;
td.actions {
vertical-align: middle;
    text-align: center;
div.btn-group {
display: flex;
}
a {
@extend .btn;
}
}
td.truncate {
max-width: 1px;
@include text-overflow();
}
}
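
// Illustrative markup this stylesheet targets (a sketch only; the cell
// contents, link labels and the Bootstrap classes pulled in via @extend are
// assumptions, not part of this file):
//
//   <table class="datatable">
//     <tr>
//       <td class="truncate">A long description that gets ellipsized</td>
//       <td class="actions">
//         <div class="btn-group"><a href="#">Edit</a> <a href="#">Delete</a></div>
//       </td>
//     </tr>
//   </table>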
| {
"pile_set_name": "Github"
} |
/* Data structure for communication from the run-time dynamic linker for
loaded ELF shared objects.
Copyright (C) 1995-1999, 2000, 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _LINK_H
#define _LINK_H 1
#include <features.h>
#include <elf.h>
#include <dlfcn.h>
#include <sys/types.h>
#define DT_THISPROCNUM 0
/* We use this macro to refer to ELF types independent of the native wordsize.
`ElfW(TYPE)' is used in place of `Elf32_TYPE' or `Elf64_TYPE'. */
#define ElfW(type) _ElfW (Elf, __ELF_NATIVE_CLASS, type)
#define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
#define _ElfW_1(e,w,t) e##w##t
#include <sys/elfclass.h> /* Defines __ELF_NATIVE_CLASS. */
#include <sys/link.h>
#include <dl-lookupcfg.h>
/* Rendezvous structure used by the run-time dynamic linker to communicate
details of shared object loading to the debugger. If the executable's
dynamic section has a DT_DEBUG element, the run-time linker sets that
element's value to the address where this structure can be found. */
struct r_debug
{
int r_version; /* Version number for this protocol. */
struct link_map *r_map; /* Head of the chain of loaded objects. */
/* This is the address of a function internal to the run-time linker,
that will always be called when the linker begins to map in a
library or unmap it, and again when the mapping change is complete.
The debugger can set a breakpoint at this address if it wants to
notice shared object mapping changes. */
ElfW(Addr) r_brk;
enum
{
/* This state value describes the mapping change taking place when
the `r_brk' address is called. */
RT_CONSISTENT, /* Mapping change is complete. */
RT_ADD, /* Beginning to add a new object. */
RT_DELETE /* Beginning to remove an object mapping. */
} r_state;
ElfW(Addr) r_ldbase; /* Base address the linker is loaded at. */
};
/* This is the instance of that structure used by the dynamic linker. */
extern struct r_debug _r_debug;
/* This symbol refers to the "dynamic structure" in the `.dynamic' section
of whatever module refers to `_DYNAMIC'. So, to find its own
`struct r_debug', a program could do:
for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; ++dyn)
if (dyn->d_tag == DT_DEBUG)
r_debug = (struct r_debug *) dyn->d_un.d_ptr;
*/
extern ElfW(Dyn) _DYNAMIC[];
/* Some internal data structures of the dynamic linker used in the
linker map. We only provide forward declarations. */
struct libname_list;
struct r_found_version;
struct r_search_path_elem;
/* Forward declaration. */
struct link_map;
/* Structure to describe a single list of scope elements. The lookup
functions get passed an array of pointers to such structures. */
struct r_scope_elem
{
/* Array of maps for the scope. */
struct link_map **r_list;
/* Number of entries in the scope. */
unsigned int r_nlist;
};
/* Structure to record search path and allocation mechanism. */
struct r_search_path_struct
{
struct r_search_path_elem **dirs;
int malloced;
};
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
These data structures exist in space used by the run-time dynamic linker;
modifying them may have disastrous results.
This data structure might change in future, if necessary. User-level
programs must avoid defining objects of this type. */
struct link_map
{
/* These first few members are part of the protocol with the debugger.
This is the same format used in SVR4. */
ElfW(Addr) l_addr; /* Base address shared object is loaded at. */
char *l_name; /* Absolute file name object was found in. */
ElfW(Dyn) *l_ld; /* Dynamic section of the shared object. */
struct link_map *l_next, *l_prev; /* Chain of loaded objects. */
/* All following members are internal to the dynamic linker.
They may change without notice. */
struct libname_list *l_libname;
/* Indexed pointers to dynamic section.
[0,DT_NUM) are indexed by the processor-independent tags.
[DT_NUM,DT_NUM+DT_THISPROCNUM) are indexed by the tag minus DT_LOPROC.
[DT_NUM+DT_THISPROCNUM,DT_NUM+DT_THISPROCNUM+DT_EXTRANUM) are indexed
by DT_EXTRATAGIDX(tagvalue) and
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM)
are indexed by DT_EXTRATAGIDX(tagvalue) (see <elf.h>). */
ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
+ DT_EXTRANUM];
const ElfW(Phdr) *l_phdr; /* Pointer to program header table in core. */
ElfW(Addr) l_entry; /* Entry point location. */
ElfW(Half) l_phnum; /* Number of program header entries. */
ElfW(Half) l_ldnum; /* Number of dynamic segment entries. */
/* Array of DT_NEEDED dependencies and their dependencies, in
dependency order for symbol lookup (with and without
duplicates). There is no entry before the dependencies have
been loaded. */
struct r_scope_elem l_searchlist;
/* We need a special searchlist to process objects marked with
DT_SYMBOLIC. */
struct r_scope_elem l_symbolic_searchlist;
/* Dependent object that first caused this object to be loaded. */
struct link_map *l_loader;
/* Symbol hash table. */
Elf_Symndx l_nbuckets;
const Elf_Symndx *l_buckets, *l_chain;
unsigned int l_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
lt_library, /* Library needed by main executable. */
lt_loaded /* Extra run-time loaded shared object. */
} l_type:2;
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
to by `l_phdr' is allocated. */
unsigned int l_soname_added:1; /* Nonzero if the SONAME is for sure in
the l_libname list. */
unsigned int l_faked:1; /* Nonzero if this is a faked descriptor
without associated file. */
/* Array with version names. */
unsigned int l_nversions;
struct r_found_version *l_versions;
/* Collected information about own RPATH directories. */
struct r_search_path_struct l_rpath_dirs;
/* Collected results of relocation while profiling. */
ElfW(Addr) *l_reloc_result;
/* Pointer to the version information if available. */
ElfW(Versym) *l_versyms;
/* String specifying the path where this object was found. */
const char *l_origin;
/* Start and finish of memory map for this object. l_map_start
need not be the same as l_addr. */
ElfW(Addr) l_map_start, l_map_end;
/* Default array for 'l_scope'. */
struct r_scope_elem *l_scope_mem[4];
/* Size of array allocated for 'l_scope'. */
size_t l_scope_max;
/* This is an array defining the lookup scope for this link map.
There are at most three different scope lists. */
struct r_scope_elem **l_scope;
/* A similar array, this time only with the local scope. This is
used occasionally. */
struct r_scope_elem *l_local_scope[2];
/* This information is kept to check for sure whether a shared
object is the same as one already loaded. */
dev_t l_dev;
ino64_t l_ino;
/* Collected information about own RUNPATH directories. */
struct r_search_path_struct l_runpath_dirs;
/* List of object in order of the init and fini calls. */
struct link_map **l_initfini;
/* List of the dependencies introduced through symbol binding. */
unsigned int l_reldepsmax;
unsigned int l_reldepsact;
struct link_map **l_reldeps;
/* Various flag words. */
ElfW(Word) l_feature_1;
ElfW(Word) l_flags_1;
/* Temporarily used in `dl_close'. */
unsigned int l_idx;
struct link_map_machine l_mach;
struct
{
const ElfW(Sym) *sym;
int type_class;
#ifdef DL_LOOKUP_RETURNS_MAP
struct link_map *value;
#else
ElfW(Addr) value;
#endif
const ElfW(Sym) *ret;
} l_lookup_cache;
};
struct dl_phdr_info
{
ElfW(Addr) dlpi_addr;
const char *dlpi_name;
const ElfW(Phdr) *dlpi_phdr;
ElfW(Half) dlpi_phnum;
};
extern int dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
size_t size, void *data),
void *data);
extern int __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
size_t size, void *data),
void *data);
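
/* Illustrative example (not part of this header): a minimal program that uses
   dl_iterate_phdr to list the currently loaded objects.  The callback
   signature and the dl_phdr_info members match the declarations above; the
   program structure and output format are just a sketch.  Note that
   dl_iterate_phdr is a GNU extension, so _GNU_SOURCE may need to be defined
   when compiling.

     #include <link.h>
     #include <stdio.h>

     static int
     print_object (struct dl_phdr_info *info, size_t size, void *data)
     {
       printf ("%s loaded at %p (%d program headers)\n",
               info->dlpi_name, (void *) info->dlpi_addr,
               (int) info->dlpi_phnum);
       return 0;
     }

     int
     main (void)
     {
       dl_iterate_phdr (print_object, NULL);
       return 0;
     }
*/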
#endif /* link.h */
| {
"pile_set_name": "Github"
} |
import theano
import theano.tensor as T
from lasagne.layers import Layer
from lasagne import init
from lasagne import nonlinearities
# original source at:
# https://github.com/f0k/Lasagne/blob/batchnorm/lasagne/layers/normalization.py
# this has not yet been released in Lasagne's master branch, so temporarily
# has to be included from the batchnorm branch
class BatchNormLayer(Layer):
"""
lasagne.layers.BatchNormLayer(incoming, axes='auto', epsilon=1e-4,
alpha=0.1, mode='low_mem',
beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
mean=lasagne.init.Constant(0), inv_std=lasagne.init.Constant(1), **kwargs)
Batch Normalization
This layer implements batch normalization of its inputs, following [1]_:
.. math::
y = \\frac{x - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\gamma + \\beta
That is, the input is normalized to zero mean and unit variance, and then
linearly transformed. The crucial part is that the mean and variance are
computed across the batch dimension, i.e., over examples, not per example.
During training, :math:`\\mu` and :math:`\\sigma^2` are defined to be the
mean and variance of the current input mini-batch :math:`x`, and during
testing, they are replaced with average statistics over the training
data. Consequently, this layer has four stored parameters: :math:`\\beta`,
:math:`\\gamma`, and the averages :math:`\\mu` and :math:`\\sigma^2`
(nota bene: instead of :math:`\\sigma^2`, the layer actually stores
:math:`1 / \\sqrt{\\sigma^2 + \\epsilon}`, for compatibility to cuDNN).
By default, this layer learns the average statistics as exponential moving
averages computed during training, so it can be plugged into an existing
network without any changes of the training procedure (see Notes).
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
axes : 'auto', int or tuple of int
The axis or axes to normalize over. If ``'auto'`` (the default),
normalize over all axes except for the second: this will normalize over
the minibatch dimension for dense layers, and additionally over all
spatial dimensions for convolutional layers.
epsilon : scalar
Small constant :math:`\\epsilon` added to the variance before taking
the square root and dividing by it, to avoid numerical problems
alpha : scalar
Coefficient for the exponential moving average of batch-wise means and
standard deviations computed during training; the closer to one, the
more it will depend on the last batches seen
beta : Theano shared variable, expression, numpy array, callable or None
Initial value, expression or initializer for :math:`\\beta`. Must match
the incoming shape, skipping all axes in `axes`. Set to ``None`` to fix
it to 0.0 instead of learning it.
See :func:`lasagne.utils.create_param` for more information.
gamma : Theano shared variable, expression, numpy array, callable or None
Initial value, expression or initializer for :math:`\\gamma`. Must
match the incoming shape, skipping all axes in `axes`. Set to ``None``
to fix it to 1.0 instead of learning it.
See :func:`lasagne.utils.create_param` for more information.
mean : Theano shared variable, expression, numpy array, or callable
Initial value, expression or initializer for :math:`\\mu`. Must match
the incoming shape, skipping all axes in `axes`.
See :func:`lasagne.utils.create_param` for more information.
inv_std : Theano shared variable, expression, numpy array, or callable
Initial value, expression or initializer for :math:`1 / \\sqrt{
\\sigma^2 + \\epsilon}`. Must match the incoming shape, skipping all
axes in `axes`.
See :func:`lasagne.utils.create_param` for more information.
**kwargs
Any additional keyword arguments are passed to the :class:`Layer`
superclass.
Notes
-----
This layer should be inserted between a linear transformation (such as a
:class:`DenseLayer`, or :class:`Conv2DLayer`) and its nonlinearity. The
convenience function :func:`batch_norm` modifies an existing layer to
insert batch normalization in front of its nonlinearity.
The behavior can be controlled by passing keyword arguments to
:func:`lasagne.layers.get_output()` when building the output expression
of any network containing this layer.
During training, [1]_ normalize each input mini-batch by its statistics
and update an exponential moving average of the statistics to be used for
validation. This can be achieved by passing ``deterministic=False``.
For validation, [1]_ normalize each input mini-batch by the stored
statistics. This can be achieved by passing ``deterministic=True``.
For more fine-grained control, ``batch_norm_update_averages`` can be passed
to update the exponential moving averages (``True``) or not (``False``),
and ``batch_norm_use_averages`` can be passed to use the exponential moving
averages for normalization (``True``) or normalize each mini-batch by its
own statistics (``False``). These settings override ``deterministic``.
Note that for testing a model after training, [1]_ replace the stored
exponential moving average statistics by fixing all network weights and
re-computing average statistics over the training data in a layerwise
fashion. This is not part of the layer implementation.
In case you set `axes` to not include the batch dimension (the first axis,
usually), normalization is done per example, not across examples. This does
not require any averages, so you can pass ``batch_norm_update_averages``
and ``batch_norm_use_averages`` as ``False`` in this case.
See also
--------
batch_norm : Convenience function to apply batch normalization to a layer
References
----------
.. [1] Ioffe, Sergey and Szegedy, Christian (2015):
Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift. http://arxiv.org/abs/1502.03167.
"""
def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
mode='low_mem', beta=init.Constant(0), gamma=init.Constant(1),
mean=init.Constant(0), inv_std=init.Constant(1), **kwargs):
super(BatchNormLayer, self).__init__(incoming, **kwargs)
if axes == 'auto':
# default: normalize over all but the second axis
axes = (0,) + tuple(range(2, len(self.input_shape)))
elif isinstance(axes, int):
axes = (axes,)
self.axes = axes
self.epsilon = epsilon
self.alpha = alpha
self.mode = mode
# create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
if beta is None:
self.beta = None
else:
self.beta = self.add_param(beta, shape, 'beta',
trainable=True, regularizable=False)
if gamma is None:
self.gamma = None
else:
self.gamma = self.add_param(gamma, shape, 'gamma',
trainable=True, regularizable=True)
self.mean = self.add_param(mean, shape, 'mean',
trainable=False, regularizable=False)
self.inv_std = self.add_param(inv_std, shape, 'inv_std',
trainable=False, regularizable=False)
def get_output_for(self, input, deterministic=False, **kwargs):
input_mean = input.mean(self.axes)
input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))
# Decide whether to use the stored averages or mini-batch statistics
use_averages = kwargs.get('batch_norm_use_averages',
deterministic)
if use_averages:
mean = self.mean
inv_std = self.inv_std
else:
mean = input_mean
inv_std = input_inv_std
# Decide whether to update the stored averages
update_averages = kwargs.get('batch_norm_update_averages',
not deterministic)
if update_averages:
# Trick: To update the stored statistics, we create memory-aliased
# clones of the stored statistics:
running_mean = theano.clone(self.mean, share_inputs=False)
running_inv_std = theano.clone(self.inv_std, share_inputs=False)
# set a default update for them:
running_mean.default_update = ((1 - self.alpha) * running_mean +
self.alpha * input_mean)
running_inv_std.default_update = ((1 - self.alpha) *
running_inv_std +
self.alpha * input_inv_std)
# and make sure they end up in the graph without participating in
# the computation (this way their default_update will be collected
# and applied, but the computation will be optimized away):
mean += 0 * running_mean
inv_std += 0 * running_inv_std
# prepare dimshuffle pattern inserting broadcastable axes as needed
param_axes = iter(range(input.ndim - len(self.axes)))
pattern = ['x' if input_axis in self.axes
else next(param_axes)
for input_axis in range(input.ndim)]
# apply dimshuffle pattern to all parameters
beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
mean = mean.dimshuffle(pattern)
inv_std = inv_std.dimshuffle(pattern)
# normalize
normalized = (input - mean) * (gamma * inv_std) + beta
return normalized
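

# A minimal usage sketch (not part of the original module): BatchNormLayer is
# meant to sit between a linear transformation and its nonlinearity, as the
# class docstring above describes.  The layer sizes below are arbitrary
# examples.
if __name__ == "__main__":
    from lasagne.layers import InputLayer, DenseLayer, NonlinearityLayer

    l_in = InputLayer((None, 100))
    # linear transformation with the nonlinearity stripped off ...
    l_dense = DenseLayer(l_in, num_units=50, nonlinearity=None)
    # ... batch-normalized ...
    l_bn = BatchNormLayer(l_dense)
    # ... and only then passed through the nonlinearity.
    l_out = NonlinearityLayer(l_bn, nonlinearity=nonlinearities.rectify)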
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
File Name: 15.5.4.4-3.js
ECMA Section: 15.5.4.4 String.prototype.charAt(pos)
Description: Returns a string containing the character at position
pos in the string. If there is no character at that
string, the result is the empty string. The result is
a string value, not a String object.
When the charAt method is called with one argument,
pos, the following steps are taken:
1. Call ToString, with this value as its argument
2. Call ToInteger pos
3. Compute the number of characters in Result(1)
4. If Result(2) is less than 0 is or not less than
Result(3), return the empty string
5. Return a string of length 1 containing one character
from result (1), the character at position Result(2).
Note that the charAt function is intentionally generic;
it does not require that its this value be a String
object. Therefore it can be transferred to other kinds
of objects for use as a method.
This tests assiging charAt to a user-defined function.
Author: [email protected]
Date: 2 october 1997
*/
var SECTION = "15.5.4.4-3";
var VERSION = "ECMA_1";
startTest();
var TITLE = "String.prototype.charAt";
writeHeaderToLog( SECTION + " "+ TITLE);
var foo = new MyObject('hello');
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "h", foo.charAt(0) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "e", foo.charAt(1) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "l", foo.charAt(2) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "l", foo.charAt(3) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "o", foo.charAt(4) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "", foo.charAt(-1) );
new TestCase( SECTION, "var foo = new MyObject('hello'); ", "", foo.charAt(5) );
var boo = new MyObject(true);
new TestCase( SECTION, "var boo = new MyObject(true); ", "t", boo.charAt(0) );
new TestCase( SECTION, "var boo = new MyObject(true); ", "r", boo.charAt(1) );
new TestCase( SECTION, "var boo = new MyObject(true); ", "u", boo.charAt(2) );
new TestCase( SECTION, "var boo = new MyObject(true); ", "e", boo.charAt(3) );
var noo = new MyObject( Math.PI );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "3", noo.charAt(0) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", ".", noo.charAt(1) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "1", noo.charAt(2) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "4", noo.charAt(3) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "1", noo.charAt(4) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "5", noo.charAt(5) );
new TestCase( SECTION, "var noo = new MyObject(Math.PI); ", "9", noo.charAt(6) );
test();
function MyObject (v) {
this.value = v;
this.toString = new Function( "return this.value +'';" );
this.valueOf = new Function( "return this.value" );
this.charAt = String.prototype.charAt;
}
| {
"pile_set_name": "Github"
} |
{
"parent": "forestry:block/apiculture/alveary_plain_normal",
"textures": {
"particle": "forestry:blocks/apiculture/alveary.sieve",
"down": "forestry:blocks/apiculture/alveary.bottom",
"up": "forestry:blocks/apiculture/alveary.bottom",
"north": "forestry:blocks/apiculture/alveary.sieve",
"east": "forestry:blocks/apiculture/alveary.sieve",
"south": "forestry:blocks/apiculture/alveary.sieve",
"west": "forestry:blocks/apiculture/alveary.sieve"
}
}
| {
"pile_set_name": "Github"
} |
/*
* hmac.c
*
* implementation of hmac auth_type_t
*
* David A. McGrew
* Cisco Systems, Inc.
*/
/*
*
* Copyright(c) 2001-2006 Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Cisco Systems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "hmac.h"
#include "alloc.h"
/* the debug module for authentication */
debug_module_t mod_hmac = {
0, /* debugging is off by default */
"hmac sha-1" /* printable name for module */
};
err_status_t
hmac_alloc(auth_t **a, int key_len, int out_len) {
extern auth_type_t hmac;
uint8_t *pointer;
debug_print(mod_hmac, "allocating auth func with key length %d", key_len);
debug_print(mod_hmac, " tag length %d", out_len);
/*
* check key length - note that we don't support keys larger
* than 20 bytes yet
*/
if (key_len > 20)
return err_status_bad_param;
/* check output length - should be no larger than 20 bytes */
if (out_len > 20)
return err_status_bad_param;
/* allocate memory for auth and hmac_ctx_t structures */
pointer = (uint8_t*)crypto_alloc(sizeof(hmac_ctx_t) + sizeof(auth_t));
if (pointer == NULL)
return err_status_alloc_fail;
/* set pointers */
*a = (auth_t *)pointer;
(*a)->type = &hmac;
(*a)->state = pointer + sizeof(auth_t);
(*a)->out_len = out_len;
(*a)->key_len = key_len;
(*a)->prefix_len = 0;
/* increment global count of all hmac uses */
hmac.ref_count++;
return err_status_ok;
}
err_status_t
hmac_dealloc(auth_t *a) {
extern auth_type_t hmac;
/* zeroize entire state*/
octet_string_set_to_zero((uint8_t *)a,
sizeof(hmac_ctx_t) + sizeof(auth_t));
/* free memory */
crypto_free(a);
/* decrement global count of all hmac uses */
hmac.ref_count--;
return err_status_ok;
}
err_status_t
hmac_init(hmac_ctx_t *state, const uint8_t *key, int key_len) {
int i;
uint8_t ipad[64];
/*
* check key length - note that we don't support keys larger
* than 20 bytes yet
*/
if (key_len > 20)
return err_status_bad_param;
/*
* set values of ipad and opad by xoring the key into the
* appropriate constant values
*/
for (i=0; i < key_len; i++) {
ipad[i] = key[i] ^ 0x36;
state->opad[i] = key[i] ^ 0x5c;
}
/* set the rest of ipad, opad to constant values */
for ( ; i < 64; i++) {
ipad[i] = 0x36;
((uint8_t *)state->opad)[i] = 0x5c;
}
debug_print(mod_hmac, "ipad: %s", octet_string_hex_string(ipad, 64));
/* initialize sha1 context */
sha1_init(&state->init_ctx);
/* hash ipad ^ key */
sha1_update(&state->init_ctx, ipad, 64);
memcpy(&state->ctx, &state->init_ctx, sizeof(sha1_ctx_t));
return err_status_ok;
}
err_status_t
hmac_start(hmac_ctx_t *state) {
memcpy(&state->ctx, &state->init_ctx, sizeof(sha1_ctx_t));
return err_status_ok;
}
err_status_t
hmac_update(hmac_ctx_t *state, const uint8_t *message, int msg_octets) {
debug_print(mod_hmac, "input: %s",
octet_string_hex_string(message, msg_octets));
/* hash message into sha1 context */
sha1_update(&state->ctx, message, msg_octets);
return err_status_ok;
}
err_status_t
hmac_compute(hmac_ctx_t *state, const void *message,
int msg_octets, int tag_len, uint8_t *result) {
uint32_t hash_value[5];
uint32_t H[5];
int i;
/* check tag length, return error if we can't provide the value expected */
if (tag_len > 20)
return err_status_bad_param;
/* hash message, copy output into H */
hmac_update(state, (const uint8_t*)message, msg_octets);
sha1_final(&state->ctx, H);
/*
* note that we don't need to debug_print() the input, since the
* function hmac_update() already did that for us
*/
debug_print(mod_hmac, "intermediate state: %s",
octet_string_hex_string((uint8_t *)H, 20));
/* re-initialize hash context */
sha1_init(&state->ctx);
/* hash opad ^ key */
sha1_update(&state->ctx, (uint8_t *)state->opad, 64);
/* hash the result of the inner hash */
sha1_update(&state->ctx, (uint8_t *)H, 20);
/* the result is returned in the array hash_value[] */
sha1_final(&state->ctx, hash_value);
/* copy hash_value to *result */
for (i=0; i < tag_len; i++)
result[i] = ((uint8_t *)hash_value)[i];
debug_print(mod_hmac, "output: %s",
octet_string_hex_string((uint8_t *)hash_value, tag_len));
return err_status_ok;
}
/* begin test case 0 */
uint8_t
hmac_test_case_0_key[20] = {
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b
};
uint8_t
hmac_test_case_0_data[8] = {
0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 /* "Hi There" */
};
uint8_t
hmac_test_case_0_tag[20] = {
0xb6, 0x17, 0x31, 0x86, 0x55, 0x05, 0x72, 0x64,
0xe2, 0x8b, 0xc0, 0xb6, 0xfb, 0x37, 0x8c, 0x8e,
0xf1, 0x46, 0xbe, 0x00
};
auth_test_case_t
hmac_test_case_0 = {
20, /* octets in key */
hmac_test_case_0_key, /* key */
8, /* octets in data */
hmac_test_case_0_data, /* data */
20, /* octets in tag */
hmac_test_case_0_tag, /* tag */
NULL /* pointer to next testcase */
};
/* end test case 0 */
char hmac_description[] = "hmac sha-1 authentication function";
/*
* auth_type_t hmac is the hmac metaobject
*/
auth_type_t
hmac = {
(auth_alloc_func) hmac_alloc,
(auth_dealloc_func) hmac_dealloc,
(auth_init_func) hmac_init,
(auth_compute_func) hmac_compute,
(auth_update_func) hmac_update,
(auth_start_func) hmac_start,
(char *) hmac_description,
(int) 0, /* instance count */
(auth_test_case_t *) &hmac_test_case_0,
(debug_module_t *) &mod_hmac
};
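/*
 * Illustrative usage sketch (not part of the original source): allocating an
 * hmac auth function, keying it, and computing a 20-octet tag. The key and
 * message values below are hypothetical; per the checks above, key_len and
 * out_len may not exceed 20 octets.
 *
 *   uint8_t key[20] = { 0 };    // hypothetical key material
 *   uint8_t tag[20];
 *   auth_t *a;
 *
 *   if (hmac_alloc(&a, 20, 20) != err_status_ok)
 *     return;                                       // allocation failed
 *   hmac_init((hmac_ctx_t *)a->state, key, 20);     // key the context
 *   hmac_start((hmac_ctx_t *)a->state);             // reset for a new message
 *   hmac_compute((hmac_ctx_t *)a->state, "Hi There", 8, 20, tag);
 *   hmac_dealloc(a);
 */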
| {
"pile_set_name": "Github"
} |
import Foundation
#if os(macOS)
import PostboxMac
import SwiftSignalKitMac
import MtProtoKitMac
#else
import Postbox
import SwiftSignalKit
import MtProtoKitDynamic
#endif
func managedAutodownloadSettingsUpdates(accountManager: AccountManager, network: Network) -> Signal<Void, NoError> {
let poll = Signal<Void, NoError> { subscriber in
return (network.request(Api.functions.account.getAutoDownloadSettings())
|> retryRequest
|> mapToSignal { result -> Signal<Void, NoError> in
return updateAutodownloadSettingsInteractively(accountManager: accountManager, { _ -> AutodownloadSettings in
return AutodownloadSettings(apiAutodownloadSettings: result)
})
}).start()
}
return (poll |> then(.complete() |> suspendAwareDelay(1.0 * 60.0 * 60.0, queue: Queue.concurrentDefaultQueue()))) |> restart
}
public enum SavedAutodownloadPreset {
case low
case medium
case high
}
public func saveAutodownloadSettings(account: Account, preset: SavedAutodownloadPreset, settings: AutodownloadPresetSettings) -> Signal<Void, NoError> {
var flags: Int32 = 0
switch preset {
case .low:
flags |= (1 << 0)
case .high:
flags |= (1 << 1)
default:
break
}
return account.network.request(Api.functions.account.saveAutoDownloadSettings(flags: flags, settings: apiAutodownloadPresetSettings(settings)))
|> `catch` { _ -> Signal<Api.Bool, NoError> in
return .complete()
}
|> mapToSignal { _ -> Signal<Void, NoError> in
return .complete()
}
}
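// Illustrative usage sketch (not part of the original source): persisting the
// "high" preset for an account. The `account` and `presetSettings` values are
// assumed to already exist in the caller's scope.
//
//   let disposable = saveAutodownloadSettings(account: account, preset: .high, settings: presetSettings).start()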
| {
"pile_set_name": "Github"
} |
//
// FWKeysHelper.h
// faceWrapper
//
// Created by macpocket1 on 16/12/11.
// Copyright (c) 2011 __MyCompanyName__. All rights reserved.
//
//#error Define your keys
#import <Foundation/Foundation.h>
@interface FWKeysHelper : NSObject
{
//Define your API from skybiometry.com --> https://www.skybiometry.com/Account
NSString *kFaceAPI;
NSString *kFaceSecretAPI;
//Define your xAuth Tokens at developer.twitter.com and set them in your skybiometry.com account
NSString *kTwitterConsumerKey;
NSString *kTwitterConsumerSecret;
//Define your Facebook Tokens at https://developers.facebook.com and set them in your skybiometry.com account
NSString *kFacebookAppID;
}
+ (NSString *)faceAPI;
+ (void)setFaceAPI:(NSString *)value;
+ (NSString *)faceSecretAPI;
+ (void)setFaceSecretAPI:(NSString *)value;
+ (NSString *)twitterConsumerKey;
+ (void)setTwitterConsumerKey:(NSString *)value;
+ (NSString *)twitterConsumerSecret;
+ (void)setTwitterConsumerSecret:(NSString *)value;
+ (NSString *)facebookAppID;
+ (void)setFacebookAppID:(NSString *)value;
@property (nonatomic, retain) NSString *kFaceAPI;
@property (nonatomic, retain) NSString *kFaceSecretAPI;
@property (nonatomic, retain) NSString *kTwitterConsumerKey;
@property (nonatomic, retain) NSString *kTwitterConsumerSecret;
@property (nonatomic, retain) NSString *kFacebookAppID;
@end
| {
"pile_set_name": "Github"
} |
@import 'base-variables';
@import 'site/variables';
.clay-site-swatches {
display: flex;
flex-wrap: wrap;
margin-left: -12px;
margin-right: -12px;
}
.clay-site-swatch {
border-radius: 4px;
box-shadow: 0 0.1rem 3rem rgba(19, 20, 31, 0.08);
margin: 0 12px 1.5rem;
overflow: hidden;
width: 100%;
@media (min-width: 480px) {
width: calc(50% - 24px);
}
@media (min-width: 600px) {
width: calc(33.33333% - 24px);
}
@media (min-width: 900px) {
width: calc(50% - 24px);
}
@media (min-width: 1024px) {
width: calc(33.33333% - 24px);
}
@media (min-width: 1600px) {
width: calc(25% - 24px);
}
.clay-site-swatch-color {
padding-bottom: 50%;
}
.clay-site-swatch-title {
color: rgba(0, 0, 0, 0.8);
font-weight: 600;
margin-bottom: 0.5rem;
}
.clay-site-swatch-body {
padding: 1rem 1rem 0.75rem;
.autofit-row {
margin-bottom: 0.25rem;
}
}
.clay-site-swatch-type {
color: rgb(107, 108, 126);
font-weight: 500;
padding-right: 1rem;
text-transform: uppercase;
@media (min-width: $grid-float-breakpoint) {
padding-right: 2rem;
}
}
.clay-site-hex {
text-transform: lowercase;
}
.clay-site-swatch-value {
color: #000;
font-size: 0.875rem;
}
.clay-site-swatch-text {
cursor: pointer;
user-select: all;
}
}
$swatches: (
'black': $black,
'gray-900': $gray-900,
'gray-800': $gray-800,
'gray-700': $gray-700,
'gray-400': $gray-400,
'gray-600': $gray-600,
'gray-500': $gray-500,
'gray-300': $gray-300,
'gray-200': $gray-200,
'gray-100': $gray-100,
'white': $white,
'blue': $blue,
'indigo': $indigo,
'purple': $purple,
'pink': $pink,
'orange': $orange,
'red': $red,
'yellow': $yellow,
'green': $green,
'teal': $teal,
'cyan': $cyan,
'primary-d2': $primary-d2,
'primary-d1': $primary-d1,
'primary': $primary,
'primary-l1': $primary-l1,
'primary-l2': $primary-l2,
'primary-l3': $primary-l3,
'secondary-d2': $secondary-d2,
'secondary-d1': $secondary-d1,
'secondary': $secondary,
'secondary-l1': $secondary-l1,
'secondary-l2': $secondary-l2,
'secondary-l3': $secondary-l3,
'success-d2': $success-d2,
'success-d1': $success-d1,
'success': $success,
'success-l1': $success-l1,
'success-l2': $success-l2,
'info-d2': $info-d2,
'info-d1': $info-d1,
'info': $info,
'info-l1': $info-l1,
'info-l2': $info-l2,
'warning-d2': $warning-d2,
'warning-d1': $warning-d1,
'warning': $warning,
'warning-l1': $warning-l1,
'warning-l2': $warning-l2,
'danger-d2': $danger-d2,
'danger-d1': $danger-d1,
'danger': $danger,
'danger-l1': $danger-l1,
'danger-l2': $danger-l2,
'light-d2': $light-d2,
'light-d1': $light-d1,
'light': $light,
'light-l1': $light-l1,
'light-l2': $light-l2,
'dark-d2': $dark-d2,
'dark-d1': $dark-d1,
'dark': $dark,
'dark-l1': $dark-l1,
'dark-l2': $dark-l2,
);
@each $swatch, $value in $swatches {
.clay-site-swatch-#{$swatch} {
background-color: $value;
content: '#{$value}';
}
}
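// Illustrative note (not part of the original file): for each entry in
// $swatches the loop above emits one class of the following shape (shown here
// with a hypothetical value for $primary):
//
//   .clay-site-swatch-primary {
//       background-color: #0b5fff;
//       content: '#0b5fff';
//   }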
#clayCSSTheme {
content: 'base';
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!--Generated by crowdin.com-->
<resources>
<string name="debug_toast_enabled">Soát lỗi đang bật. Hãy quay về cài đặt.</string>
<string name="debug_toast_already_enabled">Đã bật soát lỗi. Hãy quay lại phần cài đặt.</string>
<string name="debug_disclaimer_info">Dù hầu hết các thông tin cá nhân đều tự động bị xoá khỏi báo cáo, một số thông tin nhạy cảm có thể vẫn xuất hiện. \nHãy xem lại báo cáo lỗi trước khi gửi đi. \n\nBấm một trong các tuỳ chọn dưới đây để soạn email chứa dữ liệu trang web.
</string>
<string name="debug_incomplete">Báo cáo không đầy đủ</string>
<string name="debug_web">Dò lỗi từ web</string>
<string name="debug_web_desc">Chuyển đến trang bị lỗi và gửi thông tin đề dò lỗi.</string>
<string name="parsing_data">Phân tích dữ liệu</string>
<string name="debug_parsers">Phân tích lỗi</string>
<string name="debug_parsers_desc">Mở một trong các bộ phân tích lỗi để dò lỗi từ kết quả phân tích</string>
</resources>
| {
"pile_set_name": "Github"
} |
/**
* Copyright 2011 LiveRamp
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.liveramp.hank.partition_server;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Sets;
import org.apache.thrift.TException;
import org.junit.Test;
import com.liveramp.hank.config.PartitionServerConfigurator;
import com.liveramp.hank.config.ReaderConfigurator;
import com.liveramp.hank.coordinator.Coordinator;
import com.liveramp.hank.coordinator.Domain;
import com.liveramp.hank.coordinator.DomainAndVersion;
import com.liveramp.hank.coordinator.Host;
import com.liveramp.hank.coordinator.HostDomain;
import com.liveramp.hank.coordinator.HostDomainPartition;
import com.liveramp.hank.coordinator.PartitionServerAddress;
import com.liveramp.hank.coordinator.Ring;
import com.liveramp.hank.coordinator.RingGroup;
import com.liveramp.hank.coordinator.mock.MockCoordinator;
import com.liveramp.hank.coordinator.mock.MockDomain;
import com.liveramp.hank.coordinator.mock.MockDomainGroup;
import com.liveramp.hank.generated.HankBulkResponse;
import com.liveramp.hank.generated.HankException;
import com.liveramp.hank.generated.HankResponse;
import com.liveramp.hank.partitioner.Partitioner;
import com.liveramp.hank.storage.Reader;
import com.liveramp.hank.storage.mock.MockReader;
import com.liveramp.hank.storage.mock.MockStorageEngine;
import com.liveramp.hank.test.BaseTestCase;
import com.liveramp.hank.test.coordinator.MockHost;
import com.liveramp.hank.test.coordinator.MockHostDomain;
import com.liveramp.hank.test.coordinator.MockHostDomainPartition;
import com.liveramp.hank.test.coordinator.MockRing;
import com.liveramp.hank.test.coordinator.MockRingGroup;
import com.liveramp.hank.test.partitioner.MapPartitioner;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestPartitionServerHandler extends BaseTestCase {
private static final ByteBuffer K1 = bb(1);
private static final ByteBuffer K2 = bb(2);
private static final ByteBuffer K3 = bb(3);
private static final ByteBuffer K4 = bb(4);
private static final ByteBuffer K5 = bb(5);
private static final byte[] V1 = new byte[]{9};
private static final Host mockHostConfig = new MockHost(
new PartitionServerAddress("localhost", 12345)) {
@Override
public HostDomain getHostDomain(Domain domain) {
return new MockHostDomain(domain) {
@Override
public HostDomainPartition addPartition(int partitionNumber) {
return null;
}
@Override
public Set<HostDomainPartition> getPartitions() throws IOException {
return new HashSet<HostDomainPartition>(Arrays.asList(
new MockHostDomainPartition(0, 0),
new MockHostDomainPartition(4, 0)));
}
};
}
};
@Test
public void testDontServeNotUpToDatePartition() throws IOException, TException {
try {
PartitionServerHandler handler = createHandler(42);
fail("Should throw an exception.");
} catch (IOException e) {
}
}
@Test
public void testSetUpAndServe() throws Exception {
PartitionServerHandler handler = createHandler(0);
assertEquals(HankResponse.value(V1), handler.get((byte)0, K1));
assertEquals(HankResponse.value(V1), handler.get((byte)0, K5));
assertEquals(HankResponse.xception(HankException.wrong_host(true)),
handler.get(0, K2));
assertEquals(HankResponse.xception(HankException.wrong_host(true)),
handler.get(0, K3));
assertEquals(HankResponse.xception(HankException.wrong_host(true)),
handler.get(0, K4));
}
@Test
public void testSetUpAndServeBulk() throws Exception {
PartitionServerHandler handler = createHandler(0);
// Regular bulk request
List<ByteBuffer> keys1 = new ArrayList<ByteBuffer>();
keys1.add(K1);
keys1.add(K2);
keys1.add(K5);
ArrayList<HankResponse> responses1 = new ArrayList<HankResponse>();
responses1.add(HankResponse.value(V1));
responses1.add(HankResponse.xception(HankException.wrong_host(true)));
responses1.add(HankResponse.value(V1));
assertEquals(HankBulkResponse.responses(responses1), handler.getBulk(0, keys1));
// Large bulk request
List<ByteBuffer> keys2 = new ArrayList<ByteBuffer>();
ArrayList<HankResponse> responses2 = new ArrayList<HankResponse>();
for (int i = 0; i < 10000; ++i) {
keys2.add(K1);
responses2.add(HankResponse.value(V1));
}
assertEquals(HankBulkResponse.responses(responses2), handler.getBulk(0, keys2));
}
private PartitionServerHandler createHandler(final int readerVersionNumber) throws IOException {
Partitioner partitioner = new MapPartitioner(K1, 0, K2, 1, K3, 2, K4, 3,
K5, 4);
MockStorageEngine storageEngine = new MockStorageEngine() {
@Override
public Reader getReader(ReaderConfigurator configurator, int partitionNumber, DiskPartitionAssignment assignment)
throws IOException {
return new MockReader(configurator, partitionNumber, V1, readerVersionNumber) {
@Override
public Integer getVersionNumber() {
return readerVersionNumber;
}
};
}
};
final Domain domain = new MockDomain("myDomain", 0, 5, partitioner, storageEngine, null, null);
final MockDomainGroup dg = new MockDomainGroup("myDomainGroup") {
@Override
public Set<DomainAndVersion> getDomainVersions() throws IOException {
Set<DomainAndVersion> result = new HashSet<DomainAndVersion>();
result.add(new DomainAndVersion(domain, 1));
return result;
}
};
final MockRingGroup rg = new MockRingGroup(dg, "myRingGroupName", Sets.newHashSet());
final MockRing mockRing = new MockRing(null, rg, 1) {
@Override
public Host getHostByAddress(PartitionServerAddress address) {
return mockHostConfig;
}
};
Coordinator mockCoordinator = new MockCoordinator() {
@Override
public RingGroup getRingGroup(String ringGroupName) {
assertEquals("myRingGroupName", ringGroupName);
return new MockRingGroup(dg, "myRingGroupName", Sets.newHashSet()) {
@Override
public Ring getRingForHost(PartitionServerAddress hostAddress) {
return mockRing;
}
};
}
};
PartitionServerConfigurator config = new MockPartitionServerConfigurator(12345,
mockCoordinator, "myRingGroupName", "/tmp/local/data/dir");
PartitionServerHandler handler = new PartitionServerHandler(new PartitionServerAddress(
"localhost", 12345), config, mockCoordinator);
return handler;
}
private static ByteBuffer bb(int i) {
return ByteBuffer.wrap(new byte[]{(byte)i});
}
}
| {
"pile_set_name": "Github"
} |
while (true) {
var x = function () {};
}
for (var i = 0; i < 5; i++) {
var y = function () {};
}
while (true) {
function z() {}
} | {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
using System;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using DynamicsOmnichannelBot.AdaptiveCards;
using Microsoft.Bot.Builder;
using Microsoft.Bot.Schema;
using Microsoft.Extensions.Configuration;
using OmniChannel.Models;
namespace Microsoft.BotBuilderSamples.Bots
{
public class DynamicsOmnichannelAgentBot : ActivityHandler
{
private Conversation conversation;
public DynamicsOmnichannelAgentBot()
{
// Load conversation json for demo
ConfigurationBuilder configBuilder = new ConfigurationBuilder();
var configuration = configBuilder
.SetBasePath(AppDomain.CurrentDomain.BaseDirectory)
.AddJsonFile($"DemoConversation/conversation.json", optional: false, reloadOnChange: true)
.Build();
conversation = new Conversation();
configuration.Bind("Conversation", conversation);
}
public override async Task OnTurnAsync(ITurnContext turnContext, CancellationToken cancellationToken)
{
if (turnContext.Activity.Type == ActivityTypes.ConversationUpdate && turnContext.Activity.MembersAdded != null)
{
var replyActivity = MessageFactory.Text($"{conversation.WelcomeMessage}");
await turnContext.SendActivityAsync(replyActivity, cancellationToken);
} else if (turnContext.Activity.Type == ActivityTypes.Message)
{
if (turnContext.Activity.Text.Contains("agent") || conversation.EscalationDictionary.ContainsKey(turnContext.Activity.Text))
{
Dictionary<string, string> endConversationContext = new Dictionary<string, string>();
if (conversation.EscalationDictionary.ContainsKey(turnContext.Activity.Text)) { endConversationContext = conversation.EscalationDictionary[turnContext.Activity.Text]; }
await turnContext.SendActivityAsync("Transferring to an agent, who can help you with this. Please remain online…");
Dictionary<string, object> handOffContext = new Dictionary<string, object>()
{
{ "BotHandoffContext", "Specialist request" },
{ "skill", "service" }
};
var handoffevent = EventFactory.CreateHandoffInitiation(turnContext, new
{
MessageToAgent = "Issue Summary: billing question",
Context = handOffContext
}); // Transcript is persisted by Omnichannel
await turnContext.SendActivityAsync(handoffevent);
}
else if (turnContext.Activity.Text.ToLower().Contains("end"))
{
await turnContext.SendActivityAsync("Thanks for talking with me. Have a good day. Bye.");
IEndOfConversationActivity endOfConversationActivity = Activity.CreateEndOfConversationActivity();
await turnContext.SendActivityAsync(endOfConversationActivity);
}
else
{
await HandleConversation(turnContext, cancellationToken);
}
}
}
private async Task HandleConversation(ITurnContext turnContext, CancellationToken cancellationToken)
{
var text = turnContext.Activity.Text;
IMessageActivity reply = Activity.CreateMessageActivity();
// Handle conversation scenarios
if (conversation.ConversationDictionary.ContainsKey(text))
{
var replyText = conversation.ConversationDictionary[text];
if (replyText.Contains("Adaptive Card"))
{
replyText = await GetAdaptiveCardsReplyText(reply, replyText, turnContext, cancellationToken);
}
if (replyText.Contains("Suggested Action"))
{
replyText = GetSuggestedActionsReplyText(reply, replyText);
}
reply.Text = replyText;
}
else {
switch (text.ToLower())
{
case "microsoft store":
case "adaptive card":
// Display an Adaptive Card for Microsoft Store
reply.Attachments.Add(AdaptiveCards.CreateAdaptiveCardAttachment("store.json"));
break;
case "microsoft article":
// Display an Adaptive Card for support article
reply.Attachments.Add(AdaptiveCards.CreateAdaptiveCardAttachment("article.json"));
break;
case "suggested action":
reply.SuggestedActions = AdaptiveCards.CreateSuggestedAction(new string[] { "10am", "1pm", "3pm" });
break;
default:
reply.Text = $"I am sorry, I cannot help you with\n'{text}'\n\n If you would like me to transfer you to a customer service agent please type 'Talk to agent'";
break;
}
}
await turnContext.SendActivityAsync(reply, cancellationToken);
}
private async Task<string> GetAdaptiveCardsReplyText(IMessageActivity reply, string replyText, ITurnContext turnContext, CancellationToken cancellationToken)
{
int start = replyText.IndexOf("(");
int end = replyText.IndexOf(")")+1;
var adaptiveCardSubString = replyText.Substring(start, end - start);
int cardNameStart = adaptiveCardSubString.IndexOf("'")+1;
int cardNameEnd = adaptiveCardSubString.LastIndexOf("'");
var adaptiveCard = adaptiveCardSubString.Substring(cardNameStart, cardNameEnd-cardNameStart);
reply.Attachments.Add(AdaptiveCards.CreateAdaptiveCardAttachment(adaptiveCard));
reply.Text = replyText.Replace(adaptiveCardSubString, "");
await turnContext.SendActivityAsync(reply, cancellationToken);
// Follow up action or reply to sending adaptive card
reply.Attachments.Clear();
return conversation.ConversationDictionary[adaptiveCard];
}
private string GetSuggestedActionsReplyText(IMessageActivity reply, string replyText)
{
int start = replyText.IndexOf("(");
int end = replyText.IndexOf(")")+1;
var suggestedActionSubString = replyText.Substring(start, end-start);
int startActions = suggestedActionSubString.IndexOf("'")+1;
int endActions = suggestedActionSubString.LastIndexOf("'");
var suggestedActions = suggestedActionSubString.Substring(startActions, endActions- startActions).Split("/");
reply.SuggestedActions = AdaptiveCards.CreateSuggestedAction(suggestedActions);
return replyText.Replace(suggestedActionSubString, "");
}
}
}
| {
"pile_set_name": "Github"
} |
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/moira-alert/moira (interfaces: Lock)
// Package mock_moira_alert is a generated GoMock package.
package mock_moira_alert
import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockLock is a mock of Lock interface
type MockLock struct {
ctrl *gomock.Controller
recorder *MockLockMockRecorder
}
// MockLockMockRecorder is the mock recorder for MockLock
type MockLockMockRecorder struct {
mock *MockLock
}
// NewMockLock creates a new mock instance
func NewMockLock(ctrl *gomock.Controller) *MockLock {
mock := &MockLock{ctrl: ctrl}
mock.recorder = &MockLockMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockLock) EXPECT() *MockLockMockRecorder {
return m.recorder
}
// Acquire mocks base method
func (m *MockLock) Acquire(arg0 <-chan struct{}) (<-chan struct{}, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Acquire", arg0)
ret0, _ := ret[0].(<-chan struct{})
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Acquire indicates an expected call of Acquire
func (mr *MockLockMockRecorder) Acquire(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Acquire", reflect.TypeOf((*MockLock)(nil).Acquire), arg0)
}
// Release mocks base method
func (m *MockLock) Release() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Release")
}
// Release indicates an expected call of Release
func (mr *MockLockMockRecorder) Release() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockLock)(nil).Release))
}
| {
"pile_set_name": "Github"
} |
#ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
#define BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
//
// detail/quick_allocator.hpp
//
// Copyright (c) 2003 David Abrahams
// Copyright (c) 2003 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/config.hpp>
#include <boost/smart_ptr/detail/lightweight_mutex.hpp>
#include <boost/type_traits/type_with_alignment.hpp>
#include <boost/type_traits/alignment_of.hpp>
#include <new> // ::operator new, ::operator delete
#include <cstddef> // std::size_t
namespace boost
{
namespace detail
{
template<unsigned size, unsigned align_> union freeblock
{
typedef typename boost::type_with_alignment<align_>::type aligner_type;
aligner_type aligner;
char bytes[size];
freeblock * next;
};
template<unsigned size, unsigned align_> struct allocator_impl
{
typedef freeblock<size, align_> block;
// It may seem odd to use such small pages.
//
// However, on a typical Windows implementation that uses
// the OS allocator, "normal size" pages interact with the
// "ordinary" operator new, slowing it down dramatically.
//
// 512 byte pages are handled by the small object allocator,
// and don't interfere with ::new.
//
// The other alternative is to use much bigger pages (1M.)
//
// It is surprisingly easy to hit pathological behavior by
// varying the page size. g++ 2.96 on Red Hat Linux 7.2,
// for example, passionately dislikes 496. 512 seems OK.
#if defined(BOOST_QA_PAGE_SIZE)
enum { items_per_page = BOOST_QA_PAGE_SIZE / size };
#else
enum { items_per_page = 512 / size }; // 1048560 / size
#endif
#ifdef BOOST_HAS_THREADS
static lightweight_mutex & mutex()
{
static freeblock< sizeof( lightweight_mutex ), boost::alignment_of< lightweight_mutex >::value > fbm;
static lightweight_mutex * pm = new( &fbm ) lightweight_mutex;
return *pm;
}
static lightweight_mutex * mutex_init;
#endif
static block * free;
static block * page;
static unsigned last;
static inline void * alloc()
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
if(block * x = free)
{
free = x->next;
return x;
}
else
{
if(last == items_per_page)
{
// "Listen to me carefully: there is no memory leak"
// -- Scott Meyers, Eff C++ 2nd Ed Item 10
page = ::new block[items_per_page];
last = 0;
}
return &page[last++];
}
}
static inline void * alloc(std::size_t n)
{
if(n != size) // class-specific new called for a derived object
{
return ::operator new(n);
}
else
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
if(block * x = free)
{
free = x->next;
return x;
}
else
{
if(last == items_per_page)
{
page = ::new block[items_per_page];
last = 0;
}
return &page[last++];
}
}
}
static inline void dealloc(void * pv)
{
if(pv != 0) // 18.4.1.1/13
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
block * pb = static_cast<block *>(pv);
pb->next = free;
free = pb;
}
}
static inline void dealloc(void * pv, std::size_t n)
{
if(n != size) // class-specific delete called for a derived object
{
::operator delete(pv);
}
else if(pv != 0) // 18.4.1.1/13
{
#ifdef BOOST_HAS_THREADS
lightweight_mutex::scoped_lock lock( mutex() );
#endif
block * pb = static_cast<block *>(pv);
pb->next = free;
free = pb;
}
}
};
#ifdef BOOST_HAS_THREADS
template<unsigned size, unsigned align_>
lightweight_mutex * allocator_impl<size, align_>::mutex_init = &allocator_impl<size, align_>::mutex();
#endif
template<unsigned size, unsigned align_>
freeblock<size, align_> * allocator_impl<size, align_>::free = 0;
template<unsigned size, unsigned align_>
freeblock<size, align_> * allocator_impl<size, align_>::page = 0;
template<unsigned size, unsigned align_>
unsigned allocator_impl<size, align_>::last = allocator_impl<size, align_>::items_per_page;
template<class T>
struct quick_allocator: public allocator_impl< sizeof(T), boost::alignment_of<T>::value >
{
};
} // namespace detail
} // namespace boost
#endif // #ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
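/*
 * Illustrative usage sketch (not part of the original header): a class can
 * route its own allocations through quick_allocator by defining
 * class-specific operator new/delete. The "node" type below is hypothetical.
 *
 *   struct node
 *   {
 *       int value;
 *
 *       void * operator new( std::size_t n )
 *       {
 *           return boost::detail::quick_allocator< node >::alloc( n );
 *       }
 *
 *       void operator delete( void * p, std::size_t n )
 *       {
 *           boost::detail::quick_allocator< node >::dealloc( p, n );
 *       }
 *   };
 */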
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<title>
CubicVR 3D Engine - Javascript Port: Polygon tesselation/cutting test
</title>
<script src="../../CubicVR.js" type="text/javascript"></script>
<script type='text/javascript'>
function buildPolygon() {
var obj_tess = new CubicVR.Mesh();
// Make a material named test
var objMaterial = new CubicVR.Material("test_material");
objMaterial.max_smooth = 89.9;
objMaterial.setTexture(new CubicVR.Texture("../images/1422-diffuse.jpg"), CubicVR.enums.texture.map.COLOR);
objMaterial.specular = [1, 1, 1];
objMaterial.shininess = 0.5;
objMaterial.max_smooth = 30;
var pQuad = new CubicVR.Polygon([
[-2, -2],
[2, -2],
[2, 2],
[-2, 2]
]);
var pQuad2 = new CubicVR.Polygon([
[-0.3, -0.3],
[0.3, -0.3],
[0.3, 0.3],
[-0.3, 0.3]
]);
var pHex = new CubicVR.Polygon([
[-1, 0],
[-0.5, -1],
[0.5, -1],
[1, 0],
[0.5, 1],
[-0.5, 1]
]);
var arHexSmall = [
[-0.4, 0],
[-0.2, -0.4],
[0.2, -0.4],
[0.4, 0],
[0.2, 0.4],
[-0.2, 0.4]
];
var pHexCenter = new CubicVR.Polygon(arHexSmall);
var pHexUpper = new CubicVR.Polygon(CubicVR.polygon.addOffset(arHexSmall,[0.0,1.6]));
var pHexLower = new CubicVR.Polygon(CubicVR.polygon.addOffset(arHexSmall,[0.0,-1.6]));
var pHexLeft = new CubicVR.Polygon(CubicVR.polygon.addOffset(arHexSmall,[-1.6,0.0]));
var pHexRight = new CubicVR.Polygon(CubicVR.polygon.addOffset(arHexSmall,[1.6,0.0]));
var pStar_points = [];
var pStar_points2 = [];
var j = 0;
var nDivs = 30;
for (var i = 0; i <= Math.PI * 2.0 - Math.PI / nDivs; i += Math.PI / nDivs) {
var d = 3 + 0.4 * (j % 2);
pStar_points.push([d * Math.cos(i), d * Math.sin(i)]);
j++;
}
nDivs = 20;
for (var i = 0; i <= Math.PI * 2.0 - Math.PI / nDivs; i += Math.PI / nDivs) {
var d = 1 + 0.4 * (j % 2);
pStar_points2.push([d * Math.cos(i), d * Math.sin(i)]);
j++;
}
var shapeMesh = new CubicVR.Mesh();
shapeMesh.setFaceMaterial(objMaterial);
var pStar = new CubicVR.Polygon(pStar_points);
var pStar2 = new CubicVR.Polygon(pStar_points2);
// single cut
// pStar.cut(pStar2); // cut out the star shape
// pStar.cut(pHex); // cut out the hex shape
// multiple cuts, many small hex shapes
pStar.cut(pHexUpper);
pStar.cut(pHexLower);
pStar.cut(pHexLeft);
pStar.cut(pHexRight);
pStar.cut(pHexCenter);
// flat polygon mesh
// pStar.toMesh(shapeMesh);
// plain extrusion
// pStar.toExtrudedMesh(shapeMesh,-0.5,0.5);
// extrude+bevel front, back, front depth, front shift, back depth, back shift
// pStar.toExtrudedBeveledMesh(shapeMesh,-0.2,0.2,0.5,0.8,0.5,0.8);
// extrude+bevel obj constructor
pStar.toExtrudedBeveledMesh(shapeMesh,{
front:-0.2,
back:0.2,
frontDepth:0.3,
frontShift:0.3,
backDepth:0.3,
backShift:0.3
});
obj_tess.booleanAdd(shapeMesh);
obj_tess.triangulateQuads();
obj_tess.calcNormals();
// Create a UV Mapper and apply it to objMaterial
objMaterialMap = new CubicVR.UVMapper({
projectionMode: CubicVR.enums.uv.projection.PLANAR,
projectionAxis: CubicVR.enums.uv.axis.Y
});
objMaterialMap.apply(obj_tess, objMaterial);
obj_tess.triangulateQuads().compile();
return obj_tess;
}
function webGLStart() {
var gl = CubicVR.init(gl);
if (!gl) {
alert("Sorry your browser does not support WebGL.");
return;
}
var canvas = CubicVR.getCanvas();
var obj_tess = buildPolygon();
var light_obj = new CubicVR.Mesh();
var lightMaterial = new CubicVR.Material("lightMat");
CubicVR.genBoxObject(light_obj, 0.3, lightMaterial);
light_obj.calcNormals();
light_obj.triangulateQuads();
light_obj.compile();
CubicVR.setGlobalAmbient([0, 0, 0]);
var lights = [{
type: CubicVR.enums.light.type.POINT,
diffuse: [0.0, 0.0, 1.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [0.0, 1.0, 0.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [1.0, 0.0, 0.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [0.0, 0.0, 1.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [1.0, 0.0, 1.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [0.0, 1.0, 1.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [1.0, 1.0, 0.0],
distance: 50.0
}, {
type: CubicVR.enums.light.type.POINT,
diffuse: [0.0, 1.0, 1.0],
distance: 50.0
}];
for (var i = 0, iMax = lights.length; i < iMax; i++) {
lights[i] = new CubicVR.Light(lights[i]);
}
var camera = new CubicVR.Camera(canvas.width, canvas.height, 60);
camera.position = [10, 8, -10];
camera.target = [0, 0, 0];
CubicVR.addResizeable(camera);
var mvc = new CubicVR.MouseViewController(canvas, camera);
CubicVR.MainLoop(function(timer, gl) {
if (timer.getNumUpdates() % 30 == 0 && timer.getSeconds() != 0) {
fpsStr = "" + timer.getNumUpdates() / timer.getSeconds();
document.getElementById('fpsText').innerHTML = fpsStr.substring(0, 6);
}
var timerLastSeconds = timer.getLastUpdateSeconds();
var xp = timer.getSeconds() / 2.0;
lights[0].position = [3.0 * Math.cos(xp), (1.0 + Math.sin(xp * 1.15)) * 2, -Math.sin(xp * 0.7) * 3];
lights[1].position = [2.0 * Math.cos(xp * 2.34), (-1.0 + Math.sin(xp * 4.15)) * 2, -Math.sin(xp * 0.7) * 1];
lights[2].position = [4.0 * Math.cos(xp * 1.45), (1.0 + Math.sin(xp * 2.15)) * 2, -Math.sin(xp * 1.2) * 2];
lights[3].position = [1.0 * Math.cos(xp * 1.56), (-1.0 + Math.sin(xp * 3.15)) * 2, -Math.sin(xp * 1.3) * 2.5];
lights[4].position = [-3.0 * Math.cos(xp), -(1.0 + Math.sin(xp * 1.15)) * 2, Math.sin(xp * 0.7) * 3];
lights[5].position = [-2.0 * Math.cos(xp * 2.34), -(-1.0 + Math.sin(xp * 4.15)) * 2, Math.sin(xp * 0.7) * 1];
lights[6].position = [-4.0 * Math.cos(xp * 1.45), -(1.0 + Math.sin(xp * 2.15)) * 2, Math.sin(xp * 1.2) * 2];
lights[7].position = [-1.0 * Math.cos(xp * 1.56), -(-1.0 + Math.sin(xp * 3.15)) * 2, Math.sin(xp * 1.3) * 2.5];
camera.lookat(camera.position[0], camera.position[1], camera.position[2], camera.target[0], camera.target[1], camera.target[2], 0, 1, 0);
for (var i = 0; i < lights.length; i++) {
lights[i].position = CubicVR.vec3.multiply(lights[i].position, 7.0);
lights[i].prepare(camera);
}
var transMat;
var t;
var spd = 15.0;
var spd2 = 20.0;
var sz = 1.5;
var transMat = new CubicVR.Transform();
for (var i = -sz; i <= sz; i++) {
for (var j = -sz; j <= sz; j++) {
for (var k = -sz; k <= sz; k++) {
t = transMat.clearStack().pushMatrix().translate(i * 8, j * 8, k * 8).pushMatrix().rotate([i * 50 + xp * spd, j * 50 + xp * spd2, k * 50 + xp * 4.0]).pushMatrix().scale(1, 1, 1).getResult();
CubicVR.renderObject(obj_tess, camera, t, lights);
}
}
}
transMat.clearStack();
for (var i = 0; i < lights.length; i++) {
lightMaterial.color = lightMaterial.diffuse = lights[i].diffuse;
t = transMat.setIdentity().translate(lights[i].position[0], lights[i].position[1], lights[i].position[2]).getResult();
CubicVR.renderObject(light_obj, camera, t);
}
});
}
</script>
</head>
<body onLoad="webGLStart();">
<div style='font-family:Arial;font-size:14px;font-weight:bold;position:absolute;top:10px;left:10px;z-index:100;color:white'>
FPS:
<span id='fpsText'>
...
</span>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<?php
namespace Never5\DownloadMonitor\Dependencies\Psr\Log;
class InvalidArgumentException extends \InvalidArgumentException
{
}
| {
"pile_set_name": "Github"
} |
# Default type for anything under /firmware
type firmware_file, fs_type, contextmount_type;
type qmuxd_socket, file_type;
type sensors_socket, file_type;
type camera_socket, file_type;
type thermald_socket, file_type;
type diag_logs, file_type, data_file_type, mlstrustedobject;
type sensors_data_file, file_type, data_file_type;
type time_data_file, file_type, data_file_type;
type ramdump_data_file, file_type, data_file_type;
type kickstart_data_file, file_type, data_file_type;
type mpdecision_socket, file_type;
# Default type for anything under /firmware
type radio_efs_file, fs_type, contextmount_type;
# Persist firmware types
type persist_file, file_type;
type persist_bluetooth_file, file_type;
type persist_camera_file, file_type;
type persist_data_file, file_type;
type persist_drm_file, file_type;
type persist_sensors_file, file_type;
type persist_wifi_file, file_type;
type sysfs_rmnet, fs_type, sysfs_type;
type sysfs_mpdecision, fs_type, sysfs_type;
type sysfs_surfaceflinger, fs_type, sysfs_type;
type sysfs_smdcntl_open_timeout, fs_type, sysfs_type;
type sysfs_hardware, fs_type, sysfs_type;
type wcnss_ctrl, dev_type;
| {
"pile_set_name": "Github"
} |
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2015, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto2_maps
import (
"testing"
"github.com/gogo/protobuf/proto"
)
func TestNilMaps(t *testing.T) {
m := &AllMaps{StringToMsgMap: map[string]*FloatingPoint{"a": nil}}
data, err := proto.Marshal(m)
if err != nil {
t.Fatal(err)
}
size := m.Size()
protoSize := proto.Size(m)
marshaledSize := len(data)
if size != protoSize || marshaledSize != protoSize {
t.Errorf("size %d != protoSize %d != marshaledSize %d", size, protoSize, marshaledSize)
}
m2 := &AllMaps{}
if err := proto.Unmarshal(data, m2); err != nil {
t.Fatal(err)
}
if v, ok := m2.StringToMsgMap["a"]; !ok {
t.Error("element not in map")
} else if v != nil {
t.Errorf("element should be nil, but its %v", v)
}
}
func TestNilMapsBytes(t *testing.T) {
m := &AllMaps{StringToBytesMap: map[string][]byte{"a": nil}}
data, err := proto.Marshal(m)
if err != nil {
t.Fatal(err)
}
size := m.Size()
protoSize := proto.Size(m)
marshaledSize := len(data)
if size != protoSize || marshaledSize != protoSize {
t.Errorf("size %d != protoSize %d != marshaledSize %d", size, protoSize, marshaledSize)
}
m2 := &AllMaps{}
if err := proto.Unmarshal(data, m2); err != nil {
t.Fatal(err)
}
if v, ok := m2.StringToBytesMap["a"]; !ok {
t.Error("element not in map")
} else if len(v) != 0 {
t.Errorf("element should be empty, but its %v", v)
}
}
func TestEmptyMapsBytes(t *testing.T) {
m := &AllMaps{StringToBytesMap: map[string][]byte{"b": {}}}
data, err := proto.Marshal(m)
if err != nil {
t.Fatal(err)
}
size := m.Size()
protoSize := proto.Size(m)
marshaledSize := len(data)
if size != protoSize || marshaledSize != protoSize {
t.Errorf("size %d != protoSize %d != marshaledSize %d", size, protoSize, marshaledSize)
}
m2 := &AllMaps{}
if err := proto.Unmarshal(data, m2); err != nil {
t.Fatal(err)
}
if v, ok := m2.StringToBytesMap["b"]; !ok {
t.Error("element not in map")
} else if len(v) != 0 {
t.Errorf("element should be empty, but its %v", v)
}
}
| {
"pile_set_name": "Github"
} |
"use strict";
module.exports = function(Promise,
PromiseArray,
apiRejection,
tryConvertToPromise,
INTERNAL) {
var getDomain = Promise._getDomain;
var async = require("./async.js");
var util = require("./util.js");
var tryCatch = util.tryCatch;
var errorObj = util.errorObj;
var PENDING = {};
var EMPTY_ARRAY = [];
function MappingPromiseArray(promises, fn, limit, _filter) {
this.constructor$(promises);
this._promise._captureStackTrace();
var domain = getDomain();
this._callback = domain === null ? fn : domain.bind(fn);
this._preservedValues = _filter === INTERNAL
? new Array(this.length())
: null;
this._limit = limit;
this._inFlight = 0;
this._queue = limit >= 1 ? [] : EMPTY_ARRAY;
async.invoke(init, this, undefined);
}
util.inherits(MappingPromiseArray, PromiseArray);
function init() {this._init$(undefined, -2);}
MappingPromiseArray.prototype._init = function () {};
MappingPromiseArray.prototype._promiseFulfilled = function (value, index) {
var values = this._values;
var length = this.length();
var preservedValues = this._preservedValues;
var limit = this._limit;
if (values[index] === PENDING) {
values[index] = value;
if (limit >= 1) {
this._inFlight--;
this._drainQueue();
if (this._isResolved()) return;
}
} else {
if (limit >= 1 && this._inFlight >= limit) {
values[index] = value;
this._queue.push(index);
return;
}
if (preservedValues !== null) preservedValues[index] = value;
var callback = this._callback;
var receiver = this._promise._boundValue();
this._promise._pushContext();
var ret = tryCatch(callback).call(receiver, value, index, length);
this._promise._popContext();
if (ret === errorObj) return this._reject(ret.e);
var maybePromise = tryConvertToPromise(ret, this._promise);
if (maybePromise instanceof Promise) {
maybePromise = maybePromise._target();
if (maybePromise._isPending()) {
if (limit >= 1) this._inFlight++;
values[index] = PENDING;
return maybePromise._proxyPromiseArray(this, index);
} else if (maybePromise._isFulfilled()) {
ret = maybePromise._value();
} else {
return this._reject(maybePromise._reason());
}
}
values[index] = ret;
}
var totalResolved = ++this._totalResolved;
if (totalResolved >= length) {
if (preservedValues !== null) {
this._filter(values, preservedValues);
} else {
this._resolve(values);
}
}
};
MappingPromiseArray.prototype._drainQueue = function () {
var queue = this._queue;
var limit = this._limit;
var values = this._values;
while (queue.length > 0 && this._inFlight < limit) {
if (this._isResolved()) return;
var index = queue.pop();
this._promiseFulfilled(values[index], index);
}
};
MappingPromiseArray.prototype._filter = function (booleans, values) {
var len = values.length;
var ret = new Array(len);
var j = 0;
for (var i = 0; i < len; ++i) {
if (booleans[i]) ret[j++] = values[i];
}
ret.length = j;
this._resolve(ret);
};
MappingPromiseArray.prototype.preservedValues = function () {
return this._preservedValues;
};
function map(promises, fn, options, _filter) {
var limit = typeof options === "object" && options !== null
? options.concurrency
: 0;
limit = typeof limit === "number" &&
isFinite(limit) && limit >= 1 ? limit : 0;
return new MappingPromiseArray(promises, fn, limit, _filter);
}
Promise.prototype.map = function (fn, options) {
if (typeof fn !== "function") return apiRejection("fn must be a function\u000a\u000a See http://goo.gl/916lJJ\u000a");
return map(this, fn, options, null).promise();
};
Promise.map = function (promises, fn, options, _filter) {
if (typeof fn !== "function") return apiRejection("fn must be a function\u000a\u000a See http://goo.gl/916lJJ\u000a");
return map(promises, fn, options, _filter).promise();
};
};
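// Illustrative usage sketch (not part of the original module): mapping with a
// concurrency limit so that at most three mapper promises are pending at a
// time. fetchUrl is a hypothetical promise-returning helper; results are
// delivered in the same order as the input.
//
//   Promise.map(urls, function (url) {
//       return fetchUrl(url);
//   }, {concurrency: 3}).then(function (pages) {
//       console.log(pages.length);
//   });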
| {
"pile_set_name": "Github"
} |
#import "Base.h"
namespace Cedar { namespace Matchers {
struct BeInstanceOfMessageBuilder {
template<typename U>
static NSString * string_for_actual_value(const U & value) {
return [NSString stringWithFormat:@"%@ (%@)", value, [value class]];
}
};
class BeInstanceOf : public Base<BeInstanceOfMessageBuilder> {
private:
BeInstanceOf & operator=(const BeInstanceOf &);
public:
explicit BeInstanceOf(const Class expectedValue);
~BeInstanceOf();
// Allow default copy ctor.
template<typename U>
bool matches(const U &) const;
BeInstanceOf & or_any_subclass();
protected:
virtual NSString * failure_message_end() const;
private:
const Class expectedClass_;
bool includeSubclasses_;
};
inline BeInstanceOf be_instance_of(const Class expectedValue) {
return BeInstanceOf(expectedValue);
}
inline BeInstanceOf::BeInstanceOf(const Class expectedClass)
: Base<BeInstanceOfMessageBuilder>(), expectedClass_(expectedClass), includeSubclasses_(false) {}
inline BeInstanceOf::~BeInstanceOf() {}
inline BeInstanceOf & BeInstanceOf::or_any_subclass() {
includeSubclasses_ = true;
return *this;
}
inline /*virtual*/ NSString * BeInstanceOf::failure_message_end() const {
NSMutableString *messageEnd = [NSMutableString stringWithFormat:@"be an instance of class <%@>", expectedClass_];
if (includeSubclasses_) {
[messageEnd appendString:@", or any of its subclasses"];
}
return messageEnd;
}
#pragma mark Generic
template<typename U>
bool BeInstanceOf::matches(const U & actualValue) const {
if (includeSubclasses_) {
return [actualValue isKindOfClass:expectedClass_];
} else {
return [actualValue isMemberOfClass:expectedClass_];
}
}
}}
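// Illustrative usage sketch (not part of the original header): the matcher is
// normally used through Cedar's expect() DSL inside a spec, e.g.
//
//   expect(someObject).to(be_instance_of([NSArray class]));
//   expect(@"hello").to(be_instance_of([NSString class]).or_any_subclass());
//
// The second form is needed for class clusters such as NSString, where the
// concrete instance is a private subclass.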
| {
"pile_set_name": "Github"
} |
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AggregationRule) DeepCopyInto(out *AggregationRule) {
*out = *in
if in.ClusterRoleSelectors != nil {
in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors
*out = make([]metav1.LabelSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule.
func (in *AggregationRule) DeepCopy() *AggregationRule {
if in == nil {
return nil
}
out := new(AggregationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AggregationRule != nil {
in, out := &in.AggregationRule, &out.AggregationRule
*out = new(AggregationRule)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
func (in *ClusterRole) DeepCopy() *ClusterRole {
if in == nil {
return nil
}
out := new(ClusterRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
if in == nil {
return nil
}
out := new(ClusterRoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
if in == nil {
return nil
}
out := new(ClusterRoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRole, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
if in == nil {
return nil
}
out := new(ClusterRoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLs != nil {
in, out := &in.NonResourceURLs, &out.NonResourceURLs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
func (in *PolicyRule) DeepCopy() *PolicyRule {
if in == nil {
return nil
}
out := new(PolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Role) DeepCopyInto(out *Role) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
func (in *Role) DeepCopy() *Role {
if in == nil {
return nil
}
out := new(Role)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
func (in *RoleBinding) DeepCopy() *RoleBinding {
if in == nil {
return nil
}
out := new(RoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
func (in *RoleBindingList) DeepCopy() *RoleBindingList {
if in == nil {
return nil
}
out := new(RoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleList) DeepCopyInto(out *RoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Role, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
func (in *RoleList) DeepCopy() *RoleList {
if in == nil {
return nil
}
out := new(RoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleRef) DeepCopyInto(out *RoleRef) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef.
func (in *RoleRef) DeepCopy() *RoleRef {
if in == nil {
return nil
}
out := new(RoleRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subject) DeepCopyInto(out *Subject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
func (in *Subject) DeepCopy() *Subject {
if in == nil {
return nil
}
out := new(Subject)
in.DeepCopyInto(out)
return out
}
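The DeepCopy helpers above exist because these RBAC types hold slices, which a plain struct assignment would alias. A minimal sketch of the difference, written as if it sat in the same package as the generated code (the function name is illustrative):

func exampleRoleDeepCopy() {
	orig := &Role{Rules: []PolicyRule{{Verbs: []string{"get"}}}}
	// DeepCopy allocates fresh backing arrays for Rules and each Verbs slice,
	// so edits to the copy never reach the original.
	cp := orig.DeepCopy()
	cp.Rules[0].Verbs[0] = "list"
	// orig.Rules[0].Verbs[0] is still "get"; with a shallow copy (tmp := *orig)
	// both structs would share the same underlying Verbs array.
}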
# e2fsprogs
Ext2/3/4 filesystem userspace utilities
## Maintainers
* The Habitat Maintainers: <[email protected]>
## Type of Package
Binary package
## Usage
*TODO: Add instructions for usage*
<?xml version="1.0" encoding="UTF-8"?>
<strings>
<notfound>Metatietoja ei löydy.</notfound>
<notowner>Metatietojen oikeuksia ei päivitetty, koska et ole metatietojen omistaja.</notowner>
<updated>Päivitetyt metatiedot</updated>
<resultstitle>Oikeuspäivityksen tulokset</resultstitle>
</strings>
/*
* R : A Computer Language for Statistical Data Analysis
* Copyright (C) 1999-2016 The R Core Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, a copy is available at
* https://www.R-project.org/Licenses/.
*/
#include <R.h>
static void
burg(int n, double*x, int pmax, double *coefs, double *var1, double *var2)
{
double d, phii, *u, *v, *u0, sum;
u = (double *) R_alloc(n, sizeof(double));
v = (double *) R_alloc(n, sizeof(double));
u0 = (double *) R_alloc(n, sizeof(double));
for(int i = 0; i < pmax*pmax; i++) coefs[i] = 0.0;
sum = 0.0;
for(int t = 0; t < n; t++) {
u[t] = v[t] = x[n - 1 - t];
sum += x[t] * x[t];
}
var1[0] = var2[0] = sum/n;
for(int p = 1; p <= pmax; p++) { /* do AR(p) */
sum = 0.0;
d = 0;
for(int t = p; t < n; t++) {
sum += v[t]*u[t-1];
d += v[t]*v[t] + u[t-1]*u[t-1];
}
phii = 2*sum/d;
coefs[pmax*(p-1) + (p-1)] = phii;
if(p > 1)
for(int j = 1; j < p; j++)
coefs[p-1 + pmax*(j-1)] =
coefs[p-2 + pmax*(j-1)] - phii* coefs[p-2 + pmax*(p-j-1)];
/* update u and v */
for(int t = 0; t < n; t++)
u0[t] = u[t];
for(int t = p; t < n; t++) {
u[t] = u0[t-1] - phii * v[t];
v[t] = v[t] - phii * u0[t-1];
}
var1[p] = var1[p-1] * (1 - phii * phii);
d = 0.0;
for(int t = p; t < n; t++) d += v[t]*v[t] + u[t]*u[t];
var2[p] = d/(2.0*(n-p));
}
}
#include <Rinternals.h>
SEXP Burg(SEXP x, SEXP order)
{
x = PROTECT(coerceVector(x, REALSXP));
int n = LENGTH(x), pmax = asInteger(order);
SEXP coefs = PROTECT(allocVector(REALSXP, pmax * pmax)),
var1 = PROTECT(allocVector(REALSXP, pmax + 1)),
var2 = PROTECT(allocVector(REALSXP, pmax + 1));
burg(n, REAL(x), pmax, REAL(coefs), REAL(var1), REAL(var2));
SEXP ans = PROTECT(allocVector(VECSXP, 3));
SET_VECTOR_ELT(ans, 0, coefs);
SET_VECTOR_ELT(ans, 1, var1);
SET_VECTOR_ELT(ans, 2, var2);
UNPROTECT(5);
return ans;
}
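In the notation of burg() above, where u and v hold the (time-reversed) backward and forward prediction errors, each pass p of the outer loop is one step of Burg's recursion:

\phi_p = \frac{2\sum_{t=p}^{n-1} v_t\,u_{t-1}}{\sum_{t=p}^{n-1}\left(v_t^2 + u_{t-1}^2\right)},\qquad
a^{(p)}_p = \phi_p,\qquad
a^{(p)}_j = a^{(p-1)}_j - \phi_p\,a^{(p-1)}_{p-j}\quad(1 \le j < p),

followed by the error and variance updates

u_t \leftarrow u_{t-1} - \phi_p v_t,\qquad
v_t \leftarrow v_t - \phi_p u_{t-1},\qquad
\sigma^2_1(p) = \sigma^2_1(p-1)\left(1-\phi_p^2\right),\qquad
\sigma^2_2(p) = \frac{1}{2(n-p)}\sum_{t=p}^{n-1}\left(u_t^2 + v_t^2\right),

with var1 and var2 holding \sigma^2_1 and \sigma^2_2 respectively: the first is the variance implied by the recursion alone, the second re-estimates it from the updated prediction errors.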
/** @file
Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef __PCI_IOV_H__
#define __PCI_IOV_H__
/**
Parse PCI IOV VF bar information and fill them into PCI device instance.
@param PciIoDevice Pci device instance.
@param Offset Bar offset.
@param BarIndex Bar index.
@return Next bar offset.
**/
UINTN
PciIovParseVfBar (
IN PCI_IO_DEVICE *PciIoDevice,
IN UINTN Offset,
IN UINTN BarIndex
);
/**
Initialize SR-IOV (Single Root IO Virtualization)
@param[in,out] PciIoDevice Pointer to instance of PCI_IO_DEVICE.
@param[in] Bus Device Bus NO.
@param[in] Device Device device NO.
@param[in] Func Device func NO.
**/
VOID
EFIAPI
InitializeSrIov (
IN OUT PCI_IO_DEVICE *PciIoDevice,
IN UINT8 Bus,
IN UINT8 Device,
IN UINT8 Func
);
#endif // __PCI_IOV_H__
#!/usr/bin/env python
import argparse
from tqdm import tqdm
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
parser = argparse.ArgumentParser()
parser.add_argument("-min_child_weight", type=int, default=10)
parser.add_argument("-max_depth", type=int, default=10)
parser.add_argument("-gamma", type=float, default=5.0)
parser.add_argument("-alpha", type=float, default=5.0)
args = parser.parse_args()
def main():
# Train data from https://www.kaggle.com/c/allstate-claims-severity
train = pd.read_csv('./train.csv')
categorical_columns = train.select_dtypes(include=['object']).columns
for column in tqdm(categorical_columns):
le = LabelEncoder()
train[column] = le.fit_transform(train[column])
y = train['loss']
X = train.drop(['loss', 'id'], 1)
xgtrain = xgb.DMatrix(X, label=y)
num_rounds = 3
random_state = 2016
params = {
'eta': 0.1,
'silent': 1,
'eval_metric': 'mae',
'verbose_eval': True,
'seed': random_state
}
params["min_child_weight"] = args.min_child_weight
params["max_depth"] = args.max_depth
params["gamma"] = args.gamma
params["alpha"] = args.alpha
cv_result = xgb.cv(
params,
xgtrain,
num_boost_round=num_rounds,
nfold=5,
seed=random_state,
callbacks=[xgb.callback.early_stop(10)])
print(cv_result['test-mae-mean'].values[-1])
if __name__ == "__main__":
main()
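Because all four hyper-parameters come from argparse, the script is meant to be launched from a shell, for example: python <this file> -max_depth 8 -min_child_weight 5 -gamma 2.0 -alpha 1.0 (the file name is not given here, so it is left as a placeholder). Omitted flags fall back to the defaults declared above, and train.csv from the Allstate competition must sit in the working directory.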
//
// This file is part of Gambit
// Copyright (c) 1994-2016, The Gambit Project (http://www.gambit-project.org)
//
// FILE: src/gui/style.cc
// Display configuration class for the extensive form
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
//
#include <sstream>
#include <wx/wxprec.h>
#ifndef WX_PRECOMP
#include <wx/wx.h>
#endif // WX_PRECOMP
#include <wx/config.h>
#include "style.h"
//===========================================================================
// class gbtStyle: Implementation
//===========================================================================
gbtStyle::gbtStyle(void)
: m_font(wxFont(10, wxSWISS, wxNORMAL, wxBOLD))
{
SetDefaults();
}
static wxColour s_defaultColors[8] = {
wxColour(255, 0, 0),
wxColour(0, 0, 255),
wxColour(0, 128, 0),
wxColour(255, 128, 0),
wxColour(0, 0, 64),
wxColour(128, 0, 255),
wxColour(64, 0, 0),
wxColour(255, 128, 255)
};
//!
//! Gets the player color assigned to player number 'pl'.
//! If this is the first request for that player's color, create the
//! default one.
//!
const wxColour &gbtStyle::GetPlayerColor(int pl) const
{
while (pl > m_playerColors.Length()) {
m_playerColors.Append(s_defaultColors[m_playerColors.Length() % 8]);
}
return m_playerColors[pl];
}
void gbtStyle::SetDefaults(void)
{
m_nodeSize = 10;
m_terminalSpacing = 50;
m_chanceToken = GBT_NODE_TOKEN_DOT;
m_playerToken = GBT_NODE_TOKEN_DOT;
m_terminalToken = GBT_NODE_TOKEN_DOT;
m_rootReachable = 0;
m_branchLength = 60;
m_tineLength = 20;
m_branchStyle = GBT_BRANCH_STYLE_FORKTINE;
m_branchLabels = GBT_BRANCH_LABEL_HORIZONTAL;
m_infosetConnect = GBT_INFOSET_CONNECT_ALL;
m_infosetJoin = GBT_INFOSET_JOIN_CIRCLES;
m_nodeAboveLabel = GBT_NODE_LABEL_LABEL;
m_nodeBelowLabel = GBT_NODE_LABEL_ISETID;
m_branchAboveLabel = GBT_BRANCH_LABEL_LABEL;
m_branchBelowLabel = GBT_BRANCH_LABEL_PROBS;
m_numDecimals = 4;
m_font = wxFont(10, wxSWISS, wxNORMAL, wxBOLD);
m_chanceColor = wxColour(154, 205, 50);
m_terminalColor = *wxBLACK;
for (int pl = 1; pl <= m_playerColors.Length(); pl++) {
m_playerColors[pl] = s_defaultColors[(pl-1)%8];
}
}
std::string gbtStyle::GetColorXML(void) const
{
std::ostringstream s;
s <<"<colors>\n";
s <<"<player id=\"-1\" ";
s <<"red=\"" << ((int) m_terminalColor.Red()) << "\" ";
s <<"green=\"" << ((int) m_terminalColor.Green()) << "\" ";
s <<"blue=\"" << ((int) m_terminalColor.Blue()) << "\" ";
s <<"/>\n";
s <<"<player id=\"0\" ";
s <<"red=\"" << ((int) m_chanceColor.Red()) << "\" ";
s <<"green=\"" << ((int) m_chanceColor.Green()) << "\" ";
s <<"blue=\"" << ((int) m_chanceColor.Blue()) << "\" ";
s <<"/>\n";
for (int pl = 1; pl <= m_playerColors.Length(); pl++) {
s <<"<player id=\"" << pl << "\" ";
s <<"red=\"" << ((int) m_playerColors[pl].Red()) << "\" ";
s <<"green=\"" << ((int) m_playerColors[pl].Green()) << "\" ";
s <<"blue=\"" << ((int) m_playerColors[pl].Blue()) << "\" ";
s <<"/>\n";
}
s <<"</colors>\n";
return s.str();
}
void gbtStyle::SetColorXML(TiXmlNode *p_colors)
{
for (TiXmlNode *node = p_colors->FirstChild(); node;
node = node->NextSiblingElement()) {
int id = -2;
node->ToElement()->QueryIntAttribute("id", &id);
int red = 0, green = 0, blue = 0;
node->ToElement()->QueryIntAttribute("red", &red);
node->ToElement()->QueryIntAttribute("green", &green);
node->ToElement()->QueryIntAttribute("blue", &blue);
if (id > 0) {
// This call ensures that the player appears in the color table
GetPlayerColor(id);
SetPlayerColor(id, wxColour(red, green, blue));
}
else if (id == 0) {
SetChanceColor(wxColour(red, green, blue));
}
else if (id == -1) {
SetTerminalColor(wxColour(red, green, blue));
}
}
}
std::string gbtStyle::GetFontXML(void) const
{
std::ostringstream s;
s << "<font size=\"" << (int) m_font.GetPointSize() << "\" ";
s << "family=\"" << (int) m_font.GetFamily() << "\" ";
s << "face=\"" << (const char *) m_font.GetFaceName().mb_str() << "\" ";
s << "style=\"" << (int) m_font.GetStyle() << "\" ";
s << "weight=\"" << (int) m_font.GetWeight() << "\" ";
s << "/>\n";
return s.str();
}
void gbtStyle::SetFontXML(TiXmlNode *p_font)
{
int size, family, style, weight;
p_font->ToElement()->QueryIntAttribute("size", &size);
p_font->ToElement()->QueryIntAttribute("family", &family);
p_font->ToElement()->QueryIntAttribute("style", &style);
p_font->ToElement()->QueryIntAttribute("weight", &weight);
SetFont(wxFont(size, family, style, weight, false,
wxString(p_font->ToElement()->Attribute("face"),
*wxConvCurrent)));
}
std::string gbtStyle::GetLayoutXML(void) const
{
std::ostringstream s;
s << "<autolayout>\n";
s << "<nodes size=\"" << m_nodeSize << "\" spacing=\"" << m_terminalSpacing << "\" ";
std::string nodeTokens[] = { "line", "box", "circle", "diamond", "dot" };
s << "chance=\"" << nodeTokens[m_chanceToken] << "\" ";
s << "player=\"" << nodeTokens[m_playerToken] << "\" ";
s << "terminal=\"" << nodeTokens[m_terminalToken] << "\"/>\n";
s << "<branches size=\"" << m_branchLength << "\" tine=\"" << m_tineLength << "\" ";
std::string branchStyles[] = { "line", "forktine" };
s << "branch=\"" << branchStyles[m_branchStyle] << "\" ";
std::string branchLabels[] = { "horizontal", "rotated" };
s << "labels=\"" << branchLabels[m_branchLabels] << "\"/>\n";
std::string infosetConnect[] = { "none", "same", "all" };
s << "<infosets connect=\"" << infosetConnect[m_infosetConnect] << "\" ";
std::string infosetStyle[] = { "lines", "circles" };
s << "style=\"" << infosetStyle[m_infosetJoin] << "\"/>\n";
s << "</autolayout>\n";
return s.str();
}
void gbtStyle::SetLayoutXML(TiXmlNode *p_node)
{
TiXmlNode *nodes = p_node->FirstChild("nodes");
if (nodes) {
nodes->ToElement()->QueryIntAttribute("size", &m_nodeSize);
nodes->ToElement()->QueryIntAttribute("spacing", &m_terminalSpacing);
const char *chance = nodes->ToElement()->Attribute("chance");
if (chance) {
std::string s = chance;
if (s == "line") m_chanceToken = GBT_NODE_TOKEN_LINE;
else if (s == "box") m_chanceToken = GBT_NODE_TOKEN_BOX;
else if (s == "circle") m_chanceToken = GBT_NODE_TOKEN_CIRCLE;
else if (s == "diamond") m_chanceToken = GBT_NODE_TOKEN_DIAMOND;
else if (s == "dot") m_chanceToken = GBT_NODE_TOKEN_DOT;
}
const char *player = nodes->ToElement()->Attribute("player");
if (player) {
std::string s = player;
if (s == "line") m_playerToken = GBT_NODE_TOKEN_LINE;
else if (s == "box") m_playerToken = GBT_NODE_TOKEN_BOX;
else if (s == "circle") m_playerToken = GBT_NODE_TOKEN_CIRCLE;
else if (s == "diamond") m_playerToken = GBT_NODE_TOKEN_DIAMOND;
else if (s == "dot") m_playerToken = GBT_NODE_TOKEN_DOT;
}
const char *terminal = nodes->ToElement()->Attribute("terminal");
if (terminal) {
std::string s = terminal;
if (s == "line") m_terminalToken = GBT_NODE_TOKEN_LINE;
else if (s == "box") m_terminalToken = GBT_NODE_TOKEN_BOX;
else if (s == "circle") m_terminalToken = GBT_NODE_TOKEN_CIRCLE;
else if (s == "diamond") m_terminalToken = GBT_NODE_TOKEN_DIAMOND;
else if (s == "dot") m_terminalToken = GBT_NODE_TOKEN_DOT;
}
}
TiXmlNode *branches = p_node->FirstChild("branches");
if (branches) {
branches->ToElement()->QueryIntAttribute("size", &m_branchLength);
branches->ToElement()->QueryIntAttribute("tine", &m_tineLength);
const char *branch = branches->ToElement()->Attribute("branch");
if (branch) {
std::string s = branch;
if (s == "line") m_branchStyle = GBT_BRANCH_STYLE_LINE;
else if (s == "forktine") m_branchStyle = GBT_BRANCH_STYLE_FORKTINE;
}
const char *labels = branches->ToElement()->Attribute("labels");
if (labels) {
std::string s = labels;
if (s == "horizontal") m_branchLabels = GBT_BRANCH_LABEL_HORIZONTAL;
else if (s == "rotated") m_branchLabels = GBT_BRANCH_LABEL_ROTATED;
}
}
TiXmlNode *infosets = p_node->FirstChild("infosets");
if (infosets) {
const char *connect = infosets->ToElement()->Attribute("connect");
if (connect) {
std::string s = connect;
if (s == "none") m_infosetConnect = GBT_INFOSET_CONNECT_NONE;
else if (s == "same") m_infosetConnect = GBT_INFOSET_CONNECT_SAMELEVEL;
else if (s == "all") m_infosetConnect = GBT_INFOSET_CONNECT_ALL;
}
const char *style = infosets->ToElement()->Attribute("style");
if (style) {
std::string s = style;
if (s == "lines") m_infosetJoin = GBT_INFOSET_JOIN_LINES;
else if (s == "circles") m_infosetJoin = GBT_INFOSET_JOIN_CIRCLES;
}
}
}
std::string gbtStyle::GetLabelXML(void) const
{
std::ostringstream s;
s << "<labels ";
std::string nodeLabels[] = { "none", "label", "player",
"isetlabel", "isetid",
"realizprob", "beliefprob", "value" };
s << "abovenode=\"" << nodeLabels[m_nodeAboveLabel] << "\" ";
s << "belownode=\"" << nodeLabels[m_nodeBelowLabel] << "\" ";
std::string branchLabels[] = { "none", "label", "probs", "value" };
s << "abovebranch=\"" << branchLabels[m_branchAboveLabel] << "\" ";
s << "belowbranch=\"" << branchLabels[m_branchBelowLabel] << "\" ";
s << "/>\n";
return s.str();
}
void gbtStyle::SetLabelXML(TiXmlNode *p_node)
{
const char *abovenode = p_node->ToElement()->Attribute("abovenode");
if (abovenode) {
std::string s = abovenode;
if (s == "none") m_nodeAboveLabel = GBT_NODE_LABEL_NOTHING;
else if (s == "label") m_nodeAboveLabel = GBT_NODE_LABEL_LABEL;
else if (s == "player") m_nodeAboveLabel = GBT_NODE_LABEL_PLAYER;
else if (s == "isetlabel") m_nodeAboveLabel = GBT_NODE_LABEL_ISETLABEL;
else if (s == "isetid") m_nodeAboveLabel = GBT_NODE_LABEL_ISETID;
else if (s == "realizprob") m_nodeAboveLabel = GBT_NODE_LABEL_REALIZPROB;
else if (s == "beliefprob") m_nodeAboveLabel = GBT_NODE_LABEL_BELIEFPROB;
else if (s == "value") m_nodeAboveLabel = GBT_NODE_LABEL_VALUE;
}
const char *belownode = p_node->ToElement()->Attribute("belownode");
if (belownode) {
std::string s = belownode;
if (s == "none") m_nodeBelowLabel = GBT_NODE_LABEL_NOTHING;
else if (s == "label") m_nodeBelowLabel = GBT_NODE_LABEL_LABEL;
else if (s == "player") m_nodeBelowLabel = GBT_NODE_LABEL_PLAYER;
else if (s == "isetlabel") m_nodeBelowLabel = GBT_NODE_LABEL_ISETLABEL;
else if (s == "isetid") m_nodeBelowLabel = GBT_NODE_LABEL_ISETID;
else if (s == "realizprob") m_nodeBelowLabel = GBT_NODE_LABEL_REALIZPROB;
else if (s == "beliefprob") m_nodeBelowLabel = GBT_NODE_LABEL_BELIEFPROB;
else if (s == "value") m_nodeBelowLabel = GBT_NODE_LABEL_VALUE;
}
const char *abovebranch = p_node->ToElement()->Attribute("abovebranch");
if (abovebranch) {
std::string s = abovebranch;
if (s == "none") m_branchAboveLabel = GBT_BRANCH_LABEL_NOTHING;
else if (s == "label") m_branchAboveLabel = GBT_BRANCH_LABEL_LABEL;
else if (s == "probs") m_branchAboveLabel = GBT_BRANCH_LABEL_PROBS;
else if (s == "value") m_branchAboveLabel = GBT_BRANCH_LABEL_VALUE;
}
const char *belowbranch = p_node->ToElement()->Attribute("belowbranch");
if (belowbranch) {
std::string s = belowbranch;
if (s == "none") m_branchBelowLabel = GBT_BRANCH_LABEL_NOTHING;
else if (s == "label") m_branchBelowLabel = GBT_BRANCH_LABEL_LABEL;
else if (s == "probs") m_branchBelowLabel = GBT_BRANCH_LABEL_PROBS;
else if (s == "value") m_branchBelowLabel = GBT_BRANCH_LABEL_VALUE;
}
}
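For reference, GetColorXML() on a freshly constructed gbtStyle (before any player color has been requested, so the player loop emits nothing) produces output of this shape:

<colors>
<player id="-1" red="0" green="0" blue="0" />
<player id="0" red="154" green="205" blue="50" />
</colors>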
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"errors"
"reflect"
)
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)
// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.
const maxVarintBytes = 10 // maximum length of a varint
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
var buf [maxVarintBytes]byte
var n int
for n = 0; x > 127; n++ {
buf[n] = 0x80 | uint8(x&0x7F)
x >>= 7
}
buf[n] = uint8(x)
n++
return buf[0:n]
}
// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
for x >= 1<<7 {
p.buf = append(p.buf, uint8(x&0x7f|0x80))
x >>= 7
}
p.buf = append(p.buf, uint8(x))
return nil
}
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
switch {
case x < 1<<7:
return 1
case x < 1<<14:
return 2
case x < 1<<21:
return 3
case x < 1<<28:
return 4
case x < 1<<35:
return 5
case x < 1<<42:
return 6
case x < 1<<49:
return 7
case x < 1<<56:
return 8
case x < 1<<63:
return 9
}
return 10
}
// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24),
uint8(x>>32),
uint8(x>>40),
uint8(x>>48),
uint8(x>>56))
return nil
}
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24))
return nil
}
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
p.EncodeVarint(uint64(len(b)))
p.buf = append(p.buf, b...)
return nil
}
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
p.EncodeVarint(uint64(len(s)))
p.buf = append(p.buf, s...)
return nil
}
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
switch v.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}
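A self-contained sketch of the two encodings documented above; it re-implements the rules rather than calling the Buffer methods, so it can be compiled on its own:

package main

import "fmt"

// varint: 7 payload bits per byte, high bit set on every byte except the last.
func encodeVarint(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

// zigzag: maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... so small negative
// numbers still encode as short varints.
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63) // arithmetic shift copies the sign bit
}

func main() {
	fmt.Printf("% x\n", encodeVarint(300))               // ac 02
	fmt.Println(zigzag64(-1), zigzag64(1), zigzag64(-2)) // 1 2 3
}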
<?php
/*
* This file is part of the league/commonmark package.
*
* (c) Colin O'Dell <[email protected]>
*
* Original code based on the CommonMark JS reference parser (https://bitly.com/commonmark-js)
* - (c) John MacFarlane
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace League\CommonMark\Block\Element;
use League\CommonMark\ContextInterface;
use League\CommonMark\Cursor;
use League\CommonMark\Node\Node;
/**
* Block-level element
*
* @method parent() ?AbstractBlock
*/
abstract class AbstractBlock extends Node
{
/**
* Used for storage of arbitrary data.
*
* @var array<string, mixed>
*/
public $data = [];
/**
* @var bool
*/
protected $open = true;
/**
* @var bool
*/
protected $lastLineBlank = false;
/**
* @var int
*/
protected $startLine;
/**
* @var int
*/
protected $endLine;
protected function setParent(Node $node = null)
{
if ($node && !$node instanceof self) {
throw new \InvalidArgumentException('Parent of block must also be block (can not be inline)');
}
parent::setParent($node);
}
public function isContainer(): bool
{
return true;
}
/**
* @return bool
*/
public function hasChildren(): bool
{
return $this->firstChild !== null;
}
/**
* Returns true if this block can contain the given block as a child node
*
* @param AbstractBlock $block
*
* @return bool
*/
abstract public function canContain(AbstractBlock $block): bool;
/**
* Whether this is a code block
*
* Code blocks are extra-greedy - they'll try to consume all subsequent
* lines of content without calling matchesNextLine() each time.
*
* @return bool
*/
abstract public function isCode(): bool;
/**
* @param Cursor $cursor
*
* @return bool
*/
abstract public function matchesNextLine(Cursor $cursor): bool;
/**
* @param int $startLine
*
* @return $this
*/
public function setStartLine(int $startLine)
{
$this->startLine = $startLine;
if (empty($this->endLine)) {
$this->endLine = $startLine;
}
return $this;
}
/**
* @return int
*/
public function getStartLine(): int
{
return $this->startLine;
}
/**
* @param int $endLine
*
* @return $this
*/
public function setEndLine(int $endLine)
{
$this->endLine = $endLine;
return $this;
}
/**
* @return int
*/
public function getEndLine(): int
{
return $this->endLine;
}
/**
* Whether the block ends with a blank line
*
* @return bool
*/
public function endsWithBlankLine(): bool
{
return $this->lastLineBlank;
}
/**
* @param bool $blank
*
* @return void
*/
public function setLastLineBlank(bool $blank)
{
$this->lastLineBlank = $blank;
}
/**
* Determines whether the last line should be marked as blank
*
* @param Cursor $cursor
* @param int $currentLineNumber
*
* @return bool
*/
public function shouldLastLineBeBlank(Cursor $cursor, int $currentLineNumber): bool
{
return $cursor->isBlank();
}
/**
* Whether the block is open for modifications
*
* @return bool
*/
public function isOpen(): bool
{
return $this->open;
}
/**
* Finalize the block; mark it closed for modification
*
* @param ContextInterface $context
* @param int $endLineNumber
*
* @return void
*/
public function finalize(ContextInterface $context, int $endLineNumber)
{
if (!$this->open) {
return;
}
$this->open = false;
$this->endLine = $endLineNumber;
// This should almost always be true
if ($context->getTip() !== null) {
$context->setTip($context->getTip()->parent());
}
}
/**
* @param string $key
* @param mixed $default
*
* @return mixed
*/
public function getData(string $key, $default = null)
{
return \array_key_exists($key, $this->data) ? $this->data[$key] : $default;
}
}
; PickedEntity Example
; --------------------
Graphics3D 640,480,0,2
SetBuffer BackBuffer()
camera=CreateCamera()
PositionEntity camera,0,2,-10
light=CreateLight()
RotateEntity light,90,0,0
plane=CreatePlane()
EntityPickMode plane,2 ; Make the plane entity 'pickable'. Use pick_geometry mode no.2 for polygon collision.
ground_tex=LoadTexture("media/Chorme-2.bmp")
EntityTexture plane,ground_tex
cube=CreateCube()
EntityPickMode cube,2 ; Make the cube entity 'pickable'. Use pick_geometry mode no.2 for polygon collision.
cube_tex=LoadTexture("media/b3dlogo.jpg")
EntityTexture cube,cube_tex
PositionEntity cube,0,1,0
While Not KeyDown( 1 )
If KeyDown( 205 )=True Then TurnEntity camera,0,-1,0
If KeyDown( 203 )=True Then TurnEntity camera,0,1,0
If KeyDown( 208 )=True Then MoveEntity camera,0,0,-0.05
If KeyDown( 200 )=True Then MoveEntity camera,0,0,0.05
; If left mouse button hit then use CameraPick with mouse coordinates
; In this example, only three things can be picked: the plane, the cube, or nothing
If MouseHit(1)=True Then CameraPick(camera,MouseX(),MouseY())
RenderWorld
Text 0,0,"Use cursor keys to move about"
Text 0,20,"Press left mouse button to use CameraPick with mouse coordinates"
Text 0,40,"PickedX: "+PickedX#()
Text 0,60,"PickedY: "+PickedY#()
Text 0,80,"PickedZ: "+PickedZ#()
Text 0,100,"PickedNX: "+PickedNX#()
Text 0,120,"PickedNY: "+PickedNY#()
Text 0,140,"PickedNZ: "+PickedNZ#()
Text 0,160,"PickedTime: "+PickedTime#()
Text 0,180,"PickedEntity: "+PickedEntity()
Text 0,200,"PickedSurface: "+PickedSurface()
Text 0,220,"PickedTriangle: "+PickedTriangle()
Flip
Wend
End
using System;
namespace JavaScriptEngineSwitcher.ChakraCore.JsRt
{
/// <summary>
/// The script context
/// </summary>
/// <remarks>
/// <para>
/// Each script context contains its own global object, distinct from the global object in
/// other script contexts.
/// </para>
/// <para>
/// Many Chakra hosting APIs require an "active" script context, which can be set using
/// Current. Chakra hosting APIs that require a current context to be set will note
/// that explicitly in their documentation.
/// </para>
/// </remarks>
internal struct JsContext
{
/// <summary>
/// The reference
/// </summary>
private readonly IntPtr _reference;
/// <summary>
/// Gets an invalid context
/// </summary>
public static JsContext Invalid
{
get { return new JsContext(IntPtr.Zero); }
}
/// <summary>
/// Gets or sets the current script context on the thread
/// </summary>
public static JsContext Current
{
get
{
JsContext reference;
JsErrorHelpers.ThrowIfError(NativeMethods.JsGetCurrentContext(out reference));
return reference;
}
set
{
JsErrorHelpers.ThrowIfError(NativeMethods.JsSetCurrentContext(value));
}
}
/// <summary>
/// Gets a value indicating whether the runtime of the current context is in an exception state
/// </summary>
/// <remarks>
/// <para>
/// If a call into the runtime results in an exception (either as the result of running a
/// script or due to something like a conversion failure), the runtime is placed into an
/// "exception state." All calls into any context created by the runtime (except for the
/// exception APIs) will fail with <c>InExceptionState</c> until the exception is
/// cleared.
/// </para>
/// <para>
/// If the runtime of the current context is in the exception state when a callback returns
/// into the engine, the engine will automatically rethrow the exception.
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
public static bool HasException
{
get
{
bool hasException;
JsErrorHelpers.ThrowIfError(NativeMethods.JsHasException(out hasException));
return hasException;
}
}
/// <summary>
/// Gets the runtime that the context belongs to
/// </summary>
public JsRuntime Runtime
{
get
{
JsRuntime handle;
JsErrorHelpers.ThrowIfError(NativeMethods.JsGetRuntime(this, out handle));
return handle;
}
}
/// <summary>
/// Gets a value indicating whether the context is a valid context or not
/// </summary>
public bool IsValid
{
get { return _reference != IntPtr.Zero; }
}
/// <summary>
/// Initializes a new instance of the <see cref="JsContext"/> struct
/// </summary>
/// <param name="reference">The reference</param>
internal JsContext(IntPtr reference)
{
_reference = reference;
}
/// <summary>
/// Tells the runtime to do any idle processing it needs to do
/// </summary>
/// <remarks>
/// <para>
/// If idle processing has been enabled for the current runtime, calling <c>Idle</c> will
/// inform the current runtime that the host is idle and that the runtime can perform
/// memory cleanup tasks.
/// </para>
/// <para>
/// <c>Idle</c> will also return the number of system ticks until there will be more idle work
/// for the runtime to do. Calling <c>Idle</c> before this number of ticks has passed will do
/// no work.
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <returns>
/// The next system tick when there will be more idle work to do. Returns the
/// maximum number of ticks if there is no upcoming idle work to do.
/// </returns>
public static uint Idle()
{
uint ticks;
JsErrorHelpers.ThrowIfError(NativeMethods.JsIdle(out ticks));
return ticks;
}
/// <summary>
/// Parses a script and returns a function representing the script
/// </summary>
/// <remarks>
/// Requires an active script context.
/// </remarks>
/// <param name="script">The script to parse</param>
/// <param name="sourceContext">A cookie identifying the script that can be used
/// by debuggable script contexts</param>
/// <param name="sourceUrl">The location the script came from</param>
/// <param name="parseAttributes">Attribute mask for parsing the script</param>
/// <returns>A function representing the script code</returns>
public static JsValue ParseScript(string script, JsSourceContext sourceContext, string sourceUrl,
ref JsParseScriptAttributes parseAttributes)
{
JsValue scriptValue = JsValue.FromString(script);
scriptValue.AddRef();
JsValue sourceUrlValue = JsValue.FromString(sourceUrl);
sourceUrlValue.AddRef();
JsValue result;
try
{
JsErrorCode errorCode = NativeMethods.JsParse(scriptValue, sourceContext, sourceUrlValue,
parseAttributes, out result);
JsErrorHelpers.ThrowIfError(errorCode);
}
finally
{
scriptValue.Release();
sourceUrlValue.Release();
}
return result;
}
/// <summary>
/// Parses a serialized script and returns a function representing the script
/// </summary>
/// <remarks>
/// <para>Requires an active script context.</para>
/// <para>The runtime will hold on to the buffer until all instances of any functions created from
/// the buffer are garbage collected.</para>
/// </remarks>
/// <param name="script">The script to parse</param>
/// <param name="buffer">The serialized script</param>
/// <param name="scriptLoadCallback">Callback to load the source code of the serialized script</param>
/// <param name="sourceContext">A cookie identifying the script that can be used
/// by debuggable script contexts</param>
/// <param name="sourceUrl">The location the script came from</param>
/// <returns>A function representing the script code</returns>
public static JsValue ParseSerializedScript(string script, byte[] buffer,
JsSerializedLoadScriptCallback scriptLoadCallback, JsSourceContext sourceContext, string sourceUrl)
{
JsValue bufferValue = JsValue.CreateExternalArrayBuffer(buffer);
bufferValue.AddRef();
JsValue sourceUrlValue = JsValue.FromString(sourceUrl);
sourceUrlValue.AddRef();
JsValue result;
try
{
JsErrorCode errorCode = NativeMethods.JsParseSerialized(bufferValue, scriptLoadCallback,
sourceContext, sourceUrlValue, out result);
JsErrorHelpers.ThrowIfError(errorCode);
}
finally
{
bufferValue.Release();
sourceUrlValue.Release();
}
return result;
}
/// <summary>
/// Executes a script
/// </summary>
/// <remarks>
/// Requires an active script context.
/// </remarks>
/// <param name="script">The script to run</param>
/// <param name="sourceContext">A cookie identifying the script that can be used
/// by debuggable script contexts</param>
/// <param name="sourceUrl">The location the script came from</param>
/// <param name="parseAttributes">Attribute mask for parsing the script</param>
/// <returns>The result of the script, if any</returns>
public static JsValue RunScript(string script, JsSourceContext sourceContext, string sourceUrl,
ref JsParseScriptAttributes parseAttributes)
{
JsValue scriptValue = JsValue.FromString(script);
scriptValue.AddRef();
JsValue sourceUrlValue = JsValue.FromString(sourceUrl);
sourceUrlValue.AddRef();
JsValue result;
try
{
JsErrorCode errorCode = NativeMethods.JsRun(scriptValue, sourceContext, sourceUrlValue,
parseAttributes, out result);
JsErrorHelpers.ThrowIfError(errorCode);
}
finally
{
scriptValue.Release();
sourceUrlValue.Release();
}
return result;
}
/// <summary>
/// Runs a serialized script
/// </summary>
/// <remarks>
/// <para>Requires an active script context.</para>
/// <para>The runtime will detach the data from the buffer and hold on to it until all
/// instances of any functions created from the buffer are garbage collected.</para>
/// </remarks>
/// <param name="script">The source code of the serialized script</param>
/// <param name="buffer">The serialized script</param>
/// <param name="scriptLoadCallback">Callback to load the source code of the serialized script</param>
/// <param name="sourceContext">A cookie identifying the script that can be used
/// by debuggable script contexts</param>
/// <param name="sourceUrl">The location the script came from</param>
/// <returns>The result of running the script, if any</returns>
public static JsValue RunSerializedScript(string script, byte[] buffer,
JsSerializedLoadScriptCallback scriptLoadCallback, JsSourceContext sourceContext, string sourceUrl)
{
JsValue bufferValue = JsValue.CreateExternalArrayBuffer(buffer);
bufferValue.AddRef();
JsValue sourceUrlValue = JsValue.FromString(sourceUrl);
sourceUrlValue.AddRef();
JsValue result;
try
{
JsErrorCode errorCode = NativeMethods.JsRunSerialized(bufferValue, scriptLoadCallback,
sourceContext, sourceUrlValue, out result);
JsErrorHelpers.ThrowIfError(errorCode);
}
finally
{
bufferValue.Release();
sourceUrlValue.Release();
}
return result;
}
/// <summary>
/// Serializes a parsed script to a buffer than can be reused
/// </summary>
/// <remarks>
/// <para>
/// <c>SerializeScript</c> parses a script and then stores the parsed form of the script in a
/// runtime-independent format. The serialized script then can be deserialized in any
/// runtime without requiring the script to be re-parsed.
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <param name="script">The script to serialize</param>
/// <param name="parseAttributes">Attribute mask for parsing the script</param>
/// <returns>The buffer to put the serialized script into</returns>
public static byte[] SerializeScript(string script, ref JsParseScriptAttributes parseAttributes)
{
JsValue scriptValue = JsValue.FromString(script);
scriptValue.AddRef();
JsValue bufferValue;
try
{
JsErrorCode errorCode = NativeMethods.JsSerialize(scriptValue, out bufferValue, parseAttributes);
JsErrorHelpers.ThrowIfError(errorCode);
}
finally
{
scriptValue.Release();
}
byte[] buffer = bufferValue.ArrayBufferBytes;
return buffer;
}
/// <summary>
/// Returns the exception that caused the runtime of the current context to be in the
/// exception state and resets the exception state for that runtime
/// </summary>
/// <remarks>
/// <para>
/// If the runtime of the current context is not in an exception state, this API will throw
/// <see cref="JsErrorCode.InvalidArgument"/>. If the runtime is disabled, this will return
/// an exception indicating that the script was terminated, but it will not clear the exception
/// (the exception will be cleared if the runtime is re-enabled using
/// <c>JsEnableRuntimeExecution</c>).
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <returns>The exception for the runtime of the current context</returns>
public static JsValue GetAndClearException()
{
JsValue exception;
JsErrorHelpers.ThrowIfError(NativeMethods.JsGetAndClearException(out exception));
return exception;
}
/// <summary>
/// Returns metadata relating to the exception that caused the runtime of the current context
/// to be in the exception state and resets the exception state for that runtime. The metadata
/// includes a reference to the exception itself.
/// </summary>
/// <remarks>
/// <para>
/// If the runtime of the current context is not in an exception state, this API will throw
/// <see cref="JsErrorCode.InvalidArgument"/>. If the runtime is disabled, this will return
/// an exception indicating that the script was terminated, but it will not clear the exception
/// (the exception will be cleared if the runtime is re-enabled using
/// <c>JsEnableRuntimeExecution</c>).
/// </para>
/// <para>
/// The metadata value is a javascript object with the following properties: <c>exception</c>, the
/// thrown exception object; <c>line</c>, the 0 indexed line number where the exception was thrown;
/// <c>column</c>, the 0 indexed column number where the exception was thrown; <c>length</c>, the
/// source-length of the cause of the exception; <c>source</c>, a string containing the line of
/// source code where the exception was thrown; and <c>url</c>, a string containing the name of
/// the script file containing the code that threw the exception.
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <returns>The exception metadata for the runtime of the current context</returns>
public static JsValue GetAndClearExceptionWithMetadata()
{
JsValue metadata;
JsErrorHelpers.ThrowIfError(NativeMethods.JsGetAndClearExceptionWithMetadata(out metadata));
return metadata;
}
/// <summary>
/// Sets the runtime of the current context to an exception state
/// </summary>
/// <remarks>
/// <para>
/// If the runtime of the current context is already in an exception state, this API will
/// throw <c>JsErrorInExceptionState</c>.
/// </para>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <param name="exception">The JavaScript exception to set for the runtime of the current context</param>
public static void SetException(JsValue exception)
{
JsErrorHelpers.ThrowIfError(NativeMethods.JsSetException(exception));
}
/// <summary>
/// Sets a promise continuation callback function that is called by the context when a task
/// needs to be queued for future execution
/// </summary>
/// <remarks>
/// <para>
/// Requires an active script context.
/// </para>
/// </remarks>
/// <param name="promiseContinuationCallback">The callback function being set</param>
/// <param name="callbackState">User provided state that will be passed back to the callback</param>
public static void SetPromiseContinuationCallback(JsPromiseContinuationCallback promiseContinuationCallback,
IntPtr callbackState)
{
JsErrorHelpers.ThrowIfError(NativeMethods.JsSetPromiseContinuationCallback(promiseContinuationCallback,
callbackState));
}
/// <summary>
/// Adds a reference to a script context
/// </summary>
/// <remarks>
/// Calling AddRef ensures that the context will not be freed until Release is called.
/// </remarks>
/// <returns>The object's new reference count</returns>
public uint AddRef()
{
uint count;
JsErrorHelpers.ThrowIfError(NativeMethods.JsContextAddRef(this, out count));
return count;
}
/// <summary>
/// Releases a reference to a script context
/// </summary>
/// <remarks>
/// Removes a reference to a context that was created by AddRef.
/// </remarks>
/// <returns>The object's new reference count</returns>
public uint Release()
{
uint count;
JsErrorHelpers.ThrowIfError(NativeMethods.JsContextRelease(this, out count));
return count;
}
}
}
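Taken together, the remarks above describe the usual embedding sequence: assign a context to JsContext.Current, run code through RunScript or ParseScript, and if HasException reports that the runtime has entered the exception state, drain it with GetAndClearException (or GetAndClearExceptionWithMetadata) before making further calls, since everything except the exception APIs fails until then. SerializeScript paired with ParseSerializedScript or RunSerializedScript lets a host pay the parse cost once and reuse the serialized buffer later.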
<base:PageBase
x:Class="eShop.UWP.Views.Login.LoginView"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:templateSelectors="using:eShop.UWP.TemplateSelectors"
xmlns:base="using:eShop.UWP.Views.Base"
xmlns:login="using:eShop.UWP.Views.Login"
Margin="{StaticResource LoginBackgroundMargin}"
DataContext="{Binding LoginViewModel, Source={StaticResource Locator}}"
mc:Ignorable="d">
<Page.Resources>
<DataTemplate x:Key="SignInWithHelloTemplate">
<login:SignInWithHelloView />
</DataTemplate>
<DataTemplate x:Key="SignInWithPasswordTemplate">
<login:SignInWithPasswordView />
</DataTemplate>
<templateSelectors:LoginMethodTemplateSelector
x:Key="LoginMethodTemplateSelector"
SignInWithHelloTemplate="{StaticResource SignInWithHelloTemplate}"
SignInWithPasswordTemplate="{StaticResource SignInWithPasswordTemplate}" />
</Page.Resources>
<Grid x:Name="ContentArea">
<Grid HorizontalAlignment="Stretch" VerticalAlignment="Stretch">
<ContentControl
x:Name="LoginHost"
HorizontalAlignment="Stretch"
VerticalAlignment="Stretch"
HorizontalContentAlignment="Stretch"
VerticalContentAlignment="Stretch"
Content="{Binding LoginMethod}"
ContentTemplateSelector="{StaticResource LoginMethodTemplateSelector}" />
</Grid>
<Image
x:Uid="Login_Logo"
Margin="40,20,0,0"
HorizontalAlignment="Left"
VerticalAlignment="Top"
Stretch="None" />
<!-- Adaptive triggers -->
<VisualStateManager.VisualStateGroups>
<VisualStateGroup x:Name="WindowStates">
<VisualState x:Name="WideState">
<VisualState.StateTriggers>
<AdaptiveTrigger MinWindowWidth="900" />
</VisualState.StateTriggers>
</VisualState>
<VisualState x:Name="NarrowState">
<VisualState.StateTriggers>
<AdaptiveTrigger MinWindowWidth="0" />
</VisualState.StateTriggers>
<VisualState.Setters>
<Setter Target="TitlePage.Margin" Value="0" />
</VisualState.Setters>
</VisualState>
</VisualStateGroup>
</VisualStateManager.VisualStateGroups>
</Grid>
</base:PageBase>
package com.blogcode;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
@RunWith(SpringRunner.class)
@SpringBootTest
public class GradleTaskApplicationTests {
@Test
public void contextLoads() {
}
}
#include <../../nrnconf.h>
/* /local/src/master/nrn/src/nrnoc/fstim.c,v 1.2 1997/08/15 13:04:11 hines Exp */
/* copy of synapse.c modified to simulate current stimulus pulses */
/* 4/9/2002 modified to conform to new treeset.c */
/*
fstim(maxnum)
allocates space for maxnum stimuli. Space for
previously existing stimuli is released. All stimuli are initialized to
0 magnitude.
fstim(i, loc, delay, duration, stim)
The ith current stimulus is injected at parameter `loc'
different current stimuli do not concatenate but can ride on top of
each other. delay refers to onset of stimulus relative to t=0
delay and duration are in msec.
stim in namps.
fstimi(i)
returns stimulus current for ith stimulus at the value of the
global time t.
*/
#include <stdlib.h>
#include "neuron.h"
#include "section.h"
#include "nrniv_mf.h"
typedef struct Stimulus {
double loc; /* parameter location (0--1) */
double delay; /* value of t in msec for onset */
double duration;/* turns off at t = delay + duration */
double mag; /* magnitude in namps */
double mag_seg; /* value added to rhs, depends on area of seg*/
Node *pnd; /* segment location */
Section* sec;
} Stimulus;
static int maxstim = 0; /* size of stimulus array */
static Stimulus *pstim; /* pointer to stimulus array */
static void free_stim(void);
static void stim_record(int);
#define nt_t nrn_threads->_t
void print_stim() {
int i;
if (maxstim == 0) return;
/*SUPPRESS 440*/
Printf("fstim(%d)\n/* section fstim( #, loc, delay(ms), duration(ms), magnitude(namp)) */\n", maxstim);
for (i=0; i<maxstim; i++) {
Printf("%-15s fstim(%2d,%4g,%10g,%13g,%16g)\n",
secname(pstim[i].sec), i,
pstim[i].loc, pstim[i].delay, pstim[i].duration, pstim[i].mag);
}
}
static double stimulus();
void fstimi(void) {
int i;
double cur;
i = chkarg(1, 0., (double)(maxstim-1));
if ((cur = stimulus(i)) != 0.) {
cur = pstim[i].mag;
}
hoc_retpushx(cur);
}
void fstim(void) {
int i;
if (nrn_nthread > 1) {
hoc_execerror("fstim does not allow threads", "");
}
i = chkarg(1, 0., 10000.);
if (ifarg(2)) {
if (i >= maxstim) {
hoc_execerror("index out of range", (char *)0);
}
pstim[i].loc = chkarg(2, 0., 1.);
pstim[i].delay = chkarg(3, 0., 1e21);
pstim[i].duration = chkarg(4, 0., 1e21);
pstim[i].mag = *getarg(5);
pstim[i].sec = chk_access();
section_ref(pstim[i].sec);
stim_record(i);
} else {
free_stim();
maxstim = i;
if (maxstim) {
pstim = (Stimulus *)emalloc((unsigned)(maxstim * sizeof(Stimulus)));
}
for (i = 0; i<maxstim; i++) {
pstim[i].loc = 0;
pstim[i].mag = 0.;
pstim[i].delay = 1e20;
pstim[i].duration = 0.;
pstim[i].sec = 0;
stim_record(i);
}
}
hoc_retpushx(0.);
}
static void free_stim(void) {
int i;
if (maxstim) {
for (i=0; i < maxstim; ++i) {
if (pstim[i].sec) {
section_unref(pstim[i].sec);
}
}
free((char *)pstim);
maxstim=0;
}
}
static void stim_record(int i) /*fill in the section info*/
{
Node *node_ptr();
double area;
Section* sec;
sec = pstim[i].sec;
if (sec) {
if (sec->prop) {
pstim[i].pnd = node_ptr(sec, pstim[i].loc, &area);
pstim[i].mag_seg = 1.e2*pstim[i].mag / area;
}else{
section_unref(sec);
pstim[i].sec = 0;
}
}
}
void stim_prepare(void) {
int i;
for (i=0; i<maxstim; i++) {
stim_record(i);
}
}
static double stimulus(int i)
{
#if CVODE
at_time(nrn_threads, pstim[i].delay);
at_time(nrn_threads, pstim[i].delay + pstim[i].duration);
#endif
if (nt_t < pstim[i].delay-1e-9
|| nt_t > pstim[i].delay + pstim[i].duration - 1e-9) {
return 0.0;
}
return pstim[i].mag_seg;
}
void activstim_rhs(void) {
int i;
for (i=0; i<maxstim; i++) {
if (pstim[i].sec) {
NODERHS(pstim[i].pnd) += stimulus(i);
}
}
}
Rails.application.routes.draw do
# For details on the DSL available within this file, see http://guides.rubyonrails.org/routing.html
# Serve websocket cable requests in-process
# mount ActionCable.server => '/cable'
resources :places, except: [:update, :edit, :destroy]
resources :distances, only: [:new, :create]
root 'places#index'
end
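Given the except/only options, this file exposes places#index, places#show, places#new and places#create (edit, update and destroy are excluded), distances#new and distances#create, and the root path, which also resolves to places#index.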
sql = [
"""CREATE TEMPORARY TABLE node_change_old AS SELECT * FROM node_change;""",
"""DROP TABLE node_change;""",
"""CREATE TABLE node_change (
rev text,
path text,
kind char(1),
change char(1),
base_path text,
base_rev text,
UNIQUE(rev, path, change)
);""",
"""INSERT INTO node_change (rev,path,kind,change,base_path,base_rev)
SELECT rev,path,kind,change,base_path,base_rev FROM node_change_old;""",
"""DROP TABLE node_change_old;"""
]
def do_upgrade(env, ver, cursor):
# Wiki pages were accidentially created with the version number starting at
# 0 instead of 1; This should fix that
cursor.execute("SELECT name, version FROM wiki WHERE name IN "
"(SELECT name FROM wiki WHERE version=0) ORDER BY name,"
"version DESC")
result = cursor.fetchall()
if result:
cursor.executemany("UPDATE wiki SET version=version+1 WHERE name=%s "
"and version=%s",
[tuple(row) for row in result])
# Correct difference between db_default.py and upgrades/db10.py: The
# 'change' was missing from the uniqueness constraint
for s in sql:
cursor.execute(s)
#122_Cleaning_Up_The_Beach_In_Chiba__Japan_pick_f_nm_np1_fr_bad_1.avi 1
#122_Cleaning_Up_The_Beach_In_Chiba__Japan_pick_f_nm_np1_le_bad_2.avi 1
#122_Cleaning_Up_The_Beach_In_Chiba__Japan_pick_f_nm_np1_ri_bad_0.avi 1
AMADEUS_pick_u_nm_np1_fr_med_8.avi 2
AboutABoy_pick_h_nm_np1_le_goo_9.avi 1
BuriedTreasure_pick_u_cm_np1_le_goo_1.avi 1
Catch_Me_If_You_Can_pick_f_cm_np1_ri_med_5.avi 1
Clay_sBasketballSkillz_pick_f_nm_np1_ba_med_0.avi 2
Clay_sBasketballSkillz_pick_f_nm_np1_ri_med_8.avi 2
Cleaning_The_Beach_Jeton_Bardhi__Dimitar_Chaprazov_pick_f_cm_np3_ba_bad_3.avi 2
Cleaning_The_Beach_Jeton_Bardhi__Dimitar_Chaprazov_pick_f_cm_np5_le_bad_1.avi 2
Cleaning_The_Beach_Jeton_Bardhi__Dimitar_Chaprazov_pick_f_cm_np5_le_bad_2.avi 2
Collecting_litter_on_the_woodland_ground_surface_pick_f_nm_np1_fr_med_0.avi 1
David_Letterman_picks_up_trash_on_Broadway_pick_f_cm_np1_fr_med_0.avi 2
David_Letterman_picks_up_trash_on_Broadway_pick_f_cm_np3_ri_med_1.avi 2
Derren_Brown_-_Abondoned_Wallet_Trick_pick_f_cm_np1_le_med_0.avi 1
Die_Pfandpiraten_Doku_pick_f_cm_np1_ba_med_1.avi 1
Die_Pfandpiraten_Doku_pick_f_cm_np1_ba_med_2.avi 1
Die_Pfandpiraten_Doku_pick_f_cm_np1_le_med_0.avi 1
Dollar_Prank_at_Wally_World_pick_f_nm_np1_fr_med_0.avi 1
Dollar_Prank_at_Wally_World_pick_f_nm_np1_le_med_1.avi 1
Dollar_Prank_at_Wally_World_pick_f_nm_np1_le_med_2.avi 1
Dollar_Prank_at_the_Rivertown_Mall_pick_f_cm_np1_le_med_0.avi 2
Dollar_Prank_at_the_Rivertown_Mall_pick_f_nm_np1_fr_med_1.avi 2
Dollar_Prank_at_the_Rivertown_Mall_pick_f_nm_np1_fr_med_3.avi 2
Dollar_Prank_at_the_Rivertown_Mall_pick_f_nm_np1_le_med_2.avi 2
Eco-Tex_picks_up_Trash_pick_f_cm_np1_ri_med_0.avi 0
EverythingisIlluminated_pick_u_cm_np1_ba_goo_3.avi 1
Fishing_For_People_pick_f_cm_np1_fr_med_3.avi 1
Fishing_For_People_pick_f_cm_np1_ri_med_2.avi 1
Fishing_For_People_pick_f_cm_np1_ri_med_5.avi 1
Fishing_For_People_pick_f_cm_np2_le_med_4.avi 1
Fishing_For_People_pick_f_cm_np2_ri_med_1.avi 1
Fishing_For_People_pick_f_nm_np2_ba_med_0.avi 1
Flaschenrolf_auf_Tour_pick_f_cm_np1_ba_bad_0.avi 1
Flaschenrolf_auf_Tour_pick_u_cm_np1_ba_bad_1.avi 1
GardenWiseB256kb_pick_f_nm_np1_le_med_0.avi 1
Gattaca_pick_f_nm_np1_fr_med_1.avi 1
Gattaca_pick_f_nm_np1_ri_med_0.avi 1
IamLegendII_pick_u_cm_np1_fr_med_2.avi 1
IamLegendII_pick_u_cm_np1_le_med_3.avi 1
IamLegendII_pick_u_nm_np1_fr_bad_0.avi 1
IamLegendII_pick_u_nm_np1_fr_med_1.avi 1
Juno_pick_u_cm_np1_le_bad_9.avi 1
Juno_pick_u_nm_np1_fr_goo_2.avi 1
M_ll_sammeln_pick_f_cm_np1_fr_bad_0.avi 1
Magic_Boys-_Der_Portmonai_Trick_pick_l_nm_np1_ri_med_0.avi 1
Mushroom_search_pick_f_cm_np2_le_med_0.avi 1
NH-__Open_Carry_Litter_Pickup_in__troubled__neighborhood_pick_f_nm_np1_le_med_0.avi 2
NH_gun_owners_react_to_detention_with_armed_litter_pickup_pick_f_cm_np1_ba_med_0.avi 1
NH_gun_owners_react_to_detention_with_armed_litter_pickup_pick_f_cm_np3_ba_bad_1.avi 1
NH_gun_owners_react_to_detention_with_armed_litter_pickup_pick_u_cm_np1_ba_med_2.avi 1
NH_open_carry_litter_pickup_spurs_stunning_encountrs__1_of_2_pick_f_cm_np1_fr_med_0.avi 1
NoCountryForOldMen_pick_u_nm_np1_le_goo_0.avi 1
Oceans13_pick_u_cm_np1_fr_med_2.avi 2
Oceans13_pick_u_nm_np1_ba_goo_5.avi 2
Oceans13_pick_u_nm_np1_ri_goo_7.avi 2
People_Fishing_pick_f_cm_np2_ri_med_0.avi 2
Pick_Up_The_Phone_pick_u_nm_np1_fr_med_0.avi 1
Pick_Up_Your_Trash!_pick_f_cm_np1_ba_med_1.avi 1
Pick_Up_Your_Trash!_pick_f_cm_np1_le_med_0.avi 1
Pilzesuchen_pick_f_cm_np1_fr_med_0.avi 1
Portemonnaie-Trick_Openair_Lumnezia_2008_Nr__2_pick_f_cm_np2_fr_med_0.avi 1
Prelinger_ActYourA1949_pick_u_nm_np1_ri_med_14.avi 1
Prelinger_HabitPat1954_pick_f_cm_np1_fr_med_28.avi 1
Prelinger_HabitPat1954_pick_u_nm_np1_le_med_9.avi 1
Rent_a_Pocher_Beim_Pilze_sammeln_Oliver_Pocher_pick_f_cm_np2_fr_med_0.avi 1
RushHour2_pick_u_nm_np1_le_med_7.avi 1
SafeInPort_pick_f_nm_np1_fr_med_1.avi 2
Search_and_Identify_Golf_Ball_-_www_mulliganplus_com_pick_f_nm_np1_le_goo_0.avi 1
Sixthsense_pick_h_nm_np1_fr_goo_1.avi 1
SoundAndTheStory_pick_u_nm_np1_le_med_3.avi 1
Stevie_And_Lindsay_Picking_Up_Garbage_pick_f_cm_np1_ba_med_0.avi 2
Stevie_And_Lindsay_Picking_Up_Garbage_pick_f_cm_np1_ri_med_3.avi 2
Stevie_And_Lindsay_Picking_Up_Garbage_pick_f_cm_np2_ba_med_2.avi 0
Stevie_And_Lindsay_Picking_Up_Garbage_pick_l_cm_np1_ba_med_1.avi 0
THE_WALLET_TRICK!!!_pick_f_cm_np1_fr_med_2.avi 2
THE_WALLET_TRICK!!!_pick_f_cm_np1_le_med_0.avi 2
THE_WALLET_TRICK!!!_pick_f_cm_np2_ba_med_1.avi 2
TheBigBangS01E02TheBigBranHypothesis_pick_u_cm_np1_le_med_1.avi 2
ThePerfectScore_pick_u_nm_np1_ba_goo_3.avi 0
ThreeStories_pick_u_cm_np1_ri_goo_2.avi 2
ThreeStories_pick_u_cm_np1_ri_med_3.avi 2
Torwarttraining_-_Impressionen_vom_1__FFC_Frankfurt_pick_f_cm_np1_fr_goo_3.avi 1
Wallet_Trick_pick_f_cm_np1_fr_bad_4.avi 0
Wallet_Trick_pick_f_cm_np1_le_bad_2.avi 2
Wallet_Trick_pick_f_cm_np1_ri_bad_3.avi 2
Wallet_Trick_pick_f_cm_np2_fr_bad_1.avi 0
Wallet_Trick_pick_f_cm_np4_ri_med_0.avi 2
Yep__I_m_picking_up_trash_today_pick_f_cm_np1_fr_med_1.avi 1
Yep__I_m_picking_up_trash_today_pick_f_cm_np1_ri_med_0.avi 1
americanthrift1_pick_u_cm_np1_fr_med_1.avi 1
americanthrift2_pick_u_cm_np1_ri_med_1.avi 1
americanthrift2_pick_u_nm_np1_ri_med_2.avi 1
garbage_men_pick_f_cm_np1_ri_med_0.avi 1
garbage_men_pick_f_cm_np1_ri_med_1.avi 1
garbage_men_pick_f_cm_np1_ri_med_2.avi 1
nameunknown256kb_pick_u_nm_np1_le_med_2.avi 2
pick_up_trash_says_yeti_pick_f_cm_np1_le_med_1.avi 1
pick_up_trash_says_yeti_pick_f_cm_np1_le_med_2.avi 1
pick_up_trash_says_yeti_pick_f_cm_np1_ri_med_0.avi 1
prelinger_LetsBeGo1953_pick_f_cm_np1_fr_med_8.avi 1
prelinger_LetsBeGo1953_pick_f_nm_np1_ri_med_12.avi 1
prelinger_TowardEm1954_pick_u_cm_np1_le_goo_5.avi 1
prelinger_they_grow_up_so_fast_1_pick_f_nm_np1_fr_med_10.avi 1
prelinger_they_grow_up_so_fast_1_pick_u_nm_np1_ri_med_1.avi 1
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_212) on Sun Jan 26 10:16:17 PST 2020 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>SimpleUserGroup (guacamole-ext 1.1.0 API)</title>
<meta name="date" content="2020-01-26">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="SimpleUserGroup (guacamole-ext 1.1.0 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/SimpleUserGroup.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../org/apache/guacamole/net/auth/simple/SimpleUserDirectory.html" title="class in org.apache.guacamole.net.auth.simple"><span class="typeNameLink">Prev Class</span></a></li>
<li>Next Class</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/apache/guacamole/net/auth/simple/SimpleUserGroup.html" target="_top">Frames</a></li>
<li><a href="SimpleUserGroup.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.org.apache.guacamole.net.auth.AbstractUserGroup">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.apache.guacamole.net.auth.simple</div>
<h2 title="Class SimpleUserGroup" class="title">Class SimpleUserGroup</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">java.lang.Object</a></li>
<li>
<ul class="inheritance">
<li><a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html" title="class in org.apache.guacamole.net.auth">org.apache.guacamole.net.auth.AbstractIdentifiable</a></li>
<li>
<ul class="inheritance">
<li><a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html" title="class in org.apache.guacamole.net.auth">org.apache.guacamole.net.auth.AbstractUserGroup</a></li>
<li>
<ul class="inheritance">
<li>org.apache.guacamole.net.auth.simple.SimpleUserGroup</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd><a href="../../../../../../org/apache/guacamole/net/auth/Attributes.html" title="interface in org.apache.guacamole.net.auth">Attributes</a>, <a href="../../../../../../org/apache/guacamole/net/auth/Identifiable.html" title="interface in org.apache.guacamole.net.auth">Identifiable</a>, <a href="../../../../../../org/apache/guacamole/net/auth/Permissions.html" title="interface in org.apache.guacamole.net.auth">Permissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/UserGroup.html" title="interface in org.apache.guacamole.net.auth">UserGroup</a></dd>
</dl>
<hr>
<br>
<pre>public class <span class="typeNameLabel">SimpleUserGroup</span>
extends <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html" title="class in org.apache.guacamole.net.auth">AbstractUserGroup</a></pre>
<div class="block">A read-only UserGroup implementation which has no members and no
permissions. Implementations that need to define members or permissions
should extend this class and override the associated getters.</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../../org/apache/guacamole/net/auth/simple/SimpleUserGroup.html#SimpleUserGroup--">SimpleUserGroup</a></span>()</code>
<div class="block">Creates a completely uninitialized SimpleUserGroup.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../../org/apache/guacamole/net/auth/simple/SimpleUserGroup.html#SimpleUserGroup-java.lang.String-">SimpleUserGroup</a></span>(<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> identifier)</code>
<div class="block">Creates a new SimpleUserGroup having the given identifier.</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.org.apache.guacamole.net.auth.AbstractUserGroup">
<!-- -->
</a>
<h3>Methods inherited from class org.apache.guacamole.net.auth.<a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html" title="class in org.apache.guacamole.net.auth">AbstractUserGroup</a></h3>
<code><a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getActiveConnectionPermissions--">getActiveConnectionPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getAttributes--">getAttributes</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getConnectionGroupPermissions--">getConnectionGroupPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getConnectionPermissions--">getConnectionPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getMemberUserGroups--">getMemberUserGroups</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getMemberUsers--">getMemberUsers</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getSharingProfilePermissions--">getSharingProfilePermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getSystemPermissions--">getSystemPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getUserGroupPermissions--">getUserGroupPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getUserGroups--">getUserGroups</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#getUserPermissions--">getUserPermissions</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractUserGroup.html#setAttributes-java.util.Map-">setAttributes</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.org.apache.guacamole.net.auth.AbstractIdentifiable">
<!-- -->
</a>
<h3>Methods inherited from class org.apache.guacamole.net.auth.<a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html" title="class in org.apache.guacamole.net.auth">AbstractIdentifiable</a></h3>
<code><a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html#equals-java.lang.Object-">equals</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html#getIdentifier--">getIdentifier</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html#hashCode--">hashCode</a>, <a href="../../../../../../org/apache/guacamole/net/auth/AbstractIdentifiable.html#setIdentifier-java.lang.String-">setIdentifier</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
<code><a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.org.apache.guacamole.net.auth.Identifiable">
<!-- -->
</a>
<h3>Methods inherited from interface org.apache.guacamole.net.auth.<a href="../../../../../../org/apache/guacamole/net/auth/Identifiable.html" title="interface in org.apache.guacamole.net.auth">Identifiable</a></h3>
<code><a href="../../../../../../org/apache/guacamole/net/auth/Identifiable.html#getIdentifier--">getIdentifier</a>, <a href="../../../../../../org/apache/guacamole/net/auth/Identifiable.html#setIdentifier-java.lang.String-">setIdentifier</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="SimpleUserGroup--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>SimpleUserGroup</h4>
<pre>public SimpleUserGroup()</pre>
<div class="block">Creates a completely uninitialized SimpleUserGroup.</div>
</li>
</ul>
<a name="SimpleUserGroup-java.lang.String-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>SimpleUserGroup</h4>
<pre>public SimpleUserGroup(<a href="http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> identifier)</pre>
<div class="block">Creates a new SimpleUserGroup having the given identifier.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>identifier</code> - The identifier to assign to this SimpleUserGroup.</dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/SimpleUserGroup.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../org/apache/guacamole/net/auth/simple/SimpleUserDirectory.html" title="class in org.apache.guacamole.net.auth.simple"><span class="typeNameLink">Prev Class</span></a></li>
<li>Next Class</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/apache/guacamole/net/auth/simple/SimpleUserGroup.html" target="_top">Frames</a></li>
<li><a href="SimpleUserGroup.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.org.apache.guacamole.net.auth.AbstractUserGroup">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2020. All rights reserved.</small></p>
<!-- Google Analytics -->
<script type="text/javascript">
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-75289145-1', 'auto');
ga('send', 'pageview');
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<%
layout("/layouts/platform.html"){
%>
<header class="header navbar bg-white shadow">
<div class="btn-group tool-button">
<a class="btn btn-primary navbar-btn" href="${base}/platform/sys/menu" data-pjax id="goback"><i class="ti-angle-left"></i> 返回</a>
</div>
<div class="pull-right">
<div class="btn-group tool-button">
<button class="btn btn-primary navbar-btn" onclick="complete()"> 保存</button>
</div>
</div>
</header>
<div class="modal-body no-p">
<div class="panel">
<div class="panel-heading no-b">
</div>
<div class="panel-body" style="padding-top: 50px;">
<div class="cf nestable-lists">
<div class="dd" id="nestable3">
<ol class="dd-list">
<% for(obj in firstMenus){ %>
<li class="dd-item dd3-item" data-id="${obj.id!}">
<div class="dd-handle dd3-handle">Drag</div>
<div class="dd3-content">${obj.name!}</div>
<% if(!isEmpty(@secondMenus.get(obj.path))){ %>
<ol class="dd-list">
<% for(secondMenu in @secondMenus.get(obj.path)){ %>
<li class="dd-item dd3-item" data-id="${secondMenu.id!}">
<div class="dd-handle dd3-handle">Drag</div>
<div class="dd3-content">${secondMenu.name!}</div>
<% if(!isEmpty(@secondMenus.get(secondMenu.path))){ %>
<ol class="dd-list">
<% for(thMenu in @secondMenus.get(secondMenu.path)){ %>
<li class="dd-item dd3-item" data-id="${thMenu.id!}">
<div class="dd-handle dd3-handle">Drag</div>
<div class="dd3-content">${thMenu.name!}</div>
</li>
<% } %>
</ol>
<% } %>
</li>
<% } %>
</ol>
<% } %>
</li>
<% }%>
</ol>
</div>
</div>
</div>
</div>
</div>
<script type="text/javascript">
var ids=[];
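// Walk the nestable serialization depth-first, collecting every item id (children included).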
function getId(ob){
$.each(ob,function(i,o){
ids.push(o.id);
if(o.children){
getId(o.children);
}
});
}
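// Serialize the current drag-and-drop order and post the flattened id list to the sort endpoint.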
function complete(){
ids=[];
getId($("#nestable3").nestable('serialize'));
$.post("${base}/platform/sys/menu/sortDo", {ids: ids.toString()}, function (data) {
if (data.code == 0) {
Toast.success(data.msg);
} else {
Toast.error(data.msg);
}
}, "json");
}
$(function () {
$("#nestable3").nestable();
$('#nestable3').nestable('collapseAll');
});
</script>
<%}%> | {
"pile_set_name": "Github"
} |
# =============================================================================
# This file defines properties used by build-impl.xml and the associated
# *-impl.xml files (sass-impl.xml, js-impl.xml, etc.), which are the core of
# the applications build process.
#
# IMPORTANT - This file is not modifiable by a package, and will be overwritten
# during each app upgrade. Please use build.properties for defining package
# customizations to these properties.
# =============================================================================
# ===========================================
# properties defining various directory
# locations
# ===========================================
build.dir=${package.build.dir}
package.output=${build.dir}
package.output.base=${package.output}
package.output.js=
package.output.css=resources
package.output.sass=${package.output.js}
package.output.resources=${package.output.css}
build.out.js.dir=${package.output.base}/${package.output.js}
build.out.css.dir=${package.output.base}/${package.output.css}
build.out.sass.dir=${package.output.base}/${package.output.sass}
build.out.resources.dir=${package.output.base}/${package.output.resources}
# a temporary output directory used for staging intermediate build artifacts
build.temp.dir=${workspace.build.dir}/temp/${package.name}
build.resources.dir=${build.out.resources.dir}
package.resources.dir=${package.dir}/resources
package.sass.dir=${package.dir}/sass
package.licenses.dir=${package.dir}/licenses
# ===========================================
# definitions of various file name patterns
# used for output artifacts
# ===========================================
build.name.prefix=${package.name}
build.name.css.prefix=${build.resources.dir}/${package.name}
build.name.ruby=config.rb
build.debug.suffix=-debug
build.all.suffix=-all
build.rtl.suffix=-rtl
build.all.debug.suffix=${build.all.suffix}${build.debug.suffix}
build.all.rtl.suffix=${build.all.suffix}${build.rtl.suffix}
build.all.rtl.debug.suffix=${build.all.suffix}${build.rtl.suffix}${build.debug.suffix}
# ===========================================
# define the output js file names for dev,
# debug, and compressed (no suffix)
# ===========================================
build.all.js=${build.out.js.dir}/${build.name.prefix}.js
build.all.debug.js=${build.out.js.dir}/${build.name.prefix}${build.debug.suffix}.js
package.sass.build.dir=${build.out.sass.dir}
# ===========================================
# output file names for the scss files
# ===========================================
build.all.scss=${package.sass.build.dir}/${build.name.prefix}${build.all.debug.suffix}.scss
build.all.rtl.scss=${package.sass.build.dir}/${build.name.prefix}${build.all.rtl.debug.suffix}.scss
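# e.g. with package.name=my-theme (an illustrative name, not a default)
# these resolve to my-theme-all-debug.scss and my-theme-all-rtl-debug.scss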
# ===========================================
# output file names for the css files
# generated from the scss files by running
# a compass compilation
# ===========================================
build.all.css.debug.prefix=${package.name}${build.all.debug.suffix}
build.all.css.debug=${build.out.css.dir}/${build.all.css.debug.prefix}.css
build.all.rtl.css.debug.prefix=${package.name}${build.all.rtl.debug.suffix}
build.all.rtl.css.debug=${build.out.css.dir}/${build.all.rtl.css.debug.prefix}.css
build.all.css.prefix=${package.name}${build.all.suffix}
build.all.css=${build.out.css.dir}/${build.all.css.prefix}.css
build.all.rtl.css.prefix=${package.name}${build.all.rtl.suffix}
build.all.rtl.css=${build.out.css.dir}/${build.all.rtl.css.prefix}.css
build.all.ruby=${package.sass.build.dir}/${build.name.ruby}
# ===========================================
# options to pass to the 'sencha fs slice' command
# ===========================================
build.slice.options=
# ===========================================
# preprocessor options used when generating
# concatenated js output files
# ===========================================
build.compile.js.debug.options=debug:true
build.compile.js.options=debug:false
# enables / disables removing text references from
# package js build files
build.remove.references=false
# This property can be modified to change general build options
# such as excluding files from the set. The format expects newlines
# for each argument, for example:
#
# build.operations=\
# exclude\n \
# -namespace=Ext\n
#
# NOTE: modifications to build.operations are intended to be
# placed in an override of the "-after-init" target, where it
# can be calculated based on other
# ant properties
#
# build.operations=
# ===========================================
# compression option used to generate '-all'
# js output file
# ===========================================
build.compile.js.compress=+yui
build.compile.temp.dir=${build.temp.dir}/sencha-compiler
# controls whether to keep the temp compile dir after the build
build.compile.temp.dir.keep=true
# ===========================================
# selector count threshold to use when
# splitting a single css file into multiple
# css files (IE selector limit workaround)
# ===========================================
build.css.selector.limit=4095
# controls the ruby command used to execute compass. A full path
# to ruby may be specified rather than allowing the system shell
# to resolve the command
build.ruby.path=ruby
# controls the working directory of the child compass process
# and the output location for the .sass-cache folder
compass.working.dir=${package.sass.build.dir}
# enables / disables console highlighting for compass
compass.compile.boring=false
# enables / disables forced rebuilds for compass
compass.compile.force=true
# enables / disables stack traces in compass failure output
compass.compile.trace=true
# the directory containing sass files for compass to compile
compass.sass.dir=${package.sass.build.dir}
# the output directory where compass should place built css files
compass.css.dir=${build.out.css.dir}
# the directory containing the ruby config file for compass
compass.config.file=${build.all.ruby}
compass.cache.dir=${workspace.build.dir}/.sass-cache
# ===========================================
# Options for sub-packages
# Set to true/1 to enable build.version inheritance by sub-packages
build.subpkgs.inherit.version=0
# ===========================================
# theme slicing example page settings
# ===========================================
package.example.dir=${package.dir}/sass/example
package.example.build.dir=${build.temp.dir}/slicer-temp
package.example.base=${build.all.rtl.css.debug.prefix}
package.example.css=${package.example.build.dir}/${package.example.base}.css
package.example.scss=${package.example.build.dir}/${package.example.base}.scss
package.example.theme.html=${package.example.dir}/theme.html
package.example.fashion.html=${package.example.dir}/fashion.html
# the name of the intermediate screenshot file used for image slicing
build.capture.png=${package.example.build.dir}/theme-capture.png
# the name of the intermediate widget manifest file used for image slicing
build.capture.json=${package.example.build.dir}/theme-capture.json
# the microloader to use for bootstrapping operations
package.microloader.bootstrap=${package.microloader.dir}/${package.microloader.development}
build.boot.name=Boot.js
build.boot.file=${package.config.dir}/${build.boot.name}
build.slicer.microloader.name=Microloader.js
build.slicer.microloader.file=${package.config.dir}/${build.slicer.microloader.name}
# the ruby compass config file to generate for slicer page scss
package.example.out.ruby=${package.example.build.dir}/config.rb
package.example.compass.config=${package.example.out.ruby}
bootstrap.base.path=${package.example.dir}
bootstrap.example.js=${package.example.dir}/bootstrap.js
bootstrap.example.json=${package.example.dir}/bootstrap.json
# ===========================================
# options controlling output packaging
# operations for output '.pkg' file
# ===========================================
pkg.build.dir=${workspace.build.dir}/${package.name}
pkg.file.name=${package.name}.pkg
pkg.includes=**/*
pkg.excludes=package.json
# the port number to start the local web server on
build.web.port=1841
# the directory representing the root web folder
build.web.root=${workspace.dir}
| {
"pile_set_name": "Github"
} |
/*
Copyright (C) 2011 Martin S.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
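// Token classes implied by the patterns below: "com" for line comments
// starting with '%', "kwd" for control sequences such as \section (and
// single escaped characters), "typ" for '$' and '&', "lit" for dimension
// literals such as 12pt or 1.5em, and "pun" for braces, brackets,
// parentheses and '='.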
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xA0]+/,null,"\t\n\r \u00a0"],["com",/^%[^\r\n]*/,null,"%"]],[["kwd",/^\\[a-zA-Z@]+/],["kwd",/^\\./],["typ",/^[$&]/],["lit",/[+-]?(?:\.\d+|\d+(?:\.\d*)?)(cm|em|ex|in|pc|pt|bp|mm)/i],["pun",/^[{}()\[\]=]+/]]),["latex","tex"]);
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* PWM controller driver for Amlogic Meson SoCs.
*
* This PWM is only a set of Gates, Dividers and Counters:
* PWM output is achieved by calculating a clock that permits calculating
* two periods (low and high). The counter then has to be set to switch after
* N cycles for the first half period.
* The hardware has no "polarity" setting. This driver reverses the period
* cycles (the low length is inverted with the high length) for
* PWM_POLARITY_INVERSED. This means that .get_state cannot read the polarity
* from the hardware.
* Setting the duty cycle will disable and re-enable the PWM output.
* Disabling the PWM stops the output immediately (without waiting for the
* current period to complete first).
*
* The public S912 (GXM) datasheet contains some documentation for this PWM
* controller starting on page 543:
* https://dl.khadas.com/Hardware/VIM2/Datasheet/S912_Datasheet_V0.220170314publicversion-Wesion.pdf
* An updated version of this IP block is found in S922X (G12B) SoCs. The
* datasheet contains the description for this IP block revision starting at
* page 1084:
* https://dn.odroid.com/S922X/ODROID-N2/Datasheet/S922X_Public_Datasheet_V0.2.pdf
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2014 Amlogic, Inc.
*/
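/*
 * Worked example (illustrative numbers, not taken from a datasheet): with
 * the typical 24 MHz "xtal" as input clock (fin_freq = 24000000 Hz) and a
 * requested period of 5 ms (5000000 ns) at 50% duty cycle, meson_pwm_calc()
 * below computes
 *
 *   pre_div  = fin_freq * period / (NSEC_PER_SEC * 0xffff)  = 1
 *   cnt      = fin_freq * period / (NSEC_PER_SEC * (1 + 1)) = 60000
 *   duty_cnt = 30000, giving hi = 30000 and lo = cnt - duty_cnt = 30000
 *
 * meson_pwm_enable() then packs hi and lo into the 16-bit high/low fields
 * of the per-channel register.
 */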
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define REG_PWM_A 0x0
#define REG_PWM_B 0x4
#define PWM_LOW_MASK GENMASK(15, 0)
#define PWM_HIGH_MASK GENMASK(31, 16)
#define REG_MISC_AB 0x8
#define MISC_B_CLK_EN BIT(23)
#define MISC_A_CLK_EN BIT(15)
#define MISC_CLK_DIV_MASK 0x7f
#define MISC_B_CLK_DIV_SHIFT 16
#define MISC_A_CLK_DIV_SHIFT 8
#define MISC_B_CLK_SEL_SHIFT 6
#define MISC_A_CLK_SEL_SHIFT 4
#define MISC_CLK_SEL_MASK 0x3
#define MISC_B_EN BIT(1)
#define MISC_A_EN BIT(0)
#define MESON_NUM_PWMS 2
static struct meson_pwm_channel_data {
u8 reg_offset;
u8 clk_sel_shift;
u8 clk_div_shift;
u32 clk_en_mask;
u32 pwm_en_mask;
} meson_pwm_per_channel_data[MESON_NUM_PWMS] = {
{
.reg_offset = REG_PWM_A,
.clk_sel_shift = MISC_A_CLK_SEL_SHIFT,
.clk_div_shift = MISC_A_CLK_DIV_SHIFT,
.clk_en_mask = MISC_A_CLK_EN,
.pwm_en_mask = MISC_A_EN,
},
{
.reg_offset = REG_PWM_B,
.clk_sel_shift = MISC_B_CLK_SEL_SHIFT,
.clk_div_shift = MISC_B_CLK_DIV_SHIFT,
.clk_en_mask = MISC_B_CLK_EN,
.pwm_en_mask = MISC_B_EN,
}
};
struct meson_pwm_channel {
unsigned int hi;
unsigned int lo;
u8 pre_div;
struct clk *clk_parent;
struct clk_mux mux;
struct clk *clk;
};
struct meson_pwm_data {
const char * const *parent_names;
unsigned int num_parents;
};
struct meson_pwm {
struct pwm_chip chip;
const struct meson_pwm_data *data;
struct meson_pwm_channel channels[MESON_NUM_PWMS];
void __iomem *base;
/*
* Protects register (write) access to the REG_MISC_AB register
* that is shared between the two PWMs.
*/
spinlock_t lock;
};
static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
{
return container_of(chip, struct meson_pwm, chip);
}
static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel;
struct device *dev = chip->dev;
int err;
channel = pwm_get_chip_data(pwm);
if (channel)
return 0;
channel = &meson->channels[pwm->hwpwm];
if (channel->clk_parent) {
err = clk_set_parent(channel->clk, channel->clk_parent);
if (err < 0) {
dev_err(dev, "failed to set parent %s for %s: %d\n",
__clk_get_name(channel->clk_parent),
__clk_get_name(channel->clk), err);
return err;
}
}
err = clk_prepare_enable(channel->clk);
if (err < 0) {
dev_err(dev, "failed to enable clock %s: %d\n",
__clk_get_name(channel->clk), err);
return err;
}
return pwm_set_chip_data(pwm, channel);
}
static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
if (channel)
clk_disable_unprepare(channel->clk);
}
static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
unsigned int duty, period, pre_div, cnt, duty_cnt;
unsigned long fin_freq = -1;
duty = state->duty_cycle;
period = state->period;
if (state->polarity == PWM_POLARITY_INVERSED)
duty = period - duty;
fin_freq = clk_get_rate(channel->clk);
if (fin_freq == 0) {
dev_err(meson->chip.dev, "invalid source clock frequency\n");
return -EINVAL;
}
dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
if (cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get period cnt\n");
return -EINVAL;
}
dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
pre_div, cnt);
if (duty == period) {
channel->pre_div = pre_div;
channel->hi = cnt;
channel->lo = 0;
} else if (duty == 0) {
channel->pre_div = pre_div;
channel->hi = 0;
channel->lo = cnt;
} else {
/* Then check if we can have the duty with the same pre_div */
duty_cnt = div64_u64(fin_freq * (u64)duty,
NSEC_PER_SEC * (pre_div + 1));
if (duty_cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get duty cycle\n");
return -EINVAL;
}
dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
duty, pre_div, duty_cnt);
channel->pre_div = pre_div;
channel->hi = duty_cnt;
channel->lo = cnt - duty_cnt;
}
return 0;
}
static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm_channel_data *channel_data;
unsigned long flags;
u32 value;
channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
spin_lock_irqsave(&meson->lock, flags);
value = readl(meson->base + REG_MISC_AB);
value &= ~(MISC_CLK_DIV_MASK << channel_data->clk_div_shift);
value |= channel->pre_div << channel_data->clk_div_shift;
value |= channel_data->clk_en_mask;
writel(value, meson->base + REG_MISC_AB);
value = FIELD_PREP(PWM_HIGH_MASK, channel->hi) |
FIELD_PREP(PWM_LOW_MASK, channel->lo);
writel(value, meson->base + channel_data->reg_offset);
value = readl(meson->base + REG_MISC_AB);
value |= channel_data->pwm_en_mask;
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
}
static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
{
unsigned long flags;
u32 value;
spin_lock_irqsave(&meson->lock, flags);
value = readl(meson->base + REG_MISC_AB);
value &= ~meson_pwm_per_channel_data[pwm->hwpwm].pwm_en_mask;
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
int err = 0;
if (!state)
return -EINVAL;
if (!state->enabled) {
if (state->polarity == PWM_POLARITY_INVERSED) {
/*
* This IP block revision doesn't have an "always high"
* setting which we can use for "inverted disabled".
* Instead we achieve this by using a pre_div of 0 (to
* get the shortest possible duration for one "count")
* and "period == duty_cycle". This results in a signal
* which is LOW for one "count", while being HIGH for
* the rest of the period (so the signal is HIGH for
* slightly less than 100% of the period, but this is
* the best we can achieve).
*/
channel->pre_div = 0;
channel->hi = ~0;
channel->lo = 0;
meson_pwm_enable(meson, pwm);
} else {
meson_pwm_disable(meson, pwm);
}
} else {
err = meson_pwm_calc(meson, pwm, state);
if (err < 0)
return err;
meson_pwm_enable(meson, pwm);
}
return 0;
}
static unsigned int meson_pwm_cnt_to_ns(struct pwm_chip *chip,
struct pwm_device *pwm, u32 cnt)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel;
unsigned long fin_freq;
u32 fin_ns;
/* to_meson_pwm() can only be used after .get_state() is called */
channel = &meson->channels[pwm->hwpwm];
fin_freq = clk_get_rate(channel->clk);
if (fin_freq == 0)
return 0;
fin_ns = div_u64(NSEC_PER_SEC, fin_freq);
return cnt * fin_ns * (channel->pre_div + 1);
}
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel_data *channel_data;
struct meson_pwm_channel *channel;
u32 value, tmp;
if (!state)
return;
channel = &meson->channels[pwm->hwpwm];
channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
value = readl(meson->base + REG_MISC_AB);
tmp = channel_data->pwm_en_mask | channel_data->clk_en_mask;
state->enabled = (value & tmp) == tmp;
tmp = value >> channel_data->clk_div_shift;
channel->pre_div = FIELD_GET(MISC_CLK_DIV_MASK, tmp);
value = readl(meson->base + channel_data->reg_offset);
channel->lo = FIELD_GET(PWM_LOW_MASK, value);
channel->hi = FIELD_GET(PWM_HIGH_MASK, value);
if (channel->lo == 0) {
state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
state->duty_cycle = state->period;
} else if (channel->lo >= channel->hi) {
state->period = meson_pwm_cnt_to_ns(chip, pwm,
channel->lo + channel->hi);
state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm,
channel->hi);
} else {
state->period = 0;
state->duty_cycle = 0;
}
}
static const struct pwm_ops meson_pwm_ops = {
.request = meson_pwm_request,
.free = meson_pwm_free,
.apply = meson_pwm_apply,
.get_state = meson_pwm_get_state,
.owner = THIS_MODULE,
};
static const char * const pwm_meson8b_parent_names[] = {
"xtal", "vid_pll", "fclk_div4", "fclk_div3"
};
static const struct meson_pwm_data pwm_meson8b_data = {
.parent_names = pwm_meson8b_parent_names,
.num_parents = ARRAY_SIZE(pwm_meson8b_parent_names),
};
static const char * const pwm_gxbb_parent_names[] = {
"xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
};
static const struct meson_pwm_data pwm_gxbb_data = {
.parent_names = pwm_gxbb_parent_names,
.num_parents = ARRAY_SIZE(pwm_gxbb_parent_names),
};
/*
* Only the first 2 inputs of the GXBB AO PWMs are valid
* The last 2 are grounded
*/
static const char * const pwm_gxbb_ao_parent_names[] = {
"xtal", "clk81"
};
static const struct meson_pwm_data pwm_gxbb_ao_data = {
.parent_names = pwm_gxbb_ao_parent_names,
.num_parents = ARRAY_SIZE(pwm_gxbb_ao_parent_names),
};
static const char * const pwm_axg_ee_parent_names[] = {
"xtal", "fclk_div5", "fclk_div4", "fclk_div3"
};
static const struct meson_pwm_data pwm_axg_ee_data = {
.parent_names = pwm_axg_ee_parent_names,
.num_parents = ARRAY_SIZE(pwm_axg_ee_parent_names),
};
static const char * const pwm_axg_ao_parent_names[] = {
"aoclk81", "xtal", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_axg_ao_data = {
.parent_names = pwm_axg_ao_parent_names,
.num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
};
static const char * const pwm_g12a_ao_ab_parent_names[] = {
"xtal", "aoclk81", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
.parent_names = pwm_g12a_ao_ab_parent_names,
.num_parents = ARRAY_SIZE(pwm_g12a_ao_ab_parent_names),
};
static const char * const pwm_g12a_ao_cd_parent_names[] = {
"xtal", "aoclk81",
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
.parent_names = pwm_g12a_ao_cd_parent_names,
.num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
};
static const char * const pwm_g12a_ee_parent_names[] = {
"xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
};
static const struct meson_pwm_data pwm_g12a_ee_data = {
.parent_names = pwm_g12a_ee_parent_names,
.num_parents = ARRAY_SIZE(pwm_g12a_ee_parent_names),
};
static const struct of_device_id meson_pwm_matches[] = {
{
.compatible = "amlogic,meson8b-pwm",
.data = &pwm_meson8b_data
},
{
.compatible = "amlogic,meson-gxbb-pwm",
.data = &pwm_gxbb_data
},
{
.compatible = "amlogic,meson-gxbb-ao-pwm",
.data = &pwm_gxbb_ao_data
},
{
.compatible = "amlogic,meson-axg-ee-pwm",
.data = &pwm_axg_ee_data
},
{
.compatible = "amlogic,meson-axg-ao-pwm",
.data = &pwm_axg_ao_data
},
{
.compatible = "amlogic,meson-g12a-ee-pwm",
.data = &pwm_g12a_ee_data
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-ab",
.data = &pwm_g12a_ao_ab_data
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-cd",
.data = &pwm_g12a_ao_cd_data
},
{},
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
static int meson_pwm_init_channels(struct meson_pwm *meson)
{
struct device *dev = meson->chip.dev;
struct clk_init_data init;
unsigned int i;
char name[255];
int err;
for (i = 0; i < meson->chip.npwm; i++) {
struct meson_pwm_channel *channel = &meson->channels[i];
snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
init.name = name;
init.ops = &clk_mux_ops;
init.flags = 0;
init.parent_names = meson->data->parent_names;
init.num_parents = meson->data->num_parents;
channel->mux.reg = meson->base + REG_MISC_AB;
channel->mux.shift =
meson_pwm_per_channel_data[i].clk_sel_shift;
channel->mux.mask = MISC_CLK_SEL_MASK;
channel->mux.flags = 0;
channel->mux.lock = &meson->lock;
channel->mux.table = NULL;
channel->mux.hw.init = &init;
channel->clk = devm_clk_register(dev, &channel->mux.hw);
if (IS_ERR(channel->clk)) {
err = PTR_ERR(channel->clk);
dev_err(dev, "failed to register %s: %d\n", name, err);
return err;
}
snprintf(name, sizeof(name), "clkin%u", i);
channel->clk_parent = devm_clk_get_optional(dev, name);
if (IS_ERR(channel->clk_parent))
return PTR_ERR(channel->clk_parent);
}
return 0;
}
static int meson_pwm_probe(struct platform_device *pdev)
{
struct meson_pwm *meson;
struct resource *regs;
int err;
meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
if (!meson)
return -ENOMEM;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
meson->base = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
spin_lock_init(&meson->lock);
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
meson->chip.base = -1;
meson->chip.npwm = MESON_NUM_PWMS;
meson->chip.of_xlate = of_pwm_xlate_with_flags;
meson->chip.of_pwm_n_cells = 3;
meson->data = of_device_get_match_data(&pdev->dev);
err = meson_pwm_init_channels(meson);
if (err < 0)
return err;
err = pwmchip_add(&meson->chip);
if (err < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
return err;
}
platform_set_drvdata(pdev, meson);
return 0;
}
static int meson_pwm_remove(struct platform_device *pdev)
{
struct meson_pwm *meson = platform_get_drvdata(pdev);
return pwmchip_remove(&meson->chip);
}
static struct platform_driver meson_pwm_driver = {
.driver = {
.name = "meson-pwm",
.of_match_table = meson_pwm_matches,
},
.probe = meson_pwm_probe,
.remove = meson_pwm_remove,
};
module_platform_driver(meson_pwm_driver);
MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| {
"pile_set_name": "Github"
} |
// Tabs as whitespace should work like spaces
int anInt = 0;
void setup () {
anInt = 1;
}
| {
"pile_set_name": "Github"
} |
db "IMITATION@" ; species name
dw 311, 840 ; height, weight
db "If a tree branch"
next "shakes when there"
next "is no wind, it's a"
page "SUDOWOODO, not a"
next "tree. It hides"
next "from the rain.@"
| {
"pile_set_name": "Github"
} |
/* parse_context.h -*- C++ -*-
Jeremy Barnes, 27 January 2005
Copyright (c) 2005 Jeremy Barnes. All rights reserved.
This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
A class to ease the recursive-descent parsing of strings.
*/
#pragma once
#include "mldb/arch/exception.h"
#include "mldb/compiler/compiler.h"
#include <cmath>
#include <string>
#include <iostream>
#include <list>
#include <climits>
#include <cstring>
#include <cstdint>
#include <memory>
namespace MLDB {
/*****************************************************************************/
/* PARSE_CONTEXT */
/*****************************************************************************/
struct ParseContext {
static const std::string CONSOLE;
struct Exception: MLDB::Exception {
Exception(const char * what,
std::string filename,
int row,
int col) noexcept
: MLDB::Exception(what),
filename(std::move(filename)),
row(row),
col(col)
{
}
Exception(const std::string & what,
std::string filename,
int row,
int col) noexcept
: MLDB::Exception(what),
filename(std::move(filename)),
row(row),
col(col)
{
}
~Exception() throw()
{
}
std::string filename;
int row;
int col;
};
/** Create but don't initialize. */
ParseContext();
/** Initialize from a filename, loading the file and uncompressing if
necessary. */
explicit ParseContext(const std::string & filename);
/** Initialize from a memory region. */
ParseContext(const std::string & filename, const char * start,
const char * finish, unsigned line = 1, unsigned col = 1);
ParseContext(const std::string & filename, const char * start,
size_t length, unsigned line = 1, unsigned col = 1);
/** Default chunk size. */
enum { DEFAULT_CHUNK_SIZE = 65500 };
/** Initialize from an istream. */
ParseContext(const std::string & filename, std::istream & stream,
unsigned line = 1, unsigned col = 1,
size_t chunk_size = DEFAULT_CHUNK_SIZE);
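/* Illustrative usage sketch (an assumption added here, not part of the
   original header): parsing a line of comma-separated integers from a
   hypothetical file "numbers.csv".

       ParseContext context("numbers.csv");
       std::vector<int> values;
       while (!context.eof()) {
           values.push_back(context.expect_int());
           if (!context.match_literal(','))
               context.expect_eol();
       }
*/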
~ParseContext();
/** Initialize from a filename, loading the file and uncompressing if
necessary. */
void init(const std::string & filename);
/** Set the chunk size for the buffers. Mostly used for testing
purposes. Note that this is only useful when initialized from
a stream. */
void set_chunk_size(size_t size);
/** Get the chunk size. */
size_t get_chunk_size() const { return chunk_size_; }
/** How many characters are available to read ahead from? */
size_t readahead_available() const;
/** How many characters are buffered in total, both before and after
the current character? */
size_t total_buffered() const;
/** Increment. Note that it always sets up the buffer such that more
characters are available. */
MLDB_ALWAYS_INLINE ParseContext & operator ++ ()
{
if (eof()) exception("unexpected EOF");
if (*cur_ == '\n') { ++line_; col_ = 0; }
ofs_ += 1; col_ += 1;
++cur_;
if (MLDB_UNLIKELY(cur_ == ebuf_))
next_buffer();
return *this;
}
ParseContext & operator += (int steps)
{
for (int i = 0; i < steps; i++) {
operator ++();
}
return *this;
}
/** Little helper class that allows the character at the current position
to be returned. This allows us to do "*context++", without making
a (heavyweight!) copy at each stage (as context++ requires a copy
of the object to be returned).
*/
struct Last_Char {
Last_Char(char c) : c(c) {}
char operator * () const { return c; }
char c;
};
Last_Char operator ++ (int)
{
char result = operator * ();
operator ++ ();
return Last_Char(result);
}
char operator * () const
{
if (eof()) exception("unexpected EOF");
return *cur_;
}
/** Match a literal character. Return true if matched or false if not.
Never throws.
*/
bool match_literal(char c)
{
if (eof()) return false;
if (*cur_ == c) { operator ++(); return true; }
return false;
}
/** Expect a literal character. Throws if the character is not matched. */
void expect_literal(char c, const char * error = "expected '%c', got '%c'")
{
if (!match_literal(c)) exception_fmt(error, c, (eof() ? '\0' : *cur_));
}
/** Match a literal string. Returns true if it was matched and false if
not. Never throws.
*/
bool match_literal(const std::string & str)
{
return match_literal_str(str.data(), str.length());
}
/** Expect a literal string. Throws an exception if the given string
was not at the current position.
*/
void expect_literal(const std::string & str,
const char * error = "expected '%s'")
{
if (!match_literal(str)) exception_fmt(error, str.c_str());
}
/** Match a literal string. Returns true if it was matched and false if
not. Never throws.
*/
bool match_literal(const char * str)
{
return match_literal_str(str, strlen(str));
}
/** Expect a literal string. Throws an exception if the given string
was not at the current position.
*/
void expect_literal(const char * str,
const char * error = "expected '%s'")
{
if (!match_literal(str)) exception_fmt(error, str);
}
template<class FoundEnd>
bool match_text(std::string & text, const FoundEnd & found)
{
#if 0 // buggy
/* We do each buffer separately, to avoid overhead. */
while (!eof()) {
const char * text_start = cur_;
/* Go to an EOF or the end of the buffer, whatever first. */
while (cur_ < ebuf_ && !found(*cur_)) ++cur_;
/* Copy the text. */
text.append(text_start, cur_);
/* Did we find the end of line? */
if (cur_ < ebuf_) break;
/* We need a new buffer. */
--cur_; // make sure the operator ++ will return a new buffer
operator ++ (); // get the new buffer
}
#else
char internalBuffer[4096];
char * buffer = internalBuffer;
size_t bufferSize = 4096;
size_t pos = 0;
char c;
while (!eof() && !found(c = operator *())) {
if (pos == bufferSize) {
size_t newBufferSize = bufferSize * 8;
char * newBuffer = new char[newBufferSize];
std::copy(buffer, buffer + bufferSize, newBuffer);
if (buffer != internalBuffer)
delete[] buffer;
buffer = newBuffer;
bufferSize = newBufferSize;
}
buffer[pos++] = c;
operator ++();
}
text = std::string(buffer, buffer + pos);
if (buffer != internalBuffer)
delete[] buffer;
#endif
return true;
}
struct Matches_Char {
Matches_Char(char c) : c(c) {}
char c;
bool operator () (char c2) const { return c == c2; }
};
/** Match a string of any length delimited by the given character. EOF is
implicitly considered a delimiter. The text may be of zero length if
the delimiter is encountered straight away. The text up to but not
including the delimiter is returned in text, and the position will be
at the delimiter. Always returns true, as the empty string counts as
being matched.
*/
bool match_text(std::string & text, char delimiter)
{
return match_text(text, Matches_Char(delimiter));
}
bool match_text(std::string & text, const char * delimiters);
bool match_test_icase(const char* word);
std::string expect_text(char delimiter,
bool allow_empty = true,
const char * error = "expected text");
std::string expect_text(const char * delimiters,
bool allow_empty = true,
const char * error = "expected text");
bool match_int(int & val, int min = -INT_MAX, int max = INT_MAX);
int expect_int(int min = -INT_MAX, int max = INT_MAX,
const char * error = "expected integer");
bool match_hex4(int & val, int min = -INT_MAX, int max = INT_MAX);
int expect_hex4(int min = -INT_MAX, int max = INT_MAX,
const char * error = "invalid hexadecimal in code");
bool match_unsigned(unsigned & val, unsigned min = 0,
unsigned max = INT_MAX);
unsigned expect_unsigned(unsigned min = 0, unsigned max = INT_MAX,
const char * error = "expected unsigned");
bool match_long(long & val,
long min = LONG_MIN,
long max = LONG_MAX);
long expect_long(long min = -LONG_MAX,
long max = LONG_MAX,
const char * error = "expected long integer");
bool match_unsigned_long(unsigned long & val,
unsigned long min = 0,
unsigned long max = ULONG_MAX);
unsigned long
expect_unsigned_long(unsigned long min = 0,
unsigned long max = ULONG_MAX,
const char * error = "expected long integer");
bool match_long_long(long long & val,
long long min = LONG_LONG_MIN,
long long max = LONG_LONG_MAX);
long long
expect_long_long(long long min = -LONG_LONG_MAX,
long long max = LONG_LONG_MAX,
const char * error = "expected long long integer");
bool match_unsigned_long_long(unsigned long long & val,
unsigned long long min = 0,
unsigned long long max = ULONG_LONG_MAX);
unsigned long long
expect_unsigned_long_long(unsigned long long min = 0,
unsigned long long max = ULONG_LONG_MAX,
const char * error = "expected long long integer");
/** Matches a floating point value in the given range.
If lenient is set to false, the function will not accept integers.
*/
bool match_float(float & val, float min = -INFINITY, float max = INFINITY, bool lenient = true);
/** If lenient is set to false, the function will not accept integers. */
float expect_float(float min = -INFINITY, float max = INFINITY,
const char * error = "expected float", bool lenient = true);
/** Matches a floating point value in the given range.
If lenient is set to false, the function will not accept integers.
*/
bool match_double(double & val,
double min = -INFINITY, double max = INFINITY,
bool lenient = true);
/** If lenient is set to false, the function will not accept integers. */
double expect_double(double min = -INFINITY, double max = INFINITY,
const char * error = "expected double",
bool lenient = true);
bool match_whitespace()
{
bool result = false;
while (!eof() && isblank(*cur_)) {
// while (!eof() && isspace(*cur_) && *cur_ != '\n') {
result = true;
operator ++ ();
}
return result;
}
void skip_whitespace()
{
match_whitespace();
}
void expect_whitespace()
{
if (!match_whitespace()) exception("expected whitespace");
}
bool match_numeric(signed int & i)
{
return match_int(i);
}
bool match_numeric(unsigned int & i)
{
return match_unsigned(i);
}
template<typename MatchAs, typename T>
bool match_numeric_as(T & i)
{
Revert_Token token(*this);
MatchAs r;
if (!match_numeric(r)) return false;
i = r;
if (i != r)
exception("type did not fit in range");
token.ignore();
return true;
}
bool match_numeric(short signed int & i)
{
return match_numeric_as<int>(i);
}
bool match_numeric(short unsigned int & i)
{
return match_numeric_as<unsigned>(i);
}
bool match_numeric(signed char & i)
{
return match_numeric_as<int>(i);
}
bool match_numeric(unsigned char & i)
{
return match_numeric_as<unsigned int>(i);
}
bool match_numeric(signed long & i)
{
return match_numeric_as<signed long long>(i);
}
bool match_numeric(unsigned long & i)
{
return match_numeric_as<unsigned long long>(i);
}
bool match_numeric(signed long long & i)
{
return match_long_long(i);
}
bool match_numeric(unsigned long long & i)
{
return match_unsigned_long_long(i);
}
/** If lenient is set to false, the function will not accept integers. */
bool match_numeric(float & f, bool lenient = true)
{
return match_float(f, lenient);
}
/** If lenient is set to false, the function will not accept integers. */
bool match_numeric(double & f, bool lenient = true)
{
return match_double(f, lenient);
}
template<typename T>
T expect_numeric(const char * error = "expected numeric value of type %s")
{
T result;
if (!match_numeric(result))
exception_fmt(error, typeid(T).name());
return result;
}
/** Return a message giving filename:line:col */
std::string where() const;
void exception(const std::string & message) const MLDB_NORETURN;
void exception(const char * message) const MLDB_NORETURN;
void exception_fmt(const char * message, ...) const MLDB_NORETURN;
size_t get_offset() const { return ofs_; }
size_t get_line() const { return line_; }
size_t get_col() const { return col_; }
/** Query if we are at the end of file. This occurs when we can't find
any more characters. */
MLDB_ALWAYS_INLINE bool eof() const
{
//using namespace std;
//cerr << "eof: cur_ = " << (void *)cur_ << "ebuf_ = " << (void *)ebuf_
// << endl;
return cur_ == ebuf_;
}
/** Query if we are at the end of file. */
operator bool () const
{
return !eof();
}
bool match_eol(bool eof_is_eol = true)
{
if (eof_is_eol && eof()) return true; // EOF is considered EOL
if (*cur_ == '\n') {
operator ++ ();
if (eof_is_eol && eof()) return true; // EOF is considered EOL
if (*cur_ == '\r')
operator ++ ();
return true;
}
if (*cur_ != '\r') return false;
// deal with DOS line endings
return match_literal("\r\n");
}
void expect_eol(const char * error = "expected eol")
{
if (!match_eol()) exception(error);
}
void expect_eof(const char * error = "expected eof")
{
if (!eof()) exception(error);
}
bool match_line(std::string & line)
{
if (eof()) return false;
match_text(line, '\n');
match_eol();
return true;
}
std::string expect_line(const char * error = "expected line of text")
{
std::string result;
if (!match_line(result)) exception(error);
return result;
}
void skip_line()
{
expect_line();
}
bool match_literal_str(const char * start, size_t len);
protected:
/** This token class allows speculative parsing. It saves the position
of the parse context, and will on destruction revert back to that
position, unless it was ignored.
Note that we require these to be on the stack (it is checked that
the address of multiple tokens is always descending). Tokens may
not be used from more than one thread.
They are stored as a doubly linked list. The ParseContext
structure maintains a pointer to the earliest one.
*/
struct Token {
protected:
Token(ParseContext & context)
: context(&context),
ofs(context.ofs_), line(context.line_), col(context.col_),
prev(0), next(0)
{
//std::cerr << "creating token " << this
// << " at ofs " << ofs << " line " << line
// << " col " << col << std::endl;
//using namespace std;
//cerr << "next = " << next << " prev = " << prev << endl;
//cerr << "first = " << context.first_token_ << " last = "
// << context.last_token_ << endl;
prev = context.last_token_;
if (prev) prev->next = this;
else context.first_token_ = this;
context.last_token_ = this;
//cerr << "next = " << next << " prev = " << prev << endl;
//cerr << "first = " << context.first_token_ << " last = "
// << context.last_token_ << endl;
}
~Token() noexcept(false)
{
//std::cerr << "deleting token " << this << std::endl;
if (context)
throw MLDB::Exception("ParseContext::Token::~Token(): "
"active token was destroyed");
}
void apply(bool in_destructor = false)
{
//std::cerr << "applying token " << this <<
// " context = " << context << std::endl;
/* Apply the token. This reverts the context back to the
position that was saved when the token was created. */
if (!context) return; // nothing to do
//std::cerr << " check..." << std::endl;
/* We should be the last token. */
if (next != 0) {
//using namespace std;
//cerr << "next = " << next << " prev = " << prev << endl;
//cerr << "first = " << context->first_token_ << " last = "
// << context->last_token_ << endl;
context = 0;
// Don't throw if we're in a destructor, no matter how bad the
// result is.
if (in_destructor)
return;
throw MLDB::Exception("ParseContext::Token::apply(): logic error: "
"applied token was not the latest one");
}
//std::cerr << "going to ofs " << ofs << " line " << line
// << " col " << col << std::endl;
//std::cerr << " goto..." << std::endl;
context->goto_ofs(ofs, line, col, in_destructor);
//std::cerr << " remove..." << std::endl;
/* Finish off by removing it. */
remove(in_destructor);
}
void remove(bool in_destructor = false)
{
//std::cerr << "removing token " << this << std::endl;
if (!context) return; // already ignored
/* We need to remove this token from the token list. */
if (prev) prev->next = next;
else {
if (context->first_token_ != this) {
if (in_destructor)
return;
throw MLDB::Exception("ParseContext::Token::ignore(): "
"logic error: no prev but not first");
}
context->first_token_ = next;
}
if (next) next->prev = prev;
else {
if (context->last_token_ != this) {
if (in_destructor)
return;
throw MLDB::Exception("ParseContext::Token::ignore(): "
"logic error: no next but not last");
}
context->last_token_ = prev;
}
/* Maybe we can free some buffers since this token no longer
exists. */
context->free_buffers();
context = 0;
}
ParseContext * context; ///< The ParseContext object that owns us
uint64_t ofs; ///< Offset for this token
unsigned line; ///< Line number for this token
unsigned col; ///< Column number for this token
/* Token linked list */
Token * prev; ///< The previous token in the series
Token * next; ///< The next token in the series
friend struct ParseContext;
};
public:
/** A token that, unless ignore() is called, will cause the parse context
to revert back to its position once it goes out of scope. Used for
speculative parsing. */
struct Revert_Token : public Token {
Revert_Token(ParseContext & context)
: Token(context)
{
}
~Revert_Token()
{
if (context)
apply(true /* in destructor */);
}
void ignore()
{
remove();
}
using Token::apply;
};
/** A token that, unless stop() is called, will cause the parse context
to remember the text from there onwards. Used to force the
ParseContext to buffer text from a certain point onwards. */
struct Hold_Token : public Token {
Hold_Token(ParseContext & context)
: Token(context)
{
}
~Hold_Token()
{
if (context) remove(true /* in destructor */);
}
void stop()
{
remove();
}
std::string captured() const
{
if (!context)
throw MLDB::Exception("hold token hasn't captured any text");
return context->text_between(ofs, context->get_offset());
}
};
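/* Usage sketch (not part of the original header): Hold_Token keeps the
   buffered text alive so the exact characters that were consumed can be
   retrieved afterwards, e.g.:

       ParseContext::Hold_Token hold(context);
       double d = context.expect_double();
       std::string raw = hold.captured();   // the number exactly as written
*/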
private:
/** Go to the next buffer, creating and populating a new one if
necessary. */
void next_buffer();
/** Go to a given offset. It must be within the current set of buffers. */
void goto_ofs(uint64_t ofs, size_t line, size_t col,
bool in_destructor = false);
/** Return the text between the given offsets. */
std::string text_between(uint64_t ofs1, uint64_t ofs2) const;
/** Check if there are any buffers that can be freed, and do so if
possible. */
void free_buffers();
/** This contains a single contiguous block of text. */
struct Buffer {
Buffer(uint64_t ofs = 0, const char * pos = 0, size_t size = 0,
bool del = false)
: ofs(ofs), pos(pos), size(size), del(del)
{
}
uint64_t ofs; ///< Offset of first character
const char * pos; ///< First character
size_t size; ///< Length
bool del; ///< Do we delete it once finished with?
};
/** Read a new buffer if possible, and update everything. Doesn't
do anything if it fails. */
std::list<Buffer>::iterator read_new_buffer();
std::istream * stream_; ///< Stream we read from; zero if none
size_t chunk_size_; ///< Size of chunks we read in
Token * first_token_; ///< The earliest token
Token * last_token_; ///< The latest token
std::list<Buffer> buffers_;
std::list<Buffer>::iterator current_;
std::string filename_; ///< For reporting errors only
const char * cur_; ///< Current position (points inside buffer)
const char * ebuf_; ///< Position for the end of the buffer
size_t line_; ///< Line number at current position
size_t col_; ///< Column number at current position
uint64_t ofs_; ///< Offset of current position (chars since 0)
std::shared_ptr<std::istream> ownedStream_;
};
} // namespace MLDB
| {
"pile_set_name": "Github"
} |
using System;
using System.Diagnostics;
using System.IO;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using System.Xml;
using System.Xml.Serialization;
using AsyncTests.HttpClientTests.Addin;
using AsyncTests.HttpClientTests.Test;
using NDesk.Options;
namespace AsyncTests.ConsoleRunner
{
using Framework;
class MainClass
{
static bool xml;
public static void Main (string[] args)
{
Debug.AutoFlush = true;
Debug.Listeners.Add (new ConsoleTraceListener ());
bool server = false;
string prefix = "http://localhost:8088/";
var p = new OptionSet ().
Add ("server", v => server = true).Add ("prefix=", v => prefix = v).
Add ("xml", v => xml = true);
p.Parse (args);
var asm = typeof(Simple).Assembly;
if (server) {
Server.Start (asm, prefix).Wait ();
Thread.Sleep (System.Threading.Timeout.Infinite);
return;
}
try {
Run (asm).Wait ();
} catch (Exception ex) {
Console.WriteLine ("ERROR: {0}", ex);
}
}
static async Task Run (Assembly assembly)
{
var suite = await TestSuite.Create (assembly);
var results = await suite.Run (CancellationToken.None);
WriteResults (results);
}
static void WriteResults (TestResultCollection results)
{
if (xml) {
var serializer = new XmlSerializer (typeof(TestResultCollection));
serializer.Serialize (Console.Out, results);
Console.WriteLine ();
} else {
ResultPrinter.Print (Console.Out, results);
}
}
}
}
| {
"pile_set_name": "Github"
} |
import { Provider } from 'react-redux';
import { Router, browserHistory } from 'react-router';
import { syncHistoryWithStore } from 'react-router-redux';
import React, { Component } from 'react';
import routes from '../routes';
import store from '../stores/store';
// Create an enhanced history that syncs navigation events with the store
export const history = syncHistoryWithStore(browserHistory, store);
export default class Root extends Component {
render() {
return (
<Provider store={store}>
<Router history={history} routes={routes} />
</Provider>
);
}
}
| {
"pile_set_name": "Github"
} |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT License.
// See the LICENSE file in the project root for more information.
using System;
using System.Reactive.Linq;
using System.Threading;
using BenchmarkDotNet.Attributes;
namespace Benchmarks.System.Reactive
{
[MemoryDiagnoser]
public class RangeBenchmark
{
[Params(1, 10, 100, 1000, 10000, 100000, 1000000)]
public int N;
private int _store;
[Benchmark]
public void Range()
{
Observable.Range(1, N).Subscribe(v => Volatile.Write(ref _store, v));
}
}
}
| {
"pile_set_name": "Github"
} |
/// <reference types="cypress" />
import { skipOn } from '@cypress/skip-test'
let polyfill
// grab fetch polyfill from remote URL, could be also from a local package
before(() => {
const polyfillUrl = 'https://unpkg.com/unfetch/dist/unfetch.umd.js'
cy.request(polyfillUrl)
.then((response) => {
polyfill = response.body
})
})
// yields iframe's document
const getIframeDocument = () => {
return cy
.get('iframe[data-cy="the-frame"]')
.its('0.contentDocument').should('exist')
}
const getIframeBody = () => {
return getIframeDocument().its('body').should('not.be.undefined').then(cy.wrap)
}
const getIframeWindow = () => {
return cy
.get('iframe[data-cy="the-frame"]')
.its('0.contentWindow').should('exist')
}
const replaceIFrameFetchWithXhr = () => {
// see recipe "Stubbing window.fetch" in
// https://github.com/cypress-io/cypress-example-recipes
getIframeWindow().then((iframeWindow) => {
delete iframeWindow.fetch
// since the application code does not ship with a polyfill
// load a polyfilled "fetch" from the test
iframeWindow.eval(polyfill)
iframeWindow.fetch = iframeWindow.unfetch
// BUT to be able to spy on XHR or stub XHR requests
// from the iframe we need to copy OUR window.XMLHttpRequest into the iframe
cy.window().then((appWindow) => {
iframeWindow.XMLHttpRequest = appWindow.XMLHttpRequest
})
})
}
describe('Recipe: blogs__iframes', () => {
skipOn('firefox', () => {
it('spies on XHR request', () => {
cy.visit('index.html')
replaceIFrameFetchWithXhr()
// spy on XHR before clicking the button
cy.server()
cy.route('/todos/1').as('getTodo')
getIframeBody().find('#run-button').should('have.text', 'Try it').click()
// let's wait for XHR request to happen
// for more examples, see recipe "XHR Assertions"
// in repository https://github.com/cypress-io/cypress-example-recipes
cy.wait('@getTodo').its('response.body').should('deep.equal', {
completed: false,
id: 1,
title: 'delectus aut autem',
userId: 1,
})
// and we can confirm the UI has updated correctly
getIframeBody().find('#result').should('include.text', '"delectus aut autem"')
})
it('stubs XHR response', () => {
cy.visit('index.html')
replaceIFrameFetchWithXhr()
// spy on XHR before clicking the button
cy.server()
cy.route('/todos/1', {
completed: true,
id: 1,
title: 'write tests',
userId: 101,
}).as('getTodo')
getIframeBody().find('#run-button').should('have.text', 'Try it').click()
// and we can confirm the UI shows our stubbed response
getIframeBody().find('#result').should('include.text', '"write tests"')
})
})
})
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef __DELAYED_REF__
#define __DELAYED_REF__
/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
/*
* XXX: Qu: I really hate the design that ref_head and tree/data ref share the
* same ref_node structure.
* Ref_head is at a higher logic level than tree/data ref, and the duplicated
* bytenr/num_bytes in ref_node is really a waste of memory; they should be
* referred from ref_head.
* This gets more disgusting after we use list to store tree/data ref in
* ref_head. Must clean this mess up later.
*/
struct btrfs_delayed_ref_node {
/*
* ref_head use rb tree, stored in ref_root->href.
* indexed by bytenr
*/
struct rb_node rb_node;
/*data/tree ref use list, stored in ref_head->ref_list. */
struct list_head list;
/* the starting bytenr of the extent */
u64 bytenr;
/* the size of the extent */
u64 num_bytes;
/* seq number to keep track of insertion order */
u64 seq;
/* ref count on this data structure */
atomic_t refs;
/*
* how many refs is this entry adding or deleting. For
* head refs, this may be a negative number because it is keeping
* track of the total mods done to the reference count.
* For individual refs, this will always be a positive number
*
* It may be more than one, since it is possible for a single
* parent to have more than one ref on an extent
*/
int ref_mod;
unsigned int action:8;
unsigned int type:8;
/* is this node still in the rbtree? */
unsigned int is_head:1;
unsigned int in_tree:1;
};
struct btrfs_delayed_extent_op {
struct btrfs_disk_key key;
u8 level;
bool update_key;
bool update_flags;
bool is_data;
u64 flags_to_set;
};
/*
* the head refs are used to hold a lock on a given extent, which allows us
* to make sure that only one process is running the delayed refs
* at a time for a single extent. They also store the sum of all the
* reference count modifications we've queued up.
*/
struct btrfs_delayed_ref_head {
struct btrfs_delayed_ref_node node;
/*
* the mutex is held while running the refs, and it is also
* held when checking the sum of reference modifications.
*/
struct mutex mutex;
spinlock_t lock;
struct list_head ref_list;
struct rb_node href_node;
struct btrfs_delayed_extent_op *extent_op;
/*
* This is used to track the final ref_mod from all the refs associated
* with this head ref, this is not adjusted as delayed refs are run,
* this is meant to track if we need to do the csum accounting or not.
*/
int total_ref_mod;
/*
* For qgroup reserved space freeing.
*
* ref_root and reserved will be recorded after
* BTRFS_ADD_DELAYED_EXTENT is called.
* And will be used to free reserved qgroup space at
* run_delayed_refs() time.
*/
u64 qgroup_ref_root;
u64 qgroup_reserved;
/*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
* until the delayed ref is processed. must_insert_reserved is
* used to flag a delayed ref so the accounting can be updated
* when a full insert is done.
*
* It is possible the extent will be freed before it is ever
* inserted into the extent allocation tree. In this case
* we need to update the in ram accounting to properly reflect
* the free has happened.
*/
unsigned int must_insert_reserved:1;
unsigned int is_data:1;
unsigned int processing:1;
};
struct btrfs_delayed_tree_ref {
struct btrfs_delayed_ref_node node;
u64 root;
u64 parent;
int level;
};
struct btrfs_delayed_data_ref {
struct btrfs_delayed_ref_node node;
u64 root;
u64 parent;
u64 objectid;
u64 offset;
};
struct btrfs_delayed_ref_root {
/* head ref rbtree */
struct rb_root href_root;
/* dirty extent records */
struct rb_root dirty_extent_root;
/* this spin lock protects the rbtree and the entries inside */
spinlock_t lock;
/* how many delayed ref updates we've queued, used by the
* throttling code
*/
atomic_t num_entries;
/* total number of head nodes in tree */
unsigned long num_heads;
/* total number of head nodes ready for processing */
unsigned long num_heads_ready;
u64 pending_csums;
/*
* set when the tree is flushing before a transaction commit,
* used by the throttling code to decide if new updates need
* to be run right away
*/
int flushing;
u64 run_delayed_start;
/*
* To make qgroup skip a given root.
* This is for snapshot, as btrfs_qgroup_inherit() will manually
* modify counters for snapshot and its source, so we should skip
* the snapshot in new_root/old_roots or it will get calculated twice
*/
u64 qgroup_to_skip;
};
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);
static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}
static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
if (op)
kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
WARN_ON(atomic_read(&ref->refs) == 0);
if (atomic_dec_and_test(&ref->refs)) {
WARN_ON(ref->in_tree);
switch (ref->type) {
case BTRFS_TREE_BLOCK_REF_KEY:
case BTRFS_SHARED_BLOCK_REF_KEY:
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
case BTRFS_SHARED_DATA_REF_KEY:
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
break;
case 0:
kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
break;
default:
BUG();
}
}
}
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 ref_root, u64 bytenr, u64 num_bytes);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
mutex_unlock(&head->mutex);
}
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
u64 seq);
/*
* a node might live in a head or a regular ref, this lets you
* test for the proper type to use.
*/
static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
return node->is_head;
}
/*
* helper functions to cast a node into its container
*/
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_tree_ref, node);
}
static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_data_ref, node);
}
static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
WARN_ON(!btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif
| {
"pile_set_name": "Github"
} |
/**
* @fileoverview Options configuration for optionator.
* @author George Zahariev
*/
"use strict";
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
const optionator = require("optionator");
//------------------------------------------------------------------------------
// Initialization and Public Interface
//------------------------------------------------------------------------------
// exports "parse(args)", "generateHelp()", and "generateHelpForOption(optionName)"
module.exports = optionator({
prepend: "eslint [options] file.js [file.js] [dir]",
defaults: {
concatRepeatedArrays: true,
mergeRepeatedObjects: true
},
options: [
{
heading: "Basic configuration"
},
{
option: "eslintrc",
type: "Boolean",
default: "true",
description: "Disable use of configuration from .eslintrc.*"
},
{
option: "config",
alias: "c",
type: "path::String",
description: "Use this configuration, overriding .eslintrc.* config options if present"
},
{
option: "env",
type: "[String]",
description: "Specify environments"
},
{
option: "ext",
type: "[String]",
default: ".js",
description: "Specify JavaScript file extensions"
},
{
option: "global",
type: "[String]",
description: "Define global variables"
},
{
option: "parser",
type: "String",
description: "Specify the parser to be used"
},
{
option: "parser-options",
type: "Object",
description: "Specify parser options"
},
{
heading: "Specifying rules and plugins"
},
{
option: "rulesdir",
type: "[path::String]",
description: "Use additional rules from this directory"
},
{
option: "plugin",
type: "[String]",
description: "Specify plugins"
},
{
option: "rule",
type: "Object",
description: "Specify rules"
},
{
heading: "Fixing problems"
},
{
option: "fix",
type: "Boolean",
default: false,
description: "Automatically fix problems"
},
{
option: "fix-dry-run",
type: "Boolean",
default: false,
description: "Automatically fix problems without saving the changes to the file system"
},
{
heading: "Ignoring files"
},
{
option: "ignore-path",
type: "path::String",
description: "Specify path of ignore file"
},
{
option: "ignore",
type: "Boolean",
default: "true",
description: "Disable use of ignore files and patterns"
},
{
option: "ignore-pattern",
type: "[String]",
description: "Pattern of files to ignore (in addition to those in .eslintignore)",
concatRepeatedArrays: [true, {
oneValuePerFlag: true
}]
},
{
heading: "Using stdin"
},
{
option: "stdin",
type: "Boolean",
default: "false",
description: "Lint code provided on <STDIN>"
},
{
option: "stdin-filename",
type: "String",
description: "Specify filename to process STDIN as"
},
{
heading: "Handling warnings"
},
{
option: "quiet",
type: "Boolean",
default: "false",
description: "Report errors only"
},
{
option: "max-warnings",
type: "Int",
default: "-1",
description: "Number of warnings to trigger nonzero exit code"
},
{
heading: "Output"
},
{
option: "output-file",
alias: "o",
type: "path::String",
description: "Specify file to write report to"
},
{
option: "format",
alias: "f",
type: "String",
default: "stylish",
description: "Use a specific output format"
},
{
option: "color",
type: "Boolean",
alias: "no-color",
description: "Force enabling/disabling of color"
},
{
heading: "Inline configuration comments"
},
{
option: "inline-config",
type: "Boolean",
default: "true",
description: "Prevent comments from changing config or rules"
},
{
option: "report-unused-disable-directives",
type: "Boolean",
default: false,
description: "Adds reported errors for unused eslint-disable directives"
},
{
heading: "Caching"
},
{
option: "cache",
type: "Boolean",
default: "false",
description: "Only check changed files"
},
{
option: "cache-file",
type: "path::String",
default: ".eslintcache",
description: "Path to the cache file. Deprecated: use --cache-location"
},
{
option: "cache-location",
type: "path::String",
description: "Path to the cache file or directory"
},
{
heading: "Miscellaneous"
},
{
option: "init",
type: "Boolean",
default: "false",
description: "Run config initialization wizard"
},
{
option: "debug",
type: "Boolean",
default: false,
description: "Output debugging information"
},
{
option: "help",
alias: "h",
type: "Boolean",
description: "Show help"
},
{
option: "version",
alias: "v",
type: "Boolean",
description: "Output the version number"
},
{
option: "print-config",
type: "path::String",
description: "Print the configuration for the given file"
}
]
});
| {
"pile_set_name": "Github"
} |
/*
* Definitions for libbde
*
* Copyright (C) 2011-2020, Joachim Metz <[email protected]>
*
* Refer to AUTHORS for acknowledgements.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#if !defined( _LIBBDE_DEFINITIONS_H )
#define _LIBBDE_DEFINITIONS_H
#include <libbde/types.h>
#define LIBBDE_VERSION @VERSION@
/* The version string
*/
#define LIBBDE_VERSION_STRING "@VERSION@"
/* The file access
* bit 1 set to 1 for read access
* bit 2 set to 1 for write access
* bit 3-8 not used
*/
enum LIBBDE_ACCESS_FLAGS
{
LIBBDE_ACCESS_FLAG_READ = 0x01,
/* Reserved: not supported yet */
LIBBDE_ACCESS_FLAG_WRITE = 0x02
};
/* The file access macros
*/
#define LIBBDE_OPEN_READ ( LIBBDE_ACCESS_FLAG_READ )
/* Reserved: not supported yet */
#define LIBBDE_OPEN_WRITE ( LIBBDE_ACCESS_FLAG_WRITE )
/* Reserved: not supported yet */
#define LIBBDE_OPEN_READ_WRITE ( LIBBDE_ACCESS_FLAG_READ | LIBBDE_ACCESS_FLAG_WRITE )
/* The version
*/
enum LIBBDE_VERSIONS
{
LIBBDE_VERSION_WINDOWS_VISTA = 6,
LIBBDE_VERSION_WINDOWS_7 = 7,
LIBBDE_VERSION_TO_GO = (int) 'T'
};
/* The encryption methods
*/
enum LIBBDE_ENCRYPTION_METHODS
{
LIBBDE_ENCRYPTION_METHOD_AES_128_CBC_DIFFUSER = 0x8000UL,
LIBBDE_ENCRYPTION_METHOD_AES_256_CBC_DIFFUSER = 0x8001UL,
LIBBDE_ENCRYPTION_METHOD_AES_128_CBC = 0x8002UL,
LIBBDE_ENCRYPTION_METHOD_AES_256_CBC = 0x8003UL,
LIBBDE_ENCRYPTION_METHOD_AES_128_XTS = 0x8004UL,
LIBBDE_ENCRYPTION_METHOD_AES_256_XTS = 0x8005UL
};
/* The key protection types
*/
enum LIBBDE_KEY_PROTECTION_TYPES
{
LIBBDE_KEY_PROTECTION_TYPE_CLEAR_KEY = 0x0000,
LIBBDE_KEY_PROTECTION_TYPE_TPM = 0x0100,
LIBBDE_KEY_PROTECTION_TYPE_STARTUP_KEY = 0x0200,
LIBBDE_KEY_PROTECTION_TYPE_TPM_AND_PIN = 0x0500,
LIBBDE_KEY_PROTECTION_TYPE_RECOVERY_PASSWORD = 0x0800,
LIBBDE_KEY_PROTECTION_TYPE_PASSWORD = 0x2000,
};
#endif /* !defined( _LIBBDE_DEFINITIONS_H ) */
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
-
- $Id$
-
- This file is part of the OpenLink Software Virtuoso Open-Source (VOS)
- project.
-
- Copyright (C) 1998-2020 OpenLink Software
-
- This project is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; only version 2 of the License, dated June 1991.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-->
<rdf:RDF xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:owl ="http://www.w3.org/2002/07/owl#"
xmlns:virtrdf="http://www.openlinksw.com/schemas/virtrdf#"
xmlns:foaf="http://xmlns.com/foaf/0.1/">
<owl:Ontology rdf:about="http://xmlns.com/foaf/0.1/">
<rdfs:label>FOAF</rdfs:label>
<rdfs:comment>'Friend Of A Friend' descriptions of people, groups, documents and projects.</rdfs:comment>
<virtrdf:catName>Profiles</virtrdf:catName>
<virtrdf:version>1.00</virtrdf:version>
</owl:Ontology>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/accountName">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Account Name</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/aimChatID">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>AIM chat ID</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/currentProject-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Current Project URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/currentProject-name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Current Project Name</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/gender">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Gender</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/givenname">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>GivenName</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/firstName">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>First Name</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/family_name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Family Name</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/homepage-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Homepage</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/icqChatID">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>ICQ chat ID</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/interest">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Interest</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/jabberID">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Jabber ID</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/known-name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Knows</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/maker-name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Made By</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/mbox-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>e-mail</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/member-name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Member-Of</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/msnChatID">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>MSN Chat ID</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/name">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Name</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/nick">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>nick</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/page">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Web Page</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/pastProject-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Past Project URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/phone">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Phone</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/primaryTopic-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Primary Topic URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/surname">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Surname</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/title">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Title</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/topic">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Topic URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/topic_interest-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Topic of Interest, URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/weblog-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Weblog URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/workInfoHomepage-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Work Info URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/workplaceHomepage-uri">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>list</virtrdf:cardinality>
<virtrdf:label>Workplace URI</virtrdf:label>
</rdf:Property>
<rdf:Property rdf:about="http://xmlns.com/foaf/0.1/yahooChatID">
<rdfs:Range rdf:resource="http://www.w3.org/2001/XMLSchema#string"/>
<virtrdf:cardinality>single</virtrdf:cardinality>
<virtrdf:label>Yahoo chat ID</virtrdf:label>
</rdf:Property>
</rdf:RDF>
| {
"pile_set_name": "Github"
} |
---
Description: Computes the product of two spherical harmonics functions (f and g). Both functions are of order N = 4.
ms.assetid: 05427a18-447e-45d7-a851-e580298c9a1f
title: D3DXSHMultiply4 function (D3DX10Math.h)
ms.topic: reference
ms.date: 05/31/2018
topic_type:
- APIRef
- kbSyntax
api_name:
- D3DXSHMultiply4
api_type:
- LibDef
api_location:
- D3DX10.lib
- D3DX10.dll
---
# D3DXSHMultiply4 function
Computes the product of two spherical harmonics functions (*f* and *g*). Both functions are of order N = 4.
## Syntax
```C++
FLOAT* D3DXSHMultiply4(
_In_ FLOAT *pOut,
_In_ const FLOAT *pF,
_In_ const FLOAT *pG
);
```
## Parameters
<dl> <dt>
*pOut* \[in\]
</dt> <dd>
Type: **[**FLOAT**](../winprog/windows-data-types.md)\***
Pointer to the output SH coefficients — basis function *Y*ₗₘ is stored at l² + *m* + l. The order *N* determines the length of the array, where there should always be *N*² coefficients.
</dd> <dt>
*pF* \[in\]
</dt> <dd>
Type: **const [**FLOAT**](../winprog/windows-data-types.md)\***
Input SH coefficients for first function.
</dd> <dt>
*pG* \[in\]
</dt> <dd>
Type: **const [**FLOAT**](../winprog/windows-data-types.md)\***
Second set of input SH coefficients.
</dd> </dl>
## Return value
Type: **[**FLOAT**](../winprog/windows-data-types.md)\***
Pointer to SH output coefficients.
## Remarks
The product of two SH functions of order N = 4 generates an SH function of order 2 × *N* - 1 = 7, but the results are truncated. This means that the product commutes ( *f* × *g* = *g* × *f* ) but doesn't associate ( *f* × ( *g* × *h* ) ≠ ( *f* × *g* ) × *h* ).
This function uses the following equation:
```
pOut[i] = int(y_i(s) * f(s) * g(s))
```
where y\_i(s) is the ith SH basis function, and where f(s) and g(s) use the following SH function:
```
sum_i(y_i(s)*c_i)
```
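A hypothetical usage sketch (not part of the original reference) follows. An order N = 4 function is stored as N² = 16 coefficients, so each array below holds 16 values; filling the input arrays (for example with another D3DX SH routine) is assumed to have happened elsewhere.
```C++
// Coefficients of the two input functions, assumed to be filled elsewhere.
FLOAT f[16];
FLOAT g[16];
FLOAT product[16];

// product receives the truncated, order-4 coefficients of f * g.
D3DXSHMultiply4(product, f, g);
```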
## Requirements
| Requirement | Value |
|--------------------|-----------------------------------------------------------------------------------------|
| Header<br/> | <dl> <dt>D3DX10Math.h</dt> </dl> |
| Library<br/> | <dl> <dt>D3DX10.lib</dt> </dl> |
## See also
<dl> <dt>
[Math Functions](d3d10-graphics-reference-d3dx10-functions-math.md)
</dt> </dl>
| {
"pile_set_name": "Github"
} |
} |