text
stringlengths 2
100k
| meta
dict |
---|---|
// RUN: %clang_cc1 -fsyntax-only -verify -Wno-undef %s
// rdar://2362963
// Tests that '#pragma GCC diagnostic' can remap the severity of -Wundef
// mid-file, and that malformed pragmas are themselves diagnosed.
// -Wundef starts out disabled (-Wno-undef on the RUN line), so no diagnostic.
#if FOO // ok.
#endif
// Promote -Wundef to a warning from this point on.
#pragma GCC diagnostic warning "-Wundef"
#if FOO // expected-warning {{'FOO' is not defined}}
#endif
// The option name may be split across adjacent string literals.
#pragma GCC diagnostic ignored "-Wun" "def"
#if FOO // ok.
#endif
// Promote -Wundef to a hard error.
#pragma GCC diagnostic error "-Wundef"
#if FOO // expected-error {{'FOO' is not defined}}
#endif
// The severity keyword is not macro-expanded: 'foo' stays invalid even
// though it is #defined to 'error' (see the diagnostic on the next pragma).
#define foo error
#pragma GCC diagnostic foo "-Wundef" // expected-warning {{pragma diagnostic expected 'error', 'warning', 'ignored', 'fatal', 'push', or 'pop'}}
#pragma GCC diagnostic error 42 // expected-error {{expected string literal in pragma diagnostic}}
#pragma GCC diagnostic error "-Wundef" 42 // expected-warning {{unexpected token in pragma diagnostic}}
#pragma GCC diagnostic error "invalid-name" // expected-warning {{pragma diagnostic expected option name (e.g. "-Wundef")}}
#pragma GCC diagnostic error "-Winvalid-name" // expected-warning {{unknown warning group '-Winvalid-name', ignored}}
| {
"pile_set_name": "Github"
} |
# ---- compiler flags --------------------------------------------------------
# These are the *_INIT variables, so user/cache-provided flags still take
# precedence; string(APPEND) preserves anything already set by CMake.
string(APPEND CMAKE_CXX_FLAGS_INIT " /D_UNICODE /DUNICODE /MP /DWIN32 /D_WINDOWS /W3 /GR /Zi /EHsc /DTARGET_WINDOWS")
# UWP (Windows Store) build: WINAPI_FAMILY=2 selects the app partition.
string(APPEND CMAKE_CXX_FLAGS_INIT " /DWINAPI_FAMILY=2 /DTARGET_WINDOWS_STORE /D_WINSOCK_DEPRECATED_NO_WARNINGS /D_CRT_NONSTDC_NO_DEPRECATE")
string(APPEND CMAKE_CXX_FLAGS_DEBUG_INIT " /D_DEBUG /MDd /Ob0 /Od /RTC1 /D_ITERATOR_DEBUG_LEVEL=0")
string(APPEND CMAKE_CXX_FLAGS_RELEASE_INIT " /MD /Ox /Ob2 /Oi /Ot /Oy /GL /DNDEBUG")
# ---- linker flags ----------------------------------------------------------
string(APPEND CMAKE_SHARED_LINKER_FLAGS_INIT " /DYNAMICBASE /NXCOMPAT /APPCONTAINER")
# ---- win32 (x86) specific flags --------------------------------------------
# NOTE: %Platform% is read at configure time only. The RHS literal is quoted
# so that an unrelated variable named X86 is never dereferenced (CMP0054);
# the previous unquoted form relied on no such variable existing.
if("$ENV{Platform}" STREQUAL "X86")
  string(APPEND CMAKE_CXX_FLAGS_INIT " /arch:SSE2")
  string(APPEND CMAKE_SHARED_LINKER_FLAGS_INIT " /SAFESEH")
endif()
| {
"pile_set_name": "Github"
} |
{"data": [{"title": "Paris_France", "paragraphs": [
{"context": "Paris is the largest city in France with over 2 million inhabitants. It is the capital of France.",
"qas": [
{"answers": [{"answer_start": 0, "text": "Paris"}, {"answer_start": 0, "text": "Paris"}], "question": "What is the capital of France?", "id": "1e39"}
]
}]}
]}
| {
"pile_set_name": "Github"
} |
$:.push File.expand_path("../lib", __FILE__)

# Maintain your gem's version:
require "writefully/version"

# Describe your gem and declare its dependencies:
Gem::Specification.new do |spec|
  spec.name        = "writefully"
  spec.version     = Writefully::VERSION
  spec.authors     = ["Zack Siri"]
  spec.email       = ["[email protected]"]
  spec.homepage    = "http://www.codemy.net"
  spec.summary     = %q{Makes publishing content easier by using black magic}
  spec.description = %q{Allows developer to quickly publish to their site using git hooks}
  spec.license     = "MIT"

  spec.files       = Dir["{app,config,db,lib,scripts,bin}/**/*", "MIT-LICENSE", "Rakefile", "README.md"]
  spec.executables = ["writefully", "wf-write", "wf-app"]
  spec.test_files  = Dir["spec/**/*"]

  # Version-constrained runtime dependencies.
  spec.add_dependency 'rails', '>= 4.2'
  spec.add_dependency 'coffee-rails', '~> 4.0'
  spec.add_dependency 'sass-rails', '~> 5.0'
  spec.add_dependency 'sprockets', '= 2.11.0'

  # Unconstrained runtime dependencies, kept in the original declaration order.
  %w[celluloid fog unf].each { |name| spec.add_dependency name }
  spec.add_dependency 'listen', '~> 2.0'
  %w[thor pg hashie friendly_id github_api connection_pool
     redis redis-namespace activerecord-import jquery-rails
     turbolinks transponder bootstrap-sass].each { |name| spec.add_dependency name }

  # Development-only dependencies.
  %w[pry rspec-rails rspec-its capybara].each { |name| spec.add_development_dependency name }
end
| {
"pile_set_name": "Github"
} |
--TEST--
Test open_basedir configuration
--SKIPIF--
<?php
// Windows-only variation of the open_basedir mkdir() test.
if (substr(PHP_OS, 0, 3) != 'WIN') {
die('skip Windows only variation');
}
?>
--INI--
open_basedir=.
--FILE--
<?php
// NOTE(review): open_basedir.inc is not in view here - presumably its helpers
// create the test/ and test/ok/ tree used below; confirm against the .inc.
require_once "open_basedir.inc";
$initdir = getcwd();
test_open_basedir_before("mkdir");
// Relative paths escaping the allowed root (".") must fail with an
// open_basedir warning and return false (see EXPECTF below).
var_dump(mkdir("../bad/blah"));
var_dump(mkdir("../blah"));
var_dump(mkdir("../bad/./blah"));
var_dump(mkdir("./.././blah"));
// Absolute paths inside the allowed root must succeed.
var_dump(mkdir($initdir."/test/ok/blah"));
var_dump(rmdir($initdir."/test/ok/blah"));
test_open_basedir_after("mkdir");
?>
--CLEAN--
<?php
// Remove the directories created by the helpers above.
require_once "open_basedir.inc";
delete_directories();
?>
--EXPECTF--
*** Testing open_basedir configuration [mkdir] ***
bool(true)
bool(true)
bool(true)
bool(true)
bool(true)
Warning: mkdir(): open_basedir restriction in effect. File(../bad/blah) is not within the allowed path(s): (.) in %s on line %d
bool(false)
Warning: mkdir(): open_basedir restriction in effect. File(../blah) is not within the allowed path(s): (.) in %s on line %d
bool(false)
Warning: mkdir(): open_basedir restriction in effect. File(../bad/./blah) is not within the allowed path(s): (.) in %s on line %d
bool(false)
Warning: mkdir(): open_basedir restriction in effect. File(./.././blah) is not within the allowed path(s): (.) in %s on line %d
bool(false)
bool(true)
bool(true)
*** Finished testing open_basedir configuration [mkdir] ***
| {
"pile_set_name": "Github"
} |
<?php
/**
* @file
* short_scale_formatter.module
*/
/**
* Implements hook_theme().
*/
function short_scale_formatter_theme($existing, $type, $theme, $path) {
  // Default variables for the short_scale_number theme hook; theme callers
  // override these per render array.
  $variables = array(
    'wrapper_element' => NULL,
    'wrapper_title_text' => NULL,
    'number' => 0,
    'formatted' => NULL,
    'character' => NULL,
  );
  return array('short_scale_number' => array('variables' => $variables));
}
/**
* Implements hook_field_formatter_info().
*/
function short_scale_formatter_field_formatter_info() {
  // Defaults follow the common English short-scale abbreviations.
  $default_settings = array(
    'wrapper_element' => 'span',
    'wrapper_title_text' => '',
    'thousand' => 'K',
    'million' => 'M',
    'billion' => 'B',
    'trillion' => 'T',
  );
  $info = array();
  $info['short_scale_formatter'] = array(
    'label' => t('Short Scale number suffix'),
    'field types' => array('number_integer'),
    'settings' => $default_settings,
  );
  return $info;
}
/**
* Implements hook_field_formatter_settings_form().
*/
function short_scale_formatter_field_formatter_settings_form($field, $instance, $view_mode, $form, &$form_state) {
  $settings = $instance['display'][$view_mode]['settings'];
  $elements = array();
  $elements['wrapper_element'] = array(
    '#type' => 'textfield',
    '#title' => t('Wrapper element'),
    '#default_value' => $settings['wrapper_element'],
    '#required' => TRUE,
  );
  $elements['wrapper_title_text'] = array(
    '#type' => 'textfield',
    '#title' => t('Wrapper title attribute text'),
    '#default_value' => $settings['wrapper_title_text'],
    '#description' => t('You can use @count within the string which will be replaced with the original number.'),
  );
  // The four scale-character fields differ only by key and title, so build
  // them from a map instead of repeating the element definition.
  $scale_titles = array(
    'thousand' => t('Character for thousands'),
    'million' => t('Character for millions'),
    'billion' => t('Character for billions'),
    'trillion' => t('Character for trillions'),
  );
  foreach ($scale_titles as $key => $title) {
    $elements[$key] = array(
      '#type' => 'textfield',
      '#title' => $title,
      '#default_value' => $settings[$key],
      '#required' => TRUE,
    );
  }
  return $elements;
}
/**
* Implements hook_field_formatter_settings_summary().
*/
function short_scale_formatter_field_formatter_settings_summary($field, $instance, $view_mode) {
  // One summary line per configured (non-empty) setting.
  $settings = $instance['display'][$view_mode]['settings'];
  $summary = array();
  if (!empty($settings['wrapper_element'])) {
    $summary[] = t('<strong>Wrapper element:</strong> @element', array('@element' => $settings['wrapper_element']));
  }
  if (!empty($settings['wrapper_title_text'])) {
    $summary[] = t('<strong>Text for title attribute:</strong> @text', array('@text' => $settings['wrapper_title_text']));
  }
  $scales = array(
    'thousand' => 'Thousands',
    'million' => 'Millions',
    'billion' => 'Billions',
    'trillion' => 'Trillions',
  );
  foreach ($scales as $key => $label) {
    if (empty($settings[$key])) {
      continue;
    }
    $summary[] = t('<strong>@title:</strong> "@char"', array('@title' => $label, '@char' => $settings[$key]));
  }
  return empty($summary) ? '' : implode('<br/>', $summary);
}
/**
* Implements hook_field_formatter_view().
*/
function short_scale_formatter_field_formatter_view($entity_type, $entity, $field, $instance, $langcode, $items, $display) {
$element = array();
$settings = $display['settings'];
switch ($display['type']) {
case 'short_scale_formatter':
// Properties shared by every item's render array; the per-item '#number',
// '#formatted' and '#character' values are merged in below.
$variables = array(
'#theme' => 'short_scale_number',
'#wrapper_element' => check_plain($settings['wrapper_element']),
'#wrapper_title_text' => check_plain($settings['wrapper_title_text']),
);
// Scale characters in ascending order (thousand .. trillion), sanitized
// once here rather than per item.
$characters = array($settings['thousand'], $settings['million'], $settings['billion'], $settings['trillion']);
$characters = array_map('check_plain', $characters);
foreach ($items as $delta => $item) {
$variables['#number'] = $item['value'];
$element[$delta] = array_merge($variables, short_scale_formatter_format_number($item['value'], $characters));
}
break;
}
return $element;
}
/**
* short_scale_formatter_format_number().
*
* This function does the work of formatting the number. This code is based off
* of code from the following URL but reworked slightly for what we want. No
* rounding up, 1 decimal place.
* http://stackoverflow.com/questions/4753251/how-to-go-about-formatting-1200-to-1-2k-in-java
*/
function short_scale_formatter_format_number($n, $characters, $iteration = 0) {
  // Numbers below the first scale step are returned unchanged, with no
  // suffix character.
  if ($n < 1000 && $iteration == 0) {
    return array(
      '#formatted' => $n,
      '#character' => NULL,
    );
  }
  // Dividing in two steps keeps one decimal place; floor() below truncates
  // rather than rounds (e.g. 1999 -> "1.9K", not "2K").
  $d = ($n / 100) / 10.0;
  // Stop when the value fits the current scale, or when there are no more
  // scale characters. Previously the recursion could index past the end of
  // $characters (values >= 10^(3 * count($characters))), emitting a PHP
  // notice and a suffix-less string; now the largest character is reused.
  if ($d < 1000 || $iteration >= count($characters) - 1) {
    return array(
      '#formatted' => (floor($d * 10) / 10) . $characters[$iteration],
      '#character' => strtolower($characters[$iteration]),
    );
  }
  return short_scale_formatter_format_number($d, $characters, $iteration + 1);
}
/**
* Theme callback function.
*/
function theme_short_scale_number($variables) {
  // Wrapper tag name, or NULL to emit the bare formatted value.
  $tag = empty($variables['wrapper_element']) ? NULL : $variables['wrapper_element'];
  $output = '';
  if ($tag) {
    $classes = 'number';
    if (!empty($variables['character'])) {
      $classes .= ' number-scale-' . $variables['character'];
    }
    $title_attribute = '';
    if (!empty($variables['wrapper_title_text'])) {
      $title_attribute = ' title="' . t($variables['wrapper_title_text'], array('@count' => $variables['number'])) . '"';
    }
    $output .= '<' . $tag . ' class="' . $classes . '"' . $title_attribute . '>';
  }
  $output .= $variables['formatted'];
  if ($tag) {
    $output .= '</' . $tag . '>';
  }
  return $output;
}
| {
"pile_set_name": "Github"
} |
-- phpMyAdmin SQL Dump
-- version 4.1.14
-- http://www.phpmyadmin.net
--
-- Host: 127.0.0.1
-- Generation Time: 2019-03-26 06:17:04
-- Server version: 5.6.17
-- PHP Version: 5.5.12
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
--
-- Database: `test`
--
-- --------------------------------------------------------
--
-- Table structure for table `item_table`
--
CREATE TABLE IF NOT EXISTS `item_table` (
`ID` int(11) NOT NULL AUTO_INCREMENT,
`title` varchar(32) NOT NULL,
`price` float NOT NULL,
`count` int(11) NOT NULL,
PRIMARY KEY (`ID`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 AUTO_INCREMENT=3 ;
--
-- Dumping data for table `item_table`
--
INSERT INTO `item_table` (`ID`, `title`, `price`, `count`) VALUES
(1, '运动服', 199, 999),
(2, '运动裤', 299, 998);
-- --------------------------------------------------------
--
-- Table structure for table `user_table`
--
CREATE TABLE IF NOT EXISTS `user_table` (
`ID` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(32) NOT NULL,
`password` varchar(32) NOT NULL,
PRIMARY KEY (`ID`),
UNIQUE KEY `username` (`username`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 AUTO_INCREMENT=10 ;
--
-- Dumping data for table `user_table`
-- NOTE(review): passwords are stored in plain text - acceptable only for
-- sample/test data, never for production accounts.
--
INSERT INTO `user_table` (`ID`, `username`, `password`) VALUES
(1, 'lee', '123456'),
(2, 'chen', '654321'),
(9, 'kate', '666666'),
(8, 'lily', '888888');
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
| {
"pile_set_name": "Github"
} |
// Demo-page wiring for slide/popup animations.
// NOTE(review): AnimateTransition is not defined in this file - presumably a
// global provided by the animation library; confirm its option contract.
document.addEventListener('DOMContentLoaded', function (event) {
var button = document.querySelector('.animate-slide'),
closeButton = document.querySelector('.animation-container__close'),
radios = document.getElementsByName('animation'),
animationTypeSpan = document.querySelectorAll('[data-animation-type]'),
select = document.getElementById('animation-select'),
mainBlock = document.querySelector('[data-block="out"]'),
customOverlay = document.querySelector('.custom-overlay'),
// Selector strings (not elements) - passed through to AnimateTransition.
container = '.animation-container',
popup = '[data-block="in"]',
// Shared between the open (animateBlocks) and close (closePopup) handlers.
animation,
showOverlay,
radioButtonValue;
/**
 * Opens the popup using the animation chosen via the radio list or the
 * select box - whichever control is currently displayed.
 * @param {Event} event
 */
function animateBlocks(event) {
showOverlay = document.getElementById('is_overlay').checked;
if (getComputedStyle(document.getElementById('animation-list')).display !== 'none') {
animation = document.querySelector('input[type=radio]:checked').getAttribute('id');
} else {
if (getComputedStyle(document.getElementById('animation-select')).display !== 'none') {
animation = select.options[select.selectedIndex].value;
}
}
document.querySelector(popup).setAttribute('data-type', 'popup');
AnimateTransition({
container: container,
blockIn: popup,
animation: animation,
onTransitionStart: function (blockIn, blockOut, container, event) {
// Disable the trigger so the popup cannot be re-opened while shown.
button.setAttribute('disabled', 'disabled');
mainBlock.style.backgroundImage = 'none';
if (showOverlay) {
customOverlay.style.display = 'block';
}
},
onTransitionEnd: function (blockIn, blockOut, container, event) {
}
});
}
button.addEventListener('click', animateBlocks);
/**
 * Closes popup
 * @param {Event} event
 */
function closePopup(event) {
// Derive the closing animation from the opening one by rewriting the
// trailing "-in" segment to "-out" (e.g. "fade-in" -> "fade-out").
animation = animation.replace(/-in([^-in]*)$/, '-out$1');
showOverlay = document.getElementById('is_overlay').checked;
AnimateTransition({
container: container,
blockOut: popup,
animation: animation,
showOverlay: showOverlay,
onTransitionEnd: function (blockIn, blockOut, container, event) {
// Note: `container` here is the callback's parameter (used with
// appendChild), shadowing the outer selector string of the same name.
container.appendChild(blockOut);
button.removeAttribute('disabled');
blockOut.removeAttribute('data-type');
mainBlock.style.backgroundImage = '';
if (showOverlay) {
customOverlay.style.display = 'none';
}
}
});
}
closeButton.addEventListener('click', closePopup);
/**
 * Changes animation type in code example on radio button click
 * @param {Event} event
 */
function radioClick(event) {
radioButtonValue = event.target.id;
for (var index = 0; index < animationTypeSpan.length; index += 1) {
animationTypeSpan[index].innerText = "'" + radioButtonValue + "'";
}
}
for (var index = 0; index < radios.length; index += 1) {
radios[index].addEventListener('click', radioClick);
}
}); | {
"pile_set_name": "Github"
} |
// +build arm,linux
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types_linux.go
package unix
const (
sizeofPtr = 0x4
sizeofShort = 0x2
sizeofInt = 0x4
sizeofLong = 0x4
sizeofLongLong = 0x8
PathMax = 0x1000
)
type (
_C_short int16
_C_int int32
_C_long int32
_C_long_long int64
)
type Timespec struct {
Sec int32
Nsec int32
}
type Timeval struct {
Sec int32
Usec int32
}
type Timex struct {
Modes uint32
Offset int32
Freq int32
Maxerror int32
Esterror int32
Status int32
Constant int32
Precision int32
Tolerance int32
Time Timeval
Tick int32
Ppsfreq int32
Jitter int32
Shift int32
Stabil int32
Jitcnt int32
Calcnt int32
Errcnt int32
Stbcnt int32
Tai int32
Pad_cgo_0 [44]byte
}
type Time_t int32
type Tms struct {
Utime int32
Stime int32
Cutime int32
Cstime int32
}
type Utimbuf struct {
Actime int32
Modtime int32
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int32
Ixrss int32
Idrss int32
Isrss int32
Minflt int32
Majflt int32
Nswap int32
Inblock int32
Oublock int32
Msgsnd int32
Msgrcv int32
Nsignals int32
Nvcsw int32
Nivcsw int32
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Dev uint64
X__pad1 uint16
Pad_cgo_0 [2]byte
X__st_ino uint32
Mode uint32
Nlink uint32
Uid uint32
Gid uint32
Rdev uint64
X__pad2 uint16
Pad_cgo_1 [6]byte
Size int64
Blksize int32
Pad_cgo_2 [4]byte
Blocks int64
Atim Timespec
Mtim Timespec
Ctim Timespec
Ino uint64
}
type Statfs_t struct {
Type int32
Bsize int32
Blocks uint64
Bfree uint64
Bavail uint64
Files uint64
Ffree uint64
Fsid Fsid
Namelen int32
Frsize int32
Flags int32
Spare [4]int32
Pad_cgo_0 [4]byte
}
type Dirent struct {
Ino uint64
Off int64
Reclen uint16
Type uint8
Name [256]uint8
Pad_cgo_0 [5]byte
}
type Fsid struct {
X__val [2]int32
}
type Flock_t struct {
Type int16
Whence int16
Pad_cgo_0 [4]byte
Start int64
Len int64
Pid int32
Pad_cgo_1 [4]byte
}
type RawSockaddrInet4 struct {
Family uint16
Port uint16
Addr [4]byte /* in_addr */
Zero [8]uint8
}
type RawSockaddrInet6 struct {
Family uint16
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Family uint16
Path [108]int8
}
type RawSockaddrLinklayer struct {
Family uint16
Protocol uint16
Ifindex int32
Hatype uint16
Pkttype uint8
Halen uint8
Addr [8]uint8
}
type RawSockaddrNetlink struct {
Family uint16
Pad uint16
Pid uint32
Groups uint32
}
type RawSockaddrHCI struct {
Family uint16
Dev uint16
Channel uint16
}
type RawSockaddr struct {
Family uint16
Data [14]uint8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [96]uint8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint32
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen uint32
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
X__cmsg_data [0]uint8
}
type Inet4Pktinfo struct {
Ifindex int32
Spec_dst [4]byte /* in_addr */
Addr [4]byte /* in_addr */
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Data [8]uint32
}
type Ucred struct {
Pid int32
Uid uint32
Gid uint32
}
type TCPInfo struct {
State uint8
Ca_state uint8
Retransmits uint8
Probes uint8
Backoff uint8
Options uint8
Pad_cgo_0 [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
Rcv_mss uint32
Unacked uint32
Sacked uint32
Lost uint32
Retrans uint32
Fackets uint32
Last_data_sent uint32
Last_ack_sent uint32
Last_data_recv uint32
Last_ack_recv uint32
Pmtu uint32
Rcv_ssthresh uint32
Rtt uint32
Rttvar uint32
Snd_ssthresh uint32
Snd_cwnd uint32
Advmss uint32
Reordering uint32
Rcv_rtt uint32
Rcv_space uint32
Total_retrans uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofInet4Pktinfo = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
)
const (
IFA_UNSPEC = 0x0
IFA_ADDRESS = 0x1
IFA_LOCAL = 0x2
IFA_LABEL = 0x3
IFA_BROADCAST = 0x4
IFA_ANYCAST = 0x5
IFA_CACHEINFO = 0x6
IFA_MULTICAST = 0x7
IFLA_UNSPEC = 0x0
IFLA_ADDRESS = 0x1
IFLA_BROADCAST = 0x2
IFLA_IFNAME = 0x3
IFLA_MTU = 0x4
IFLA_LINK = 0x5
IFLA_QDISC = 0x6
IFLA_STATS = 0x7
IFLA_COST = 0x8
IFLA_PRIORITY = 0x9
IFLA_MASTER = 0xa
IFLA_WIRELESS = 0xb
IFLA_PROTINFO = 0xc
IFLA_TXQLEN = 0xd
IFLA_MAP = 0xe
IFLA_WEIGHT = 0xf
IFLA_OPERSTATE = 0x10
IFLA_LINKMODE = 0x11
IFLA_LINKINFO = 0x12
IFLA_NET_NS_PID = 0x13
IFLA_IFALIAS = 0x14
IFLA_MAX = 0x1d
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
RT_SCOPE_HOST = 0xfe
RT_SCOPE_NOWHERE = 0xff
RT_TABLE_UNSPEC = 0x0
RT_TABLE_COMPAT = 0xfc
RT_TABLE_DEFAULT = 0xfd
RT_TABLE_MAIN = 0xfe
RT_TABLE_LOCAL = 0xff
RT_TABLE_MAX = 0xffffffff
RTA_UNSPEC = 0x0
RTA_DST = 0x1
RTA_SRC = 0x2
RTA_IIF = 0x3
RTA_OIF = 0x4
RTA_GATEWAY = 0x5
RTA_PRIORITY = 0x6
RTA_PREFSRC = 0x7
RTA_METRICS = 0x8
RTA_MULTIPATH = 0x9
RTA_FLOW = 0xb
RTA_CACHEINFO = 0xc
RTA_TABLE = 0xf
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
RTN_BROADCAST = 0x3
RTN_ANYCAST = 0x4
RTN_MULTICAST = 0x5
RTN_BLACKHOLE = 0x6
RTN_UNREACHABLE = 0x7
RTN_PROHIBIT = 0x8
RTN_THROW = 0x9
RTN_NAT = 0xa
RTN_XRESOLVE = 0xb
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
RTNLGRP_NEIGH = 0x3
RTNLGRP_TC = 0x4
RTNLGRP_IPV4_IFADDR = 0x5
RTNLGRP_IPV4_MROUTE = 0x6
RTNLGRP_IPV4_ROUTE = 0x7
RTNLGRP_IPV4_RULE = 0x8
RTNLGRP_IPV6_IFADDR = 0x9
RTNLGRP_IPV6_MROUTE = 0xa
RTNLGRP_IPV6_ROUTE = 0xb
RTNLGRP_IPV6_IFINFO = 0xc
RTNLGRP_IPV6_PREFIX = 0x12
RTNLGRP_IPV6_RULE = 0x13
RTNLGRP_ND_USEROPT = 0x14
SizeofNlMsghdr = 0x10
SizeofNlMsgerr = 0x14
SizeofRtGenmsg = 0x1
SizeofNlAttr = 0x4
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
)
type NlMsghdr struct {
Len uint32
Type uint16
Flags uint16
Seq uint32
Pid uint32
}
type NlMsgerr struct {
Error int32
Msg NlMsghdr
}
type RtGenmsg struct {
Family uint8
}
type NlAttr struct {
Len uint16
Type uint16
}
type RtAttr struct {
Len uint16
Type uint16
}
type IfInfomsg struct {
Family uint8
X__ifi_pad uint8
Type uint16
Index int32
Flags uint32
Change uint32
}
type IfAddrmsg struct {
Family uint8
Prefixlen uint8
Flags uint8
Scope uint8
Index uint32
}
type RtMsg struct {
Family uint8
Dst_len uint8
Src_len uint8
Tos uint8
Table uint8
Protocol uint8
Scope uint8
Type uint8
Flags uint32
}
type RtNexthop struct {
Len uint16
Flags uint8
Hops uint8
Ifindex int32
}
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x8
)
type SockFilter struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type SockFprog struct {
Len uint16
Pad_cgo_0 [2]byte
Filter *SockFilter
}
type InotifyEvent struct {
Wd int32
Mask uint32
Cookie uint32
Len uint32
Name [0]uint8
}
const SizeofInotifyEvent = 0x10
type PtraceRegs struct {
Uregs [18]uint32
}
type FdSet struct {
Bits [32]int32
}
type Sysinfo_t struct {
Uptime int32
Loads [3]uint32
Totalram uint32
Freeram uint32
Sharedram uint32
Bufferram uint32
Totalswap uint32
Freeswap uint32
Procs uint16
Pad uint16
Totalhigh uint32
Freehigh uint32
Unit uint32
X_f [8]uint8
}
type Utsname struct {
Sysname [65]uint8
Nodename [65]uint8
Release [65]uint8
Version [65]uint8
Machine [65]uint8
Domainname [65]uint8
}
type Ustat_t struct {
Tfree int32
Tinode uint32
Fname [6]uint8
Fpack [6]uint8
}
type EpollEvent struct {
Events uint32
PadFd int32
Fd int32
Pad int32
}
const (
AT_FDCWD = -0x64
AT_REMOVEDIR = 0x200
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLIN = 0x1
POLLPRI = 0x2
POLLOUT = 0x4
POLLRDHUP = 0x2000
POLLERR = 0x8
POLLHUP = 0x10
POLLNVAL = 0x20
)
type Sigset_t struct {
X__val [16]uint64
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Line uint8
Cc [19]uint8
Ispeed uint32
Ospeed uint32
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) ppy Pty Ltd <[email protected]>. Licensed under the MIT Licence.
// See the LICENCE file in the repository root for full licence text.
using osu.Game.Rulesets.Judgements;
using osu.Game.Rulesets.Objects;
namespace osu.Game.Rulesets.Taiko.Objects
{
/// <summary>
/// A taiko bar line hit object (implements <see cref="IBarLine"/>).
/// </summary>
public class BarLine : TaikoHitObject, IBarLine
{
/// <summary>
/// Whether this is a major bar line.
/// NOTE(review): the exact semantics of "major" are defined by IBarLine
/// elsewhere in the codebase - confirm before relying on this.
/// </summary>
public bool Major { get; set; }
/// <summary>
/// Bar lines create an <see cref="IgnoreJudgement"/>; by that type's
/// name, presumably one with no scoring impact - confirm in osu.Game.
/// </summary>
public override Judgement CreateJudgement() => new IgnoreJudgement();
}
}
| {
"pile_set_name": "Github"
} |
import React, { Component } from "react";
const withData = WrappedComponent => {
return class extends Component {
constructor(props) {
super(props);
this.state = {
data: []
};
}
componentDidMount() {
fetch(this.props.api)
.then(res => res.json())
.then(data => this.setState({ data: data.items }));
}
render() {
return <WrappedComponent {...this.props} data={this.state.data} />;
}
};
};
export default withData;
| {
"pile_set_name": "Github"
} |
@echo off
REM ---------------------------------------------------------------------------
REM Copyright 2017 WSO2, Inc. http://www.wso2.org
REM
REM Licensed under the Apache License, Version 2.0 (the "License");
REM you may not use this file except in compliance with the License.
REM You may obtain a copy of the License at
REM
REM http://www.apache.org/licenses/LICENSE-2.0
REM
REM Unless required by applicable law or agreed to in writing, software
REM distributed under the License is distributed on an "AS IS" BASIS,
REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
REM See the License for the specific language governing permissions and
REM limitations under the License.
rem ---------------------------------------------------------------------------
rem Main Script for WSO2 Carbon
rem
rem Environment Variable Prequisites
rem
rem CARBON_HOME Home of CARBON installation. If not set I will try
rem to figure it out.
rem
rem
rem JAVA_HOME Must point at your Java Development Kit installation.
rem
rem JAVA_OPTS (Optional) Java runtime options used when the commands
rem is executed.
rem ---------------------------------------------------------------------------
rem ----- if JAVA_HOME is not set we're not happy ------------------------------
:checkJava
if "%JAVA_HOME%" == "" goto noJavaHome
if not exist "%JAVA_HOME%\bin\java.exe" goto noJavaHome
goto checkServer
:noJavaHome
echo "You must set the JAVA_HOME variable before running CARBON."
goto end
rem ----- Only set CARBON_HOME if not already set ----------------------------
:checkServer
rem %~sdp0 is expanded pathname of the current script under NT with spaces in the path removed
SET CARBON_HOME=%~sdp0..
SET curDrive=%cd:~0,1%
SET wsasDrive=%CARBON_HOME:~0,1%
if not "%curDrive%" == "%wsasDrive%" %wsasDrive%:
rem find CARBON_HOME if it does not exist due to either an invalid value passed
rem by the user or the %0 problem on Windows 9x
if not exist "%CARBON_HOME%\bin\version.txt" goto noServerHome
set AXIS2_HOME=%CARBON_HOME%
goto updateClasspath
:noServerHome
echo CARBON_HOME is set incorrectly or CARBON could not be located. Please set CARBON_HOME.
goto end
rem ----- update classpath -----------------------------------------------------
:updateClasspath
setlocal EnableDelayedExpansion
cd %CARBON_HOME%
set CARBON_CLASSPATH=
FOR %%C in ("%CARBON_HOME%\bin\*.jar") DO set CARBON_CLASSPATH=!CARBON_CLASSPATH!;".\bin\%%~nC%%~xC"
set CARBON_CLASSPATH="%JAVA_HOME%\lib\tools.jar";%CARBON_CLASSPATH%;
FOR %%D in ("%CARBON_HOME%\wso2\lib\commons-lang*.jar") DO set CARBON_CLASSPATH=!CARBON_CLASSPATH!;".\wso2\lib\%%~nD%%~xD"
rem ----- Process the input command -------------------------------------------
rem Slurp the command line arguments. This loop allows for an unlimited number
rem of arguments (up to the command line limit, anyway).
:setupArgs
if ""%1""=="""" goto doneStart
if ""%1""==""-run"" goto commandLifecycle
if ""%1""==""--run"" goto commandLifecycle
if ""%1""==""run"" goto commandLifecycle
if ""%1""==""-restart"" goto commandLifecycle
if ""%1""==""--restart"" goto commandLifecycle
if ""%1""==""restart"" goto commandLifecycle
if ""%1""==""debug"" goto commandDebug
if ""%1""==""-debug"" goto commandDebug
if ""%1""==""--debug"" goto commandDebug
if ""%1""==""version"" goto commandVersion
if ""%1""==""-version"" goto commandVersion
if ""%1""==""--version"" goto commandVersion
shift
goto setupArgs
rem ----- commandVersion -------------------------------------------------------
:commandVersion
shift
type "%CARBON_HOME%\bin\version.txt"
type "%CARBON_HOME%\bin\wso2carbon-version.txt"
goto end
rem ----- commandDebug ---------------------------------------------------------
:commandDebug
rem Handle "--debug <port>": restart the JVM with a JDWP agent listening on
rem the given TCP port, suspended until a remote debugger attaches.
shift
rem After the shift, %1 is the port number that followed the --debug flag.
set DEBUG_PORT=%1
if "%DEBUG_PORT%"=="" goto noDebugPort
rem Debugging replaces any user-supplied JVM options wholesale.
if not "%JAVA_OPTS%"=="" echo Warning !!!. User specified JAVA_OPTS will be ignored, once you give the --debug option.
rem suspend=y makes the JVM wait for the debugger before executing any code.
set JAVA_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=%DEBUG_PORT%
echo Please start the remote debugging client to continue...
goto findJdk
:noDebugPort
echo Please specify the debug port after the --debug option
goto end
rem ----- commandLifecycle -----------------------------------------------------
:commandLifecycle
goto findJdk
:doneStart
if "%OS%"=="Windows_NT" @setlocal
if "%OS%"=="WINNT" @setlocal
rem ---------- Handle the SSL Issue with proper JDK version --------------------
rem find the version of the jdk
rem Detect the JDK major version before launching the server.
:findJdk
rem Preserve the full argument list so it can be forwarded to the bootstrap class.
set CMD=RUN %*
:checkJdk17
rem Probe "java -version" output for a 1.7.x or 1.8.x version string.
rem FIX: inside a findstr character class '|' is a literal, so the previous
rem pattern "1.[7|8]" also matched a stray pipe character, and the unescaped
rem '.' matched any character. "1\.[78]" matches exactly "1.7" or "1.8".
"%JAVA_HOME%\bin\java" -version 2>&1 | findstr /r "1\.[78]" >NUL
IF ERRORLEVEL 1 goto unknownJdk
goto jdk17
:unknownJdk
rem Unsupported JDK: warn but continue anyway (same behaviour as before).
echo Starting WSO2 Carbon (in unsupported JDK)
echo [ERROR] CARBON is supported only on JDK 1.7 and 1.8
goto jdk17
:jdk17
goto runServer
rem ----------------- Execute The Requested Command ----------------------------
:runServer
cd %CARBON_HOME%
rem ------------------ Remove tmp folder on startup -----------------------------
IF NOT EXIST %CARBON_HOME%\tmp mkdir %CARBON_HOME%\tmp\work
set TMP_DIR=%CARBON_HOME%\tmp
cd "%TMP_DIR%"
del *.* /s /q > nul
FOR /d %%G in ("*.*") DO rmdir %%G /s /q
cd ..
rem ---------- Add jars to classpath ----------------
set CARBON_CLASSPATH=.\lib;%CARBON_CLASSPATH%
set JAVA_ENDORSED=".\wso2\lib\endorsed";"%JAVA_HOME%\jre\lib\endorsed";"%JAVA_HOME%\lib\endorsed"
set profile=-Dprofile=device-key-manager
set CMD_LINE_ARGS=-Xbootclasspath/a:%CARBON_XBOOTCLASSPATH% -Xms256m -Xmx1024m -XX:MaxPermSize=256m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="%CARBON_HOME%\repository\logs\heap-dump.hprof" -Dcom.sun.management.jmxremote -classpath %CARBON_CLASSPATH% %JAVA_OPTS% -Djava.endorsed.dirs=%JAVA_ENDORSED% -Dcarbon.registry.root=/ -Dcarbon.home="%CARBON_HOME%" -Dwso2.server.standalone=true -Djava.command="%JAVA_HOME%\bin\java" -Djava.opts="%JAVA_OPTS%" -Djava.io.tmpdir="%CARBON_HOME%\tmp" -Dlogger.server.name="IoT-Core" -Dcatalina.base="%CARBON_HOME%\wso2\lib\tomcat" -Djava.util.logging.config.file="%CARBON_HOME%\conf\etc\logging-bridge.properties" -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Dcomponents.repo="%CARBON_HOME%\wso2\components\plugins" -Dcarbon.config.dir.path="%CARBON_HOME%\conf" -Dcarbon.components.dir.path="%CARBON_HOME%\wso2\components" -Dcarbon.extensions.dir.path="%CARBON_HOME%\extensions" -Dcarbon.dropins.dir.path="%CARBON_HOME%\dropins" -Dcarbon.external.lib.dir.path="%CARBON_HOME%\lib" -Dcarbon.patches.dir.path="%CARBON_HOME%\patches" -Dcarbon.servicepacks.dir.path="%CARBON_HOME%\servicepacks" -Dcarbon.internal.lib.dir.path="%CARBON_HOME%\wso2\lib" -Dconf.location="%CARBON_HOME%\conf" -Dcom.atomikos.icatch.file="%CARBON_HOME%\wso2\lib\transactions.properties" -Dcom.atomikos.icatch.hide_init_file_path=true -Dorg.apache.jasper.compiler.Parser.STRICT_QUOTE_ESCAPING=false -Dorg.apache.jasper.runtime.BodyContentImpl.LIMIT_BUFFER=true -Dcom.sun.jndi.ldap.connect.pool.authentication=simple -Dcom.sun.jndi.ldap.connect.pool.timeout=3000 -Dorg.terracotta.quartz.skipUpdateCheck=true -Djava.security.egd=file:/dev/./urandom -Dfile.encoding=UTF8 -Djava.net.preferIPv4Stack=true -Dcom.ibm.cacheLocalHost=true -DworkerNode=false -Dorg.wso2.ignoreHostnameVerification=true -Dorg.opensaml.httpclient.https.disableHostnameVerification=true -Diot.analytics.host="localhost" -Diot.analytics.https.port="9445" -Diot.manager.host="localhost" 
-Diot.manager.https.port="9443" -Dmqtt.broker.host="localhost" -Dmqtt.broker.port="1886" -Diot.core.host="localhost" -Diot.core.https.port="9444" -Diot.keymanager.host="localhost" -Diot.keymanager.https.port="9447" -Diot.gateway.host="localhost" -Diot.gateway.https.port="8244" -Diot.gateway.http.port="8281" -Diot.gateway.carbon.https.port="9444" -Diot.gateway.carbon.http.port="9764" -Diot.apimpublisher.host="localhost" -Diot.apimpublisher.https.port="9443" -Diot.apimstore.host="localhost" -Diot.apimstore.https.port="9443" %profile%
:runJava
echo JAVA_HOME environment variable is set to %JAVA_HOME%
echo CARBON_HOME environment variable is set to %CARBON_HOME%
"%JAVA_HOME%\bin\java" %CMD_LINE_ARGS% org.wso2.carbon.bootstrap.Bootstrap %CMD%
if "%ERRORLEVEL%"=="121" goto runJava
:end
goto endlocal
:endlocal
:END | {
"pile_set_name": "Github"
} |
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<assembly>
<id>bin</id>
<baseDirectory/>
<formats>
<format>tar.gz</format>
</formats>
<fileSets>
<fileSet>
<directory>src/main/scripts</directory>
<outputDirectory>scripts</outputDirectory>
<includes>
<include>run_*.sh</include>
</includes>
</fileSet>
<fileSet>
<directory>target</directory>
<outputDirectory></outputDirectory>
<includes>
<include>terrapin*.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>target/classes</directory>
<outputDirectory></outputDirectory>
<includes>
<include>*.properties</include>
</includes>
</fileSet>
</fileSets>
<!-- use this section if you want to package dependencies -->
<dependencySets>
<dependencySet>
<outputDirectory>lib</outputDirectory>
<useStrictFiltering>true</useStrictFiltering>
<useProjectArtifact>false</useProjectArtifact>
<scope>runtime</scope>
</dependencySet>
</dependencySets>
</assembly>
| {
"pile_set_name": "Github"
} |
# /* Copyright (C) 2001
# * Housemarque Oy
# * http://www.housemarque.com
# *
# * Distributed under the Boost Software License, Version 1.0. (See
# * accompanying file LICENSE_1_0.txt or copy at
# * http://www.boost.org/LICENSE_1_0.txt)
# */
#
# /* Revised by Paul Mensonides (2002) */
#
# /* See http://www.boost.org for most recent version. */
#
# ifndef BOOST_PREPROCESSOR_LIST_APPEND_HPP
# define BOOST_PREPROCESSOR_LIST_APPEND_HPP
#
# include <boost/preprocessor/config/config.hpp>
# include <boost/preprocessor/list/fold_right.hpp>
#
# /* BOOST_PP_LIST_APPEND */
#
# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# define BOOST_PP_LIST_APPEND(a, b) BOOST_PP_LIST_FOLD_RIGHT(BOOST_PP_LIST_APPEND_O, b, a)
# else
# define BOOST_PP_LIST_APPEND(a, b) BOOST_PP_LIST_APPEND_I(a, b)
# define BOOST_PP_LIST_APPEND_I(a, b) BOOST_PP_LIST_FOLD_RIGHT(BOOST_PP_LIST_APPEND_O, b, a)
# endif
#
# define BOOST_PP_LIST_APPEND_O(d, s, x) (x, s)
#
# /* BOOST_PP_LIST_APPEND_D */
#
# if ~BOOST_PP_CONFIG_FLAGS() & BOOST_PP_CONFIG_EDG()
# define BOOST_PP_LIST_APPEND_D(d, a, b) BOOST_PP_LIST_FOLD_RIGHT_ ## d(BOOST_PP_LIST_APPEND_O, b, a)
# else
# define BOOST_PP_LIST_APPEND_D(d, a, b) BOOST_PP_LIST_APPEND_D_I(d, a, b)
# define BOOST_PP_LIST_APPEND_D_I(d, a, b) BOOST_PP_LIST_FOLD_RIGHT_ ## d(BOOST_PP_LIST_APPEND_O, b, a)
# endif
#
# endif
| {
"pile_set_name": "Github"
} |
I setup health checking. When I fail some hosts, Envoy starts routing to all of them again. Why?
================================================================================================
This feature is known as the load balancer :ref:`panic threshold
<arch_overview_load_balancing_panic_threshold>`. It is used to prevent cascading failure when
upstream hosts start failing health checks in large numbers.
| {
"pile_set_name": "Github"
} |
procfs provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc.
Copyright 2014-2015 The Prometheus Authors
This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 10e39dc960b53f249aad7de61bcd1323
timeCreated: 1503396677
licenseType: Pro
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
." Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
." DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
."
." This code is free software; you can redistribute it and/or modify it
." under the terms of the GNU General Public License version 2 only, as
." published by the Free Software Foundation.
."
." This code is distributed in the hope that it will be useful, but WITHOUT
." ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
." FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
." version 2 for more details (a copy is included in the LICENSE file that
." accompanied this code).
."
." You should have received a copy of the GNU General Public License version
." 2 along with this work; if not, write to the Free Software Foundation,
." Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
."
." Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
." or visit www.oracle.com if you need additional information or have any
." questions.
."
.TH rmiregistry 1 "16 Mar 2012"
.LP
.SH "Name"
rmiregistry \- The Java Remote Object Registry
.LP
.RS 3
The \f3rmiregistry\fP command starts a remote object registry on the specified port on the current host.
.RE
.LP
.SH "SYNOPSIS"
.LP
.nf
\f3
.fl
rmiregistry [\fP\f4port\fP\f3]
.fl
\fP
.fi
.LP
.SH "DESCRIPTION"
.LP
.LP
The \f3rmiregistry\fP command creates and starts a remote object registry on the specified \f2port\fP on the current host. If \f2port\fP is omitted, the registry is started on port 1099. The \f3rmiregistry\fP command produces no output and is typically run in the background. For example:
.LP
.LP
\f2rmiregistry &\fP
.LP
.LP
A remote object registry is a bootstrap naming service that is used by RMI servers on the same host to bind remote objects to names. Clients on local and remote hosts can then look up remote objects and make remote method invocations.
.LP
.LP
The registry is typically used to locate the first remote object on which an application needs to invoke methods. That object in turn will provide application\-specific support for finding other objects.
.LP
.LP
The methods of the \f2java.rmi.registry.LocateRegistry\fP class are used to get a registry operating on the local host or local host and port.
.LP
.LP
The URL\-based methods of the \f2java.rmi.Naming\fP class operate on a registry and can be used to look up a remote object on any host, and on the local host: bind a simple (string) name to a remote object, rebind a new name to a remote object (overriding the old binding), unbind a remote object, and list the URLs bound in the registry.
.LP
.SH "OPTIONS"
.LP
.RS 3
.TP 3
\-J
Used in conjunction with any \f2java\fP option, it passes the option following the \f2\-J\fP (no spaces between the \-J and the option) on to the \f2java\fP interpreter.
.RE
.LP
.SH "SEE ALSO"
.LP
java(1),
.na
\f2java.rmi.registry.LocateRegistry\fP @
.fi
http://docs.oracle.com/javase/7/docs/api/java/rmi/registry/LocateRegistry.html and
.na
\f2java.rmi.Naming\fP @
.fi
http://docs.oracle.com/javase/7/docs/api/java/rmi/Naming.html
| {
"pile_set_name": "Github"
} |
/*
* WARNING: do not edit!
* Generated by Makefile from include/openssl/opensslconf.h.in
*
* Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <openssl/opensslv.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef OPENSSL_ALGORITHM_DEFINES
# error OPENSSL_ALGORITHM_DEFINES no longer supported
#endif
/*
* OpenSSL was configured with the following options:
*/
#ifndef OPENSSL_NO_COMP
# define OPENSSL_NO_COMP
#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_RC5
# define OPENSSL_NO_RC5
#endif
#ifndef OPENSSL_THREADS
# define OPENSSL_THREADS
#endif
#ifndef OPENSSL_RAND_SEED_OS
# define OPENSSL_RAND_SEED_OS
#endif
#ifndef OPENSSL_NO_AFALGENG
# define OPENSSL_NO_AFALGENG
#endif
#ifndef OPENSSL_NO_ASAN
# define OPENSSL_NO_ASAN
#endif
#ifndef OPENSSL_NO_ASM
# define OPENSSL_NO_ASM
#endif
#ifndef OPENSSL_NO_CRYPTO_MDEBUG
# define OPENSSL_NO_CRYPTO_MDEBUG
#endif
#ifndef OPENSSL_NO_CRYPTO_MDEBUG_BACKTRACE
# define OPENSSL_NO_CRYPTO_MDEBUG_BACKTRACE
#endif
#ifndef OPENSSL_NO_DEVCRYPTOENG
# define OPENSSL_NO_DEVCRYPTOENG
#endif
#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
# define OPENSSL_NO_EC_NISTP_64_GCC_128
#endif
#ifndef OPENSSL_NO_EGD
# define OPENSSL_NO_EGD
#endif
#ifndef OPENSSL_NO_EXTERNAL_TESTS
# define OPENSSL_NO_EXTERNAL_TESTS
#endif
#ifndef OPENSSL_NO_FUZZ_AFL
# define OPENSSL_NO_FUZZ_AFL
#endif
#ifndef OPENSSL_NO_FUZZ_LIBFUZZER
# define OPENSSL_NO_FUZZ_LIBFUZZER
#endif
#ifndef OPENSSL_NO_HEARTBEATS
# define OPENSSL_NO_HEARTBEATS
#endif
#ifndef OPENSSL_NO_MSAN
# define OPENSSL_NO_MSAN
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
#ifndef OPENSSL_NO_SSL3
# define OPENSSL_NO_SSL3
#endif
#ifndef OPENSSL_NO_SSL3_METHOD
# define OPENSSL_NO_SSL3_METHOD
#endif
#ifndef OPENSSL_NO_UBSAN
# define OPENSSL_NO_UBSAN
#endif
#ifndef OPENSSL_NO_UNIT_TEST
# define OPENSSL_NO_UNIT_TEST
#endif
#ifndef OPENSSL_NO_WEAK_SSL_CIPHERS
# define OPENSSL_NO_WEAK_SSL_CIPHERS
#endif
#ifndef OPENSSL_NO_DYNAMIC_ENGINE
# define OPENSSL_NO_DYNAMIC_ENGINE
#endif
/*
* Sometimes OPENSSL_NO_xxx ends up with an empty file and some compilers
* don't like that. This will hopefully silence them.
*/
#define NON_EMPTY_TRANSLATION_UNIT static void *dummy = &dummy;
/*
* Applications should use -DOPENSSL_API_COMPAT=<version> to suppress the
* declarations of functions deprecated in or before <version>. Otherwise, they
* still won't see them if the library has been built to disable deprecated
* functions.
*/
#ifndef DECLARE_DEPRECATED
# define DECLARE_DEPRECATED(f) f;
# ifdef __GNUC__
# if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 0)
# undef DECLARE_DEPRECATED
# define DECLARE_DEPRECATED(f) f __attribute__ ((deprecated));
# endif
# endif
#endif
#ifndef OPENSSL_FILE
# ifdef OPENSSL_NO_FILENAMES
# define OPENSSL_FILE ""
# define OPENSSL_LINE 0
# else
# define OPENSSL_FILE __FILE__
# define OPENSSL_LINE __LINE__
# endif
#endif
#ifndef OPENSSL_MIN_API
# define OPENSSL_MIN_API 0
#endif
#if !defined(OPENSSL_API_COMPAT) || OPENSSL_API_COMPAT < OPENSSL_MIN_API
# undef OPENSSL_API_COMPAT
# define OPENSSL_API_COMPAT OPENSSL_MIN_API
#endif
/*
* Do not deprecate things to be deprecated in version 1.2.0 before the
* OpenSSL version number matches.
*/
#if OPENSSL_VERSION_NUMBER < 0x10200000L
# define DEPRECATEDIN_1_2_0(f) f;
#elif OPENSSL_API_COMPAT < 0x10200000L
# define DEPRECATEDIN_1_2_0(f) DECLARE_DEPRECATED(f)
#else
# define DEPRECATEDIN_1_2_0(f)
#endif
#if OPENSSL_API_COMPAT < 0x10100000L
# define DEPRECATEDIN_1_1_0(f) DECLARE_DEPRECATED(f)
#else
# define DEPRECATEDIN_1_1_0(f)
#endif
#if OPENSSL_API_COMPAT < 0x10000000L
# define DEPRECATEDIN_1_0_0(f) DECLARE_DEPRECATED(f)
#else
# define DEPRECATEDIN_1_0_0(f)
#endif
#if OPENSSL_API_COMPAT < 0x00908000L
# define DEPRECATEDIN_0_9_8(f) DECLARE_DEPRECATED(f)
#else
# define DEPRECATEDIN_0_9_8(f)
#endif
/* Generate 80386 code? */
#undef I386_ONLY
#undef OPENSSL_UNISTD
#define OPENSSL_UNISTD <unistd.h>
#undef OPENSSL_EXPORT_VAR_AS_FUNCTION
/*
* The following are cipher-specific, but are part of the public API.
*/
#if !defined(OPENSSL_SYS_UEFI)
# undef BN_LLONG
/* Only one for the following should be defined */
# define SIXTY_FOUR_BIT_LONG
# undef SIXTY_FOUR_BIT
# undef THIRTY_TWO_BIT
#endif
#define RC4_INT unsigned char
#ifdef __cplusplus
}
#endif
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
# Script to start any CustomPiOS raspbian image from qemu.
# Usage: qemu_boot.sh </path/to/zip/with/img/file.zip>
#
# FIX: path expansions are now quoted so directories or image names that
# contain whitespace no longer break word splitting.
set -x
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ZIP_IMG=$1
DEST=/tmp
source "${DIR}/common.sh"

# Name of the .img entry inside the zip (third line of unzip's listing,
# column 9). NOTE(review): relies on unzip -Z's listing format — verify
# against the unzip build in use.
IMG_NAME=$(unzip -Z "${ZIP_IMG}" | head -n 3 | tail -n 1 | awk '{ print $9 }')
BASE_IMG_PATH="${DEST}/${IMG_NAME}"

if [ ! -f "${BASE_IMG_PATH}" ]; then
    # First run: extract the image and patch its fstab so it boots under
    # qemu (PARTUUID entries disabled, /boot served from /dev/sda1).
    unzip -o "${ZIP_IMG}" -d "${DEST}"

    BASE_ROOT_PARTITION=2
    BASE_MOUNT_PATH="${DEST}/mount"
    mkdir -p "${BASE_MOUNT_PATH}"

    sudo bash -c "$(declare -f mount_image); $(declare -f detach_all_loopback); mount_image \"$BASE_IMG_PATH\" \"$BASE_ROOT_PARTITION\" \"$BASE_MOUNT_PATH\""

    pushd "${BASE_MOUNT_PATH}"
    sudo bash -c "$(declare -f fixLd); fixLd"
    # Comment out PARTUUID root entries and mount /boot from the emulated disk.
    sudo sed -e '/PARTUUID/ s/^#*/#/' -i etc/fstab
    sudo bash -c 'echo "/dev/sda1 /boot vfat defaults 0 2" >> etc/fstab'
    popd

    sudo bash -c "$(declare -f unmount_image); unmount_image \"$BASE_MOUNT_PATH\" force"
fi

# qemu-ready kernel and device tree, fetched once and cached in ${DEST}.
KERNEL_VERSION=kernel-qemu-4.19.50-buster
DTB_VERSION=versatile-pb.dtb
KERNEL_PATH="${DEST}/${KERNEL_VERSION}"
DTB_PATH="${DEST}/${DTB_VERSION}"

if [ ! -f "${KERNEL_PATH}" ] ; then
    wget "https://github.com/dhruvvyas90/qemu-rpi-kernel/raw/master/${KERNEL_VERSION}" -O "${KERNEL_PATH}"
fi

if [ ! -f "${DTB_PATH}" ] ; then
    wget "https://github.com/dhruvvyas90/qemu-rpi-kernel/raw/master/${DTB_VERSION}" -O "${DTB_PATH}"
fi

# Boot the image; guest SSH is forwarded to localhost:5022.
/usr/bin/qemu-system-arm -kernel "${KERNEL_PATH}" -cpu arm1176 -m 256 -M versatilepb -dtb "${DTB_PATH}" -no-reboot -serial stdio -append 'root=/dev/sda2 panic=1 rootfstype=ext4 rw' -hda "${BASE_IMG_PATH}" -net nic -net user,hostfwd=tcp::5022-:22

#sudo umount ${BASE_MOUNT_PATH}
| {
"pile_set_name": "Github"
} |
木更津友里最新番号
【RD-429】この熟女いやらしい!麗しの美熟女11人 SEXに餓えた女たちの禁断の悦び 淫汁タレ流し4時間
【RD-343】この熟女いやらしい!お願い、もう戻れないの 身も心も溶かして!
【HEDV-104】若奥さまは犯されたい 7</a>2009-09-18クリスタル映像$$$HERMES121分钟 | {
"pile_set_name": "Github"
} |
require "unit/whitehall/authority/authority_test_helper"
require "ostruct"
class WorldEditorFatalityNoticeTest < ActiveSupport::TestCase
  # Stand-in world editor whose organisation CAN handle fatalities.
  def fatality_world_editor(world_locations, id = 1)
    build_world_editor(world_locations, id, handles_fatalities: true)
  end

  # Stand-in world editor whose organisation CANNOT handle fatalities.
  def normal_world_editor(world_locations, id = 1)
    build_world_editor(world_locations, id, handles_fatalities: false)
  end

  include AuthorityTestHelper

  test "cannot create a new fatality notice about their location if their organisation cannot handle fatalities" do
    editor = normal_world_editor(["hat land"])
    assert_not enforcer_for(editor, FatalityNotice).can?(:create)
  end

  test "cannot create a new fatality notice about their location even if their organisation can handle fatalities" do
    editor = fatality_world_editor(["hat land"])
    assert_not enforcer_for(editor, FatalityNotice).can?(:create)
  end

  test "cannot see a fatality notice about their location if their organisation cannot handle fatalities" do
    editor = normal_world_editor(["hat land", "tie land"])
    notice = with_locations(normal_fatality_notice, ["shirt land", "hat land"])
    assert_not enforcer_for(editor, notice).can?(:see)
  end

  test "cannot see a fatality notice about their location even if their organisation can handle fatalities" do
    editor = fatality_world_editor(["hat land", "tie land"])
    notice = with_locations(normal_fatality_notice, ["shirt land", "hat land"])
    assert_not enforcer_for(editor, notice).can?(:see)
  end

  test "cannot do anything to a fatality notice about their location if their organisation cannot handle fatalities" do
    editor = normal_world_editor(["hat land", "tie land"])
    notice = with_locations(normal_fatality_notice, ["shirt land", "hat land"])
    enforcer = enforcer_for(editor, notice)
    Whitehall::Authority::Rules::EditionRules.actions.each do |action|
      assert_not enforcer.can?(action)
    end
  end

  test "cannot do anything to a fatality notice about their location even if their organisation can handle fatalities" do
    editor = fatality_world_editor(["hat land", "tie land"])
    notice = with_locations(normal_fatality_notice, ["shirt land", "hat land"])
    enforcer = enforcer_for(editor, notice)
    Whitehall::Authority::Rules::EditionRules.actions.each do |action|
      assert_not enforcer.can?(action)
    end
  end

  private

  # Assembles the organisation/user OpenStruct pair shared by both public
  # builders above. Only the organisation's handles_fatalities? flag varies.
  def build_world_editor(world_locations, id, handles_fatalities:)
    organisation = OpenStruct.new(id: id, handles_fatalities?: handles_fatalities)
    OpenStruct.new(
      id: id,
      gds_editor?: false,
      departmental_editor?: false,
      world_editor?: true,
      organisation: organisation,
      world_locations: world_locations || [],
    )
  end
end
| {
"pile_set_name": "Github"
} |
namespace WebApplication.Services
{
using System;
/// <summary>
/// Default <see cref="IDateTimeService"/> implementation backed by the
/// system clock.
/// </summary>
public class DateTimeService : IDateTimeService
{
    /// <summary>Returns the current wall-clock time in UTC.</summary>
    public DateTime GetTime()
    {
        return DateTime.UtcNow;
    }
}
}
| {
"pile_set_name": "Github"
} |
/*
* Cirrus Logic CLPS711X CLK driver
*
* Copyright (C) 2014 Alexander Shiyan <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon/clps711x.h>
#include <dt-bindings/clock/clps711x-clock.h>
#define CLPS711X_SYSCON1 (0x0100)
#define CLPS711X_SYSCON2 (0x1100)
#define CLPS711X_SYSFLG2 (CLPS711X_SYSCON2 + SYSFLG_OFFSET)
#define CLPS711X_PLLR (0xa5a8)
#define CLPS711X_EXT_FREQ (13000000)
#define CLPS711X_OSC_FREQ (3686400)
/* SPIFRQ field (SYSCON1, 2 bits at offset 16 below) to SPI divider mapping. */
static const struct clk_div_table spi_div_table[] = {
	{ .val = 0, .div = 32, },
	{ .val = 1, .div = 8, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 1, },
};

/* Timer clock-select bits: 0 = /256 prescale, 1 = full reference rate. */
static const struct clk_div_table timer_div_table[] = {
	{ .val = 0, .div = 256, },
	{ .val = 1, .div = 1, },
};

/* Driver state; the lock serializes updates to the shared divider register. */
struct clps711x_clk {
	spinlock_t lock;
	struct clk_hw_onecell_data clk_data;
};
/*
 * Probe the CLPS711X syscon block, derive the clock frequencies and
 * register the whole clock tree.
 *
 * @base: ioremapped base of the syscon registers (NULL is rejected)
 * @fref: fallback PLL frequency (Hz) used when the multiplier read from
 *        PLLR is outside the sane 10..50 range; 0 disables the fallback
 *
 * Returns the allocated driver state or an ERR_PTR() on failure.
 */
static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
						       u32 fref)
{
	u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi;
	struct clps711x_clk *clps711x_clk;
	unsigned i;

	if (!base)
		return ERR_PTR(-ENOMEM);

	/* Single allocation: state struct plus CLPS711X_CLK_MAX hw slots. */
	clps711x_clk = kzalloc(sizeof(*clps711x_clk) +
			sizeof(*clps711x_clk->clk_data.hws) * CLPS711X_CLK_MAX,
			GFP_KERNEL);
	if (!clps711x_clk)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&clps711x_clk->lock);

	/* Read PLL multiplier value and sanity check */
	tmp = readl(base + CLPS711X_PLLR) >> 24;
	if (((tmp >= 10) && (tmp <= 50)) || !fref)
		f_pll = DIV_ROUND_UP(CLPS711X_OSC_FREQ * tmp, 2);
	else
		f_pll = fref;

	tmp = readl(base + CLPS711X_SYSFLG2);
	if (tmp & SYSFLG2_CKMODE) {
		/* External 13 MHz clock mode: PLL not used. */
		f_cpu = CLPS711X_EXT_FREQ;
		f_bus = CLPS711X_EXT_FREQ;
		f_spi = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 96);
		f_pll = 0;
		f_pwm = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 128);
	} else {
		f_cpu = f_pll;
		if (f_cpu > 36864000)
			f_bus = DIV_ROUND_UP(f_cpu, 2);
		else
			f_bus = 36864000 / 2;
		f_spi = DIV_ROUND_CLOSEST(f_cpu, 576);
		f_pwm = DIV_ROUND_CLOSEST(f_cpu, 768);
	}

	if (tmp & SYSFLG2_CKMODE) {
		/* Timer reference depends on the SYSCON2 OSTB bit. */
		if (readl(base + CLPS711X_SYSCON2) & SYSCON2_OSTB)
			f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 26);
		else
			f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 24);
	} else
		f_tim = DIV_ROUND_CLOSEST(f_cpu, 144);

	tmp = readl(base + CLPS711X_SYSCON1);
	/* Timer1 in free running mode.
	 * Counter will wrap around to 0xffff when it underflows
	 * and will continue to count down.
	 */
	tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
	/* Timer2 in prescale mode.
	 * Value written is automatically re-loaded when
	 * the counter underflows.
	 */
	tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
	writel(tmp, base + CLPS711X_SYSCON1);

	/* Fixed-rate clocks derived above, plus the runtime dividers. */
	clps711x_clk->clk_data.hws[CLPS711X_CLK_DUMMY] =
		clk_hw_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_CPU] =
		clk_hw_register_fixed_rate(NULL, "cpu", NULL, 0, f_cpu);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_BUS] =
		clk_hw_register_fixed_rate(NULL, "bus", NULL, 0, f_bus);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_PLL] =
		clk_hw_register_fixed_rate(NULL, "pll", NULL, 0, f_pll);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMERREF] =
		clk_hw_register_fixed_rate(NULL, "timer_ref", NULL, 0, f_tim);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1] =
		clk_hw_register_divider_table(NULL, "timer1", "timer_ref", 0,
					base + CLPS711X_SYSCON1, 5, 1, 0,
					timer_div_table, &clps711x_clk->lock);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2] =
		clk_hw_register_divider_table(NULL, "timer2", "timer_ref", 0,
					base + CLPS711X_SYSCON1, 7, 1, 0,
					timer_div_table, &clps711x_clk->lock);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM] =
		clk_hw_register_fixed_rate(NULL, "pwm", NULL, 0, f_pwm);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_SPIREF] =
		clk_hw_register_fixed_rate(NULL, "spi_ref", NULL, 0, f_spi);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_SPI] =
		clk_hw_register_divider_table(NULL, "spi", "spi_ref", 0,
					base + CLPS711X_SYSCON1, 16, 2, 0,
					spi_div_table, &clps711x_clk->lock);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_UART] =
		clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
	clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
		clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);

	/* Report (but do not unwind) any failed registrations. */
	for (i = 0; i < CLPS711X_CLK_MAX; i++)
		if (IS_ERR(clps711x_clk->clk_data.hws[i]))
			pr_err("clk %i: register failed with %ld\n",
			       i, PTR_ERR(clps711x_clk->clk_data.hws[i]));

	return clps711x_clk;
}
/*
 * Legacy (non-devicetree) entry point: register the clock tree with a
 * 73.728 MHz startup PLL frequency and wire up clkdev lookups for the
 * board-file platform devices.
 */
void __init clps711x_clk_init(void __iomem *base)
{
	struct clps711x_clk *clps711x_clk;

	clps711x_clk = _clps711x_clk_init(base, 73728000);
	BUG_ON(IS_ERR(clps711x_clk));

	/* Clocksource */
	clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
			       NULL, "clps711x-timer.0");
	clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
			       NULL, "clps711x-timer.1");

	/* Drivers */
	clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
			       NULL, "clps711x-pwm");
	clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
			       NULL, "clps711x-uart.0");
	clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
			       NULL, "clps711x-uart.1");
}
#ifdef CONFIG_OF
/*
 * Devicetree entry point: map the syscon node, read the optional
 * "startup-frequency" property (used as the PLL fallback rate) and expose
 * all clocks through a onecell provider.
 */
static void __init clps711x_clk_init_dt(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);
	struct clps711x_clk *clps711x_clk;
	u32 fref = 0;

	/* The property is optional; a missing value leaves fref at 0. */
	WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));

	clps711x_clk = _clps711x_clk_init(base, fref);
	BUG_ON(IS_ERR(clps711x_clk));

	clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
	of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
			       &clps711x_clk->clk_data);
}
CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
#endif
| {
"pile_set_name": "Github"
} |
<!DOCTYPE PATCH SYSTEM "http://vvvv.org/versions/vvvv45debug29.3.dtd" >
<PATCH nodename="C:\Users\joreg\dev\repos\vvvv\public\vvvv45\lib\nodes\native\ShellExecute (Windows) help.v4p">
<BOUNDS height="7050" left="1065" top="1425" type="Window" width="4125">
</BOUNDS>
<NODE id="6" nodename="ShellExecute (Windows)">
<BOUNDS height="0" left="345" top="2535" type="Node" width="0">
</BOUNDS>
<PIN pinname="File" visible="1">
</PIN>
<PIN pinname="Do Execute" slicecount="1" visible="1" values="0.00000">
</PIN>
<PIN pinname="Show Window" slicecount="1" values="1">
</PIN>
<PIN pinname="Commandline Arguments" visible="1" slicecount="1" values="||">
</PIN>
</NODE>
<NODE componentmode="InABox" id="5" nodename="IOBox (String)">
<BOUNDS height="0" left="330" top="975" type="Node" width="0">
</BOUNDS>
<BOUNDS height="160" left="3930" top="5415" type="Window" width="215">
</BOUNDS>
<BOUNDS height="345" left="330" top="975" type="Box" width="1785">
</BOUNDS>
<PIN pinname="Input String" slicecount="1" values="C:\WINDOWS\NOTEPAD.EXE">
</PIN>
<PIN pinname="Output String" visible="1">
</PIN>
</NODE>
<NODE componentmode="InABox" id="4" nodename="IOBOX (VALUE ADVANCED)">
<BOUNDS height="0" left="2205" top="975" type="Node" width="0">
</BOUNDS>
<BOUNDS height="160" left="6405" top="5625" type="Window" width="215">
</BOUNDS>
<BOUNDS height="690" left="2205" top="975" type="Box" width="705">
</BOUNDS>
<PIN pinname="Y Output Value" visible="1">
</PIN>
<PIN pinname="Minimum" slicecount="1" values="0">
</PIN>
<PIN pinname="Maximum" slicecount="1" values="1">
</PIN>
<PIN pinname="Slider Behavior" slicecount="1" values="Bang">
</PIN>
<PIN pinname="Show Value" slicecount="1" values="0">
</PIN>
<PIN pinname="Show Slider" slicecount="1" values="1">
</PIN>
<PIN pinname="X Input Value" slicecount="1" values="0">
</PIN>
<PIN pinname="Y Input Value" slicecount="1" values="0">
</PIN>
</NODE>
<LINK dstnodeid="6" dstpinname="Do Execute" linkstyle="PolyLine" srcnodeid="4" srcpinname="Y Output Value">
</LINK>
<NODE componentmode="InABox" id="3" nodename="IOBox (String)">
<BOUNDS height="255" left="3030" top="1185" type="Node" width="1215">
</BOUNDS>
<BOUNDS height="255" left="3030" top="1185" type="Box" width="1215">
</BOUNDS>
<BOUNDS height="160" left="0" top="0" type="Window" width="215">
</BOUNDS>
<PIN pinname="Input String" slicecount="1" visible="0" values="|< Execute|">
</PIN>
<PIN pinname="Output String" visible="0">
</PIN>
<PIN pinname="Show Grid" slicecount="1" values="0">
</PIN>
</NODE>
<NODE componentmode="InABox" id="2" nodename="IOBox (String)">
<BOUNDS height="255" left="15" top="390" type="Node" width="570">
</BOUNDS>
<BOUNDS height="240" left="15" top="390" type="Box" width="2100">
</BOUNDS>
<BOUNDS height="160" left="9630" top="41520" type="Window" width="215">
</BOUNDS>
<PIN pinname="Input String" slicecount="1" visible="0" values="|Execute a specified file.|">
</PIN>
<PIN pinname="Output String" visible="0">
</PIN>
<PIN pinname="Show Grid" slicecount="1" values="0">
</PIN>
</NODE>
<NODE componentmode="InABox" id="1" nodename="IOBox (String)">
<BOUNDS height="270" left="15" top="15" type="Node" width="915">
</BOUNDS>
<BOUNDS height="375" left="15" top="15" type="Box" width="3765">
</BOUNDS>
<BOUNDS height="160" left="9630" top="41520" type="Window" width="215">
</BOUNDS>
<PIN pinname="Input String" slicecount="1" visible="0" values="|ShellExecute (Windows)|">
</PIN>
<PIN pinname="Output String" visible="0">
</PIN>
<PIN pinname="Show Grid" slicecount="1" values="0">
</PIN>
<PIN pinname="Size" slicecount="1" values="12">
</PIN>
</NODE>
<NODE componentmode="InABox" id="0" nodename="IOBox (String)">
<BOUNDS height="270" left="240" top="3255" type="Node" width="3495">
</BOUNDS>
<BOUNDS height="2280" left="240" top="3255" type="Box" width="3555">
</BOUNDS>
<BOUNDS height="160" left="0" top="0" type="Window" width="215">
</BOUNDS>
<PIN pinname="Input String" slicecount="1" visible="0" values="|to execute windows shell commands use as file: c:\windows\system32\cmd.exe&cr;&lf;&cr;&lf;and as arguments:&cr;&lf;/c command arguments&cr;&lf;&cr;&lf;where command is any of the shell commands like 'del' or 'md' and arguments is an argument as you would give it on the shell. don't forget double quotes for filenames with spaces!&cr;&lf;|">
</PIN>
<PIN pinname="Output String" visible="0">
</PIN>
<PIN pinname="Show Grid" slicecount="1" values="0">
</PIN>
</NODE>
<LINK srcnodeid="5" srcpinname="Output String" dstnodeid="6" dstpinname="File">
</LINK>
<NODE nodename="IOBox (String)" componentmode="InABox" id="8" systemname="IOBox (String)">
<BOUNDS type="Node" left="300" top="5775" width="705" height="270">
</BOUNDS>
<BOUNDS type="Box" left="300" top="5775" width="735" height="255">
</BOUNDS>
<PIN pinname="Input String" visible="0" slicecount="1" values="|see also|">
</PIN>
<PIN pinname="Output String" visible="0">
</PIN>
<PIN pinname="Show Grid" slicecount="1" values="0">
</PIN>
<PIN pinname="String Type" slicecount="1" values="MultiLine">
</PIN>
</NODE>
<NODE systemname="ShellExecute (Windows Advanced)" nodename="ShellExecute (Windows Advanced)" componentmode="Hidden" id="7" filename="%VVVV%\addonpack\lib\nodes\plugins\ShellExecute.dll">
<BOUNDS type="Node" left="345" top="6105" width="975" height="270">
</BOUNDS>
</NODE>
</PATCH> | {
"pile_set_name": "Github"
} |
// Code generated by sdkgen. DO NOT EDIT.
//nolint
package containerregistry
import (
"context"
"google.golang.org/grpc"
containerregistry "github.com/yandex-cloud/go-genproto/yandex/cloud/containerregistry/v1"
"github.com/yandex-cloud/go-genproto/yandex/cloud/operation"
)
//revive:disable
// LifecyclePolicyServiceClient is a containerregistry.LifecyclePolicyServiceClient with
// lazy GRPC connection initialization.
type LifecyclePolicyServiceClient struct {
	// getConn resolves the underlying gRPC client connection on demand;
	// every RPC method below calls it before delegating to the generated stub.
	getConn func(ctx context.Context) (*grpc.ClientConn, error)
}
// Create implements containerregistry.LifecyclePolicyServiceClient.
// It resolves the lazily initialized gRPC connection, then forwards the
// request to the generated stub.
func (c *LifecyclePolicyServiceClient) Create(ctx context.Context, in *containerregistry.CreateLifecyclePolicyRequest, opts ...grpc.CallOption) (*operation.Operation, error) {
	conn, connErr := c.getConn(ctx)
	if connErr != nil {
		return nil, connErr
	}
	stub := containerregistry.NewLifecyclePolicyServiceClient(conn)
	return stub.Create(ctx, in, opts...)
}
// Delete implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) Delete(ctx context.Context, in *containerregistry.DeleteLifecyclePolicyRequest, opts ...grpc.CallOption) (*operation.Operation, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).Delete(ctx, in, opts...)
}
// DryRun implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) DryRun(ctx context.Context, in *containerregistry.DryRunLifecyclePolicyRequest, opts ...grpc.CallOption) (*operation.Operation, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).DryRun(ctx, in, opts...)
}
// Get implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) Get(ctx context.Context, in *containerregistry.GetLifecyclePolicyRequest, opts ...grpc.CallOption) (*containerregistry.LifecyclePolicy, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).Get(ctx, in, opts...)
}
// GetDryRunResult implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) GetDryRunResult(ctx context.Context, in *containerregistry.GetDryRunLifecyclePolicyResultRequest, opts ...grpc.CallOption) (*containerregistry.DryRunLifecyclePolicyResult, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).GetDryRunResult(ctx, in, opts...)
}
// List implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) List(ctx context.Context, in *containerregistry.ListLifecyclePoliciesRequest, opts ...grpc.CallOption) (*containerregistry.ListLifecyclePoliciesResponse, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).List(ctx, in, opts...)
}
// ListDryRunResultAffectedImages implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) ListDryRunResultAffectedImages(ctx context.Context, in *containerregistry.ListDryRunLifecyclePolicyResultAffectedImagesRequest, opts ...grpc.CallOption) (*containerregistry.ListDryRunLifecyclePolicyResultAffectedImagesResponse, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).ListDryRunResultAffectedImages(ctx, in, opts...)
}
// LifecyclePolicyDryRunResultAffectedImagesIterator lazily pages through the
// images affected by a lifecycle-policy dry run.
type LifecyclePolicyDryRunResultAffectedImagesIterator struct {
	ctx     context.Context
	opts    []grpc.CallOption
	err     error // first RPC error; sticky, stops iteration
	started bool  // true once the first page has been requested
	client  *LifecyclePolicyServiceClient
	request *containerregistry.ListDryRunLifecyclePolicyResultAffectedImagesRequest
	items   []*containerregistry.Image // remaining items of the current page
}
// LifecyclePolicyDryRunResultAffectedImagesIterator returns an iterator over the
// affected images of the given dry-run result, fetching pages of up to 1000 items.
func (c *LifecyclePolicyServiceClient) LifecyclePolicyDryRunResultAffectedImagesIterator(ctx context.Context, dryRunLifecyclePolicyResultId string, opts ...grpc.CallOption) *LifecyclePolicyDryRunResultAffectedImagesIterator {
	return &LifecyclePolicyDryRunResultAffectedImagesIterator{
		ctx:    ctx,
		opts:   opts,
		client: c,
		request: &containerregistry.ListDryRunLifecyclePolicyResultAffectedImagesRequest{
			DryRunLifecyclePolicyResultId: dryRunLifecyclePolicyResultId,
			PageSize:                      1000,
		},
	}
}
// Next advances to the next item, fetching a new page when the current one is
// exhausted. It returns false when iteration ends or an RPC fails (see Error).
func (it *LifecyclePolicyDryRunResultAffectedImagesIterator) Next() bool {
	if it.err != nil {
		return false
	}
	if len(it.items) > 1 {
		it.items[0] = nil // release the consumed element for GC
		it.items = it.items[1:]
		return true
	}
	it.items = nil // consume last item, if any
	if it.started && it.request.PageToken == "" {
		return false
	}
	it.started = true
	response, err := it.client.ListDryRunResultAffectedImages(it.ctx, it.request, it.opts...)
	it.err = err
	if err != nil {
		return false
	}
	it.items = response.AffectedImages
	it.request.PageToken = response.NextPageToken
	return len(it.items) > 0
}
// Value returns the current item. It panics if Next has not returned true.
func (it *LifecyclePolicyDryRunResultAffectedImagesIterator) Value() *containerregistry.Image {
	if len(it.items) == 0 {
		panic("calling Value on empty iterator")
	}
	return it.items[0]
}
// Error returns the first RPC error encountered during iteration, if any.
func (it *LifecyclePolicyDryRunResultAffectedImagesIterator) Error() error {
	return it.err
}
// ListDryRunResults implements containerregistry.LifecyclePolicyServiceClient.
// The connection is obtained via getConn before delegating to the generated stub.
func (c *LifecyclePolicyServiceClient) ListDryRunResults(ctx context.Context, in *containerregistry.ListDryRunLifecyclePolicyResultsRequest, opts ...grpc.CallOption) (*containerregistry.ListDryRunLifecyclePolicyResultsResponse, error) {
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	return containerregistry.NewLifecyclePolicyServiceClient(conn).ListDryRunResults(ctx, in, opts...)
}
// LifecyclePolicyDryRunResultsIterator lazily pages through the dry-run
// results of a lifecycle policy.
type LifecyclePolicyDryRunResultsIterator struct {
	ctx     context.Context
	opts    []grpc.CallOption
	err     error // first RPC error; sticky, stops iteration
	started bool  // true once the first page has been requested
	client  *LifecyclePolicyServiceClient
	request *containerregistry.ListDryRunLifecyclePolicyResultsRequest
	items   []*containerregistry.DryRunLifecyclePolicyResult // remaining items of the current page
}
// LifecyclePolicyDryRunResultsIterator returns an iterator over the dry-run
// results of the given policy, fetching pages of up to 1000 items.
func (c *LifecyclePolicyServiceClient) LifecyclePolicyDryRunResultsIterator(ctx context.Context, lifecyclePolicyId string, opts ...grpc.CallOption) *LifecyclePolicyDryRunResultsIterator {
	return &LifecyclePolicyDryRunResultsIterator{
		ctx:    ctx,
		opts:   opts,
		client: c,
		request: &containerregistry.ListDryRunLifecyclePolicyResultsRequest{
			LifecyclePolicyId: lifecyclePolicyId,
			PageSize:          1000,
		},
	}
}
// Next advances to the next item, fetching a new page when the current one is
// exhausted. It returns false when iteration ends or an RPC fails (see Error).
func (it *LifecyclePolicyDryRunResultsIterator) Next() bool {
	if it.err != nil {
		return false
	}
	if len(it.items) > 1 {
		it.items[0] = nil // release the consumed element for GC
		it.items = it.items[1:]
		return true
	}
	it.items = nil // consume last item, if any
	if it.started && it.request.PageToken == "" {
		return false
	}
	it.started = true
	response, err := it.client.ListDryRunResults(it.ctx, it.request, it.opts...)
	it.err = err
	if err != nil {
		return false
	}
	it.items = response.DryRunLifecyclePolicyResults
	it.request.PageToken = response.NextPageToken
	return len(it.items) > 0
}
// Value returns the current item. It panics if Next has not returned true.
func (it *LifecyclePolicyDryRunResultsIterator) Value() *containerregistry.DryRunLifecyclePolicyResult {
	if len(it.items) == 0 {
		panic("calling Value on empty iterator")
	}
	return it.items[0]
}
// Error returns the first RPC error encountered during iteration, if any.
func (it *LifecyclePolicyDryRunResultsIterator) Error() error {
	return it.err
}
// Update implements containerregistry.LifecyclePolicyServiceClient.
// It resolves the lazily initialized gRPC connection, then forwards the
// request to the generated stub.
func (c *LifecyclePolicyServiceClient) Update(ctx context.Context, in *containerregistry.UpdateLifecyclePolicyRequest, opts ...grpc.CallOption) (*operation.Operation, error) {
	conn, connErr := c.getConn(ctx)
	if connErr != nil {
		return nil, connErr
	}
	stub := containerregistry.NewLifecyclePolicyServiceClient(conn)
	return stub.Update(ctx, in, opts...)
}
| {
"pile_set_name": "Github"
} |
<component name="libraryTable">
<library name="Gradle: junit:junit:4.12@jar">
<CLASSES>
<root url="jar://$USER_HOME$/.gradle/caches/modules-2/files-2.1/junit/junit/4.12/2973d150c0dc1fefe998f834810d68f278ea58ec/junit-4.12.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES>
<root url="jar://$USER_HOME$/.gradle/caches/modules-2/files-2.1/junit/junit/4.12/a6c32b40bf3d76eca54e3c601e5d1470c86fcdfa/junit-4.12-sources.jar!/" />
</SOURCES>
</library>
</component> | {
"pile_set_name": "Github"
} |
/* Miscellaneous definitions for VIA chipsets
   Currently used only by drivers/parport/parport_pc.c */
/* Values for SuperIO function select configuration register
   (parallel-port operating mode selection). */
#define VIA_FUNCTION_PARPORT_SPP 0x00
#define VIA_FUNCTION_PARPORT_ECP 0x01
#define VIA_FUNCTION_PARPORT_EPP 0x02
#define VIA_FUNCTION_PARPORT_DISABLE 0x03
#define VIA_FUNCTION_PROBE 0xFF /* Special magic value to be used in code, not to be written into chip */
/* Bits for parallel port mode configuration register */
#define VIA_PARPORT_ECPEPP 0X20
#define VIA_PARPORT_BIDIR 0x80
/* VIA configuration registers (index/data I/O port pair) */
#define VIA_CONFIG_INDEX 0x3F0
#define VIA_CONFIG_DATA 0x3F1
/* Mask for parallel port IRQ bits (in ISA PnP IRQ routing register 1) */
#define VIA_IRQCONTROL_PARALLEL 0xF0
/* Mask for parallel port DMA bits (in ISA PnP DMA routing register) */
#define VIA_DMACONTROL_PARALLEL 0x0C
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: aff9190e7c4f0f2418d0b551cdfac094
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<beans
xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://jboss.org/schema/cdi/beans_1_0.xsd">
</beans>
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Stop all running playback tasks, then (re)start video 03 in synced mode.
# kill all running tasks
/var/www/sync/stopall
# start video 03 in the background, discarding its output.
# Fix: print the player's PID synchronously instead of backgrounding the
# echo itself (the trailing '&' after `echo $!` made the output racy).
/usr/bin/omxplayer-sync -mu -o both /media/internal/video/03* > /dev/null 2>&1 &
echo $!
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(AVB_INSIDE_LIBAVB_H) && !defined(AVB_COMPILATION)
#error "Never include this file directly, include libavb.h instead."
#endif
#ifndef AVB_HASH_DESCRIPTOR_H_
#define AVB_HASH_DESCRIPTOR_H_
#include "avb_descriptor.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Flags for hash descriptors.
 *
 * AVB_HASH_DESCRIPTOR_FLAGS_DO_NOT_USE_AB: Do not apply the default A/B
 * partition logic to this partition. This is intentionally a negative boolean
 * because A/B should be both the default and most used in practice.
 */
typedef enum {
  AVB_HASH_DESCRIPTOR_FLAGS_DO_NOT_USE_AB = (1 << 0),
} AvbHashDescriptorFlags;
/* A descriptor containing information about hash for an image.
 *
 * This descriptor is typically used for boot partitions to verify the
 * entire kernel+initramfs image before executing it.
 *
 * Following this struct are |partition_name_len| bytes of the
 * partition name (UTF-8 encoded), |salt_len| bytes of salt, and then
 * |digest_len| bytes of the digest.
 *
 * The |reserved| field is for future expansion and must be set to NUL
 * bytes.
 *
 * Changes in v1.1:
 * - flags field is added which supports AVB_HASH_DESCRIPTOR_FLAGS_DO_NOT_USE_AB
 * - digest_len may be zero, which indicates the use of a persistent digest
 */
typedef struct AvbHashDescriptor {
  AvbDescriptor parent_descriptor;
  uint64_t image_size;
  uint8_t hash_algorithm[32];
  uint32_t partition_name_len;
  uint32_t salt_len;
  uint32_t digest_len;
  uint32_t flags; /* holds AvbHashDescriptorFlags values */
  uint8_t reserved[60];
} AVB_ATTR_PACKED AvbHashDescriptor;
/* Copies |src| to |dest| and validates, byte-swapping fields in the
 * process if needed. Returns true if valid, false if invalid.
 *
 * Data following the struct is not validated nor copied.
 */
bool avb_hash_descriptor_validate_and_byteswap(const AvbHashDescriptor* src,
                                               AvbHashDescriptor* dest)
    AVB_ATTR_WARN_UNUSED_RESULT;
#ifdef __cplusplus
}
#endif
#endif /* AVB_HASH_DESCRIPTOR_H_ */
| {
"pile_set_name": "Github"
} |
/*
*
* WordPres版微信小程序
* author: NiZerin
* organization: 泽林博客 www.iacblog.com
* github: https://github.com/CrazyNing98/WeChatMiniProgram-Blog
* 技术支持微信号:NINGCZ19980501
* 开源协议:MIT
* Copyright (c) 2017 https://www.iacblog.com/ All rights reserved.
*
*/
// Module dependencies: REST endpoints, string helpers, HTML parser,
// request wrappers and authentication utilities.
var Api = require('../../utils/api.js');
var util = require('../../utils/util.js');
var WxParse = require('../../wxParse/wxParse.js');
var wxApi = require('../../utils/wxApi.js')
var wxRequest = require('../../utils/wxRequest.js')
var Auth = require('../../utils/auth.js');
import config from '../../utils/config.js'
var app = getApp();
// Interstitial ad instance; created in onLoad when the ad API is available.
let interstitialAd = null
Page({
  // Page state rendered by the WXML template.
  data: {
    title: '页面内容',
    pageData: {},      // raw page payload fetched in fetchData
    pagesList: {},
    display: 'none',   // 'none' while loading, 'block' once content arrived
    wxParseData: [],
    praiseList: [],    // avatar URLs of users who praised (see fetchData)
    dialog: {
      title: '',
      content: '',
      hidden: true
    },
    userInfo: {},
    isLoginPopup: false, // whether the login popup is shown
    openid: "",
    system: ""           // 'iOS' or 'Android', detected in onLoad
  },
  // Lifecycle: set up the interstitial ad (if supported), the navigation
  // bar title, login state, page content and the platform flag.
  onLoad: function(options) {
    if (wx.createInterstitialAd) {
      interstitialAd = wx.createInterstitialAd({
        adUnitId: 'adunit-9fa574b054c9be1c'
      })
      interstitialAd.onLoad(() => {
        console.log('onLoad event emit')
      })
      interstitialAd.onError((err) => {
        console.log('onError event emit', err)
      })
      interstitialAd.onClose((res) => {
        console.log('onClose event emit', res)
      })
      interstitialAd.show().catch((err) => {
        console.error(err)
      })
    }
    var self = this;
    wx.setNavigationBarTitle({
      title: config.getWebsiteName + '-关于站长',
      success: function(res) {
        // success
      }
    });
    Auth.setUserInfoData(self);
    Auth.checkLogin(self);
    this.fetchData(config.getAboutId);
    wx.getSystemInfo({
      success: function(t) {
        // Treat anything that is not iOS as Android.
        var system = t.system.indexOf('iOS') != -1 ? 'iOS' : 'Android';
        self.setData({
          system: system
        });
      }
    })
  },
  // "Praise" (tip) the author: on Android (minAppType "0") go to the pay
  // page when logged in, otherwise trigger a login; elsewhere just show
  // the donation QR image (payment is restricted on iOS).
  praise: function() {
    var self = this;
    var minAppType = config.getMinAppType;
    var system = self.data.system;
    if (minAppType == "0" && system == 'Android') {
      if (self.data.openid) {
        wx.navigateTo({
          url: '../pay/pay?flag=2&openid=' + self.data.openid + '&postid=' + config.getAboutId
        })
      } else {
        Auth.checkSession(self, 'isLoginNow');
      }
    } else {
      var src = config.getZanImageUrl;
      wx.previewImage({
        urls: [src],
      });
    }
  },
  // Pull-to-refresh: clear the rendered state and reload the page content.
  onPullDownRefresh: function() {
    var self = this;
    self.setData({
      display: 'none',
      pageData: {},
      wxParseData: {},
    });
    this.fetchData(config.getAboutId);
    // Stop the pull-down animation to avoid a blank rectangle after refreshing.
    wx.stopPullDownRefresh()
  },
  // Share card configuration for the "forward" menu.
  onShareAppMessage: function() {
    return {
      title: '关于“' + config.getWebsiteName + '”官方小程序',
      path: 'pages/about/about',
      success: function(res) {
        // share succeeded
      },
      fail: function(res) {
        // share failed
      }
    }
  },
  // Open the embedded web page when allowed (minAppType "0"); otherwise
  // fall back to copying the site's domain to the clipboard.
  gotowebpage: function() {
    var self = this;
    var minAppType = config.getMinAppType;
    var url = '';
    if (minAppType == "0") {
      url = '../webpage/webpage?';
      wx.navigateTo({
        url: url
      })
    } else {
      self.copyLink(config.getDomain);
    }
  },
  // Copy the given URL to the clipboard and confirm with a toast.
  copyLink: function(url) {
    //this.ShowHideMenu();
    wx.setClipboardData({
      data: url,
      success: function(res) {
        wx.getClipboardData({
          success: function(res) {
            wx.showToast({
              title: '链接已复制',
              image: '../../images/link.png',
              duration: 2000
            })
          }
        })
      }
    })
  },
  // Tap handler for <a> tags rendered by wxParse: copy external links to
  // the clipboard; route in-site links to the matching page or post.
  wxParseTagATap: function(e) {
    var self = this;
    var href = e.currentTarget.dataset.src;
    console.log(href);
    var domain = config.getDomain;
    // Route handling: external links are copied, internal ones resolved below.
    if (href.indexOf(domain) == -1) {
      wx.setClipboardData({
        data: href,
        success: function(res) {
          wx.getClipboardData({
            success: function(res) {
              wx.showToast({
                title: '链接已复制',
                //icon: 'success',
                image: '../../images/link.png',
                duration: 2000
              })
            }
          })
        }
      })
    } else {
      var slug = util.GetUrlFileName(href, domain);
      if (slug == 'index') {
        wx.switchTab({
          url: '../index/index'
        })
      } else {
        // Resolve the slug to a post id, then open its detail page.
        // After 4 stacked opens use redirectTo to avoid exceeding the
        // mini-program page-stack limit.
        var getPostSlugRequest = wxRequest.getRequest(Api.getPostBySlug(slug));
        getPostSlugRequest
          .then(res => {
            var postID = res.data[0].id;
            var openLinkCount = wx.getStorageSync('openLinkCount') || 0;
            if (openLinkCount > 4) {
              wx.redirectTo({
                url: '../detail/detail?id=' + postID
              })
            } else {
              wx.navigateTo({
                url: '../detail/detail?id=' + postID
              })
              openLinkCount++;
              wx.setStorageSync('openLinkCount', openLinkCount);
            }
          })
      }
    }
  },
agreeGetUser: function(e) {
var userInfo = e.detail.userInfo;
var self = this;
if (userInfo) {
auth.getUsreInfo(e.detail);
self.setData({
userInfo: userInfo
});
}
setTimeout(function() {
self.setData({
isLoginPopup: false
})
}, 1200);
},
  // Hide the login popup.
  closeLoginPopup() {
    this.setData({
      isLoginPopup: false
    });
  },
  // Show the login popup.
  openLoginPopup() {
    this.setData({
      isLoginPopup: true
    });
  },
  // Fetch the "about" page by id, render its HTML via wxParse, then load
  // the avatars of users who praised the page.
  fetchData: function(id) {
    var self = this;
    var getPageRequest = wxRequest.getRequest(Api.getPageByID(id));
    getPageRequest.then(response => {
      console.log(response);
      WxParse.wxParse('article', 'html', response.data.content.rendered, self, 5);
      self.setData({
        pageData: response.data,
        // wxParseData: WxParse('md',response.data.content.rendered)
        //wxParseData: WxParse.wxParse('article', 'html', response.data.content.rendered, self, 5)
      });
      self.setData({
        display: 'block'
      });
    }).then(res => {
      var getAllPraiseRequest = wxRequest.getRequest(Api.getAllPraiseUrl());
      getAllPraiseRequest.then(response => {
        if (response.data.status == '200') {
          var _avatarurls = response.data.avatarurls;
          var avatarurls = [];
          for (var i = 0; i < _avatarurls.length; i++) {
            // Only trust WeChat-hosted avatars; otherwise use the placeholder.
            var avatarurl = "../../images/gravatar.png";
            if (_avatarurls[i].avatarurl.indexOf('wx.qlogo.cn') != -1) {
              avatarurl = _avatarurls[i].avatarurl;
            }
            avatarurls[i] = avatarurl;
          }
          self.setData({
            praiseList: avatarurls
          });
        } else {
          console.log(response);
        }
      })
    })
    .then(res => {
      if (!app.globalData.isGetOpenid) {
        // auth.getUsreInfo();
      }
    })
  }
})
"pile_set_name": "Github"
} |
// Angular module that lets documentation demos be opened in Plunker:
// it posts the demo's markup and script to plnkr.co via a hidden form.
angular.module('plunker', [])
  .factory('plunkGenerator', function ($document) {
    // Returns a function that builds and submits the Plunker form.
    return function (ngVersion, bsVersion, version, module, content) {
      var form = angular.element('<form style="display: none;" method="post" action="http://plnkr.co/edit/?p=preview" target="_blank"></form>');
      // Append one hidden input per Plunker form field.
      var addField = function (name, value) {
        var input = angular.element('<input type="hidden" name="' + name + '">');
        input.attr('value', value);
        form.append(input);
      };
      // Wraps the demo markup into a complete index.html with CDN assets.
      var indexContent = function (content, version) {
        return '<!doctype html>\n' +
               '<html ng-app="plunker">\n' +
               '  <head>\n' +
               '    <script src="//ajax.googleapis.com/ajax/libs/angularjs/'+ngVersion+'/angular.js"></script>\n' +
               '    <script src="//angular-ui.github.io/bootstrap/ui-bootstrap-tpls-'+version+'.js"></script>\n' +
               '    <script src="example.js"></script>\n' +
               '    <link href="//netdna.bootstrapcdn.com/bootstrap/'+bsVersion+'/css/bootstrap.min.css" rel="stylesheet">\n' +
               '  </head>\n' +
               '  <body>\n\n' +
               content + '\n' +
               '  </body>\n' +
               '</html>\n';
      };
      // Prepends the module bootstrap to the demo's JavaScript.
      var scriptContent = function(content) {
        return "angular.module('plunker', ['ui.bootstrap']);" + "\n" + content;
      };
      addField('description', 'http://angular-ui.github.io/bootstrap/');
      addField('files[index.html]', indexContent(content.markup, version));
      addField('files[example.js]', scriptContent(content.javascript));
      // The form must be attached to the DOM before submit; remove it after.
      $document.find('body').append(form);
      form[0].submit();
      form.remove();
    };
  })
  .controller('PlunkerCtrl', function ($scope, plunkGenerator) {
    $scope.content = {};
    $scope.edit = function (ngVersion, bsVersion, version, module) {
      plunkGenerator(ngVersion, bsVersion, version, module, $scope.content);
    };
  })
  // Captures the text of an annotated element into $scope.content under
  // the key named by the plunker-content attribute.
  .directive('plunkerContent', function () {
    return {
      link:function (scope, element, attrs) {
        scope.content[attrs.plunkerContent] = element.text().trim();
      }
    }
  });
"pile_set_name": "Github"
} |
<?php
/*
* @copyright 2014 Mautic Contributors. All rights reserved
* @author Mautic
*
* @link http://mautic.org
*
* @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html
*/
namespace Mautic\PageBundle\EventListener;
use Mautic\CoreBundle\EventListener\ChannelTrait;
use Mautic\LeadBundle\Event\LeadChangeEvent;
use Mautic\LeadBundle\Event\LeadMergeEvent;
use Mautic\LeadBundle\Event\LeadTimelineEvent;
use Mautic\LeadBundle\LeadEvents;
use Mautic\LeadBundle\Model\ChannelTimelineInterface;
use Mautic\PageBundle\Model\PageModel;
use Mautic\PageBundle\Model\VideoModel;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Symfony\Component\Routing\RouterInterface;
use Symfony\Component\Translation\TranslatorInterface;
class LeadSubscriber implements EventSubscriberInterface
{
    use ChannelTrait;
    /**
     * @var PageModel
     */
    private $pageModel;
    /**
     * @var VideoModel
     */
    private $pageVideoModel;
    /**
     * @var TranslatorInterface
     */
    private $translator;
    /**
     * @var RouterInterface
     */
    private $router;
    /**
     * @param PageModel           $pageModel      page/hit data access
     * @param VideoModel          $pageVideoModel video hit data access
     * @param TranslatorInterface $translator     label translation
     * @param RouterInterface     $router         URL generation for timeline links
     */
    public function __construct(
        PageModel $pageModel,
        VideoModel $pageVideoModel,
        TranslatorInterface $translator,
        RouterInterface $router
    ) {
        $this->pageModel      = $pageModel;
        $this->pageVideoModel = $pageVideoModel;
        $this->translator     = $translator;
        $this->router         = $router;
    }
    /**
     * {@inheritdoc}
     */
    public static function getSubscribedEvents()
    {
        return [
            LeadEvents::TIMELINE_ON_GENERATE => [
                ['onTimelineGenerate', 0],
                ['onTimelineGenerateVideo', 0],
            ],
            LeadEvents::CURRENT_LEAD_CHANGED => ['onLeadChange', 0],
            LeadEvents::LEAD_POST_MERGE      => ['onLeadMerge', 0],
        ];
    }
    /**
     * Compile page-hit events for the lead timeline.
     */
    public function onTimelineGenerate(LeadTimelineEvent $event)
    {
        // Set available event types
        $eventTypeKey  = 'page.hit';
        $eventTypeName = $this->translator->trans('mautic.page.event.hit');
        $event->addEventType($eventTypeKey, $eventTypeName);
        $event->addSerializerGroup('pageList', 'hitDetails');
        if (!$event->isApplicable($eventTypeKey)) {
            return;
        }
        $hits = $this->pageModel->getHitRepository()->getLeadHits(
            $event->getLeadId(),
            $event->getQueryOptions()
        );
        // Add to counter
        $event->addToCounter($eventTypeKey, $hits);
        if (!$event->isEngagementCount()) {
            // Add the hits to the event array
            foreach ($hits['results'] as $hit) {
                // Defaults; the originating channel may override any of them below.
                $template = 'MauticPageBundle:SubscribedEvents\Timeline:index.html.php';
                $icon     = 'fa-link';
                if (!empty($hit['source'])) {
                    if ($channelModel = $this->getChannelModel($hit['source'])) {
                        if ($channelModel instanceof ChannelTimelineInterface) {
                            if ($overrideTemplate = $channelModel->getChannelTimelineTemplate($eventTypeKey, $hit)) {
                                $template = $overrideTemplate;
                            }
                            if ($overrideEventTypeName = $channelModel->getChannelTimelineLabel($eventTypeKey, $hit)) {
                                $eventTypeName = $overrideEventTypeName;
                            }
                            if ($overrideIcon = $channelModel->getChannelTimelineIcon($eventTypeKey, $hit)) {
                                $icon = $overrideIcon;
                            }
                        }
                        /* @deprecated - BC support to be removed in 3.0 */
                        // Allow a custom template if applicable
                        if (method_exists($channelModel, 'getPageHitLeadTimelineTemplate')) {
                            $template = $channelModel->getPageHitLeadTimelineTemplate($hit);
                        }
                        if (method_exists($channelModel, 'getPageHitLeadTimelineLabel')) {
                            $eventTypeName = $channelModel->getPageHitLeadTimelineLabel($hit);
                        }
                        if (method_exists($channelModel, 'getPageHitLeadTimelineIcon')) {
                            $icon = $channelModel->getPageHitLeadTimelineIcon($hit);
                        }
                        /* end deprecation */
                        if (!empty($hit['sourceId'])) {
                            if ($source = $this->getChannelEntityName($hit['source'], $hit['sourceId'], true)) {
                                $hit['sourceName']  = $source['name'];
                                $hit['sourceRoute'] = $source['url'];
                            }
                        }
                    }
                }
                // Link to the tracked page when known; otherwise to the raw URL.
                if (!empty($hit['page_id'])) {
                    $page       = $this->pageModel->getEntity($hit['page_id']);
                    $eventLabel = [
                        'label' => $page->getTitle(),
                        'href'  => $this->router->generate('mautic_page_action', ['objectAction' => 'view', 'objectId' => $hit['page_id']]),
                    ];
                } else {
                    $eventLabel = [
                        'label'      => (isset($hit['urlTitle'])) ? $hit['urlTitle'] : $hit['url'],
                        'href'       => $hit['url'],
                        'isExternal' => true,
                    ];
                }
                $contactId = $hit['lead_id'];
                unset($hit['lead_id']);
                $event->addEvent(
                    [
                        'event'           => $eventTypeKey,
                        'eventId'         => $hit['hitId'],
                        'eventLabel'      => $eventLabel,
                        'eventType'       => $eventTypeName,
                        'timestamp'       => $hit['dateHit'],
                        'extra'           => [
                            'hit' => $hit,
                        ],
                        'contentTemplate' => $template,
                        'icon'            => $icon,
                        'contactId'       => $contactId,
                    ]
                );
            }
        }
    }
    /**
     * Compile video-hit events for the lead timeline.
     */
    public function onTimelineGenerateVideo(LeadTimelineEvent $event)
    {
        // Set available event types
        $eventTypeKey  = 'page.videohit';
        $eventTypeName = $this->translator->trans('mautic.page.event.videohit');
        $event->addEventType($eventTypeKey, $eventTypeName);
        $event->addSerializerGroup('pageList', 'hitDetails');
        if (!$event->isApplicable($eventTypeKey)) {
            return;
        }
        $hits = $this->pageVideoModel->getHitRepository()->getTimelineStats(
            $event->getLeadId(),
            $event->getQueryOptions()
        );
        $event->addToCounter($eventTypeKey, $hits);
        if (!$event->isEngagementCount()) {
            // Add the hits to the event array
            foreach ($hits['results'] as $hit) {
                $template   = 'MauticPageBundle:SubscribedEvents\Timeline:videohit.html.php';
                $eventLabel = $eventTypeName;
                $event->addEvent(
                    [
                        'event'           => $eventTypeKey,
                        'eventLabel'      => $eventLabel,
                        'eventType'       => $eventTypeName,
                        'timestamp'       => $hit['date_hit'],
                        'extra'           => [
                            'hit' => $hit,
                        ],
                        'contentTemplate' => $template,
                        'icon'            => 'fa-video-camera',
                    ]
                );
            }
        }
    }
    /**
     * Re-attach anonymous hits to the new lead when the tracked contact changes.
     */
    public function onLeadChange(LeadChangeEvent $event)
    {
        $this->pageModel->getHitRepository()->updateLeadByTrackingId(
            $event->getNewLead()->getId(),
            $event->getNewTrackingId(),
            $event->getOldTrackingId()
        );
    }
    /**
     * Move page and video hits from the losing lead to the winner after a merge.
     */
    public function onLeadMerge(LeadMergeEvent $event)
    {
        $this->pageModel->getHitRepository()->updateLead(
            $event->getLoser()->getId(),
            $event->getVictor()->getId()
        );
        $this->pageVideoModel->getHitRepository()->updateLead(
            $event->getLoser()->getId(),
            $event->getVictor()->getId()
        );
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2004 Michal Ludvig <[email protected]>
*
*/
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
/* Control word passed to the xcrypt instructions (loaded via EDX). */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,	/* number of AES rounds (10/12/14, see aes_set_key) */
		algo:3,
		keygen:1,	/* set when a software-expanded key is supplied */
		interm:1,
		encdec:1,	/* 0 = encrypt, 1 = decrypt */
		ksize:2;	/* (key_len - 16) / 8 */
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16 Bytes boundaries and
 * the Hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]		/* encryption key schedule (or plain key) */
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]	/* decryption key schedule when software-expanded */
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;		/* points at E (hardware keygen) or d_data (software keygen) */
};
/* Control word most recently used on each CPU; lets padlock_reset_key()
 * skip the (costly) state reset when the same cword is used again. */
static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Report whether the PadLock ACE can expand the key schedule itself
   for a key of key_len bytes (currently only 128-bit keys qualify). */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	return (key_len == 16) ? 1 : 0;
}
/* Return the aes_ctx embedded in a tfm context, aligned up to
 * PADLOCK_ALIGNMENT (the hardware requires 16-byte aligned key data). */
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;
	/* If the crypto layer already guarantees sufficient alignment,
	 * no extra rounding is needed. */
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
/* aes_ctx for a plain cipher tfm. */
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}
/* aes_ctx for a block-cipher tfm. */
static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
/* Install an AES key: store the plain key (and control words) when the
 * hardware can expand it itself, otherwise expand it in software.
 * Returns 0 on success or -EINVAL for an unsupported key length. */
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;
	/* Only 16/24/32-byte keys are meaningful; reject other lengths. */
	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;
	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);
	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));
	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;
	/* Software key expansion path: use the generic expander and
	 * separate decryption schedule in d_data. */
	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;
	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
ok:
	/* Invalidate any per-CPU cached pointer to this context's control
	 * words so the new key is reloaded on next use. */
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;
	return 0;
}
/* ====== Encryption/decryption routines ====== */
/* These are the real call to PadLock. */
/* Force the CPU to reload the key state before the next xcrypt when the
 * control word differs from the one last used on this CPU. The pushf/popf
 * pair is what triggers the reload — NOTE(review): this is a documented
 * PadLock quirk; confirm against the VIA programming guide. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();
	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}
/* Remember the control word just used on this CPU (see padlock_reset_key). */
static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context
 */
/* rep xcryptecb: ESI=input, EDI=output, EDX=control word, EBX=key,
 * ECX=block count. Encoded as raw bytes since old assemblers lack the
 * mnemonic. */
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
/* rep xcryptcbc: as above plus EAX=IV; returns the updated IV left in
 * EAX by the instruction. */
static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
/* ECB through a stack bounce buffer, used when the input sits close
 * enough to a page boundary that the hardware prefetch could fault. */
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}
/* CBC counterpart of ecb_crypt_copy(); returns the updated IV. */
static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
struct cword *cword, int count)
{
/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
* We could avoid some copying here but it's probably not worth it.
*/
if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
ecb_crypt_copy(in, out, key, cword, count);
return;
}
rep_xcrypt_ecb(in, out, key, cword, count);
}
/*
 * CBC entry point for short runs: direct hardware path unless the input
 * sits too close to the end of a page (see ecb_crypt() above).
 * Returns the updated IV.
 */
static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
/*
 * Bulk ECB over 'count' blocks.  Requests shorter than ecb_fetch_blocks go
 * through ecb_crypt(), which handles the page-end prefetch hazard.  Larger
 * requests are split: the (count % ecb_fetch_blocks) remainder is processed
 * first, then the rest in one go, so the final rep always covers a multiple
 * of ecb_fetch_blocks (part of the workaround enabled in padlock_init()).
 */
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}
/*
 * Bulk CBC counterpart of padlock_xcrypt_ecb(): short requests fall back to
 * cbc_crypt(); larger ones process the remainder first so the second rep
 * covers a multiple of cbc_fetch_blocks.  EAX threads the IV through both
 * reps; the final IV pointer is returned.
 */
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count-initial));

	return iv;
}
/*
 * Single-block cipher .cia_encrypt handler.  CR0.TS is saved/cleared around
 * the xcrypt to avoid the spurious DNA fault described in the comment at
 * the top of this file.
 */
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}
/*
 * Single-block cipher .cia_decrypt handler.  Note that reset/store track
 * &ctx->cword.encrypt while the operation itself runs with the decrypt
 * control word and key schedule (ctx->D) — the same convention the other
 * handlers in this file follow.
 */
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}
/* Plain single-block AES cipher ("aes") backed by the PadLock engine. */
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey	   	= 	aes_set_key,
			.cia_encrypt	 	=	aes_encrypt,
			.cia_decrypt	  	=	aes_decrypt,
		}
	}
};
/*
 * blkcipher .encrypt handler for "ecb(aes)": walk the scatterlists and
 * hand each contiguous run of full blocks to the PadLock engine, returning
 * any partial tail block to the walk.
 */
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	/* Keep CR0.TS handled across the whole walk (see top-of-file note
	 * about spurious DNA faults). */
	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		/* Bytes of an incomplete final block go back to the walk. */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
/*
 * blkcipher .decrypt handler for "ecb(aes)".  Mirrors ecb_aes_encrypt()
 * but runs with the decrypt key schedule (ctx->D) and decrypt cword.
 */
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
/* Block-cipher registration for "ecb(aes)" using the handlers above. */
static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey	   		= 	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};
/*
 * blkcipher .encrypt handler for "cbc(aes)": per walk chunk, run the
 * hardware CBC pass and copy the engine's final IV back into walk.iv so
 * chaining continues correctly across chunks.
 */
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	/* NOTE(review): this stores the *decrypt* control word after an
	 * encrypt pass, unlike the three sibling handlers which store
	 * &ctx->cword.encrypt.  Confirm this asymmetry is intentional — if
	 * padlock_store_cword() feeds a skip-the-key-reload check in
	 * padlock_reset_key(), recording the wrong cword could skip a
	 * needed reload on the next operation on this CPU. */
	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}
/*
 * blkcipher .decrypt handler for "cbc(aes)".  The hardware updates walk.iv
 * in place via the EAX register path, so no explicit IV copy-back is
 * needed here (unlike the encrypt side).
 */
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
/* Block-cipher registration for "cbc(aes)" using the handlers above. */
static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey	   		= 	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};
/* Match any CPU advertising the XCRYPT feature; the table is exported so
 * userspace module loaders can autoload this driver on such hardware. */
static struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
/*
 * Module init: verify the CPU has XCRYPT and that it is enabled, register
 * the three algorithm implementations (unwinding in reverse on failure),
 * and enable the larger fetch-block workaround on VIA Nano stepping 2.
 */
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	/* family 6, model 15, stepping 2: VIA Nano needing bigger fetches. */
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

	/* Error unwind: undo registrations in reverse order of success. */
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
/* Module exit: unregister all three algorithms. */
static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

/* Also answer requests for the generic "aes" algorithm name. */
MODULE_ALIAS("aes");
| {
"pile_set_name": "Github"
} |
# Widens observation_fields.allowed_values from 256 to 512 characters.
class EmbiggenObsFieldValue < ActiveRecord::Migration
  # Grow the column so longer value lists fit.
  def up
    change_column :observation_fields, :allowed_values, :string, limit: 512
  end

  # Restore the previous 256-character limit.
  def down
    change_column :observation_fields, :allowed_values, :string, limit: 256
  end
end
| {
"pile_set_name": "Github"
} |
{
"tag": "body",
"children": [
{
"tag": "div",
"children": [
{
"tag": "div",
"children": [
{
"size": [
80,
20
],
"text": "XX X"
},
{
"tag": "br",
"content": {
"size": [
0,
20
],
"position": [
88,
8
]
},
"padding_box": {
"size": [
0,
20
],
"position": [
88,
8
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
0,
20
],
"position": [
88,
8
]
},
"padding": [
0,
0,
0,
0
],
"id": ""
},
{
"size": [
100,
20
],
"text": "X XXX"
},
{
"tag": "br",
"content": {
"size": [
0,
20
],
"position": [
108,
28
]
},
"padding_box": {
"size": [
0,
20
],
"position": [
108,
28
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
0,
20
],
"position": [
108,
28
]
},
"padding": [
0,
0,
0,
0
],
"id": ""
},
{
"size": [
20,
20
],
"text": "X"
},
{
"tag": "br",
"content": {
"size": [
0,
20
],
"position": [
28,
48
]
},
"padding_box": {
"size": [
0,
20
],
"position": [
28,
48
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
0,
20
],
"position": [
28,
48
]
},
"padding": [
0,
0,
0,
0
],
"id": ""
},
{
"size": [
60,
40
],
"text": "XX XXX"
}
],
"content": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"padding_box": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"padding": [
0,
0,
0,
0
],
"id": "item"
}
],
"content": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"padding_box": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
100,
200
],
"position": [
8,
8
]
},
"padding": [
0,
0,
0,
0
],
"id": "grid"
}
],
"content": {
"size": [
1008,
200
],
"position": [
8,
8
]
},
"padding_box": {
"size": [
1008,
200
],
"position": [
8,
8
]
},
"border": [
0,
0,
0,
0
],
"border_box": {
"size": [
1008,
200
],
"position": [
8,
8
]
},
"padding": [
0,
0,
0,
0
],
"id": ""
} | {
"pile_set_name": "Github"
} |
# Copyright 2011-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=7
# Live (9999) ebuilds build from git; release ebuilds fetch a GitHub
# tarball — versions containing a dot come from the systemd-stable repo,
# others from the main systemd repo.
if [[ ${PV} == 9999 ]]; then
	EGIT_REPO_URI="https://github.com/systemd/systemd.git"
	inherit git-r3
else
	if [[ ${PV} == *.* ]]; then
		MY_PN=systemd-stable
	else
		MY_PN=systemd
	fi
	# Map Gentoo's _suffix versions to upstream's -suffix tags.
	MY_PV=${PV/_/-}
	MY_P=${MY_PN}-${MY_PV}
	S=${WORKDIR}/${MY_P}
	SRC_URI="https://github.com/systemd/${MY_PN}/archive/v${MY_PV}/${MY_P}.tar.gz"
	KEYWORDS="~alpha ~amd64 ~arm ~arm64 ~hppa ~ia64 ~mips ~ppc ~ppc64 ~sparc ~x86"
fi
inherit bash-completion-r1 linux-info meson multilib-minimal ninja-utils pam python-any-r1 systemd toolchain-funcs udev usr-ldscript
DESCRIPTION="System and service manager for Linux"
HOMEPAGE="https://www.freedesktop.org/wiki/Software/systemd"
LICENSE="GPL-2 LGPL-2.1 MIT public-domain"
SLOT="0/2"
IUSE="acl apparmor audit build cgroup-hybrid cryptsetup curl dns-over-tls elfutils +gcrypt gnuefi homed http +hwdb idn importd +kmod +lz4 lzma nat pam pcre pkcs11 policykit pwquality qrcode repart +resolvconf +seccomp selinux split-usr static-libs +sysv-utils test vanilla xkb"
REQUIRED_USE="
homed? ( cryptsetup )
importd? ( curl gcrypt lzma )
"
RESTRICT="!test? ( test )"
MINKV="3.11"
OPENSSL_DEP=">=dev-libs/openssl-1.1.0:0="
COMMON_DEPEND=">=sys-apps/util-linux-2.30:0=[${MULTILIB_USEDEP}]
sys-libs/libcap:0=[${MULTILIB_USEDEP}]
acl? ( sys-apps/acl:0= )
apparmor? ( sys-libs/libapparmor:0= )
audit? ( >=sys-process/audit-2:0= )
cryptsetup? ( >=sys-fs/cryptsetup-2.0.1:0= )
curl? ( net-misc/curl:0= )
dns-over-tls? ( >=net-libs/gnutls-3.6.0:0= )
elfutils? ( >=dev-libs/elfutils-0.158:0= )
gcrypt? ( >=dev-libs/libgcrypt-1.4.5:0=[${MULTILIB_USEDEP}] )
homed? ( ${OPENSSL_DEP} )
http? (
>=net-libs/libmicrohttpd-0.9.33:0=[epoll(+)]
>=net-libs/gnutls-3.1.4:0=
)
idn? ( net-dns/libidn2:= )
importd? (
app-arch/bzip2:0=
sys-libs/zlib:0=
)
kmod? ( >=sys-apps/kmod-15:0= )
lz4? ( >=app-arch/lz4-0_p131:0=[${MULTILIB_USEDEP}] )
lzma? ( >=app-arch/xz-utils-5.0.5-r1:0=[${MULTILIB_USEDEP}] )
nat? ( net-firewall/iptables:0= )
pam? ( sys-libs/pam:=[${MULTILIB_USEDEP}] )
pkcs11? ( app-crypt/p11-kit:0= )
pcre? ( dev-libs/libpcre2 )
pwquality? ( dev-libs/libpwquality:0= )
qrcode? ( media-gfx/qrencode:0= )
repart? ( ${OPENSSL_DEP} )
seccomp? ( >=sys-libs/libseccomp-2.3.3:0= )
selinux? ( sys-libs/libselinux:0= )
xkb? ( >=x11-libs/libxkbcommon-0.4.1:0= )"
# Newer linux-headers needed by ia64, bug #480218
DEPEND="${COMMON_DEPEND}
>=sys-kernel/linux-headers-${MINKV}
gnuefi? ( >=sys-boot/gnu-efi-3.0.2 )
"
# baselayout-2.2 has /run
RDEPEND="${COMMON_DEPEND}
acct-group/adm
acct-group/wheel
acct-group/kmem
acct-group/tty
acct-group/utmp
acct-group/audio
acct-group/cdrom
acct-group/dialout
acct-group/disk
acct-group/input
acct-group/kvm
acct-group/render
acct-group/tape
acct-group/video
acct-group/systemd-journal
acct-user/systemd-journal-remote
acct-user/systemd-coredump
acct-user/systemd-network
acct-user/systemd-resolve
acct-user/systemd-timesync
>=sys-apps/baselayout-2.2
selinux? ( sec-policy/selinux-base-policy[systemd] )
sysv-utils? ( !sys-apps/sysvinit )
!sysv-utils? ( sys-apps/sysvinit )
resolvconf? ( !net-dns/openresolv )
!build? ( || (
sys-apps/util-linux[kill(-)]
sys-process/procps[kill(+)]
sys-apps/coreutils[kill(-)]
) )
!sys-auth/nss-myhostname
!sys-fs/eudev
!sys-fs/udev
"
# sys-apps/dbus: the daemon only (+ build-time lib dep for tests)
PDEPEND=">=sys-apps/dbus-1.9.8[systemd]
hwdb? ( >=sys-apps/hwids-20150417[udev] )
>=sys-fs/udev-init-scripts-34
policykit? ( sys-auth/polkit )
!vanilla? ( sys-apps/gentoo-systemd-integration )"
BDEPEND="
app-arch/xz-utils:0
dev-util/gperf
>=dev-util/meson-0.46
>=dev-util/intltool-0.50
>=sys-apps/coreutils-8.16
sys-devel/m4
virtual/pkgconfig
test? ( sys-apps/dbus )
app-text/docbook-xml-dtd:4.2
app-text/docbook-xml-dtd:4.5
app-text/docbook-xsl-stylesheets
dev-libs/libxslt:0
$(python_gen_any_dep 'dev-python/lxml[${PYTHON_USEDEP}]')
"
# python-any-r1 hook: accept a Python implementation only if lxml (needed
# for building the documentation) is installed for it.
python_check_deps() {
	has_version -b "dev-python/lxml[${PYTHON_USEDEP}]"
}
# Pre-merge sanity checks: warn about PID-sandboxed test runs and verify
# the configured kernel provides the options systemd needs at runtime.
pkg_pretend() {
	if [[ ${MERGE_TYPE} != buildonly ]]; then
		if use test && has pid-sandbox ${FEATURES}; then
			ewarn "Tests are known to fail with PID sandboxing enabled."
			ewarn "See https://bugs.gentoo.org/674458."
		fi

		local CONFIG_CHECK="~AUTOFS4_FS ~BLK_DEV_BSG ~CGROUPS
			~CHECKPOINT_RESTORE ~DEVTMPFS ~EPOLL ~FANOTIFY ~FHANDLE
			~INOTIFY_USER ~IPV6 ~NET ~NET_NS ~PROC_FS ~SIGNALFD ~SYSFS
			~TIMERFD ~TMPFS_XATTR ~UNIX ~USER_NS
			~CRYPTO_HMAC ~CRYPTO_SHA256 ~CRYPTO_USER_API_HASH
			~!GRKERNSEC_PROC ~!IDE ~!SYSFS_DEPRECATED
			~!SYSFS_DEPRECATED_V2"

		# Extra options gated on USE flags and kernel version.
		use acl && CONFIG_CHECK+=" ~TMPFS_POSIX_ACL"
		use seccomp && CONFIG_CHECK+=" ~SECCOMP ~SECCOMP_FILTER"
		kernel_is -lt 3 7 && CONFIG_CHECK+=" ~HOTPLUG"
		kernel_is -lt 4 7 && CONFIG_CHECK+=" ~DEVPTS_MULTIPLE_INSTANCES"
		kernel_is -ge 4 10 && CONFIG_CHECK+=" ~CGROUP_BPF"

		if linux_config_exists; then
			local uevent_helper_path=$(linux_chkconfig_string UEVENT_HELPER_PATH)
			if [[ -n ${uevent_helper_path} ]] && [[ ${uevent_helper_path} != '""' ]]; then
				ewarn "It's recommended to set an empty value to the following kernel config option:"
				ewarn "CONFIG_UEVENT_HELPER_PATH=${uevent_helper_path}"
			fi
			if linux_chkconfig_present X86; then
				CONFIG_CHECK+=" ~DMIID"
			fi
		fi

		if kernel_is -lt ${MINKV//./ }; then
			ewarn "Kernel version at least ${MINKV} required"
		fi

		check_extra_config
	fi
}
# Nothing to do here; kernel checks live in pkg_pretend and the Python
# interpreter is selected in src_configure.
pkg_setup() {
	:
}
# Unpack the tarball normally; live ebuilds additionally check out git.
src_unpack() {
	default
	[[ ${PV} != 9999 ]] || git-r3_src_unpack
}
# Apply user patch directory (if provided) plus Gentoo integration patches
# unless USE=vanilla asks for pristine upstream behavior.
src_prepare() {
	# Do NOT add patches here
	local PATCHES=()

	[[ -d "${WORKDIR}"/patches ]] && PATCHES+=( "${WORKDIR}"/patches )

	# Add local patches here
	PATCHES+=(
	)

	if ! use vanilla; then
		PATCHES+=(
			"${FILESDIR}/gentoo-generator-path-r2.patch"
			"${FILESDIR}/gentoo-systemctl-disable-sysv-sync.patch"
			"${FILESDIR}/gentoo-journald-audit.patch"
		)
	fi

	default
}
# Export toolchain variables and pick a Python before the per-ABI
# configure runs.
src_configure() {
	# Prevent conflicts with i686 cross toolchain, bug 559726
	tc-export AR CC NM OBJCOPY RANLIB

	python_setup

	multilib-minimal_src_configure
}
# Print "true" if the given USE flag is set, "false" otherwise, in the
# form meson boolean options expect.
meson_use() {
	if use "$1"; then
		echo true
	else
		echo false
	fi
}
# Print "true" for the native ABI and "false" for every other ABI, so a
# feature can be built only once per system.
meson_multilib() {
	multilib_is_native_abi && echo true || echo false
}
# Print "true" only when building the native ABI *and* the given USE flag
# is set; "false" otherwise.
meson_multilib_native_use() {
	if ! multilib_is_native_abi || ! use "$1"; then
		echo false
	else
		echo true
	fi
}
# Per-ABI meson configuration.  Native-only features are toggled through
# the meson_multilib* helpers above so non-native ABIs build only the
# libraries and components needed for multilib.
multilib_src_configure() {
	local myconf=(
		--localstatedir="${EPREFIX}/var"
		-Dsupport-url="https://gentoo.org/support/"
		-Dpamlibdir="$(getpam_mod_dir)"
		# avoid bash-completion dep
		-Dbashcompletiondir="$(get_bashcompdir)"
		# make sure we get /bin:/sbin in PATH
		-Dsplit-usr=$(usex split-usr true false)
		-Dsplit-bin=true
		-Drootprefix="$(usex split-usr "${EPREFIX:-/}" "${EPREFIX}/usr")"
		-Drootlibdir="${EPREFIX}/usr/$(get_libdir)"
		# Avoid infinite exec recursion, bug 642724
		-Dtelinit-path="${EPREFIX}/lib/sysvinit/telinit"
		# no deps
		-Dima=true
		-Ddefault-hierarchy=$(usex cgroup-hybrid hybrid unified)
		# Optional components/dependencies
		-Dacl=$(meson_multilib_native_use acl)
		-Dapparmor=$(meson_multilib_native_use apparmor)
		-Daudit=$(meson_multilib_native_use audit)
		-Dlibcryptsetup=$(meson_multilib_native_use cryptsetup)
		-Dlibcurl=$(meson_multilib_native_use curl)
		-Ddns-over-tls=$(meson_multilib_native_use dns-over-tls)
		-Delfutils=$(meson_multilib_native_use elfutils)
		-Dgcrypt=$(meson_use gcrypt)
		-Dgnu-efi=$(meson_multilib_native_use gnuefi)
		-Defi-libdir="${ESYSROOT}/usr/$(get_libdir)"
		-Dhomed=$(meson_multilib_native_use homed)
		-Dhwdb=$(meson_multilib_native_use hwdb)
		-Dmicrohttpd=$(meson_multilib_native_use http)
		-Didn=$(meson_multilib_native_use idn)
		-Dimportd=$(meson_multilib_native_use importd)
		-Dbzip2=$(meson_multilib_native_use importd)
		-Dzlib=$(meson_multilib_native_use importd)
		-Dkmod=$(meson_multilib_native_use kmod)
		-Dlz4=$(meson_use lz4)
		-Dxz=$(meson_use lzma)
		-Dlibiptc=$(meson_multilib_native_use nat)
		-Dpam=$(meson_use pam)
		-Dp11kit=$(meson_multilib_native_use pkcs11)
		-Dpcre2=$(meson_multilib_native_use pcre)
		-Dpolkit=$(meson_multilib_native_use policykit)
		-Dpwquality=$(meson_multilib_native_use pwquality)
		-Dqrencode=$(meson_multilib_native_use qrcode)
		-Drepart=$(meson_multilib_native_use repart)
		-Dseccomp=$(meson_multilib_native_use seccomp)
		-Dselinux=$(meson_multilib_native_use selinux)
		-Ddbus=$(meson_multilib_native_use test)
		-Dxkbcommon=$(meson_multilib_native_use xkb)
		-Dntp-servers="0.gentoo.pool.ntp.org 1.gentoo.pool.ntp.org 2.gentoo.pool.ntp.org 3.gentoo.pool.ntp.org"
		# Breaks screen, tmux, etc.
		-Ddefault-kill-user-processes=false
		-Dcreate-log-dirs=false

		# multilib options
		-Dbacklight=$(meson_multilib)
		-Dbinfmt=$(meson_multilib)
		-Dcoredump=$(meson_multilib)
		-Denvironment-d=$(meson_multilib)
		-Dfirstboot=$(meson_multilib)
		-Dhibernate=$(meson_multilib)
		-Dhostnamed=$(meson_multilib)
		-Dldconfig=$(meson_multilib)
		-Dlocaled=$(meson_multilib)
		-Dman=$(meson_multilib)
		-Dnetworkd=$(meson_multilib)
		-Dquotacheck=$(meson_multilib)
		-Drandomseed=$(meson_multilib)
		-Drfkill=$(meson_multilib)
		-Dsysusers=$(meson_multilib)
		-Dtimedated=$(meson_multilib)
		-Dtimesyncd=$(meson_multilib)
		-Dtmpfiles=$(meson_multilib)
		-Dvconsole=$(meson_multilib)

		# static-libs
		-Dstatic-libsystemd=$(usex static-libs true false)
		-Dstatic-libudev=$(usex static-libs true false)
	)
	meson_src_configure "${myconf[@]}"
}
# Build with ninja for each ABI.
multilib_src_compile() {
	eninja
}
# Run the meson test suite with a clean session environment so tests do
# not pick up the build host's D-Bus session or XDG runtime dir.
multilib_src_test() {
	unset DBUS_SESSION_BUS_ADDRESS XDG_RUNTIME_DIR
	meson_src_test
}
# Install each ABI's build into the staging image.
multilib_src_install() {
	DESTDIR="${D}" eninja install
}
# ABI-independent install steps: relocate docs, prune resolvconf/sysv
# pieces according to USE, keep empty config directories, and add the
# compatibility symlinks needed for split-usr layouts.
multilib_src_install_all() {
	local rootprefix=$(usex split-usr '' /usr)

	# meson doesn't know about docdir
	mv "${ED}"/usr/share/doc/{systemd,${PF}} || die

	einstalldocs
	dodoc "${FILESDIR}"/nsswitch.conf

	if ! use resolvconf; then
		rm -f "${ED}${rootprefix}"/sbin/resolvconf || die
	fi

	rm "${ED}"/etc/init.d/README || die
	rm "${ED}${rootprefix}"/lib/systemd/system-generators/systemd-sysv-generator || die

	if ! use sysv-utils; then
		rm "${ED}${rootprefix}"/sbin/{halt,init,poweroff,reboot,runlevel,shutdown,telinit} || die
		rm "${ED}"/usr/share/man/man1/init.1 || die
		rm "${ED}"/usr/share/man/man8/{halt,poweroff,reboot,runlevel,shutdown,telinit}.8 || die
	fi

	if ! use resolvconf && ! use sysv-utils; then
		rmdir "${ED}${rootprefix}"/sbin || die
	fi

	# Preserve empty dirs in /etc & /var, bug #437008
	keepdir /etc/{binfmt.d,modules-load.d,tmpfiles.d}
	keepdir /etc/kernel/install.d
	keepdir /etc/systemd/{network,system,user}
	keepdir /etc/udev/rules.d

	if use hwdb; then
		keepdir /etc/udev/hwdb.d
	fi

	keepdir "${rootprefix}"/lib/systemd/{system-sleep,system-shutdown}
	keepdir /usr/lib/{binfmt.d,modules-load.d}
	keepdir /usr/lib/systemd/user-generators
	keepdir /var/lib/systemd
	keepdir /var/log/journal

	# Symlink /etc/sysctl.conf for easy migration.
	dosym ../sysctl.conf /etc/sysctl.d/99-sysctl.conf

	if use hwdb; then
		rm -r "${ED}${rootprefix}"/lib/udev/hwdb.d || die
	fi

	if use split-usr; then
		# Avoid breaking boot/reboot
		dosym ../../../lib/systemd/systemd /usr/lib/systemd/systemd
		dosym ../../../lib/systemd/systemd-shutdown /usr/lib/systemd/systemd-shutdown
	fi

	gen_usr_ldscript -a systemd udev
}
# Keep a single source of truth for the system locale: seed or adopt
# /etc/locale.conf and steer /etc/env.d/??locale toward being a symlink to
# it (warning the user when a real file already exists there).
migrate_locale() {
	local envd_locale_def="${EROOT}/etc/env.d/02locale"
	local envd_locale=( "${EROOT}"/etc/env.d/??locale )
	local locale_conf="${EROOT}/etc/locale.conf"

	if [[ ! -L ${locale_conf} && ! -e ${locale_conf} ]]; then
		# If locale.conf does not exist...
		if [[ -e ${envd_locale} ]]; then
			# ...either copy env.d/??locale if there's one
			ebegin "Moving ${envd_locale} to ${locale_conf}"
			mv "${envd_locale}" "${locale_conf}"
			eend ${?} || FAIL=1
		else
			# ...or create a dummy default
			ebegin "Creating ${locale_conf}"
			cat > "${locale_conf}" <<-EOF
				# This file has been created by the sys-apps/systemd ebuild.
				# See locale.conf(5) and localectl(1).

				# LANG=${LANG}
			EOF
			eend ${?} || FAIL=1
		fi
	fi

	if [[ ! -L ${envd_locale} ]]; then
		# now, if env.d/??locale is not a symlink (to locale.conf)...
		if [[ -e ${envd_locale} ]]; then
			# ...warn the user that he has duplicate locale settings
			ewarn
			ewarn "To ensure consistent behavior, you should replace ${envd_locale}"
			ewarn "with a symlink to ${locale_conf}. Please migrate your settings"
			ewarn "and create the symlink with the following command:"
			ewarn "ln -s -n -f ../locale.conf ${envd_locale}"
			ewarn
		else
			# ...or just create the symlink if there's nothing here
			ebegin "Creating ${envd_locale_def} -> ../locale.conf symlink"
			ln -n -s ../locale.conf "${envd_locale_def}"
			eend ${?} || FAIL=1
		fi
	fi
}
# Record which of the given units are currently enabled (into the global
# ENABLED_UNITS array) so pkg_postinst can re-enable them after the merge.
save_enabled_units() {
	ENABLED_UNITS=()
	type systemctl &>/dev/null || return
	for x; do
		if systemctl --quiet --root="${ROOT:-/}" is-enabled "${x}"; then
			ENABLED_UNITS+=( "${x}" )
		fi
	done
}
# Snapshot enabled units and, for USE=-split-usr builds, refuse to merge
# onto a system whose /bin, /sbin and /lib are not yet symlinked into /usr.
pkg_preinst() {
	save_enabled_units {machines,remote-{cryptsetup,fs}}.target [email protected]

	if ! use split-usr; then
		local dir
		for dir in bin sbin lib; do
			if [[ ! ${EROOT}/${dir} -ef ${EROOT}/usr/${dir} ]]; then
				eerror "\"${EROOT}/${dir}\" and \"${EROOT}/usr/${dir}\" are not merged."
				eerror "One of them should be a symbolic link to the other one."
				FAIL=1
			fi
		done
		if [[ ${FAIL} ]]; then
			eerror "Migration to system layout with merged directories must be performed before"
			eerror "rebuilding ${CATEGORY}/${PN} with USE=\"-split-usr\" to avoid run-time breakage."
			die "System layout with split directories still used"
		fi
	fi
}
# Post-merge housekeeping: refresh journald catalog and hwdb, reload udev,
# migrate locale settings, restore previously enabled units, enable a sane
# default set on fresh installs, and reexec a running systemd.  Failures
# set FAIL and are reported at the end rather than aborting.
pkg_postinst() {
	systemd_update_catalog

	# Keep this here in case the database format changes so it gets updated
	# when required. Despite that this file is owned by sys-apps/hwids.
	if has_version "sys-apps/hwids[udev]"; then
		udevadm hwdb --update --root="${EROOT}"
	fi

	udev_reload || FAIL=1

	# Bug 465468, make sure locales are respect, and ensure consistency
	# between OpenRC & systemd
	migrate_locale

	systemd_reenable systemd-networkd.service systemd-resolved.service

	if [[ ${ENABLED_UNITS[@]} ]]; then
		systemctl --root="${ROOT:-/}" enable "${ENABLED_UNITS[@]}"
	fi

	if [[ -z ${REPLACING_VERSIONS} ]]; then
		# Fresh install (not an upgrade): enable a minimal default set.
		if type systemctl &>/dev/null; then
			systemctl --root="${ROOT:-/}" enable [email protected] remote-fs.target || FAIL=1
		fi
		elog "To enable a useful set of services, run the following:"
		elog "  systemctl preset-all --preset-mode=enable-only"
	fi

	if [[ -L ${EROOT}/var/lib/systemd/timesync ]]; then
		rm "${EROOT}/var/lib/systemd/timesync"
	fi

	if [[ -z ${ROOT} && -d /run/systemd/system ]]; then
		ebegin "Reexecuting system manager"
		systemctl daemon-reexec
		eend $?
	fi

	if [[ ${FAIL} ]]; then
		eerror "One of the postinst commands failed. Please check the postinst output"
		eerror "for errors. You may need to clean up your system and/or try installing"
		eerror "systemd again."
		eerror
	fi
}
# On full removal (not an upgrade), drop the generated catalog database.
pkg_prerm() {
	# If removing systemd completely, remove the catalog database.
	if [[ ! ${REPLACED_BY_VERSION} ]]; then
		rm -f -v "${EROOT}"/var/lib/systemd/catalog/database
	fi
}
| {
"pile_set_name": "Github"
} |
--csv-at-analysis csv_output.csv --csv-stable
| {
"pile_set_name": "Github"
} |
<?php
/**
* Passbolt ~ Open source password manager for teams
* Copyright (c) Passbolt SA (https://www.passbolt.com)
*
* Licensed under GNU Affero General Public License version 3 of the or any later version.
* For full copyright and license information, please see the LICENSE.txt
* Redistributions of files must retain the above copyright notice.
*
* @copyright Copyright (c) Passbolt SA (https://www.passbolt.com)
* @license https://opensource.org/licenses/AGPL-3.0 AGPL License
* @link https://www.passbolt.com Passbolt(tm)
* @since 2.13.0
*/
namespace App\Log;
use Cake\Log\Log as CakeLog;
use Psr\Log\LoggerInterface;
use Psr\Log\LoggerTrait;
/**
 * Default application logger: a thin PSR-3 adapter that forwards every log
 * call to CakePHP's static Log facade (the level-specific methods such as
 * error()/info() come from LoggerTrait and all funnel into log()).
 */
class DefaultLogger implements LoggerInterface
{
    use LoggerTrait;

    /**
     * Write a message to the configured CakePHP log engines.
     *
     * @param mixed $level Log level (e.g. a \Psr\Log\LogLevel constant)
     * @param string $message Message to write
     * @param array $context Extra context passed through to CakeLog::write()
     * @return bool|void Result of \Cake\Log\Log::write()
     */
    public function log($level, $message, array $context = [])
    {
        return CakeLog::write($level, $message, $context);
    }
}
| {
"pile_set_name": "Github"
} |
/**
* @license
* Copyright The Closure Library Authors.
* SPDX-License-Identifier: Apache-2.0
*/
/** @fileoverview Unit Test for the unsafe API of the HTML Sanitizer. */
goog.module('goog.html.UnsafeTest');
goog.setTestOnly();
const AttributeWhitelist = goog.require('goog.html.sanitizer.AttributeWhitelist');
const Const = goog.require('goog.string.Const');
const HtmlSanitizer = goog.require('goog.html.sanitizer.HtmlSanitizer');
const SafeHtml = goog.require('goog.html.SafeHtml');
const TagWhitelist = goog.require('goog.html.sanitizer.TagWhitelist');
const dom = goog.require('goog.testing.dom');
const testSuite = goog.require('goog.testing.testSuite');
const unsafe = goog.require('goog.html.sanitizer.unsafe');
const userAgent = goog.require('goog.userAgent');
// The sanitizer is unsupported on IE < 10; tests expect empty output there.
const isSupported = !userAgent.IE || userAgent.isVersionOrHigher(10);

// Justification constant required by the goog.html.sanitizer.unsafe API.
const just = Const.from('test');
/**
 * Builds a sanitizer (optionally widened with extra tags and attributes via
 * the unsafe API), runs it over the original HTML, and asserts the result
 * matches the expected HTML. On unsupported browsers the sanitizer must
 * produce the empty string instead.
 * @param {string} originalHtml
 * @param {string} expectedHtml
 * @param {?Array<string>=} tags
 * @param {?Array<(string|!goog.html.sanitizer.HtmlSanitizerAttributePolicy)>=}
 *     attrs
 * @param {?HtmlSanitizer.Builder=} opt_builder
 */
function assertSanitizedHtml(
    originalHtml, expectedHtml, tags = undefined, attrs = undefined,
    opt_builder) {
  let builder = opt_builder ? opt_builder : new HtmlSanitizer.Builder();
  if (tags) {
    builder = unsafe.alsoAllowTags(just, builder, tags);
  }
  if (attrs) {
    builder = unsafe.alsoAllowAttributes(just, builder, attrs);
  }
  const output = SafeHtml.unwrap(builder.build().sanitize(originalHtml));
  if (!isSupported) {
    // Unsupported browsers must fail closed and emit nothing.
    assertEquals('', output);
    return;
  }
  dom.assertHtmlMatches(expectedHtml, output, true /* opt_strictAttributes */);
}
// Each test exercises one aspect of the unsafe API: extending the tag
// whitelist, extending the attribute whitelist (globally, per tag, or with
// a custom policy), and precedence between global and tag-specific rules.
testSuite({
  testAllowEmptyTagList() {
    // An empty extra-tag list must not change default behavior.
    const input = '<sdf><aaa></aaa></sdf><b></b>';
    const expected = '<span><span></span></span><b></b>';
    assertSanitizedHtml(input, expected, []);
  },

  testAllowBlacklistedTag() {
    const input = '<div><script>aaa</script></div>';
    assertSanitizedHtml(input, input, ['SCriPT']);
  },

  testAllowUnknownTags() {
    const input = '<hello><bye>aaa</bye></hello><zzz></zzz>';
    const expected = '<hello><span>aaa</span></hello><zzz></zzz>';
    assertSanitizedHtml(input, expected, ['HElLO', 'zZZ']);
  },

  testAllowAlreadyWhiteListedTag() {
    const input = '<hello><p><zzz></zzz></p></hello>';
    const expected = '<span><p><zzz></zzz></p></span>';
    assertSanitizedHtml(input, expected, ['p', 'ZZZ']);
  },

  testAllowEmptyAttrList() {
    const input = '<a href="#" qwe="nope">b</a>';
    const expected = '<a href="#">b</a>';
    assertSanitizedHtml(input, expected, null, []);
  },

  testAllowUnknownAttributeSimple() {
    const input = '<qqq zzz="3" nnn="no"></qqq>';
    const expected = '<span zzz="3"></span>';
    assertSanitizedHtml(input, expected, null, ['Zzz']);
  },

  testAllowUnknownAttributeWildCard() {
    // '*' applies the attribute allowance to every tag.
    const input = '<div ab="yes" bb="no"><img ab="yep" bb="no" /></div>';
    const expected = '<div ab="yes"><img ab="yep" /></div>';
    assertSanitizedHtml(
        input, expected, null, [{tagName: '*', attributeName: 'aB'}]);
  },

  testAllowUnknownAttributeOnSpecificTag() {
    const input = '<a www="3" zzz="4">fff</a><img www="3" />';
    const expected = '<a www="3">fff</a><img />';
    assertSanitizedHtml(
        input, expected, null, [{tagName: 'a', attributeName: 'WwW'}]);
  },

  testAllowUnknownAttributePolicy() {
    // A custom policy can keep or drop individual values.
    const input = '<img ab="yes" /><img ab="no" />';
    const expected = '<img ab="yes" /><img />';
    assertSanitizedHtml(input, expected, null, [{
      tagName: '*',
      attributeName: 'aB',
      policy: function(value, hints) {
        assertEquals(hints.attributeName, 'ab');
        return value === 'yes' ? value : null;
      },
    }]);
  },

  testAllowOverwriteAttrPolicy() {
    const input = '<a href="yes"></a><a href="no"></a>';
    const expected = '<a href="yes"></a><a></a>';
    assertSanitizedHtml(input, expected, null, [{
      tagName: 'a',
      attributeName: 'href',
      policy: function(value) {
        return value === 'yes' ? value : null;
      },
    }]);
  },

  testAllowDAttribute() {
    const input = '<path d="1.5 1.5 1.5 14.5 14.5 14.5 14.5 1.5"/>';
    const expected = '<path d="1.5 1.5 1.5 14.5 14.5 14.5 14.5 1.5"/>';
    assertSanitizedHtml(
        input, expected, ['path'], [{tagName: 'path', attributeName: 'd'}]);
  },

  testWhitelistAliasing() {
    // Extending one sanitizer must not mutate the shared global whitelists.
    const builder = new HtmlSanitizer.Builder();
    unsafe.alsoAllowTags(just, builder, ['QqQ']);
    unsafe.alsoAllowAttributes(just, builder, ['QqQ']);
    builder.build();
    assertUndefined(TagWhitelist['QQQ']);
    assertUndefined(TagWhitelist['QqQ']);
    assertUndefined(TagWhitelist['qqq']);
    assertUndefined(AttributeWhitelist['* QQQ']);
    assertUndefined(AttributeWhitelist['* QqQ']);
    assertUndefined(AttributeWhitelist['* qqq']);
  },

  testAllowRelaxExistingAttributePolicyWildcard() {
    const input = '<a href="javascript:alert(1)"></a>';
    // define a tag-specific one, takes precedence
    assertSanitizedHtml(input, input, null, [
      {tagName: 'a', attributeName: 'href', policy: goog.functions.identity}
    ]);
    // overwrite the global one
    assertSanitizedHtml(input, input, null, [
      {tagName: '*', attributeName: 'href', policy: goog.functions.identity}
    ]);
  },

  testAllowRelaxExistingAttributePolicySpecific() {
    const input = '<a target="foo"></a>';
    const expected = '<a></a>';
    // overwrite the global one, the specific one still has precedence
    assertSanitizedHtml(input, expected, null, [
      {tagName: '*', attributeName: 'target', policy: goog.functions.identity},
    ]);
    // overwrite the tag-specific one, this one should take precedence
    assertSanitizedHtml(input, input, null, [
      {tagName: 'a', attributeName: 'target', policy: goog.functions.identity},
    ]);
  },

  testAlsoAllowTagsInBlacklist() {
    // Simplified use case taken from KaTex output HTML. The real configuration
    // would allow more attributes and apply a stricter policy on their values
    // to reduce the attack surface.
    const input = '<svg width="1px"><line x1="3" /><path d="M 10 30" /></svg>';
    assertSanitizedHtml(input, input, ['svg', 'line', 'path'], [
      {tagName: 'svg', attributeName: 'width', policy: goog.functions.identity},
      {tagName: 'line', attributeName: 'x1', policy: goog.functions.identity},
      {tagName: 'path', attributeName: 'd', policy: goog.functions.identity},
    ]);
  },
});
| {
"pile_set_name": "Github"
} |
<?php
/**
* @file user-profile-item.tpl.php
* Default theme implementation to present profile items (values from user
* account profile fields or modules).
*
* This template is used to loop through and render each field configured
* for the user's account. It can also be the data from modules. The output is
* grouped by categories.
*
* @see user-profile-category.tpl.php
* for the parent markup. Implemented as a definition list by default.
* @see user-profile.tpl.php
* where all items and categories are collected and printed out.
*
* Available variables:
* - $title: Field title for the profile item.
* - $value: User defined value for the profile item or data from a module.
* - $attributes: HTML attributes. Usually renders classes.
*
* @see template_preprocess_user_profile_item()
*/
?>
<dt<?php print $attributes; ?>><?php print $title; ?></dt>
<dd<?php print $attributes; ?>><?php print $value; ?></dd>
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2013 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace Zend\XmlRpc\Request;
use Zend\XmlRpc\Request as XmlRpcRequest;
use Zend\XmlRpc\Server\Exception as ServerException;
/**
* XmlRpc Request object -- Request via STDIN
*
* Extends {@link Zend_XmlRpc_Request} to accept a request via STDIN. Request is
* built at construction time using data from STDIN; if no data is available, the
* request is declared a fault.
*/
class Stdin extends XmlRpcRequest
{
/**
* Raw XML as received via request
* @var string
*/
protected $xml;
/**
* Constructor
*
* Attempts to read from php://stdin to get raw POST request; if an error
* occurs in doing so, or if the XML is invalid, the request is declared a
* fault.
*
*/
public function __construct()
{
$fh = fopen('php://stdin', 'r');
if (!$fh) {
$this->fault = new ServerException(630);
return;
}
$xml = '';
while (!feof($fh)) {
$xml .= fgets($fh);
}
fclose($fh);
$this->xml = $xml;
$this->loadXml($xml);
}
/**
* Retrieve the raw XML request
*
* @return string
*/
public function getRawRequest()
{
return $this->xml;
}
}
| {
"pile_set_name": "Github"
} |
<vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="24dp"
android:height="24dp"
android:viewportWidth="24.0"
android:viewportHeight="24.0">
<path
android:fillColor="#FF000000"
android:pathData="M12,2C6.48,2 2,6.48 2,12s4.48,10 10,10 10,-4.48 10,-10S17.52,2 12,2zM12,20c-4.42,0 -8,-3.58 -8,-8 0,-1.85 0.63,-3.55 1.69,-4.9L16.9,18.31C15.55,19.37 13.85,20 12,20zM18.31,16.9L7.1,5.69C8.45,4.63 10.15,4 12,4c4.42,0 8,3.58 8,8 0,1.85 -0.63,3.55 -1.69,4.9z"/>
</vector>
| {
"pile_set_name": "Github"
} |
#
# Dockerfile to build miaoski/moedict_amis
#
FROM ubuntu:14.04
MAINTAINER miaoski
ENV DEBIAN_FRONTEND noninteractive
# Pick a Ubuntu apt mirror site for better speed
# ref: https://launchpad.net/ubuntu/+archivemirrors
#
# For developers to build this image in Taiwan,
# please consider to use one of these mirrors:
# - ftp.ubuntu-tw.net
# - ftp.yzu.edu.tw
ENV UBUNTU_APT_SITE ubuntu.cs.utah.edu
RUN sed -E -i "s/([a-z]+.)?archive.ubuntu.com/$UBUNTU_APT_SITE/g" /etc/apt/sources.list
RUN sed -i "s/security.ubuntu.com/$UBUNTU_APT_SITE/g" /etc/apt/sources.list
# Disable src package as we don't need them
RUN sed -i 's/^deb-src\ /\#deb-src\ /g' /etc/apt/sources.list
RUN apt-get update && \
apt-get install -y \
git \
tree \
vim \
screen \
curl \
build-essential \
perl \
ruby \
ruby-sass \
ruby-compass \
python \
python-lxml \
unzip \
libjson-perl \
libfile-slurp-unicode-perl \
nodejs \
nodejs-legacy \
npm && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN npm install -g LiveScript jade gulp
# Switch locale
RUN locale-gen zh_TW.UTF-8
ENV LC_ALL zh_TW.UTF-8
COPY ./ /usr/local/src/moedict-webkit
WORKDIR /usr/local/src/moedict-webkit
RUN npm install
# make offline
RUN make offline-dev
# Default port is 8888
EXPOSE 8888
| {
"pile_set_name": "Github"
} |
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s
define float @exact_inverse(float %x) {
; CHECK-LABEL: @exact_inverse(
; CHECK-NEXT: [[DIV:%.*]] = fmul float [[X:%.*]], 1.250000e-01
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv float %x, 8.0
ret float %div
}
; Min normal float = 1.17549435E-38
define float @exact_inverse2(float %x) {
; CHECK-LABEL: @exact_inverse2(
; CHECK-NEXT: [[DIV:%.*]] = fmul float [[X:%.*]], 0x47D0000000000000
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv float %x, 0x3810000000000000
ret float %div
}
; Max exponent = 1.70141183E+38; don't transform to multiply with denormal.
define float @exact_inverse_but_denorm(float %x) {
; CHECK-LABEL: @exact_inverse_but_denorm(
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[X:%.*]], 0x47E0000000000000
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv float %x, 0x47E0000000000000
ret float %div
}
; Denormal = float 1.40129846E-45; inverse can't be represented.
define float @not_exact_inverse2(float %x) {
; CHECK-LABEL: @not_exact_inverse2(
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[X:%.*]], 0x36A0000000000000
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv float %x, 0x36A0000000000000
ret float %div
}
; Fast math allows us to replace this fdiv.
define float @not_exact_but_allow_recip(float %x) {
; CHECK-LABEL: @not_exact_but_allow_recip(
; CHECK-NEXT: [[DIV:%.*]] = fmul arcp float [[X:%.*]], 0x3FD5555560000000
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv arcp float %x, 3.0
ret float %div
}
; Fast math allows us to replace this fdiv, but we don't to avoid a denormal.
; TODO: What if the function attributes tell us that denormals are flushed?
define float @not_exact_but_allow_recip_but_denorm(float %x) {
; CHECK-LABEL: @not_exact_but_allow_recip_but_denorm(
; CHECK-NEXT: [[DIV:%.*]] = fdiv arcp float [[X:%.*]], 0x47E0000100000000
; CHECK-NEXT: ret float [[DIV]]
;
%div = fdiv arcp float %x, 0x47E0000100000000
ret float %div
}
define <2 x float> @exact_inverse_splat(<2 x float> %x) {
; CHECK-LABEL: @exact_inverse_splat(
; CHECK-NEXT: [[DIV:%.*]] = fmul <2 x float> [[X:%.*]], <float 2.500000e-01, float 2.500000e-01>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv <2 x float> %x, <float 4.0, float 4.0>
ret <2 x float> %div
}
; Fast math allows us to replace this fdiv.
define <2 x float> @not_exact_but_allow_recip_splat(<2 x float> %x) {
; CHECK-LABEL: @not_exact_but_allow_recip_splat(
; CHECK-NEXT: [[DIV:%.*]] = fmul arcp <2 x float> [[X:%.*]], <float 0x3FD5555560000000, float 0x3FD5555560000000>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv arcp <2 x float> %x, <float 3.0, float 3.0>
ret <2 x float> %div
}
define <2 x float> @exact_inverse_vec(<2 x float> %x) {
; CHECK-LABEL: @exact_inverse_vec(
; CHECK-NEXT: [[DIV:%.*]] = fmul <2 x float> [[X:%.*]], <float 2.500000e-01, float 1.250000e-01>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv <2 x float> %x, <float 4.0, float 8.0>
ret <2 x float> %div
}
define <2 x float> @not_exact_inverse_splat(<2 x float> %x) {
; CHECK-LABEL: @not_exact_inverse_splat(
; CHECK-NEXT: [[DIV:%.*]] = fdiv <2 x float> [[X:%.*]], <float 3.000000e+00, float 3.000000e+00>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv <2 x float> %x, <float 3.0, float 3.0>
ret <2 x float> %div
}
define <2 x float> @not_exact_inverse_vec(<2 x float> %x) {
; CHECK-LABEL: @not_exact_inverse_vec(
; CHECK-NEXT: [[DIV:%.*]] = fdiv <2 x float> [[X:%.*]], <float 4.000000e+00, float 3.000000e+00>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv <2 x float> %x, <float 4.0, float 3.0>
ret <2 x float> %div
}
define <2 x float> @not_exact_inverse_vec_arcp(<2 x float> %x) {
; CHECK-LABEL: @not_exact_inverse_vec_arcp(
; CHECK-NEXT: [[DIV:%.*]] = fmul arcp <2 x float> [[X:%.*]], <float 2.500000e-01, float 0x3FD5555560000000>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv arcp <2 x float> %x, <float 4.0, float 3.0>
ret <2 x float> %div
}
define <2 x float> @not_exact_inverse_vec_arcp_with_undef_elt(<2 x float> %x) {
; CHECK-LABEL: @not_exact_inverse_vec_arcp_with_undef_elt(
; CHECK-NEXT: [[DIV:%.*]] = fdiv arcp <2 x float> [[X:%.*]], <float undef, float 3.000000e+00>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%div = fdiv arcp <2 x float> %x, <float undef, float 3.0>
ret <2 x float> %div
}
; (X / Y) / Z --> X / (Y * Z)
define float @div_with_div_numerator(float %x, float %y, float %z) {
; CHECK-LABEL: @div_with_div_numerator(
; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc arcp float [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[DIV2:%.*]] = fdiv reassoc arcp float [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[DIV2]]
;
%div1 = fdiv ninf float %x, %y
%div2 = fdiv arcp reassoc float %div1, %z
ret float %div2
}
; Z / (X / Y) --> (Z * Y) / X
define <2 x float> @div_with_div_denominator(<2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @div_with_div_denominator(
; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc arcp <2 x float> [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[DIV2:%.*]] = fdiv reassoc arcp <2 x float> [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret <2 x float> [[DIV2]]
;
%div1 = fdiv nnan <2 x float> %x, %y
%div2 = fdiv arcp reassoc <2 x float> %z, %div1
ret <2 x float> %div2
}
; Don't create an extra multiply if we can't eliminate the first div.
declare void @use_f32(float)
define float @div_with_div_numerator_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @div_with_div_numerator_extra_use(
; CHECK-NEXT: [[DIV1:%.*]] = fdiv float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[DIV2:%.*]] = fdiv fast float [[DIV1]], [[Z:%.*]]
; CHECK-NEXT: call void @use_f32(float [[DIV1]])
; CHECK-NEXT: ret float [[DIV2]]
;
%div1 = fdiv float %x, %y
%div2 = fdiv fast float %div1, %z
call void @use_f32(float %div1)
ret float %div2
}
define float @div_with_div_denominator_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @div_with_div_denominator_extra_use(
; CHECK-NEXT: [[DIV1:%.*]] = fdiv float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[DIV2:%.*]] = fdiv fast float [[Z:%.*]], [[DIV1]]
; CHECK-NEXT: call void @use_f32(float [[DIV1]])
; CHECK-NEXT: ret float [[DIV2]]
;
%div1 = fdiv float %x, %y
%div2 = fdiv fast float %z, %div1
call void @use_f32(float %div1)
ret float %div2
}
define float @fneg_fneg(float %x, float %y) {
; CHECK-LABEL: @fneg_fneg(
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret float [[DIV]]
;
%x.fneg = fsub float -0.0, %x
%y.fneg = fsub float -0.0, %y
%div = fdiv float %x.fneg, %y.fneg
ret float %div
}
; The test above shows that no FMF are needed, but show that we are not dropping FMF.
define float @fneg_fneg_fast(float %x, float %y) {
; CHECK-LABEL: @fneg_fneg_fast(
; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret float [[DIV]]
;
%x.fneg = fsub float -0.0, %x
%y.fneg = fsub float -0.0, %y
%div = fdiv fast float %x.fneg, %y.fneg
ret float %div
}
define <2 x float> @fneg_fneg_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_fneg_vec(
; CHECK-NEXT: [[DIV:%.*]] = fdiv <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%xneg = fsub <2 x float> <float -0.0, float -0.0>, %x
%yneg = fsub <2 x float> <float -0.0, float -0.0>, %y
%div = fdiv <2 x float> %xneg, %yneg
ret <2 x float> %div
}
define <2 x float> @fneg_fneg_vec_undef_elts(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fneg_fneg_vec_undef_elts(
; CHECK-NEXT: [[DIV:%.*]] = fdiv <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%xneg = fsub <2 x float> <float undef, float -0.0>, %x
%yneg = fsub <2 x float> <float -0.0, float undef>, %y
%div = fdiv <2 x float> %xneg, %yneg
ret <2 x float> %div
}
define float @fneg_dividend_constant_divisor(float %x) {
; CHECK-LABEL: @fneg_dividend_constant_divisor(
; CHECK-NEXT: [[DIV:%.*]] = fdiv nsz float [[X:%.*]], -3.000000e+00
; CHECK-NEXT: ret float [[DIV]]
;
%neg = fsub float -0.0, %x
%div = fdiv nsz float %neg, 3.0
ret float %div
}
define float @fneg_divisor_constant_dividend(float %x) {
; CHECK-LABEL: @fneg_divisor_constant_dividend(
; CHECK-NEXT: [[DIV:%.*]] = fdiv nnan float 3.000000e+00, [[X:%.*]]
; CHECK-NEXT: ret float [[DIV]]
;
%neg = fsub float -0.0, %x
%div = fdiv nnan float -3.0, %neg
ret float %div
}
define <2 x float> @fneg_dividend_constant_divisor_vec(<2 x float> %x) {
; CHECK-LABEL: @fneg_dividend_constant_divisor_vec(
; CHECK-NEXT: [[DIV:%.*]] = fdiv ninf <2 x float> [[X:%.*]], <float -3.000000e+00, float 8.000000e+00>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
%div = fdiv ninf <2 x float> %neg, <float 3.0, float -8.0>
ret <2 x float> %div
}
define <2 x float> @fneg_dividend_constant_divisor_vec_undef_elt(<2 x float> %x) {
; CHECK-LABEL: @fneg_dividend_constant_divisor_vec_undef_elt(
; CHECK-NEXT: [[DIV:%.*]] = fdiv ninf <2 x float> [[X:%.*]], <float -3.000000e+00, float 8.000000e+00>
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%neg = fsub <2 x float> <float undef, float -0.0>, %x
%div = fdiv ninf <2 x float> %neg, <float 3.0, float -8.0>
ret <2 x float> %div
}
define <2 x float> @fneg_divisor_constant_dividend_vec(<2 x float> %x) {
; CHECK-LABEL: @fneg_divisor_constant_dividend_vec(
; CHECK-NEXT: [[DIV:%.*]] = fdiv afn <2 x float> <float 3.000000e+00, float -5.000000e+00>, [[X:%.*]]
; CHECK-NEXT: ret <2 x float> [[DIV]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
%div = fdiv afn <2 x float> <float -3.0, float 5.0>, %neg
ret <2 x float> %div
}
; X / (X * Y) --> 1.0 / Y
define float @div_factor(float %x, float %y) {
; CHECK-LABEL: @div_factor(
; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan float 1.000000e+00, [[Y:%.*]]
; CHECK-NEXT: ret float [[D]]
;
%m = fmul float %x, %y
%d = fdiv nnan reassoc float %x, %m
ret float %d;
}
; We can't do the transform without 'nnan' because if x is NAN and y is a number, this should return NAN.
define float @div_factor_too_strict(float %x, float %y) {
; CHECK-LABEL: @div_factor_too_strict(
; CHECK-NEXT: [[M:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[D:%.*]] = fdiv reassoc float [[X]], [[M]]
; CHECK-NEXT: ret float [[D]]
;
%m = fmul float %x, %y
%d = fdiv reassoc float %x, %m
ret float %d
}
; Commute, verify vector types, and show that we are not dropping extra FMF.
; X / (Y * X) --> 1.0 / Y
define <2 x float> @div_factor_commute(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @div_factor_commute(
; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan ninf nsz <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[D]]
;
%m = fmul <2 x float> %y, %x
%d = fdiv nnan ninf nsz reassoc <2 x float> %x, %m
ret <2 x float> %d
}
; C1/(X*C2) => (C1/C2) / X
define <2 x float> @div_constant_dividend1(<2 x float> %x) {
; CHECK-LABEL: @div_constant_dividend1(
; CHECK-NEXT: [[T2:%.*]] = fdiv reassoc arcp <2 x float> <float 5.000000e+00, float 1.000000e+00>, [[X:%.*]]
; CHECK-NEXT: ret <2 x float> [[T2]]
;
%t1 = fmul <2 x float> %x, <float 3.0e0, float 7.0e0>
%t2 = fdiv arcp reassoc <2 x float> <float 15.0e0, float 7.0e0>, %t1
ret <2 x float> %t2
}
define <2 x float> @div_constant_dividend1_arcp_only(<2 x float> %x) {
; CHECK-LABEL: @div_constant_dividend1_arcp_only(
; CHECK-NEXT: [[T1:%.*]] = fmul <2 x float> [[X:%.*]], <float 3.000000e+00, float 7.000000e+00>
; CHECK-NEXT: [[T2:%.*]] = fdiv arcp <2 x float> <float 1.500000e+01, float 7.000000e+00>, [[T1]]
; CHECK-NEXT: ret <2 x float> [[T2]]
;
%t1 = fmul <2 x float> %x, <float 3.0e0, float 7.0e0>
%t2 = fdiv arcp <2 x float> <float 15.0e0, float 7.0e0>, %t1
ret <2 x float> %t2
}
; C1/(X/C2) => (C1*C2) / X
define <2 x float> @div_constant_dividend2(<2 x float> %x) {
; CHECK-LABEL: @div_constant_dividend2(
; CHECK-NEXT: [[T2:%.*]] = fdiv reassoc arcp <2 x float> <float 4.500000e+01, float 4.900000e+01>, [[X:%.*]]
; CHECK-NEXT: ret <2 x float> [[T2]]
;
%t1 = fdiv <2 x float> %x, <float 3.0e0, float -7.0e0>
%t2 = fdiv arcp reassoc <2 x float> <float 15.0e0, float -7.0e0>, %t1
ret <2 x float> %t2
}
define <2 x float> @div_constant_dividend2_reassoc_only(<2 x float> %x) {
; CHECK-LABEL: @div_constant_dividend2_reassoc_only(
; CHECK-NEXT: [[T1:%.*]] = fdiv <2 x float> [[X:%.*]], <float 3.000000e+00, float -7.000000e+00>
; CHECK-NEXT: [[T2:%.*]] = fdiv reassoc <2 x float> <float 1.500000e+01, float -7.000000e+00>, [[T1]]
; CHECK-NEXT: ret <2 x float> [[T2]]
;
%t1 = fdiv <2 x float> %x, <float 3.0e0, float -7.0e0>
%t2 = fdiv reassoc <2 x float> <float 15.0e0, float -7.0e0>, %t1
ret <2 x float> %t2
}
; C1/(C2/X) => (C1/C2) * X
; This tests the combination of 2 folds: (C1 * X) / C2 --> (C1 / C2) * X
define <2 x float> @div_constant_dividend3(<2 x float> %x) {
; CHECK-LABEL: @div_constant_dividend3(
; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc arcp <2 x float> [[X:%.*]], <float 1.500000e+01, float -7.000000e+00>
; CHECK-NEXT: [[T2:%.*]] = fmul reassoc arcp <2 x float> [[TMP1]], <float 0x3FD5555560000000, float 0x3FC24924A0000000>
; CHECK-NEXT: ret <2 x float> [[T2]]
;
%t1 = fdiv <2 x float> <float 3.0e0, float 7.0e0>, %x
%t2 = fdiv arcp reassoc <2 x float> <float 15.0e0, float -7.0e0>, %t1
ret <2 x float> %t2
}
| {
"pile_set_name": "Github"
} |
<section>
<div class="container">
<h1>{$LANG.domainautorenewstatus}</h1>
{if $save}
<div class="alert alert-success alert-dismissable">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>
{$LANG.changessavedsuccessfully}
</div>
{/if}
<p>{$LANG.domainautorenewinfo}</p>
<div class="alert">
<h3 class="alert-heading">{$LANG.domainbulkmanagementchangeaffect}</h3>
{foreach from=$domains item=domain name=domains}{$domain}{if !$smarty.foreach.domains.last}, {/if}{/foreach}
</div>
<form method="post" action="clientarea.php" class="well text-center">
<input type="hidden" name="action" value="bulkdomain">
<input type="hidden" name="update" value="autorenew">
<input type="hidden" name="save" value="1">
{foreach from=$domainids item=domainid}
<input type="hidden" name="domids[]" value="{$domainid}">
{/foreach}
<h4 class="margin-bottom">{$LANG.domainautorenewrecommend}</h4>
<input type="submit" name="enable" value="{$LANG.domainsautorenewenable}" class="btn btn-success btn-lg">
<input type="submit" name="disable" value="{$LANG.domainsautorenewdisable}" class="btn btn-danger btn-lg">
</form>
</div>
</section>
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn ( ) {
echo "$*"
}
die ( ) {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
esac
# For Cygwin, ensure paths are in UNIX format before anything is touched.
if $cygwin ; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
fi
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >&-
APP_HOME="`pwd -P`"
cd "$SAVED" >&-
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
| {
"pile_set_name": "Github"
} |
# IO.Swagger.Api.UserApi
All URIs are relative to *http://petstore.swagger.io:80/v2*
Method | HTTP request | Description
------------- | ------------- | -------------
[**CreateUser**](UserApi.md#createuser) | **POST** /user | Create user
[**CreateUsersWithArrayInput**](UserApi.md#createuserswitharrayinput) | **POST** /user/createWithArray | Creates list of users with given input array
[**CreateUsersWithListInput**](UserApi.md#createuserswithlistinput) | **POST** /user/createWithList | Creates list of users with given input array
[**DeleteUser**](UserApi.md#deleteuser) | **DELETE** /user/{username} | Delete user
[**GetUserByName**](UserApi.md#getuserbyname) | **GET** /user/{username} | Get user by user name
[**LoginUser**](UserApi.md#loginuser) | **GET** /user/login | Logs user into the system
[**LogoutUser**](UserApi.md#logoutuser) | **GET** /user/logout | Logs out current logged in user session
[**UpdateUser**](UserApi.md#updateuser) | **PUT** /user/{username} | Updated user
<a name="createuser"></a>
# **CreateUser**
> void CreateUser (User body)
Create user
This can only be done by the logged in user.
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class CreateUserExample
{
public void main()
{
var apiInstance = new UserApi();
var body = new User(); // User | Created user object
try
{
// Create user
apiInstance.CreateUser(body);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.CreateUser: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**body** | [**User**](User.md)| Created user object |
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="createuserswitharrayinput"></a>
# **CreateUsersWithArrayInput**
> void CreateUsersWithArrayInput (List<User> body)
Creates list of users with given input array
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class CreateUsersWithArrayInputExample
{
public void main()
{
var apiInstance = new UserApi();
var body = new List<User>(); // List<User> | List of user object
try
{
// Creates list of users with given input array
apiInstance.CreateUsersWithArrayInput(body);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.CreateUsersWithArrayInput: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**body** | [**List<User>**](User.md)| List of user object |
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="createuserswithlistinput"></a>
# **CreateUsersWithListInput**
> void CreateUsersWithListInput (List<User> body)
Creates list of users with given input array
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class CreateUsersWithListInputExample
{
public void main()
{
var apiInstance = new UserApi();
var body = new List<User>(); // List<User> | List of user object
try
{
// Creates list of users with given input array
apiInstance.CreateUsersWithListInput(body);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.CreateUsersWithListInput: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**body** | [**List<User>**](User.md)| List of user object |
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="deleteuser"></a>
# **DeleteUser**
> void DeleteUser (string username)
Delete user
This can only be done by the logged in user.
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class DeleteUserExample
{
public void main()
{
var apiInstance = new UserApi();
var username = username_example; // string | The name that needs to be deleted
try
{
// Delete user
apiInstance.DeleteUser(username);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.DeleteUser: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**username** | **string**| The name that needs to be deleted |
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="getuserbyname"></a>
# **GetUserByName**
> User GetUserByName (string username)
Get user by user name
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class GetUserByNameExample
{
public void main()
{
var apiInstance = new UserApi();
var username = username_example; // string | The name that needs to be fetched. Use user1 for testing.
try
{
// Get user by user name
User result = apiInstance.GetUserByName(username);
Debug.WriteLine(result);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.GetUserByName: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**username** | **string**| The name that needs to be fetched. Use user1 for testing. |
### Return type
[**User**](User.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="loginuser"></a>
# **LoginUser**
> string LoginUser (string username, string password)
Logs user into the system
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class LoginUserExample
{
public void main()
{
var apiInstance = new UserApi();
var username = username_example; // string | The user name for login
var password = password_example; // string | The password for login in clear text
try
{
// Logs user into the system
string result = apiInstance.LoginUser(username, password);
Debug.WriteLine(result);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.LoginUser: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**username** | **string**| The user name for login |
**password** | **string**| The password for login in clear text |
### Return type
**string**
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="logoutuser"></a>
# **LogoutUser**
> void LogoutUser ()
Logs out current logged in user session
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class LogoutUserExample
{
public void main()
{
var apiInstance = new UserApi();
try
{
// Logs out current logged in user session
apiInstance.LogoutUser();
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.LogoutUser: " + e.Message );
}
}
}
}
```
### Parameters
This endpoint does not need any parameter.
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
<a name="updateuser"></a>
# **UpdateUser**
> void UpdateUser (string username, User body)
Updated user
This can only be done by the logged in user.
### Example
```csharp
using System;
using System.Diagnostics;
using IO.Swagger.Api;
using IO.Swagger.Client;
using IO.Swagger.Model;
namespace Example
{
public class UpdateUserExample
{
public void main()
{
var apiInstance = new UserApi();
var username = username_example; // string | name that need to be deleted
var body = new User(); // User | Updated user object
try
{
// Updated user
apiInstance.UpdateUser(username, body);
}
catch (Exception e)
{
Debug.Print("Exception when calling UserApi.UpdateUser: " + e.Message );
}
}
}
}
```
### Parameters
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**username** | **string**| name that need to be deleted |
**body** | [**User**](User.md)| Updated user object |
### Return type
void (empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/xml, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
| {
"pile_set_name": "Github"
} |
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Example - example-example28-debug</title>
<script src="../../../angular.js"></script>
<script src="script.js"></script>
</head>
<body ng-app="FilterInControllerModule">
<div ng-controller="FilterController as ctrl">
<div>
All entries:
<span ng-repeat="entry in ctrl.array">{{entry.name}} </span>
</div>
<div>
Entries that contain an "a":
<span ng-repeat="entry in ctrl.filteredArray">{{entry.name}} </span>
</div>
</div>
</body>
</html> | {
"pile_set_name": "Github"
} |
using Util.Ui.Builders;
namespace Util.Ui.Zorro.Forms.Builders {
    /// <summary>
    /// NgZorro text box wrapper builder; renders the "x-textbox" wrapper tag.
    /// </summary>
    public class TextBoxWrapperBuilder : TagBuilder {
        /// <summary>
        /// Initializes the NgZorro text box wrapper builder with the "x-textbox" tag name.
        /// </summary>
        public TextBoxWrapperBuilder() : base( "x-textbox" ) {
        }
    }
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: cee75051bd22b8b48a7218d4a48f60a6
TextureImporter:
internalIDToNameTable: []
externalObjects: {}
serializedVersion: 11
mipmaps:
mipMapMode: 0
enableMipMap: 1
sRGBTexture: 1
linearTexture: 0
fadeOut: 0
borderMipMap: 0
mipMapsPreserveCoverage: 0
alphaTestReferenceValue: 0.5
mipMapFadeDistanceStart: 1
mipMapFadeDistanceEnd: 3
bumpmap:
convertToNormalMap: 0
externalNormalMap: 0
heightScale: 0.25
normalMapFilter: 0
isReadable: 0
streamingMipmaps: 0
streamingMipmapsPriority: 0
vTOnly: 0
grayScaleToAlpha: 0
generateCubemap: 6
cubemapConvolution: 0
seamlessCubemap: 0
textureFormat: 1
maxTextureSize: 2048
textureSettings:
serializedVersion: 2
filterMode: -1
aniso: -1
mipBias: -100
wrapU: -1
wrapV: -1
wrapW: -1
nPOTScale: 1
lightmap: 0
compressionQuality: 50
spriteMode: 0
spriteExtrude: 1
spriteMeshType: 1
alignment: 0
spritePivot: {x: 0.5, y: 0.5}
spritePixelsToUnits: 100
spriteBorder: {x: 0, y: 0, z: 0, w: 0}
spriteGenerateFallbackPhysicsShape: 1
alphaUsage: 1
alphaIsTransparency: 0
spriteTessellationDetail: -1
textureType: 0
textureShape: 1
singleChannelComponent: 0
flipbookRows: 1
flipbookColumns: 1
maxTextureSizeSet: 0
compressionQualitySet: 0
textureFormatSet: 0
ignorePngGamma: 0
applyGammaDecoding: 0
platformSettings:
- serializedVersion: 3
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1
textureCompression: 1
compressionQuality: 50
crunchedCompression: 0
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
forceMaximumCompressionQuality_BC6H_BC7: 0
spriteSheet:
serializedVersion: 2
sprites: []
outline: []
physicsShape: []
bones: []
spriteID:
internalID: 0
vertices: []
indices:
edges: []
weights: []
secondaryTextures: []
spritePackingTag:
pSDRemoveMatte: 0
pSDShowRemoveMatteOption: 0
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
test111
| {
"pile_set_name": "Github"
} |
EXTRA_DIST = \
LICENSE
noinst_LTLIBRARIES = libipcrypt.la
libipcrypt_la_SOURCES = \
ipcrypt.c \
ipcrypt.h
| {
"pile_set_name": "Github"
} |
//= link_tree ../images
//= link_directory ../javascripts .js
//= link_directory ../stylesheets .css
//= link sail_manifest.js
| {
"pile_set_name": "Github"
} |
documentation_complete: true
prodtype: fedora,rhcos4,ol7,ol8,rhel7,rhel8,rhv4
title: 'Disable debug-shell SystemD Service'
description: |-
SystemD's <tt>debug-shell</tt> service is intended to
diagnose SystemD related boot issues with various <tt>systemctl</tt>
commands. Once enabled and following a system reboot, the root shell
    will be available on <tt>tty9</tt>, which is accessed by pressing
<tt>CTRL-ALT-F9</tt>. The <tt>debug-shell</tt> service should only be used
for SystemD related issues and should otherwise be disabled.
<br /><br />
By default, the <tt>debug-shell</tt> SystemD service is already disabled.
{{{ describe_service_disable(service="debug-shell") }}}
rationale: |-
This prevents attackers with physical access from trivially bypassing security
on the machine through valid troubleshooting configurations and gaining root
access when the system is rebooted.
severity: medium
identifiers:
cce@rhel7: CCE-80206-6
cce@rhel8: CCE-80876-6
cce@rhcos4: CCE-82496-1
references:
cui: 3.4.5
hipaa: 164.308(a)(1)(ii)(B),164.308(a)(7)(i),164.308(a)(7)(ii)(A),164.310(a)(1),164.310(a)(2)(i),164.310(a)(2)(ii),164.310(a)(2)(iii),164.310(b),164.310(c),164.310(d)(1),164.310(d)(2)(iii)
ospp: FIA_UAU.1
srg: SRG-OS-000324-GPOS-00125
ocil: '{{{ ocil_service_disabled(service="debug-shell") }}}'
platform: machine
template:
name: service_disabled
vars:
servicename: debug-shell
packagename: systemd
| {
"pile_set_name": "Github"
} |
<!doctype html>
<!--
@license
Copyright (c) 2015 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
-->
<html>
<head>
<meta charset="UTF-8">
<title>paper-icon-button basic tests</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0">
<script src="../../webcomponentsjs/webcomponents-lite.js"></script>
<script src="../../web-component-tester/browser.js"></script>
<link rel="import" href="../../iron-icons/iron-icons.html">
<link rel="import" href="../paper-icon-button.html">
</head>
<body>
<test-fixture id="TrivialIconButton">
<template>
<div style="line-height:30px;">
<paper-icon-button id="fab1" icon="add"></paper-icon-button>
</div>
</template>
</test-fixture>
<test-fixture id="SrcIconButton">
<template>
<paper-icon-button src="add.png"></paper-icon-button>
</template>
</test-fixture>
<script>
var b1;
var b2;
function centerOf(element) {
var rect = element.getBoundingClientRect();
return {left: rect.left + rect.width / 2, top: rect.top + rect.height / 2};
}
function approxEqual(p1, p2) {
return Math.abs(p1.left - p2.left) <= 2 && Math.abs(p1.top-p2.top) <= 2;
}
setup(function() {
b1 = fixture('TrivialIconButton').querySelector('#fab1');
b2 = fixture('SrcIconButton');
});
test('applies an icon specified by the `icon` attribute', function() {
assert.strictEqual(!!b1.$.icon.src, false);
assert.ok(Polymer.dom(b1.$.icon.root).querySelector('svg'));
});
test('applies an icon specified by the `src` attribute', function() {
assert.strictEqual(!!b2.$.icon.src, true);
assert.ok(b2.$.icon.src);
});
test('renders correctly independent of line height', function() {
assert.ok(approxEqual(centerOf(b1.$.icon), centerOf(b1)));
});
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
As the number of issues grows, or as more complex reducers are
introduced, performance degrades significantly.
Solution: enable parallel reduction of issues
Since issues are independent trees of records, there's
not much reason to process each issue sequentially.
As a part of this solution, we needed to avoid re-compiling
reducer functions for every individual instance of `DuktapeReducer`
as that is often cost-prohibitive. This is accomplished
by allowing cloning `DuktapeReducer` in which it'll copy
saved bytecode for functions as well as the state these functions
had.
| {
"pile_set_name": "Github"
} |
<?php
/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <[email protected]>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */
namespace Symfony\Component\Security\Acl\Model;
/**
 * This interface represents an access control list (ACL) for a domain object.
 * Each domain object can have exactly one associated ACL.
 *
 * An ACL contains all access control entries (ACE) for a given domain object.
 * In order to avoid needing references to the domain object itself, implementations
 * use ObjectIdentity implementations as an additional level of indirection.
 *
 * @author Johannes M. Schmitt <[email protected]>
 */
interface AclInterface extends \Serializable
{
    /**
     * Returns all class-based ACEs associated with this ACL.
     *
     * @return array
     */
    function getClassAces();
    /**
     * Returns all class-field-based ACEs associated with this ACL.
     *
     * @param string $field the name of the field
     * @return array
     */
    function getClassFieldAces($field);
    /**
     * Returns all object-based ACEs associated with this ACL.
     *
     * @return array
     */
    function getObjectAces();
    /**
     * Returns all object-field-based ACEs associated with this ACL.
     *
     * @param string $field the name of the field
     * @return array
     */
    function getObjectFieldAces($field);
    /**
     * Returns the object identity associated with this ACL.
     *
     * @return ObjectIdentityInterface
     */
    function getObjectIdentity();
    /**
     * Returns the parent ACL, or null if there is none.
     *
     * @return AclInterface|null
     */
    function getParentAcl();
    /**
     * Whether this ACL is inheriting ACEs from a parent ACL.
     *
     * @return Boolean
     */
    function isEntriesInheriting();
    /**
     * Determines whether field access is granted.
     *
     * @param string  $field              the name of the field to check
     * @param array   $masks              the permission masks to check against
     * @param array   $securityIdentities the security identities to check for
     * @param Boolean $administrativeMode
     * @return Boolean
     */
    function isFieldGranted($field, array $masks, array $securityIdentities, $administrativeMode = false);
    /**
     * Determines whether access is granted.
     *
     * @throws NoAceFoundException when no ACE was applicable for this request
     * @param array   $masks              the permission masks to check against
     * @param array   $securityIdentities the security identities to check for
     * @param Boolean $administrativeMode
     * @return Boolean
     */
    function isGranted(array $masks, array $securityIdentities, $administrativeMode = false);
    /**
     * Whether the ACL has loaded ACEs for all of the passed security identities.
     *
     * @param mixed $securityIdentities an implementation of SecurityIdentityInterface, or an array thereof
     * @return Boolean
     */
    function isSidLoaded($securityIdentities);
}
"pile_set_name": "Github"
} |
'use strict';
//
// Sales chart
//
var SalesChart = (function() {
  // Canvas element for the dark sales chart
  var $canvas = $('#chart-sales-dark');

  // Build the Chart.js line chart and store it on the element
  function render($el) {
    var chart = new Chart($el, {
      type: 'line',
      options: {
        scales: {
          yAxes: [{
            gridLines: {
              lineWidth: 1,
              color: Charts.colors.gray[900],
              zeroLineColor: Charts.colors.gray[900]
            },
            ticks: {
              callback: function(value) {
                // Label only every 10th tick, formatted as dollars
                if (!(value % 10)) {
                  return '$' + value + 'k';
                }
              }
            }
          }]
        },
        tooltips: {
          callbacks: {
            label: function(item, data) {
              var dataset = data.datasets[item.datasetIndex];
              var name = dataset.label || '';
              var amount = item.yLabel;
              var html = '';
              // Show the dataset name only when several datasets exist
              if (data.datasets.length > 1) {
                html += '<span class="popover-body-label mr-auto">' + name + '</span>';
              }
              html += '<span class="popover-body-value">$' + amount + 'k</span>';
              return html;
            }
          }
        }
      },
      data: {
        labels: ['May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
        datasets: [{
          label: 'Performance',
          data: [0, 20, 10, 30, 15, 40, 20, 60, 60]
        }]
      }
    });

    // Save to jQuery object
    $el.data('chart', chart);
  }

  // Only render when the canvas is present on the page
  if ($canvas.length) {
    render($canvas);
  }
})();
| {
"pile_set_name": "Github"
} |
<mjml>
<mj-body background-color="#fff">
<mj-section>
<mj-column>
<mj-image
width="100px"
src="{{host}}/assets/images/logos/logo_Gauzy.svg"
></mj-image>
<mj-divider
border-color="#1B005D"
border-width="1px"
></mj-divider>
<mj-text font-size="20px" font-family="helvetica"
>Расписание Удалить</mj-text
>
<mj-text font-size="16px" font-family="helvetica">
<p>Уважаемые {{candidateName}},</p>
<p>
Ваш табель для проекта {{project}} успешно удален ... !!
</p>
</mj-text>
<mj-text font-size="16px" font-family="helvetica">
<p>С уважением,</p>
<p>Команда Гаузи</p>
</mj-text>
<mj-divider
border-color="#1B005D"
border-width="1px"
></mj-divider>
<mj-text align="center" font-size="12px" font-family="helvetica"
>© 2019,
<a href="https://app.gauzy.co/." style="color:#598bff"
>Gauzy</a
>
by
<a href="https://ever.co/" style="color:#598bff"
>Ever Co. LTD.</a
>
All rights reserved.
</mj-text>
</mj-column>
</mj-section>
</mj-body>
</mjml>
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_COMPRESS_LITERALS_H
#define ZSTD_COMPRESS_LITERALS_H
#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
/* Store the literals section uncompressed (raw).
 * Returns the number of bytes written into dst
 * (NOTE(review): presumably a zstd error code on failure — confirm in the .c file). */
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* Store the literals section as a single repeated byte (RLE block). */
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* Compress the literals section with Huffman coding.
 * prevHuf/nextHuf carry the entropy tables across blocks; entropyWorkspace is
 * scratch memory of entropyWorkspaceSize bytes; bmi2 selects the BMI2-accelerated
 * path when non-zero (NOTE(review): parameter semantics inferred from names —
 * confirm against zstd_compress_literals.c). */
size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                      ZSTD_hufCTables_t* nextHuf,
                      ZSTD_strategy strategy, int disableLiteralCompression,
                      void* dst, size_t dstCapacity,
                const void* src, size_t srcSize,
                      void* entropyWorkspace, size_t entropyWorkspaceSize,
                      const int bmi2);
#endif /* ZSTD_COMPRESS_LITERALS_H */
| {
"pile_set_name": "Github"
} |
AUI().use(
    'aui-base', 'aui-delayed-task', 'liferay-poller',
    function(A) {
        Liferay.namespace('BladePoller');

        Liferay.BladePoller.Manager = {
            // Read the portlet id and container from the DOM and start polling.
            init: function() {
                var self = this;

                self._portletId = A.one("#pollerPortletId").val();
                self._bladePollerContainer = A.one('#bladePollerContainer');

                console.log("Init: portletId:"+self._portletId+
                    ", containerId:"+self._bladePollerContainer);

                // Debounced heartbeat: at most one update per 30s window.
                self._updateTask = new A.debounce(self._updateMessage, 30000, self);
                self._updateTask.delay(0);

                Liferay.Poller.addListener(self._portletId, self._onPollerUpdate, self);

                // Stop polling and hide the container when the session dies.
                Liferay.on(
                    'sessionExpired',
                    function(event) {
                        Liferay.Poller.removeListener(self._portletId);
                        self._bladePollerContainer.hide();
                    }
                );
            },
            // Submit a poller request, then kick the debounced heartbeat.
            send: function(options, id) {
                console.log("Options:" + options + ", id:" + id);

                var self = this;

                Liferay.Poller.submitRequest(self._portletId, options, id);
                self._updateTask();
            },
            // Heartbeat payload sent by the debounced task.
            _updateMessage: function() {
                console.log("Update Message");

                var self = this;

                self.send({
                    status: 'OK'
                });
            },
            // Render a pushed message and acknowledge receipt.
            _onPollerUpdate: function(response, chunkId) {
                console.log("updating...");

                var self = this;

                self._bladePollerContainer.text(response.content.message);
                self._bladePollerContainer.show();

                self.send({
                    status: 'OK'
                });
            }
        };

        A.augment(Liferay.BladePoller.Manager, A.Attribute, true);

        Liferay.publish(
            'pollerPortletReady',
            {
                defaultFn: A.bind('init', Liferay.BladePoller.Manager),
                fireOnce: true
            }
        );

        A.on(
            'domready',
            function() {
                Liferay.fire('pollerPortletReady');
            }
        );
    }
);
"pile_set_name": "Github"
} |
/*
* RedGL - MIT License
* Copyright (c) 2018 - 2019 By RedCamel([email protected])
* https://github.com/redcamel/RedGL2/blob/dev/LICENSE
* Last modification time of this file - 2019.6.20 11:36
*/
"use strict";
var RedPostEffect_Threshold;
(function () {
var vSource, fSource;
var PROGRAM_NAME = 'RedPostEffectThresholdProgram';
var checked;
vSource = RedBasePostEffect['baseVertexShaderSource1']
fSource = function () {
/* @preserve
precision highp float;
uniform sampler2D u_diffuseTexture;
uniform float u_threshold_value;
void main() {
vec4 finalColor = texture2D(u_diffuseTexture, vTexcoord);
float v;
if(0.2126 * finalColor.r + 0.7152 * finalColor.g + 0.0722 * finalColor.b >= u_threshold_value) v = 1.0;
else v = 0.0;
finalColor.r = finalColor.g = finalColor.b = v;
gl_FragColor = finalColor;
}
*/
};
/*DOC:
{
constructorYn : true,
title :`RedPostEffect_Threshold`,
description : `
Threshold 이펙트
postEffectManager.addEffect( effect Instance ) 로 추가.
`,
params : {
redGL : [
{type:'RedGL'}
]
},
extends : [
'RedBasePostEffect',
'RedBaseMaterial'
],
demo : '../example/postEffect/adjustments/RedPostEffect_Threshold.html',
example : `
var effect;
effect = RedPostEffect_Threshold(RedGL Instance); // 포스트이펙트 생성
// postEffectManager는 RedView 생성시 자동생성됨.
(RedView Instance)['postEffectManager'].addEffect(effect); // 뷰에 이펙트 추가
`,
return : 'RedPostEffect_Threshold Instance'
}
:DOC*/
RedPostEffect_Threshold = function (redGL) {
if (!(this instanceof RedPostEffect_Threshold)) return new RedPostEffect_Threshold(redGL);
redGL instanceof RedGL || RedGLUtil.throwFunc('RedPostEffect_Threshold : RedGL Instance만 허용.', redGL);
this['frameBuffer'] = RedFrameBuffer(redGL);
this['diffuseTexture'] = null;
this['threshold'] = 128;
/////////////////////////////////////////
// 일반 프로퍼티
this['program'] = RedProgram['makeProgram'](redGL, PROGRAM_NAME, vSource, fSource);
this['_UUID'] = RedGL.makeUUID();
if (!checked) {
this.checkUniformAndProperty();
checked = true;
}
console.log(this);
};
RedPostEffect_Threshold.prototype = new RedBasePostEffect();
RedPostEffect_Threshold.prototype['updateTexture'] = function (lastFrameBufferTexture) {
this['diffuseTexture'] = lastFrameBufferTexture;
};
RedDefinePropertyInfo.definePrototypes(
'RedPostEffect_Threshold',
['diffuseTexture', 'sampler2D'],
/*DOC:
{
code : 'PROPERTY',
title :`threshold`,
description : `
최소 유효값
기본값 : 128
min: 1
max: 255
`,
return : 'Number'
}
:DOC*/
[
'threshold', 'number',
{
min: 1, max: 255, callback: function (v) {
this['_threshold_value'] = v / 255
}
}
]
);
Object.freeze(RedPostEffect_Threshold);
})(); | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<ProjectOpt xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="project_opt.xsd">
<SchemaVersion>1.0</SchemaVersion>
<Header>### uVision Project, (C) Keil Software</Header>
<Extensions>
<cExt>*.c</cExt>
<aExt>*.s*; *.src; *.a*</aExt>
<oExt>*.obj</oExt>
<lExt>*.lib</lExt>
<tExt>*.txt; *.h; *.inc</tExt>
<pExt>*.plm</pExt>
<CppX>*.cpp</CppX>
</Extensions>
<DaveTm>
<dwLowDateTime>0</dwLowDateTime>
<dwHighDateTime>0</dwHighDateTime>
</DaveTm>
<Target>
<TargetName>Target 1</TargetName>
<ToolsetNumber>0x4</ToolsetNumber>
<ToolsetName>ARM-ADS</ToolsetName>
<TargetOption>
<CLKADS>12000000</CLKADS>
<OPTTT>
<gFlags>1</gFlags>
<BeepAtEnd>1</BeepAtEnd>
<RunSim>1</RunSim>
<RunTarget>0</RunTarget>
</OPTTT>
<OPTHX>
<HexSelection>1</HexSelection>
<FlashByte>65535</FlashByte>
<HexRangeLowAddress>0</HexRangeLowAddress>
<HexRangeHighAddress>0</HexRangeHighAddress>
<HexOffset>0</HexOffset>
</OPTHX>
<OPTLEX>
<PageWidth>79</PageWidth>
<PageLength>66</PageLength>
<TabStop>8</TabStop>
<ListingPath>.\debug\</ListingPath>
</OPTLEX>
<ListingPage>
<CreateCListing>1</CreateCListing>
<CreateAListing>1</CreateAListing>
<CreateLListing>1</CreateLListing>
<CreateIListing>0</CreateIListing>
<AsmCond>1</AsmCond>
<AsmSymb>1</AsmSymb>
<AsmXref>0</AsmXref>
<CCond>1</CCond>
<CCode>0</CCode>
<CListInc>0</CListInc>
<CSymb>0</CSymb>
<LinkerCodeListing>0</LinkerCodeListing>
</ListingPage>
<OPTXL>
<LMap>1</LMap>
<LComments>1</LComments>
<LGenerateSymbols>1</LGenerateSymbols>
<LLibSym>1</LLibSym>
<LLines>1</LLines>
<LLocSym>1</LLocSym>
<LPubSym>1</LPubSym>
<LXref>0</LXref>
<LExpSel>0</LExpSel>
</OPTXL>
<OPTFL>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<IsCurrentTarget>1</IsCurrentTarget>
</OPTFL>
<CpuCode>255</CpuCode>
<DllOpt>
<SimDllName>SARMCM3.DLL</SimDllName>
<SimDllArguments></SimDllArguments>
<SimDlgDllName>DCM.DLL</SimDlgDllName>
<SimDlgDllArguments>-pCM3</SimDlgDllArguments>
<TargetDllName>SARMCM3.DLL</TargetDllName>
<TargetDllArguments></TargetDllArguments>
<TargetDlgDllName>TCM.DLL</TargetDlgDllName>
<TargetDlgDllArguments>-pCM3</TargetDlgDllArguments>
</DllOpt>
<DebugOpt>
<uSim>1</uSim>
<uTrg>0</uTrg>
<sLdApp>1</sLdApp>
<sGomain>1</sGomain>
<sRbreak>1</sRbreak>
<sRwatch>1</sRwatch>
<sRmem>1</sRmem>
<sRfunc>1</sRfunc>
<sRbox>1</sRbox>
<tLdApp>1</tLdApp>
<tGomain>0</tGomain>
<tRbreak>1</tRbreak>
<tRwatch>1</tRwatch>
<tRmem>1</tRmem>
<tRfunc>0</tRfunc>
<tRbox>1</tRbox>
<sRunDeb>0</sRunDeb>
<sLrtime>0</sLrtime>
<nTsel>-1</nTsel>
<sDll></sDll>
<sDllPa></sDllPa>
<sDlgDll></sDlgDll>
<sDlgPa></sDlgPa>
<sIfile>.\arm_linear_interp_example.ini</sIfile>
<tDll></tDll>
<tDllPa></tDllPa>
<tDlgDll></tDlgDll>
<tDlgPa></tDlgPa>
<tIfile></tIfile>
<pMon></pMon>
</DebugOpt>
<TargetDriverDllRegistry>
<SetRegEntry>
<Number>0</Number>
<Key>DLGDARM</Key>
<Name>(1010=-1,-1,-1,-1,0)(1007=-1,-1,-1,-1,0)(1008=-1,-1,-1,-1,0)(1009=-1,-1,-1,-1,0)</Name>
</SetRegEntry>
<SetRegEntry>
<Number>0</Number>
<Key>ARMDBGFLAGS</Key>
<Name>-T0</Name>
</SetRegEntry>
</TargetDriverDllRegistry>
<Breakpoint>
<Bp>
<Number>0</Number>
<Type>0</Type>
<LineNumber>175</LineNumber>
<EnabledFlag>1</EnabledFlag>
<Address>454</Address>
<ByteObject>0</ByteObject>
<ManyObjects>0</ManyObjects>
<SizeOfObject>0</SizeOfObject>
<BreakByAccess>0</BreakByAccess>
<BreakIfRCount>1</BreakIfRCount>
<Filename></Filename>
<ExecCommand></ExecCommand>
<Expression>\\arm_cortexM3l_linear_interp_example\../arm_linear_interp_example_f32.c\175</Expression>
</Bp>
<Bp>
<Number>1</Number>
<Type>0</Type>
<LineNumber>178</LineNumber>
<EnabledFlag>1</EnabledFlag>
<Address>456</Address>
<ByteObject>0</ByteObject>
<ManyObjects>0</ManyObjects>
<SizeOfObject>0</SizeOfObject>
<BreakByAccess>0</BreakByAccess>
<BreakIfRCount>1</BreakIfRCount>
<Filename></Filename>
<ExecCommand></ExecCommand>
<Expression>\\arm_cortexM3l_linear_interp_example\../arm_linear_interp_example_f32.c\178</Expression>
</Bp>
</Breakpoint>
<DebugFlag>
<trace>0</trace>
<periodic>0</periodic>
<aLwin>1</aLwin>
<aCover>0</aCover>
<aSer1>0</aSer1>
<aSer2>0</aSer2>
<aPa>0</aPa>
<viewmode>1</viewmode>
<vrSel>0</vrSel>
<aSym>0</aSym>
<aTbox>0</aTbox>
<AscS1>0</AscS1>
<AscS2>0</AscS2>
<AscS3>0</AscS3>
<aSer3>0</aSer3>
<eProf>0</eProf>
<aLa>0</aLa>
<aPa1>0</aPa1>
<AscS4>0</AscS4>
<aSer4>0</aSer4>
<StkLoc>0</StkLoc>
<TrcWin>0</TrcWin>
<newCpu>0</newCpu>
<uProt>0</uProt>
</DebugFlag>
<LintExecutable></LintExecutable>
<LintConfigFile></LintConfigFile>
</TargetOption>
</Target>
<Group>
<GroupName>Source Group 1</GroupName>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<File>
<GroupNumber>1</GroupNumber>
<FileNumber>1</FileNumber>
<FileType>1</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>1</TopLine>
<CurrentLine>1</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>..\arm_linear_interp_example_f32.c</PathWithFileName>
<FilenameWithoutPath>arm_linear_interp_example_f32.c</FilenameWithoutPath>
</File>
<File>
<GroupNumber>1</GroupNumber>
<FileNumber>2</FileNumber>
<FileType>1</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>0</TopLine>
<CurrentLine>0</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>..\arm_linear_interp_data.c</PathWithFileName>
<FilenameWithoutPath>arm_linear_interp_data.c</FilenameWithoutPath>
</File>
<File>
<GroupNumber>1</GroupNumber>
<FileNumber>3</FileNumber>
<FileType>1</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>0</TopLine>
<CurrentLine>0</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>..\..\Common\Source\math_helper.c</PathWithFileName>
<FilenameWithoutPath>math_helper.c</FilenameWithoutPath>
</File>
</Group>
<Group>
<GroupName>CMSIS DeviceSupport</GroupName>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<File>
<GroupNumber>2</GroupNumber>
<FileNumber>4</FileNumber>
<FileType>1</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>0</TopLine>
<CurrentLine>0</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>..\system_ARMCM3.c</PathWithFileName>
<FilenameWithoutPath>system_ARMCM3.c</FilenameWithoutPath>
</File>
<File>
<GroupNumber>2</GroupNumber>
<FileNumber>5</FileNumber>
<FileType>2</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>0</TopLine>
<CurrentLine>0</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>.\startup_ARMCM3.s</PathWithFileName>
<FilenameWithoutPath>startup_ARMCM3.s</FilenameWithoutPath>
</File>
</Group>
<Group>
<GroupName>CMSIS SW_DSP_Library</GroupName>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<File>
<GroupNumber>3</GroupNumber>
<FileNumber>6</FileNumber>
<FileType>4</FileType>
<tvExp>0</tvExp>
<Focus>0</Focus>
<ColumnNumber>0</ColumnNumber>
<tvExpOptDlg>0</tvExpOptDlg>
<TopLine>0</TopLine>
<CurrentLine>0</CurrentLine>
<bDave2>0</bDave2>
<PathWithFileName>..\..\..\..\Lib\ARM\arm_cortexM3l_math.lib</PathWithFileName>
<FilenameWithoutPath>arm_cortexM3l_math.lib</FilenameWithoutPath>
</File>
</Group>
<MDIGroups>
<Orientation>1</Orientation>
<ActiveMDIGroup>0</ActiveMDIGroup>
<MDIGroup>
<Size>100</Size>
<ActiveTab>0</ActiveTab>
<Documents>
<Doc>
<Name>..\arm_linear_interp_example_f32.c</Name>
<ColumnNumber>0</ColumnNumber>
<TopLine>1</TopLine>
<CurrentLine>1</CurrentLine>
</Doc>
</Documents>
</MDIGroup>
</MDIGroups>
</ProjectOpt>
| {
"pile_set_name": "Github"
} |
package fasthttp
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"mime/multipart"
"os"
"sync"
"github.com/valyala/bytebufferpool"
)
// Request represents HTTP request.
//
// It is forbidden copying Request instances. Create new instances
// and use CopyTo instead.
//
// Request instance MUST NOT be used from concurrently running goroutines.
type Request struct {
noCopy noCopy
// Request header
//
// Copying Header by value is forbidden. Use pointer to Header instead.
Header RequestHeader
uri URI
postArgs Args
bodyStream io.Reader
w requestBodyWriter
body *bytebufferpool.ByteBuffer
multipartForm *multipart.Form
multipartFormBoundary string
// Group bool members in order to reduce Request object size.
parsedURI bool
parsedPostArgs bool
keepBodyBuffer bool
isTLS bool
}
// Response represents HTTP response.
//
// It is forbidden copying Response instances. Create new instances
// and use CopyTo instead.
//
// Response instance MUST NOT be used from concurrently running goroutines.
type Response struct {
noCopy noCopy
// Response header
//
// Copying Header by value is forbidden. Use pointer to Header instead.
Header ResponseHeader
bodyStream io.Reader
w responseBodyWriter
body *bytebufferpool.ByteBuffer
// Response.Read() skips reading body if set to true.
// Use it for reading HEAD responses.
//
// Response.Write() skips writing body if set to true.
// Use it for writing HEAD responses.
SkipBody bool
keepBodyBuffer bool
}
// SetHost sets host for the request.
func (req *Request) SetHost(host string) {
req.URI().SetHost(host)
}
// SetHostBytes sets host for the request.
func (req *Request) SetHostBytes(host []byte) {
req.URI().SetHostBytes(host)
}
// Host returns the host for the given request.
func (req *Request) Host() []byte {
return req.URI().Host()
}
// SetRequestURI sets RequestURI.
func (req *Request) SetRequestURI(requestURI string) {
req.Header.SetRequestURI(requestURI)
req.parsedURI = false
}
// SetRequestURIBytes sets RequestURI.
func (req *Request) SetRequestURIBytes(requestURI []byte) {
req.Header.SetRequestURIBytes(requestURI)
req.parsedURI = false
}
// RequestURI returns request's URI.
//
// If the URI has already been parsed (req.parsedURI is set), the header's
// RequestURI is first re-synchronized from the parsed form, so mutations
// made through the parsed URI object are reflected in the returned bytes.
func (req *Request) RequestURI() []byte {
	if req.parsedURI {
		// SetRequestURIBytes also clears parsedURI, forcing a re-parse
		// on the next access to the parsed URI.
		requestURI := req.uri.RequestURI()
		req.SetRequestURIBytes(requestURI)
	}
	return req.Header.RequestURI()
}
// StatusCode returns response status code.
func (resp *Response) StatusCode() int {
return resp.Header.StatusCode()
}
// SetStatusCode sets response status code.
func (resp *Response) SetStatusCode(statusCode int) {
resp.Header.SetStatusCode(statusCode)
}
// ConnectionClose returns true if 'Connection: close' header is set.
func (resp *Response) ConnectionClose() bool {
return resp.Header.ConnectionClose()
}
// SetConnectionClose sets 'Connection: close' header.
func (resp *Response) SetConnectionClose() {
resp.Header.SetConnectionClose()
}
// ConnectionClose returns true if 'Connection: close' header is set.
func (req *Request) ConnectionClose() bool {
return req.Header.ConnectionClose()
}
// SetConnectionClose sets 'Connection: close' header.
func (req *Request) SetConnectionClose() {
req.Header.SetConnectionClose()
}
// SendFile registers file on the given path to be used as response body
// when Write is called.
//
// Note that SendFile doesn't set Content-Type, so set it yourself
// with Header.SetContentType.
//
// The opened file is closed automatically after the body stream has been
// fully read, since *os.File implements io.Closer (see SetBodyStream).
func (resp *Response) SendFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	fileInfo, err := f.Stat()
	if err != nil {
		// Ownership of f only transfers to the body stream on success,
		// so it must be closed here on the error path.
		f.Close()
		return err
	}
	// Guard against int overflow on 32-bit platforms: a file size that
	// doesn't fit in int is reported as -1 ("read until io.EOF").
	size64 := fileInfo.Size()
	size := int(size64)
	if int64(size) != size64 {
		size = -1
	}
	resp.Header.SetLastModified(fileInfo.ModTime())
	resp.SetBodyStream(f, size)
	return nil
}
// SetBodyStream sets request body stream and, optionally body size.
//
// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
// before returning io.EOF.
//
// If bodySize < 0, then bodyStream is read until io.EOF.
//
// bodyStream.Close() is called after finishing reading all body data
// if it implements io.Closer.
//
// Note that GET and HEAD requests cannot have body.
//
// See also SetBodyStreamWriter.
func (req *Request) SetBodyStream(bodyStream io.Reader, bodySize int) {
req.ResetBody()
req.bodyStream = bodyStream
req.Header.SetContentLength(bodySize)
}
// SetBodyStream sets response body stream and, optionally body size.
//
// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
// before returning io.EOF.
//
// If bodySize < 0, then bodyStream is read until io.EOF.
//
// bodyStream.Close() is called after finishing reading all body data
// if it implements io.Closer.
//
// See also SetBodyStreamWriter.
func (resp *Response) SetBodyStream(bodyStream io.Reader, bodySize int) {
resp.ResetBody()
resp.bodyStream = bodyStream
resp.Header.SetContentLength(bodySize)
}
// IsBodyStream returns true if body is set via SetBodyStream*
func (req *Request) IsBodyStream() bool {
return req.bodyStream != nil
}
// IsBodyStream returns true if body is set via SetBodyStream*
func (resp *Response) IsBodyStream() bool {
return resp.bodyStream != nil
}
// SetBodyStreamWriter registers the given sw for populating request body.
//
// This function may be used in the following cases:
//
//   * if request body is too big (more than 10MB).
//   * if request body is streamed from slow external sources.
//   * if request body must be streamed to the server in chunks
//     (aka `http client push` or `chunked transfer-encoding`).
//
// Note that GET and HEAD requests cannot have body.
//
// See also SetBodyStream.
func (req *Request) SetBodyStreamWriter(sw StreamWriter) {
	// Adapt the writer callback into a reader and install it with an
	// unknown (-1) body size, so the body is sent chunked.
	sr := NewStreamReader(sw)
	req.SetBodyStream(sr, -1)
}
// SetBodyStreamWriter registers the given sw for populating response body.
//
// This function may be used in the following cases:
//
//   * if response body is too big (more than 10MB).
//   * if response body is streamed from slow external sources.
//   * if response body must be streamed to the client in chunks
//     (aka `http server push` or `chunked transfer-encoding`).
//
// See also SetBodyStream.
func (resp *Response) SetBodyStreamWriter(sw StreamWriter) {
	// Adapt the writer callback into a reader and install it with an
	// unknown (-1) body size, so the body is sent chunked.
	sr := NewStreamReader(sw)
	resp.SetBodyStream(sr, -1)
}
// BodyWriter returns writer for populating response body.
//
// If used inside RequestHandler, the returned writer must not be used
// after returning from RequestHandler. Use RequestCtx.Write
// or SetBodyStreamWriter in this case.
func (resp *Response) BodyWriter() io.Writer {
	// Point the embedded adapter at this response so writes append
	// to its body buffer.
	resp.w.r = resp
	return &resp.w
}
// BodyWriter returns writer for populating request body.
func (req *Request) BodyWriter() io.Writer {
	// Point the embedded adapter at this request so writes append
	// to its body buffer.
	req.w.r = req
	return &req.w
}
// responseBodyWriter adapts a Response to io.Writer by appending
// all written bytes to the response body.
type responseBodyWriter struct {
	r *Response
}
// Write appends p to the response body. It never returns an error.
func (w *responseBodyWriter) Write(p []byte) (int, error) {
	w.r.AppendBody(p)
	return len(p), nil
}
// requestBodyWriter adapts a Request to io.Writer by appending
// all written bytes to the request body.
type requestBodyWriter struct {
	r *Request
}
// Write appends p to the request body. It never returns an error.
func (w *requestBodyWriter) Write(p []byte) (int, error) {
	w.r.AppendBody(p)
	return len(p), nil
}
// Body returns response body.
func (resp *Response) Body() []byte {
	if resp.bodyStream != nil {
		// Drain the body stream into the in-memory buffer so the full
		// body can be returned as a byte slice.
		bodyBuf := resp.bodyBuffer()
		bodyBuf.Reset()
		_, err := copyZeroAlloc(bodyBuf, resp.bodyStream)
		resp.closeBodyStream()
		if err != nil {
			// On read failure the error text becomes the body.
			bodyBuf.SetString(err.Error())
		}
	}
	return resp.bodyBytes()
}
// bodyBytes returns the raw response body bytes, or nil if no body
// buffer has been allocated.
func (resp *Response) bodyBytes() []byte {
	if resp.body == nil {
		return nil
	}
	return resp.body.B
}
// bodyBytes returns the raw request body bytes, or nil if no body
// buffer has been allocated.
func (req *Request) bodyBytes() []byte {
	if req.body == nil {
		return nil
	}
	return req.body.B
}
// bodyBuffer lazily allocates the response body buffer from the pool.
func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer {
	if resp.body == nil {
		resp.body = responseBodyPool.Get()
	}
	return resp.body
}
// bodyBuffer lazily allocates the request body buffer from the pool.
func (req *Request) bodyBuffer() *bytebufferpool.ByteBuffer {
	if req.body == nil {
		req.body = requestBodyPool.Get()
	}
	return req.body
}
// Pools for request and response body buffers, reducing allocations
// across reused Request/Response objects.
var (
	responseBodyPool bytebufferpool.Pool
	requestBodyPool  bytebufferpool.Pool
)
// BodyGunzip returns un-gzipped body data.
//
// This method may be used if the request header contains
// 'Content-Encoding: gzip' for reading un-gzipped body.
// Use Body for reading gzipped request body.
func (req *Request) BodyGunzip() ([]byte, error) {
	return gunzipData(req.Body())
}
// BodyGunzip returns un-gzipped body data.
//
// This method may be used if the response header contains
// 'Content-Encoding: gzip' for reading un-gzipped body.
// Use Body for reading gzipped response body.
func (resp *Response) BodyGunzip() ([]byte, error) {
	return gunzipData(resp.Body())
}
// gunzipData decompresses gzip-encoded p into a fresh buffer.
func gunzipData(p []byte) ([]byte, error) {
	var bb ByteBuffer
	_, err := WriteGunzip(&bb, p)
	if err != nil {
		return nil, err
	}
	return bb.B, nil
}
// BodyInflate returns inflated body data.
//
// This method may be used if the request header contains
// 'Content-Encoding: deflate' for reading inflated request body.
// Use Body for reading deflated request body.
func (req *Request) BodyInflate() ([]byte, error) {
	return inflateData(req.Body())
}
// BodyInflate returns inflated body data.
//
// This method may be used if the response header contains
// 'Content-Encoding: deflate' for reading inflated response body.
// Use Body for reading deflated response body.
func (resp *Response) BodyInflate() ([]byte, error) {
	return inflateData(resp.Body())
}
// inflateData decompresses deflate-encoded p into a fresh buffer.
func inflateData(p []byte) ([]byte, error) {
	var bb ByteBuffer
	_, err := WriteInflate(&bb, p)
	if err != nil {
		return nil, err
	}
	return bb.B, nil
}
// BodyWriteTo writes request body to w.
func (req *Request) BodyWriteTo(w io.Writer) error {
	if req.bodyStream != nil {
		// Stream the body straight into w; the stream is consumed and
		// closed as a side effect.
		_, err := copyZeroAlloc(w, req.bodyStream)
		req.closeBodyStream()
		return err
	}
	if req.onlyMultipartForm() {
		// No raw body bytes — serialize the parsed multipart form instead.
		return WriteMultipartForm(w, req.multipartForm, req.multipartFormBoundary)
	}
	_, err := w.Write(req.bodyBytes())
	return err
}
// BodyWriteTo writes response body to w.
func (resp *Response) BodyWriteTo(w io.Writer) error {
	if resp.bodyStream != nil {
		// Stream the body straight into w; the stream is consumed and
		// closed as a side effect.
		_, err := copyZeroAlloc(w, resp.bodyStream)
		resp.closeBodyStream()
		return err
	}
	_, err := w.Write(resp.bodyBytes())
	return err
}
// AppendBody appends p to response body.
//
// It is safe re-using p after the function returns.
func (resp *Response) AppendBody(p []byte) {
	// b2s avoids a copy; AppendBodyString copies the data into the buffer.
	resp.AppendBodyString(b2s(p))
}
// AppendBodyString appends s to response body.
func (resp *Response) AppendBodyString(s string) {
	// A buffered body replaces any previously set body stream.
	resp.closeBodyStream()
	resp.bodyBuffer().WriteString(s)
}
// SetBody sets response body.
//
// It is safe re-using body argument after the function returns.
func (resp *Response) SetBody(body []byte) {
	// b2s avoids a copy; SetBodyString copies the data into the buffer.
	resp.SetBodyString(b2s(body))
}
// SetBodyString sets response body.
func (resp *Response) SetBodyString(body string) {
	// A buffered body replaces any previously set body stream.
	resp.closeBodyStream()
	bodyBuf := resp.bodyBuffer()
	bodyBuf.Reset()
	bodyBuf.WriteString(body)
}
// ResetBody resets response body.
func (resp *Response) ResetBody() {
	resp.closeBodyStream()
	if resp.body != nil {
		if resp.keepBodyBuffer {
			// Keep the buffer attached for reuse; just clear its contents.
			resp.body.Reset()
		} else {
			// Return the buffer to the pool and detach it.
			responseBodyPool.Put(resp.body)
			resp.body = nil
		}
	}
}
// ReleaseBody retires the response body if it is greater than "size" bytes.
//
// This permits GC to reclaim the large buffer. If used, must be before
// ReleaseResponse.
//
// Use this method only if you really understand how it works.
// The majority of workloads don't need this method.
func (resp *Response) ReleaseBody(size int) {
	// Guard against a nil body buffer (never allocated, or already
	// released) — cap(resp.body.B) would panic otherwise.
	if resp.body == nil {
		return
	}
	if cap(resp.body.B) > size {
		resp.closeBodyStream()
		// Drop the buffer without pooling it, so GC can reclaim it.
		resp.body = nil
	}
}
// ReleaseBody retires the request body if it is greater than "size" bytes.
//
// This permits GC to reclaim the large buffer. If used, must be before
// ReleaseRequest.
//
// Use this method only if you really understand how it works.
// The majority of workloads don't need this method.
func (req *Request) ReleaseBody(size int) {
	// Guard against a nil body buffer (never allocated, or already
	// released) — cap(req.body.B) would panic otherwise.
	if req.body == nil {
		return
	}
	if cap(req.body.B) > size {
		req.closeBodyStream()
		// Drop the buffer without pooling it, so GC can reclaim it.
		req.body = nil
	}
}
// SwapBody swaps response body with the given body and returns
// the previous response body.
//
// It is forbidden to use the body passed to SwapBody after
// the function returns.
func (resp *Response) SwapBody(body []byte) []byte {
	bb := resp.bodyBuffer()
	if resp.bodyStream != nil {
		// Materialize the stream into the buffer first so the old body
		// can be handed back as a byte slice.
		bb.Reset()
		_, err := copyZeroAlloc(bb, resp.bodyStream)
		resp.closeBodyStream()
		if err != nil {
			// On read failure the error text becomes the old body.
			bb.Reset()
			bb.SetString(err.Error())
		}
	}
	// Exchange the underlying slices without copying.
	oldBody := bb.B
	bb.B = body
	return oldBody
}
// SwapBody swaps request body with the given body and returns
// the previous request body.
//
// It is forbidden to use the body passed to SwapBody after
// the function returns.
func (req *Request) SwapBody(body []byte) []byte {
	bb := req.bodyBuffer()
	if req.bodyStream != nil {
		// Materialize the stream into the buffer first so the old body
		// can be handed back as a byte slice.
		bb.Reset()
		_, err := copyZeroAlloc(bb, req.bodyStream)
		req.closeBodyStream()
		if err != nil {
			// On read failure the error text becomes the old body.
			bb.Reset()
			bb.SetString(err.Error())
		}
	}
	// Exchange the underlying slices without copying.
	oldBody := bb.B
	bb.B = body
	return oldBody
}
// Body returns request body.
func (req *Request) Body() []byte {
	if req.bodyStream != nil {
		// Drain the body stream into the in-memory buffer so the full
		// body can be returned as a byte slice.
		bodyBuf := req.bodyBuffer()
		bodyBuf.Reset()
		_, err := copyZeroAlloc(bodyBuf, req.bodyStream)
		req.closeBodyStream()
		if err != nil {
			// On read failure the error text becomes the body.
			bodyBuf.SetString(err.Error())
		}
	} else if req.onlyMultipartForm() {
		// No raw body bytes — serialize the parsed multipart form instead.
		body, err := marshalMultipartForm(req.multipartForm, req.multipartFormBoundary)
		if err != nil {
			return []byte(err.Error())
		}
		return body
	}
	return req.bodyBytes()
}
// AppendBody appends p to request body.
//
// It is safe re-using p after the function returns.
func (req *Request) AppendBody(p []byte) {
	// b2s avoids a copy; AppendBodyString copies the data into the buffer.
	req.AppendBodyString(b2s(p))
}
// AppendBodyString appends s to request body.
func (req *Request) AppendBodyString(s string) {
	// A buffered body replaces any multipart form or body stream.
	req.RemoveMultipartFormFiles()
	req.closeBodyStream()
	req.bodyBuffer().WriteString(s)
}
// SetBody sets request body.
//
// It is safe re-using body argument after the function returns.
func (req *Request) SetBody(body []byte) {
	// b2s avoids a copy; SetBodyString copies the data into the buffer.
	req.SetBodyString(b2s(body))
}
// SetBodyString sets request body.
func (req *Request) SetBodyString(body string) {
	// A buffered body replaces any multipart form or body stream.
	req.RemoveMultipartFormFiles()
	req.closeBodyStream()
	req.bodyBuffer().SetString(body)
}
// ResetBody resets request body.
func (req *Request) ResetBody() {
	req.RemoveMultipartFormFiles()
	req.closeBodyStream()
	if req.body != nil {
		if req.keepBodyBuffer {
			// Keep the buffer attached for reuse; just clear its contents.
			req.body.Reset()
		} else {
			// Return the buffer to the pool and detach it.
			requestBodyPool.Put(req.body)
			req.body = nil
		}
	}
}
// CopyTo copies req contents to dst except of body stream.
func (req *Request) CopyTo(dst *Request) {
	req.copyToSkipBody(dst)
	if req.body != nil {
		// Deep-copy the body bytes into dst's own buffer.
		dst.bodyBuffer().Set(req.body.B)
	} else if dst.body != nil {
		dst.body.Reset()
	}
}
// copyToSkipBody copies everything except the body: headers, URI,
// POST args, parse flags and TLS state.
func (req *Request) copyToSkipBody(dst *Request) {
	dst.Reset()
	req.Header.CopyTo(&dst.Header)
	req.uri.CopyTo(&dst.uri)
	dst.parsedURI = req.parsedURI
	req.postArgs.CopyTo(&dst.postArgs)
	dst.parsedPostArgs = req.parsedPostArgs
	dst.isTLS = req.isTLS
	// do not copy multipartForm - it will be automatically
	// re-created on the first call to MultipartForm.
}
// CopyTo copies resp contents to dst except of body stream.
func (resp *Response) CopyTo(dst *Response) {
	resp.copyToSkipBody(dst)
	if resp.body != nil {
		// Deep-copy the body bytes into dst's own buffer.
		dst.bodyBuffer().Set(resp.body.B)
	} else if dst.body != nil {
		dst.body.Reset()
	}
}
// copyToSkipBody copies the header and SkipBody flag only.
func (resp *Response) copyToSkipBody(dst *Response) {
	dst.Reset()
	resp.Header.CopyTo(&dst.Header)
	dst.SkipBody = resp.SkipBody
}
// swapRequestBody exchanges body buffers and streams between two requests.
func swapRequestBody(a, b *Request) {
	a.body, b.body = b.body, a.body
	a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
}
// swapResponseBody exchanges body buffers and streams between two responses.
func swapResponseBody(a, b *Response) {
	a.body, b.body = b.body, a.body
	a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
}
// URI returns request URI
func (req *Request) URI() *URI {
	// Parsing is lazy and cached; repeated calls are cheap.
	req.parseURI()
	return &req.uri
}
// parseURI parses the request URI from the header exactly once.
func (req *Request) parseURI() {
	if req.parsedURI {
		return
	}
	req.parsedURI = true
	req.uri.parseQuick(req.Header.RequestURI(), &req.Header, req.isTLS)
}
// PostArgs returns POST arguments.
func (req *Request) PostArgs() *Args {
	// Parsing is lazy and cached; repeated calls are cheap.
	req.parsePostArgs()
	return &req.postArgs
}
// parsePostArgs parses urlencoded POST arguments from the body exactly
// once. Bodies with other content types are left unparsed.
func (req *Request) parsePostArgs() {
	if req.parsedPostArgs {
		return
	}
	req.parsedPostArgs = true
	if !bytes.HasPrefix(req.Header.ContentType(), strPostArgsContentType) {
		return
	}
	req.postArgs.ParseBytes(req.bodyBytes())
}
// ErrNoMultipartForm means that the request's Content-Type
// isn't 'multipart/form-data'.
var ErrNoMultipartForm = errors.New("request has no multipart/form-data Content-Type")
// MultipartForm returns request's multipart form.
//
// Returns ErrNoMultipartForm if request's Content-Type
// isn't 'multipart/form-data'.
//
// RemoveMultipartFormFiles must be called after returned multipart form
// is processed.
func (req *Request) MultipartForm() (*multipart.Form, error) {
	// Cached after the first successful parse.
	if req.multipartForm != nil {
		return req.multipartForm, nil
	}
	req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
	if len(req.multipartFormBoundary) == 0 {
		return nil, ErrNoMultipartForm
	}
	ce := req.Header.peek(strContentEncoding)
	body := req.bodyBytes()
	if bytes.Equal(ce, strGzip) {
		// Do not care about memory usage here.
		var err error
		if body, err = AppendGunzipBytes(nil, body); err != nil {
			return nil, fmt.Errorf("cannot gunzip request body: %s", err)
		}
	} else if len(ce) > 0 {
		// Only gzip (or identity) encoded multipart bodies are supported.
		return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce)
	}
	f, err := readMultipartForm(bytes.NewReader(body), req.multipartFormBoundary, len(body), len(body))
	if err != nil {
		return nil, err
	}
	req.multipartForm = f
	return f, nil
}
// marshalMultipartForm serializes f with the given boundary into a
// byte slice.
func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) {
	var buf ByteBuffer
	if err := WriteMultipartForm(&buf, f, boundary); err != nil {
		return nil, err
	}
	return buf.B, nil
}
// WriteMultipartForm writes the given multipart form f with the given
// boundary to w.
func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error {
	// Do not care about memory allocations here, since multipart
	// form processing is slooow.
	if len(boundary) == 0 {
		panic("BUG: form boundary cannot be empty")
	}
	mw := multipart.NewWriter(w)
	if err := mw.SetBoundary(boundary); err != nil {
		return fmt.Errorf("cannot use form boundary %q: %s", boundary, err)
	}
	// marshal values
	for k, vv := range f.Value {
		for _, v := range vv {
			if err := mw.WriteField(k, v); err != nil {
				return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err)
			}
		}
	}
	// marshal files
	for k, fvv := range f.File {
		for _, fv := range fvv {
			vw, err := mw.CreateFormFile(k, fv.Filename)
			if err != nil {
				return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err)
			}
			// Each file part is opened, copied and closed before moving on,
			// so at most one file handle is held at a time.
			fh, err := fv.Open()
			if err != nil {
				return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err)
			}
			if _, err = copyZeroAlloc(vw, fh); err != nil {
				return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err)
			}
			if err = fh.Close(); err != nil {
				return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err)
			}
		}
	}
	// Close writes the trailing boundary.
	if err := mw.Close(); err != nil {
		return fmt.Errorf("error when closing multipart form writer: %s", err)
	}
	return nil
}
// readMultipartForm parses a multipart form of exactly `size` bytes from r.
// File parts larger than maxInMemoryFileSize are spilled to temporary
// files by multipart.Reader.ReadForm.
func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize int) (*multipart.Form, error) {
	// Do not care about memory allocations here, since they are tiny
	// compared to multipart data (aka multi-MB files) usually sent
	// in multipart/form-data requests.
	if size <= 0 {
		panic(fmt.Sprintf("BUG: form size must be greater than 0. Given %d", size))
	}
	// Limit reading so a malformed stream cannot consume more than size bytes.
	lr := io.LimitReader(r, int64(size))
	mr := multipart.NewReader(lr, boundary)
	f, err := mr.ReadForm(int64(maxInMemoryFileSize))
	if err != nil {
		return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err)
	}
	return f, nil
}
// Reset clears request contents.
func (req *Request) Reset() {
	req.Header.Reset()
	req.resetSkipHeader()
}
// resetSkipHeader clears everything except the header: body, URI,
// POST args and TLS flag.
func (req *Request) resetSkipHeader() {
	req.ResetBody()
	req.uri.Reset()
	req.parsedURI = false
	req.postArgs.Reset()
	req.parsedPostArgs = false
	req.isTLS = false
}
// RemoveMultipartFormFiles removes multipart/form-data temporary files
// associated with the request.
func (req *Request) RemoveMultipartFormFiles() {
	if req.multipartForm != nil {
		// Do not check for error, since these files may be deleted or moved
		// to new places by user code.
		req.multipartForm.RemoveAll()
		req.multipartForm = nil
	}
	req.multipartFormBoundary = ""
}
// Reset clears response contents.
func (resp *Response) Reset() {
	resp.Header.Reset()
	resp.resetSkipHeader()
	resp.SkipBody = false
}
// resetSkipHeader clears the response body only.
func (resp *Response) resetSkipHeader() {
	resp.ResetBody()
}
// Read reads request (including body) from the given r.
//
// RemoveMultipartFormFiles or Reset must be called after
// reading multipart/form-data request in order to delete temporarily
// uploaded files.
//
// If MayContinue returns true, the caller must:
//
//     - Either send StatusExpectationFailed response if request headers don't
//       satisfy the caller.
//     - Or send StatusContinue response before reading request body
//       with ContinueReadBody.
//     - Or close the connection.
//
// io.EOF is returned if r is closed before reading the first header byte.
func (req *Request) Read(r *bufio.Reader) error {
	// 0 means "no body size limit".
	return req.ReadLimitBody(r, 0)
}
// Maximum number of bytes of a multipart form file kept in memory;
// larger files are streamed to temporary files.
const defaultMaxInMemoryFileSize = 16 * 1024 * 1024
// errGetOnly is returned by readLimitBody when getOnly is set and the
// incoming request isn't a GET.
var errGetOnly = errors.New("non-GET request received")
// ReadLimitBody reads request from the given r, limiting the body size.
//
// If maxBodySize > 0 and the body size exceeds maxBodySize,
// then ErrBodyTooLarge is returned.
//
// RemoveMultipartFormFiles or Reset must be called after
// reading multipart/form-data request in order to delete temporarily
// uploaded files.
//
// If MayContinue returns true, the caller must:
//
//     - Either send StatusExpectationFailed response if request headers don't
//       satisfy the caller.
//     - Or send StatusContinue response before reading request body
//       with ContinueReadBody.
//     - Or close the connection.
//
// io.EOF is returned if r is closed before reading the first header byte.
func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
	req.resetSkipHeader()
	return req.readLimitBody(r, maxBodySize, false)
}
// readLimitBody reads header and (optionally) body. With getOnly set,
// non-GET requests are rejected with errGetOnly.
func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error {
	// Do not reset the request here - the caller must reset it before
	// calling this method.
	err := req.Header.Read(r)
	if err != nil {
		return err
	}
	if getOnly && !req.Header.IsGet() {
		return errGetOnly
	}
	if req.Header.noBody() {
		return nil
	}
	if req.MayContinue() {
		// 'Expect: 100-continue' header found. Let the caller deciding
		// whether to read request body or
		// to return StatusExpectationFailed.
		return nil
	}
	return req.ContinueReadBody(r, maxBodySize)
}
// MayContinue returns true if the request contains
// 'Expect: 100-continue' header.
//
// The caller must do one of the following actions if MayContinue returns true:
//
//     - Either send StatusExpectationFailed response if request headers don't
//       satisfy the caller.
//     - Or send StatusContinue response before reading request body
//       with ContinueReadBody.
//     - Or close the connection.
func (req *Request) MayContinue() bool {
	return bytes.Equal(req.Header.peek(strExpect), str100Continue)
}
// ContinueReadBody reads request body if request header contains
// 'Expect: 100-continue'.
//
// The caller must send StatusContinue response before calling this method.
//
// If maxBodySize > 0 and the body size exceeds maxBodySize,
// then ErrBodyTooLarge is returned.
func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error {
	var err error
	contentLength := req.Header.ContentLength()
	if contentLength > 0 {
		if maxBodySize > 0 && contentLength > maxBodySize {
			return ErrBodyTooLarge
		}
		// Pre-read multipart form data of known length.
		// This way we limit memory usage for large file uploads, since their contents
		// is streamed into temporary files if file size exceeds defaultMaxInMemoryFileSize.
		req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
		if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 {
			req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize)
			if err != nil {
				req.Reset()
			}
			return err
		}
	}
	if contentLength == -2 {
		// identity body has no sense for http requests, since
		// the end of body is determined by connection close.
		// So just ignore request body for requests without
		// 'Content-Length' and 'Transfer-Encoding' headers.
		req.Header.SetContentLength(0)
		return nil
	}
	// Read a fixed-size (contentLength >= 0) or chunked (-1) body into
	// the in-memory buffer.
	bodyBuf := req.bodyBuffer()
	bodyBuf.Reset()
	bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B)
	if err != nil {
		req.Reset()
		return err
	}
	// Content-Length is updated to the actual number of bytes read
	// (relevant for chunked bodies).
	req.Header.SetContentLength(len(bodyBuf.B))
	return nil
}
// Read reads response (including body) from the given r.
//
// io.EOF is returned if r is closed before reading the first header byte.
func (resp *Response) Read(r *bufio.Reader) error {
	// 0 means "no body size limit".
	return resp.ReadLimitBody(r, 0)
}
// ReadLimitBody reads response from the given r, limiting the body size.
//
// If maxBodySize > 0 and the body size exceeds maxBodySize,
// then ErrBodyTooLarge is returned.
//
// io.EOF is returned if r is closed before reading the first header byte.
func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
	resp.resetSkipHeader()
	err := resp.Header.Read(r)
	if err != nil {
		return err
	}
	if resp.Header.StatusCode() == StatusContinue {
		// Read the next response according to http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html .
		if err = resp.Header.Read(r); err != nil {
			return err
		}
	}
	if !resp.mustSkipBody() {
		bodyBuf := resp.bodyBuffer()
		bodyBuf.Reset()
		bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B)
		if err != nil {
			resp.Reset()
			return err
		}
		// Content-Length is updated to the actual number of bytes read
		// (relevant for chunked bodies).
		resp.Header.SetContentLength(len(bodyBuf.B))
	}
	return nil
}
// mustSkipBody reports whether the body should not be read/written,
// either by user request (SkipBody) or per the status code rules
// encoded in the header.
func (resp *Response) mustSkipBody() bool {
	return resp.SkipBody || resp.Header.mustSkipContentLength()
}
// errRequestHostRequired is returned by Request.Write when neither the
// Host header nor the URI provide a host.
var errRequestHostRequired = errors.New("missing required Host header in request")
// WriteTo writes request to w. It implements io.WriterTo.
func (req *Request) WriteTo(w io.Writer) (int64, error) {
	return writeBufio(req, w)
}
// WriteTo writes response to w. It implements io.WriterTo.
func (resp *Response) WriteTo(w io.Writer) (int64, error) {
	return writeBufio(resp, w)
}
// writeBufio serializes hw into w through a pooled bufio.Writer and
// returns the number of bytes actually written to w.
func writeBufio(hw httpWriter, w io.Writer) (int64, error) {
	// The stats writer counts bytes reaching w, including those written
	// during the final Flush.
	sw := acquireStatsWriter(w)
	bw := acquireBufioWriter(sw)
	err1 := hw.Write(bw)
	err2 := bw.Flush()
	releaseBufioWriter(bw)
	n := sw.bytesWritten
	releaseStatsWriter(sw)
	// The write error takes priority over the flush error.
	err := err1
	if err == nil {
		err = err2
	}
	return n, err
}
// statsWriter wraps an io.Writer and counts the bytes passed through.
type statsWriter struct {
	w            io.Writer
	bytesWritten int64
}
// Write forwards p to the wrapped writer and accumulates the byte count.
func (w *statsWriter) Write(p []byte) (int, error) {
	n, err := w.w.Write(p)
	w.bytesWritten += int64(n)
	return n, err
}
// acquireStatsWriter returns a pooled statsWriter wrapping w.
func acquireStatsWriter(w io.Writer) *statsWriter {
	v := statsWriterPool.Get()
	if v == nil {
		return &statsWriter{
			w: w,
		}
	}
	sw := v.(*statsWriter)
	sw.w = w
	return sw
}
// releaseStatsWriter clears sw and returns it to the pool.
func releaseStatsWriter(sw *statsWriter) {
	sw.w = nil
	sw.bytesWritten = 0
	statsWriterPool.Put(sw)
}
var statsWriterPool sync.Pool
// acquireBufioWriter returns a pooled bufio.Writer targeting w.
func acquireBufioWriter(w io.Writer) *bufio.Writer {
	v := bufioWriterPool.Get()
	if v == nil {
		return bufio.NewWriter(w)
	}
	bw := v.(*bufio.Writer)
	bw.Reset(w)
	return bw
}
// releaseBufioWriter returns bw to the pool. The caller must have
// flushed it already.
func releaseBufioWriter(bw *bufio.Writer) {
	bufioWriterPool.Put(bw)
}
var bufioWriterPool sync.Pool
// onlyMultipartForm reports whether the request body exists solely as a
// parsed multipart form (no raw body bytes buffered).
func (req *Request) onlyMultipartForm() bool {
	return req.multipartForm != nil && (req.body == nil || len(req.body.B) == 0)
}
// Write writes request to w.
//
// Write doesn't flush request to w for performance reasons.
//
// See also WriteTo.
func (req *Request) Write(w *bufio.Writer) error {
	if len(req.Header.Host()) == 0 || req.parsedURI {
		// Derive Host and request URI from the parsed URI when the Host
		// header is missing or the URI may have been modified.
		uri := req.URI()
		host := uri.Host()
		if len(host) == 0 {
			return errRequestHostRequired
		}
		req.Header.SetHostBytes(host)
		req.Header.SetRequestURIBytes(uri.RequestURI())
	}
	if req.bodyStream != nil {
		return req.writeBodyStream(w)
	}
	body := req.bodyBytes()
	var err error
	if req.onlyMultipartForm() {
		// Serialize the multipart form into the body and advertise its
		// boundary in the Content-Type header.
		body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary)
		if err != nil {
			return fmt.Errorf("error when marshaling multipart form: %s", err)
		}
		req.Header.SetMultipartFormBoundary(req.multipartFormBoundary)
	}
	hasBody := !req.Header.noBody()
	if hasBody {
		req.Header.SetContentLength(len(body))
	}
	if err = req.Header.Write(w); err != nil {
		return err
	}
	if hasBody {
		_, err = w.Write(body)
	} else if len(body) > 0 {
		return fmt.Errorf("non-zero body for non-POST request. body=%q", body)
	}
	return err
}
// WriteGzip writes response with gzipped body to w.
//
// The method gzips response body and sets 'Content-Encoding: gzip'
// header before writing response to w.
//
// WriteGzip doesn't flush response to w for performance reasons.
func (resp *Response) WriteGzip(w *bufio.Writer) error {
	return resp.WriteGzipLevel(w, CompressDefaultCompression)
}
// WriteGzipLevel writes response with gzipped body to w.
//
// Level is the desired compression level:
//
//     * CompressNoCompression
//     * CompressBestSpeed
//     * CompressBestCompression
//     * CompressDefaultCompression
//     * CompressHuffmanOnly
//
// The method gzips response body and sets 'Content-Encoding: gzip'
// header before writing response to w.
//
// WriteGzipLevel doesn't flush response to w for performance reasons.
func (resp *Response) WriteGzipLevel(w *bufio.Writer, level int) error {
	// Compress (in place) first; then write normally.
	if err := resp.gzipBody(level); err != nil {
		return err
	}
	return resp.Write(w)
}
// WriteDeflate writes response with deflated body to w.
//
// The method deflates response body and sets 'Content-Encoding: deflate'
// header before writing response to w.
//
// WriteDeflate doesn't flush response to w for performance reasons.
func (resp *Response) WriteDeflate(w *bufio.Writer) error {
	return resp.WriteDeflateLevel(w, CompressDefaultCompression)
}
// WriteDeflateLevel writes response with deflated body to w.
//
// Level is the desired compression level:
//
//     * CompressNoCompression
//     * CompressBestSpeed
//     * CompressBestCompression
//     * CompressDefaultCompression
//     * CompressHuffmanOnly
//
// The method deflates response body and sets 'Content-Encoding: deflate'
// header before writing response to w.
//
// WriteDeflateLevel doesn't flush response to w for performance reasons.
func (resp *Response) WriteDeflateLevel(w *bufio.Writer, level int) error {
	// Compress (in place) first; then write normally.
	if err := resp.deflateBody(level); err != nil {
		return err
	}
	return resp.Write(w)
}
// gzipBody compresses the response body with gzip at the given level and
// sets 'Content-Encoding: gzip'. Already-encoded, non-compressible and
// tiny bodies are left untouched.
func (resp *Response) gzipBody(level int) error {
	if len(resp.Header.peek(strContentEncoding)) > 0 {
		// It looks like the body is already compressed.
		// Do not compress it again.
		return nil
	}
	if !resp.Header.isCompressibleContentType() {
		// The content-type cannot be compressed.
		return nil
	}
	if resp.bodyStream != nil {
		// Reset Content-Length to -1, since it is impossible
		// to determine body size beforehand of streamed compression.
		// For https://github.com/valyala/fasthttp/issues/176 .
		resp.Header.SetContentLength(-1)
		// Do not care about memory allocations here, since gzip is slow
		// and allocates a lot of memory by itself.
		bs := resp.bodyStream
		// Wrap the original stream in a new stream that gzips on the fly.
		resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
			zw := acquireStacklessGzipWriter(sw, level)
			fw := &flushWriter{
				wf: zw,
				bw: sw,
			}
			copyZeroAlloc(fw, bs)
			releaseStacklessGzipWriter(zw, level)
			if bsc, ok := bs.(io.Closer); ok {
				bsc.Close()
			}
		})
	} else {
		bodyBytes := resp.bodyBytes()
		if len(bodyBytes) < minCompressLen {
			// There is no sense in spending CPU time on small body compression,
			// since there is a very high probability that the compressed
			// body size will be bigger than the original body size.
			return nil
		}
		w := responseBodyPool.Get()
		w.B = AppendGzipBytesLevel(w.B, bodyBytes, level)
		// Hack: swap resp.body with w.
		if resp.body != nil {
			responseBodyPool.Put(resp.body)
		}
		resp.body = w
	}
	resp.Header.SetCanonical(strContentEncoding, strGzip)
	return nil
}
// deflateBody compresses the response body with deflate at the given
// level and sets 'Content-Encoding: deflate'. Already-encoded,
// non-compressible and tiny bodies are left untouched.
func (resp *Response) deflateBody(level int) error {
	if len(resp.Header.peek(strContentEncoding)) > 0 {
		// It looks like the body is already compressed.
		// Do not compress it again.
		return nil
	}
	if !resp.Header.isCompressibleContentType() {
		// The content-type cannot be compressed.
		return nil
	}
	if resp.bodyStream != nil {
		// Reset Content-Length to -1, since it is impossible
		// to determine body size beforehand of streamed compression.
		// For https://github.com/valyala/fasthttp/issues/176 .
		resp.Header.SetContentLength(-1)
		// Do not care about memory allocations here, since flate is slow
		// and allocates a lot of memory by itself.
		bs := resp.bodyStream
		// Wrap the original stream in a new stream that deflates on the fly.
		resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
			zw := acquireStacklessDeflateWriter(sw, level)
			fw := &flushWriter{
				wf: zw,
				bw: sw,
			}
			copyZeroAlloc(fw, bs)
			releaseStacklessDeflateWriter(zw, level)
			if bsc, ok := bs.(io.Closer); ok {
				bsc.Close()
			}
		})
	} else {
		bodyBytes := resp.bodyBytes()
		if len(bodyBytes) < minCompressLen {
			// There is no sense in spending CPU time on small body compression,
			// since there is a very high probability that the compressed
			// body size will be bigger than the original body size.
			return nil
		}
		w := responseBodyPool.Get()
		w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level)
		// Hack: swap resp.body with w.
		if resp.body != nil {
			responseBodyPool.Put(resp.body)
		}
		resp.body = w
	}
	resp.Header.SetCanonical(strContentEncoding, strDeflate)
	return nil
}
// Bodies with sizes smaller than minCompressLen aren't compressed at all
const minCompressLen = 200
// writeFlusher is the subset of a compressing writer used by flushWriter:
// buffered writes plus an explicit flush.
type writeFlusher interface {
	io.Writer
	Flush() error
}
// flushWriter chains a compressing writer (wf) with the underlying
// bufio.Writer (bw), flushing both after every write so streamed
// compressed data is pushed out promptly.
type flushWriter struct {
	wf writeFlusher
	bw *bufio.Writer
}
// Write passes p through the compressing writer and flushes both the
// compressor and the underlying bufio.Writer, so each chunk of streamed
// data reaches the destination promptly.
func (w *flushWriter) Write(p []byte) (int, error) {
	n, err := w.wf.Write(p)
	if err != nil {
		// Per the io.Writer contract, report the number of bytes the
		// underlying writer actually consumed alongside the error
		// (the original returned 0 here, under-reporting progress).
		return n, err
	}
	if err = w.wf.Flush(); err != nil {
		return n, err
	}
	if err = w.bw.Flush(); err != nil {
		return n, err
	}
	return n, nil
}
// Write writes response to w.
//
// Write doesn't flush response to w for performance reasons.
//
// See also WriteTo.
func (resp *Response) Write(w *bufio.Writer) error {
	sendBody := !resp.mustSkipBody()
	if resp.bodyStream != nil {
		return resp.writeBodyStream(w, sendBody)
	}
	body := resp.bodyBytes()
	bodyLen := len(body)
	if sendBody || bodyLen > 0 {
		resp.Header.SetContentLength(bodyLen)
	}
	if err := resp.Header.Write(w); err != nil {
		return err
	}
	if sendBody {
		// Body bytes are written only when the response is allowed to
		// carry a body; the header is always written.
		if _, err := w.Write(body); err != nil {
			return err
		}
	}
	return nil
}
// writeBodyStream writes the request header followed by the streamed
// body. Known-length bodies are written as-is; unknown-length bodies are
// written chunked. The body stream is always closed afterwards.
func (req *Request) writeBodyStream(w *bufio.Writer) error {
	var err error
	contentLength := req.Header.ContentLength()
	if contentLength < 0 {
		// Try to recover a concrete length from an io.LimitedReader.
		lrSize := limitedReaderSize(req.bodyStream)
		if lrSize >= 0 {
			contentLength = int(lrSize)
			if int64(contentLength) != lrSize {
				// Length overflows int — treat it as unknown.
				contentLength = -1
			}
			if contentLength >= 0 {
				req.Header.SetContentLength(contentLength)
			}
		}
	}
	if contentLength >= 0 {
		if err = req.Header.Write(w); err == nil {
			err = writeBodyFixedSize(w, req.bodyStream, int64(contentLength))
		}
	} else {
		// Unknown length: advertise -1 and use chunked transfer-encoding.
		req.Header.SetContentLength(-1)
		if err = req.Header.Write(w); err == nil {
			err = writeBodyChunked(w, req.bodyStream)
		}
	}
	// Close the stream regardless of write success; the write error wins.
	err1 := req.closeBodyStream()
	if err == nil {
		err = err1
	}
	return err
}
// writeBodyStream writes the response header followed by the streamed
// body (skipped when sendBody is false). Known-length bodies are written
// as-is; unknown-length bodies are written chunked. The body stream is
// always closed afterwards.
func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) error {
	var err error
	contentLength := resp.Header.ContentLength()
	if contentLength < 0 {
		// Try to recover a concrete length from an io.LimitedReader.
		lrSize := limitedReaderSize(resp.bodyStream)
		if lrSize >= 0 {
			contentLength = int(lrSize)
			if int64(contentLength) != lrSize {
				// Length overflows int — treat it as unknown.
				contentLength = -1
			}
			if contentLength >= 0 {
				resp.Header.SetContentLength(contentLength)
			}
		}
	}
	if contentLength >= 0 {
		if err = resp.Header.Write(w); err == nil && sendBody {
			err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength))
		}
	} else {
		// Unknown length: advertise -1 and use chunked transfer-encoding.
		resp.Header.SetContentLength(-1)
		if err = resp.Header.Write(w); err == nil && sendBody {
			err = writeBodyChunked(w, resp.bodyStream)
		}
	}
	// Close the stream regardless of write success; the write error wins.
	err1 := resp.closeBodyStream()
	if err == nil {
		err = err1
	}
	return err
}
// closeBodyStream detaches the request body stream, closing it first if
// it implements io.Closer. It is a no-op when no stream is set.
func (req *Request) closeBodyStream() error {
	if req.bodyStream == nil {
		return nil
	}
	var err error
	if bsc, ok := req.bodyStream.(io.Closer); ok {
		err = bsc.Close()
	}
	req.bodyStream = nil
	return err
}
// closeBodyStream detaches the response body stream, closing it first if
// it implements io.Closer. It is a no-op when no stream is set.
func (resp *Response) closeBodyStream() error {
	if resp.bodyStream == nil {
		return nil
	}
	var err error
	if bsc, ok := resp.bodyStream.(io.Closer); ok {
		err = bsc.Close()
	}
	resp.bodyStream = nil
	return err
}
// String returns request representation.
//
// Returns error message instead of request representation on error.
//
// Use Write instead of String for performance-critical code.
func (req *Request) String() string {
	return getHTTPString(req)
}
// String returns response representation.
//
// Returns error message instead of response representation on error.
//
// Use Write instead of String for performance-critical code.
func (resp *Response) String() string {
	return getHTTPString(resp)
}
// getHTTPString serializes hw into a string using a pooled buffer.
// On serialization failure the error text is returned instead.
func getHTTPString(hw httpWriter) string {
	w := AcquireByteBuffer()
	bw := bufio.NewWriter(w)
	if err := hw.Write(bw); err != nil {
		return err.Error()
	}
	if err := bw.Flush(); err != nil {
		return err.Error()
	}
	// Copy out before releasing the pooled buffer.
	s := string(w.B)
	ReleaseByteBuffer(w)
	return s
}
// httpWriter is the common serialization interface of Request and Response.
type httpWriter interface {
	Write(w *bufio.Writer) error
}
// writeBodyChunked copies r to w using HTTP chunked transfer-encoding,
// terminating with a zero-length chunk on io.EOF.
func writeBodyChunked(w *bufio.Writer, r io.Reader) error {
	vbuf := copyBufPool.Get()
	buf := vbuf.([]byte)
	var err error
	var n int
	for {
		n, err = r.Read(buf)
		if n == 0 {
			if err == nil {
				panic("BUG: io.Reader returned 0, nil")
			}
			if err == io.EOF {
				// Write the terminating zero-length chunk.
				if err = writeChunk(w, buf[:0]); err != nil {
					break
				}
				err = nil
			}
			break
		}
		if err = writeChunk(w, buf[:n]); err != nil {
			break
		}
	}
	copyBufPool.Put(vbuf)
	return err
}
func limitedReaderSize(r io.Reader) int64 {
lr, ok := r.(*io.LimitedReader)
if !ok {
return -1
}
return lr.N
}
// writeBodyFixedSize copies exactly size bytes from r to w and fails if
// fewer bytes are available. Large bodies trigger a flush first so the
// OS sendfile fast path can be used.
func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error {
	if size > maxSmallFileSize {
		// w buffer must be empty for triggering
		// sendfile path in bufio.Writer.ReadFrom.
		if err := w.Flush(); err != nil {
			return err
		}
	}
	// Unwrap a single limited reader for triggering sendfile path
	// in net.TCPConn.ReadFrom.
	lr, ok := r.(*io.LimitedReader)
	if ok {
		r = lr.R
	}
	n, err := copyZeroAlloc(w, r)
	if ok {
		// Keep the unwrapped LimitedReader's accounting consistent.
		lr.N -= n
	}
	if n != size && err == nil {
		err = fmt.Errorf("copied %d bytes from body stream instead of %d bytes", n, size)
	}
	return err
}
// copyZeroAlloc copies r to w without allocating a fresh copy buffer,
// borrowing a scratch buffer from copyBufPool for the duration of the copy.
func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) {
	vbuf := copyBufPool.Get()
	defer copyBufPool.Put(vbuf)
	return io.CopyBuffer(w, r, vbuf.([]byte))
}
// copyBufPool recycles 4KB scratch buffers used for body copying
// (see copyZeroAlloc and writeBodyChunked) to avoid per-call allocations.
var copyBufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 4096)
	},
}
// writeChunk writes a single HTTP/1.1 chunk ("<hex-size>\r\n<data>\r\n")
// to w and flushes it so the peer receives the chunk immediately.
//
// Intermediate Write errors are safe to ignore here: bufio.Writer latches
// the first error it sees and returns it from every subsequent Write/Flush,
// so checking the final Write and the Flush is sufficient.
func writeChunk(w *bufio.Writer, b []byte) error {
	n := len(b)
	writeHexInt(w, n)
	w.Write(strCRLF)
	w.Write(b)
	_, err := w.Write(strCRLF)
	err1 := w.Flush()
	if err == nil {
		err = err1
	}
	return err
}
// ErrBodyTooLarge is returned if either request or response body exceeds
// the given limit. See readBody and its helpers (readBodyIdentity,
// appendBodyFixedSize, readBodyChunked) for where the limit is enforced.
var ErrBodyTooLarge = errors.New("body size exceeds the given limit")
// readBody reads a request/response body into dst according to
// contentLength: a non-negative value selects a fixed-size read, -1
// selects chunked transfer decoding, and any other negative value selects
// identity (read-until-close). ErrBodyTooLarge is returned when
// maxBodySize > 0 and the body exceeds it.
func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) {
	dst = dst[:0]
	switch {
	case contentLength >= 0:
		if maxBodySize > 0 && contentLength > maxBodySize {
			return dst, ErrBodyTooLarge
		}
		return appendBodyFixedSize(r, dst, contentLength)
	case contentLength == -1:
		return readBodyChunked(r, maxBodySize, dst)
	default:
		return readBodyIdentity(r, maxBodySize, dst)
	}
}
// readBodyIdentity reads an identity-encoded body (delimited by EOF /
// connection close) into dst, growing the buffer geometrically as needed.
// When maxBodySize > 0 and more than maxBodySize bytes arrive,
// ErrBodyTooLarge is returned along with the bytes read so far.
func readBodyIdentity(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) {
	// Reuse dst's full capacity; start at 1KB when it has none.
	dst = dst[:cap(dst)]
	if len(dst) == 0 {
		dst = make([]byte, 1024)
	}
	offset := 0
	for {
		nn, err := r.Read(dst[offset:])
		if nn <= 0 {
			if err != nil {
				if err == io.EOF {
					// EOF terminates an identity body normally.
					return dst[:offset], nil
				}
				return dst[:offset], err
			}
			panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn))
		}
		offset += nn
		if maxBodySize > 0 && offset > maxBodySize {
			return dst[:offset], ErrBodyTooLarge
		}
		if len(dst) == offset {
			// Buffer full: grow to the next power of two, capped just
			// above maxBodySize so the limit check above still triggers.
			n := round2(2 * offset)
			if maxBodySize > 0 && n > maxBodySize {
				n = maxBodySize + 1
			}
			b := make([]byte, n)
			copy(b, dst)
			dst = b
		}
	}
}
// appendBodyFixedSize reads exactly n bytes from r and appends them to
// dst, growing dst's capacity (rounded to a power of two) when required.
// A premature EOF is converted to io.ErrUnexpectedEOF; on any error the
// buffer is returned truncated to the bytes successfully read.
func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) {
	if n == 0 {
		return dst, nil
	}

	offset := len(dst)
	dstLen := offset + n
	if cap(dst) < dstLen {
		b := make([]byte, round2(dstLen))
		copy(b, dst)
		dst = b
	}
	dst = dst[:dstLen]

	for {
		nn, err := r.Read(dst[offset:])
		if nn <= 0 {
			if err != nil {
				if err == io.EOF {
					// Fewer than n bytes available: the body was cut short.
					err = io.ErrUnexpectedEOF
				}
				return dst[:offset], err
			}
			panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn))
		}
		offset += nn
		if offset == dstLen {
			return dst, nil
		}
	}
}
// readBodyChunked decodes an HTTP/1.1 chunked body from r into dst,
// stopping after the terminating zero-length chunk. Each chunk's trailing
// CRLF is read along with the data and then trimmed off. ErrBodyTooLarge
// is returned when maxBodySize > 0 and the accumulated body would exceed it.
func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) {
	// The CRLF-trimming below assumes chunks land at the end of dst,
	// so a pre-filled buffer is a caller bug.
	if len(dst) > 0 {
		panic("BUG: expected zero-length buffer")
	}

	strCRLFLen := len(strCRLF)
	for {
		chunkSize, err := parseChunkSize(r)
		if err != nil {
			return dst, err
		}
		if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize {
			return dst, ErrBodyTooLarge
		}
		// Read the chunk data plus its trailing CRLF in one shot.
		dst, err = appendBodyFixedSize(r, dst, chunkSize+strCRLFLen)
		if err != nil {
			return dst, err
		}
		if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) {
			return dst, fmt.Errorf("cannot find crlf at the end of chunk")
		}
		dst = dst[:len(dst)-strCRLFLen]
		if chunkSize == 0 {
			// Zero-length chunk marks the end of the body.
			return dst, nil
		}
	}
}
// parseChunkSize reads one chunk-size line ("<hex-size>\r\n") from r and
// returns the decoded size. It returns -1 and a descriptive error when the
// hex number cannot be read or the line is not CRLF-terminated.
func parseChunkSize(r *bufio.Reader) (int, error) {
	n, err := readHexInt(r)
	if err != nil {
		return -1, err
	}
	c, err := r.ReadByte()
	if err != nil {
		// NOTE: the escapes are doubled ('\\r', '\\n') so the error text
		// contains the two characters \r rather than a raw control
		// character, which the original format strings embedded by mistake.
		return -1, fmt.Errorf("cannot read '\\r' char at the end of chunk size: %s", err)
	}
	if c != '\r' {
		return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r')
	}
	c, err = r.ReadByte()
	if err != nil {
		return -1, fmt.Errorf("cannot read '\\n' char at the end of chunk size: %s", err)
	}
	if c != '\n' {
		return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\n')
	}
	return n, nil
}
// round2 rounds n up to the nearest power of two.
// Non-positive input yields 0; a power of two is returned unchanged.
func round2(n int) int {
	if n <= 0 {
		return 0
	}
	// Count how many bits are needed to represent n-1; shifting 1 by that
	// amount gives the smallest power of two >= n.
	n--
	shift := uint(0)
	for ; n > 0; n >>= 1 {
		shift++
	}
	return 1 << shift
}
| {
"pile_set_name": "Github"
} |
// generated by stringer -output vt_string.go -type VT; DO NOT EDIT
package ole
import "fmt"
// Concatenated VT_* constant names, split into runs of contiguous numeric
// VT values; the matching _VT_index_N tables below hold the byte offsets
// of each name within its run's string. Generated by stringer; DO NOT EDIT.
const (
	_VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL"
	_VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR"
	_VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR"
	_VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID"
	_VT_name_4 = "VT_BSTR_BLOBVT_VECTOR"
	_VT_name_5 = "VT_ARRAY"
	_VT_name_6 = "VT_BYREF"
	_VT_name_7 = "VT_RESERVED"
	_VT_name_8 = "VT_ILLEGAL"
)

// Offset tables into the _VT_name_N strings above: entry i..i+1 delimits
// the i-th name of the corresponding run. Generated by stringer; DO NOT EDIT.
var (
	_VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110}
	_VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122}
	_VT_index_2 = [...]uint8{0, 9, 19, 30}
	_VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98}
	_VT_index_4 = [...]uint8{0, 12, 21}
	_VT_index_5 = [...]uint8{0, 8}
	_VT_index_6 = [...]uint8{0, 8}
	_VT_index_7 = [...]uint8{0, 11}
	_VT_index_8 = [...]uint8{0, 10}
)
// String returns the VT_* constant name for i, or "VT(n)" for values
// outside the known ranges. Each case rebases i to the start of its run
// before indexing the generated name/offset tables.
// Generated by stringer; DO NOT EDIT.
func (i VT) String() string {
	switch {
	case 0 <= i && i <= 14:
		return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]]
	case 16 <= i && i <= 31:
		i -= 16
		return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]]
	case 36 <= i && i <= 38:
		i -= 36
		return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]]
	case 64 <= i && i <= 72:
		i -= 64
		return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]]
	case 4095 <= i && i <= 4096:
		i -= 4095
		return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]]
	case i == 8192:
		return _VT_name_5
	case i == 16384:
		return _VT_name_6
	case i == 32768:
		return _VT_name_7
	case i == 65535:
		return _VT_name_8
	default:
		return fmt.Sprintf("VT(%d)", i)
	}
}
| {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
require 'singleton'
# Maps ActivityPub-addressable records (accounts, statuses, emojis) to the
# URLs/URIs used in federation payloads, and resolves such URIs back to
# local database records.
class ActivityPub::TagManager
  include Singleton
  include RoutingHelper

  # JSON-LD context identifier for ActivityStreams documents.
  CONTEXT = 'https://www.w3.org/ns/activitystreams'

  # Well-known ActivityPub collection URIs used for addressing.
  COLLECTIONS = {
    public: 'https://www.w3.org/ns/activitystreams#Public',
  }.freeze

  # Human-facing URL for a record. Remote records keep their stored URL;
  # local accounts and statuses are routed to their web pages here.
  # Returns nil for targets with no known object_type.
  def url_for(target)
    return target.url if target.respond_to?(:local?) && !target.local?

    return unless target.respond_to?(:object_type)

    case target.object_type
    when :person
      target.instance_actor? ? about_more_url(instance_actor: true) : short_account_url(target)
    when :note, :comment, :activity
      # Reblogs are addressed via the wrapping Announce activity URL.
      return activity_account_status_url(target.account, target) if target.reblog?

      short_account_status_url(target.account, target)
    end
  end

  # Canonical ActivityPub ID (URI) for a record. Remote records keep their
  # stored URI; local records are routed to their ActivityPub endpoints.
  def uri_for(target)
    return target.uri if target.respond_to?(:local?) && !target.local?

    case target.object_type
    when :person
      target.instance_actor? ? instance_actor_url : account_url(target)
    when :note, :comment, :activity
      # Reblogs are identified by the wrapping Announce activity URI.
      return activity_account_status_url(target.account, target) if target.reblog?

      account_status_url(target.account, target)
    when :emoji
      emoji_url(target)
    end
  end

  # Mints a fresh random URI under /payloads for transient payloads.
  def generate_uri_for(_target)
    URI.join(root_url, 'payloads', SecureRandom.uuid)
  end

  # URI of the activity wrapping a local status; raises for anything that
  # is not a local note/comment/activity.
  def activity_uri_for(target)
    raise ArgumentError, 'target must be a local activity' unless %i(note comment activity).include?(target.object_type) && target.local?

    activity_account_status_url(target.account, target)
  end

  # URI of the replies collection of a local status, with optional paging
  # parameters; raises for anything that is not a local note/comment/activity.
  def replies_uri_for(target, page_params = nil)
    raise ArgumentError, 'target must be a local activity' unless %i(note comment activity).include?(target.object_type) && target.local?

    account_status_replies_url(target.account, target, page_params)
  end

  # Primary audience of a status
  # Public statuses go out to primarily the public collection
  # Unlisted and private statuses go out primarily to the followers collection
  # Others go out only to the people they mention
  def to(status)
    case status.visibility
    when 'public'
      [COLLECTIONS[:public]]
    when 'unlisted', 'private'
      [account_followers_url(status.account)]
    when 'direct', 'limited'
      if status.account.silenced?
        # Only notify followers if the account is locally silenced
        account_ids = status.active_mentions.pluck(:account_id)
        to = status.account.followers.where(id: account_ids).each_with_object([]) do |account, result|
          result << uri_for(account)
          result << account_followers_url(account) if account.group?
        end
        to.concat(FollowRequest.where(target_account_id: status.account_id, account_id: account_ids).each_with_object([]) do |request, result|
          result << uri_for(request.account)
          result << account_followers_url(request.account) if request.account.group?
        end)
      else
        status.active_mentions.each_with_object([]) do |mention, result|
          result << uri_for(mention.account)
          # Group accounts relay to their own followers as well.
          result << account_followers_url(mention.account) if mention.account.group?
        end
      end
    end
  end

  # Secondary audience of a status
  # Public statuses go out to followers as well
  # Unlisted statuses go to the public as well
  # Both of those and private statuses also go to the people mentioned in them
  # Direct ones don't have a secondary audience
  def cc(status)
    cc = []

    # The boosted author is always CC'd so they learn of the reblog.
    cc << uri_for(status.reblog.account) if status.reblog?

    case status.visibility
    when 'public'
      cc << account_followers_url(status.account)
    when 'unlisted'
      cc << COLLECTIONS[:public]
    end

    unless status.direct_visibility? || status.limited_visibility?
      if status.account.silenced?
        # Only notify followers if the account is locally silenced
        account_ids = status.active_mentions.pluck(:account_id)
        cc.concat(status.account.followers.where(id: account_ids).each_with_object([]) do |account, result|
          result << uri_for(account)
          result << account_followers_url(account) if account.group?
        end)
        cc.concat(FollowRequest.where(target_account_id: status.account_id, account_id: account_ids).each_with_object([]) do |request, result|
          result << uri_for(request.account)
          result << account_followers_url(request.account) if request.account.group?
        end)
      else
        cc.concat(status.active_mentions.each_with_object([]) do |mention, result|
          result << uri_for(mention.account)
          result << account_followers_url(mention.account) if mention.account.group?
        end)
      end
    end

    cc
  end

  # True when the URI points at this installation (matches the local domain
  # or web domain, including an explicit port).
  def local_uri?(uri)
    return false if uri.nil?

    uri  = Addressable::URI.parse(uri)
    host = uri.normalized_host
    host = "#{host}:#{uri.port}" if uri.port

    !host.nil? && (::TagManager.instance.local_domain?(host) || ::TagManager.instance.web_domain?(host))
  end

  # Extracts a route parameter (default :id) from a local URI by running it
  # through the Rails router.
  def uri_to_local_id(uri, param = :id)
    path_params = Rails.application.routes.recognize_path(uri)
    path_params[:username] = Rails.configuration.x.local_domain if path_params[:controller] == 'instance_actors'
    path_params[param]
  end

  # Resolves a URI to a record of the given class: local URIs go through
  # the router, OStatus tag URIs through the legacy tag scheme, and
  # anything else is looked up by its stored uri column.
  # Returns nil when nothing matches.
  def uri_to_resource(uri, klass)
    return if uri.nil?

    if local_uri?(uri)
      case klass.name
      when 'Account'
        klass.find_local(uri_to_local_id(uri, :username))
      else
        StatusFinder.new(uri).status
      end
    elsif OStatus::TagManager.instance.local_id?(uri)
      klass.find_by(id: OStatus::TagManager.instance.unique_tag_to_local_id(uri, klass.to_s))
    else
      # Fragments are stripped so e.g. "...#updates/1" matches the base URI.
      klass.find_by(uri: uri.split('#').first)
    end
  rescue ActiveRecord::RecordNotFound
    nil
  end
end
| {
"pile_set_name": "Github"
} |
<?php
/**
* ProcessWire Fuel
*
* Fuel maintains a single instance each of multiple objects used throughout the application.
* The objects contained in fuel provide access to the ProcessWire API. For instance, $pages,
* $users, $fields, and so on. The fuel is required to keep the system running, so to speak.
*
* ProcessWire 2.x
* Copyright (C) 2015 by Ryan Cramer
* This file licensed under Mozilla Public License v2.0 http://mozilla.org/MPL/2.0/
*
* https://processwire.com
*
* @property ProcessWire $wire
* @property Database $db
* @property WireDatabasePDO $database
* @property Session $session
* @property Notices $notices
* @property Sanitizer $sanitizer
* @property Fields $fields
* @property Fieldtypes $fieldtypes
* @property Fieldgroups $fieldgroups
* @property Templates $templates
* @property Pages $pages
* @property Page $page
* @property Process $process
* @property Modules $modules
* @property Permissions $permissions
* @property Roles $roles
* @property Users $users
* @property User $user
* @property WireCache $cache
* @property WireInput $input
* @property Languages $languages If LanguageSupport installed
* @property Config $config
* @property Fuel $fuel
*
*/
class Fuel implements IteratorAggregate {

	/** @var array Stored API variables, indexed by name. */
	protected $data = array();

	/** @var array Map of locked API variable names (name => true). */
	protected $lock = array();

	/**
	 * Store an API variable, optionally locking it against later changes.
	 *
	 * Setting a locked variable to the exact same value is a no-op and
	 * does not throw.
	 *
	 * @param string $key API variable name to set - should be valid PHP variable name.
	 * @param object|mixed $value Value for the API variable.
	 * @param bool $lock Whether to prevent this API variable from being overwritten in the future.
	 * @return $this
	 * @throws WireException When attempting to change a previously locked API variable.
	 */
	public function set($key, $value, $lock = false) {
		$isLocked = isset($this->lock[$key]);
		if($isLocked && $value !== $this->data[$key]) {
			throw new WireException("API variable '$key' is locked and may not be set again");
		}
		$this->data[$key] = $value;
		if($lock) $this->lock[$key] = true;
		return $this;
	}

	/**
	 * Remove an API variable (and its lock flag) from the Fuel.
	 *
	 * @param string $key Name of the API variable to remove.
	 * @return bool Returns true on success, false if it did not exist.
	 */
	public function remove($key) {
		if(!isset($this->data[$key])) return false;
		unset($this->data[$key], $this->lock[$key]);
		return true;
	}

	/**
	 * Retrieve an API variable by name, or null when not present.
	 */
	public function __get($key) {
		if(!isset($this->data[$key])) return null;
		return $this->data[$key];
	}

	/**
	 * Allow iteration over all API variables (IteratorAggregate).
	 */
	public function getIterator() {
		return new ArrayObject($this->data);
	}

	/**
	 * Return all API variables as an associative array.
	 */
	public function getArray() {
		return $this->data;
	}
}
| {
"pile_set_name": "Github"
} |
#ifndef __PERF_QUOTE_H
#define __PERF_QUOTE_H

#include <stddef.h>
#include <stdio.h>

/* Help to copy the thing properly quoted for the shell safety.
 * any single quote is replaced with '\'', any exclamation point
 * is replaced with '\!', and the whole thing is enclosed in a
 * single quote pair.
 *
 * For example, if you are passing the result to system() as an
 * argument:
 *
 * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
 *
 * would be appropriate. If the system() is going to call ssh to
 * run the command on the other side:
 *
 * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
 * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd));
 *
 * Note that the above examples leak memory! Remember to free result from
 * sq_quote() in a real application.
 */

/* Appends the shell-quoted form of each argv element to the strbuf,
 * up to maxlen total length (NOTE(review): exact maxlen semantics are
 * defined by the implementation in quote.c — confirm there). */
extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);

#endif /* __PERF_QUOTE_H */
| {
"pile_set_name": "Github"
} |
---
Description: Gives an overview of the changes to the Windows Parental Controls introduced in Windows 7.
ms.assetid: 5723fddd-52e2-46a1-a48f-647d479b21d9
title: What's New in Windows 7 Parental Controls
ms.topic: article
ms.date: 05/31/2018
---
# What's New in Windows 7 Parental Controls
## Overview of Parental Controls Changes for Windows 7
The purpose of this document is to give an overview of the changes to the Windows Parental Controls introduced in Windows 7 and to enable third-party parental control solution providers to take advantage of these changes. This document assumes readers' familiarity with Parental Controls for Windows Vista and will only reflect changes made to this functionality in Windows 7 that are relevant for third-party parental control solutions development. A full update of MSDN Windows Parental Control documentation will follow at a later date.
## Key Design Decisions for Windows 7 Parental Control Changes
Changes to Parental Controls introduced in Windows 7 continue the overarching goal of promoting third-party parental control solutions' coexistence with the in-box functionality. The changes are:
- Removal of web filtering and activity reporting from the in-box parental controls functionality. The in-box parental controls provide core offline Microsoft-implemented restrictions such as time limits, application restrictions, and game restrictions. The web filtering, activity reporting, and other functionality can be provided by Microsoft or third-party parental control solutions. For example, Windows Live Family Safety solution provides web filtering, remote management, and activity monitoring, as well as contact management for all Windows Live applications.
- Enabling third-party solutions to replace the in-box provider's configuration user interface while still relying on the in-box implementation of time, application, and game restrictions.
- Enabling third-party solutions to be discovered and enabled on the computer by a parent or guardian (administrator account).
## Parental Controls Top-level User Interface Changes in Windows 7
Windows 7 brings the following changes to the Parental Controls Control Panel top-level user interface:
- The Additional controls section is introduced where controls that provide additional functionality such as web filtering, activity reporting, and so on, can be selected from a drop down list box. Microsoft or third-party providers need to register their solutions with Windows 7 Parental Controls for them to be selectable from the Additional controls drop down list box. For information about registering a solution, see Provider Registration, later in this topic).
- The logo image of the currently selected provider is displayed in the upper-right corner of the page.
- The managed user tiles can display a summary of the parental settings provided by the currently selected provider.
The currently selected provider might choose to use its own user interface for User Control screens for the managed users, or it might select to rely on the in-box WPC implementation of this screen. In-box implementation has the following changes made to its elements:
- The activity reporting section is removed.
- The link to view activity reports is removed.
## Parental Controls API Overview: Windows 7 Changes
The integration mechanism for third-party solution providers was expanded to allow:
- Provider registration. Upon registration, a provider becomes selectable in the Additional controls drop-down list box on the Parental Controls Control Panel screen.
- Querying for the currently selected provider. A public COM interface is exposed to enable this functionality.
- Also new is the set of COM interfaces to be implemented by the providers to allow:
- Enabling or disabling of the provider by WPC upon user selection of additional controls.
- WPC to pass control to the provider to configure managed user's parental control settings.
- WPC to query the provider for the summary of managed user's parental control settings.
## Third-party Provider Integration
### Provider Registration
To register a new provider with Parental Controls, a registry value must be written to the Providers key of Windows Parental Controls. The value name is a unique GUID that is used to identify the provider. The value data will be a path to a registry key in **HKEY\_LOCAL\_MACHINE** that contains provider information.
```
HKEY_LOCAL_MACHINE
SOFTWARE
Microsoft
Windows
CurrentVersion
Parental Controls
Providers
{45D63315-0824-4df4-B8A4-EF137D8810D1} = SOFTWARE\Microsoft\Family Safety\WPC\
```
At the registry key location specified, the following values are expected.
| Term | Description |
|----------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <span id="LogoImage"></span><span id="logoimage"></span><span id="LOGOIMAGE"></span>**LogoImage**<br/> | A fully qualified path to a resource binary with a negative resource ID for the provider logo image (stored as an **IMAGE\_BITMAP**).<br/> |
| <span id="DisplayName"></span><span id="displayname"></span><span id="DISPLAYNAME"></span>**DisplayName**<br/> | A fully qualified path to a resource binary with a negative resource ID for the provider name. **DisplayName** length should not exceed 50 characters.<br/> |
| <span id="Description"></span><span id="description"></span><span id="DESCRIPTION"></span>**Description**<br/> | A fully qualified path to a resource binary with a negative resource ID for the provider description. The description length should not exceed 200 characters.<br/> |
| <span id="StateCLSID"></span><span id="stateclsid"></span><span id="STATECLSID"></span>**StateCLSID**<br/> | The class ID of the provider's class which implements IWPCProviderState.<br/> |
| <span id="ConfigCLSID"></span><span id="configclsid"></span><span id="CONFIGCLSID"></span>**ConfigCLSID**<br/> | The class ID of the provider's class, which implements IWPCProviderConfig. **StateCLSID** and **ConfigCLSID** can be the same.<br/> |
| <span id="GRSVisible"></span><span id="grsvisible"></span><span id="GRSVISIBLE"></span>**GRSVisible**<br/> | An optional **DWORD** nonzero value that specifies that Windows Parental Controls displays a link to the Game Rating System screen after a provider is selected as the new current provider.<br/> |
```
HKEY_LOCAL_MACHINE
SOFTWARE
Microsoft
Family Safety
WPC
LogoImage = C:\Program Files\Windows Live\Family Safety\fssui.rll,-40001
DisplayName = C:\Program Files\Windows Live\Family Safety\fssui.rll,-40002
Description = C:\Program Files\Windows Live\Family Safety\fssui.rll,-40003
StateCLSID = {B4BAAE4D-3D86-4fa9-86F0-CF82C94D8A6A}
ConfigCLSID = {B4BAAE4D-3D86-4fa9-86F0-CF82C94D8A6A}
GRSVisible = 0x00000001 (1)
```
The Parental Controls Control Panel uses the **LogoImage**, **DisplayName**, and **Description** to change the main page of the Parental Controls Control Panel when that provider is selected. The **StateCLSID** value is used when the provider is enabled or disabled. The **ConfigCLSID** value is used when the user interface gets dynamic information about each user (this is only the case if the provider is currently selected).
| {
"pile_set_name": "Github"
} |
{
"name": "torrentz",
"displayName": "Torrentz.eu Search",
"examples": ["search torrentz for godzilla", "torrentz batman"],
"creator_name": "arnomoonens",
"creator_url": "https://github.com/arnomoonens",
"categories": ["Media", "Search"]
} | {
"pile_set_name": "Github"
} |
/***************************************************************************
* ROM Properties Page shell extension. (libromdata) *
* xbox360_xdbf_structs.h: Microsoft Xbox 360 game resource structures. *
* *
* Copyright (c) 2019-2020 by David Korth. *
* SPDX-License-Identifier: GPL-2.0-or-later *
***************************************************************************/
#ifndef __ROMPROPERTIES_LIBROMDATA_CONSOLE_XBOX360_XDBF_STRUCTS_H__
#define __ROMPROPERTIES_LIBROMDATA_CONSOLE_XBOX360_XDBF_STRUCTS_H__
#include <stdint.h>
#include "common.h"
#ifdef __cplusplus
extern "C" {
#endif
// NOTE: Entries begin after all headers:
// - XDBF_Header
// - XDBF_Entry * entry_table_length
// - XDBG_Free_Space_Entry * free_space_table_length
/**
* Microsoft Xbox 360 XDBF header.
* References:
* - https://github.com/xenia-project/xenia/blob/HEAD/src/xenia/kernel/util/xdbf_utils.h
* - https://github.com/xenia-project/xenia/blob/HEAD/src/xenia/kernel/util/xdbf_utils.cc
* - https://free60project.github.io/wiki/XDBF.html
* - https://free60project.github.io/wiki/GPD.html
* - https://free60project.github.io/wiki/SPA.html
*
* All fields are in big-endian.
*/
#define XDBF_MAGIC 'XDBF'
#define XDBF_VERSION 0x10000
typedef struct _XDBF_Header {
uint32_t magic; // [0x000] 'XDBF'
uint32_t version; // [0x004] Version (0x10000)
uint32_t entry_table_length; // [0x008] Entry table length, in number of entries
uint32_t entry_count; // [0x00C] Entry count (# of used entries)
uint32_t free_space_table_length; // [0x010] Free space table length, in number of entries
uint32_t free_space_table_count; // [0x014] Free space table entry count (# of used entries)
} XDBF_Header;
ASSERT_STRUCT(XDBF_Header, 6*sizeof(uint32_t));
// Title resource ID.
// This resource ID contains the game title in each language-specific string table.
// (Namespace XDBF_SPA_NAMESPACE_STRING, ID from XDBF_Language_e)
// It's also used for the dashboard icon. (Namespace XDBF_SPA_IMAGE)
// For the game's default language, see the 'XSTC' block.
#define XDBF_ID_TITLE 0x8000
/**
* XDBF entry
* All fields are in big-endian.
*/
#pragma pack(1)
typedef struct PACKED _XDBF_Entry {
uint16_t namespace_id; // [0x000] See XDBF_Namespace_e
uint64_t resource_id; // [0x002] ID
uint32_t offset; // [0x00A] Offset specifier
uint32_t length; // [0x00E] Length
} XDBF_Entry;
ASSERT_STRUCT(XDBF_Entry, 18);
#pragma pack()
/**
* XDBG free space table entry
* All fields are in big-endian.
*/
typedef struct _XDBF_Free_Space_Entry {
uint32_t offset; // [0x000] Offset specifier
uint32_t length; // [0x004] Length
} XDBF_Free_Space_Entry;
ASSERT_STRUCT(XDBF_Free_Space_Entry, 2*sizeof(uint32_t));
/**
* XDBF: Namespace IDs
*/
typedef enum {
/** SPA (XEX XDBF) **/
XDBF_SPA_NAMESPACE_METADATA = 1, // Metadata
XDBF_SPA_NAMESPACE_IMAGE = 2, // Image (usually PNG format)
XDBF_SPA_NAMESPACE_STRING_TABLE = 3, // String table (ID == XDBF_Language_e)
} XDBF_Namespace_e;
/**
* XSTC: Default language block.
* Namespace ID: XDBF_NAMESPACE_METADATA
* ID: XDBF_XSTC_MAGIC
* All fields are in big-endian.
*/
#define XDBF_XSTC_MAGIC 'XSTC'
#define XDBF_XSTC_VERSION 1
typedef struct _XDBF_XSTC {
uint32_t magic; // [0x000] 'XSTC'
uint32_t version; // [0x004] Version (1)
uint32_t size; // [0x008] sizeof(XDBF_XSTC) - sizeof(uint32_t)
uint32_t default_language; // [0x00C] See XDBF_Language_e
} XDBF_XSTC;
ASSERT_STRUCT(XDBF_XSTC, 4*sizeof(uint32_t));
/**
* XDBF: Language IDs
*/
typedef enum {
XDBF_LANGUAGE_UNKNOWN = 0,
XDBF_LANGUAGE_ENGLISH = 1,
XDBF_LANGUAGE_JAPANESE = 2,
XDBF_LANGUAGE_GERMAN = 3,
XDBF_LANGUAGE_FRENCH = 4,
XDBF_LANGUAGE_SPANISH = 5,
XDBF_LANGUAGE_ITALIAN = 6,
XDBF_LANGUAGE_KOREAN = 7,
XDBF_LANGUAGE_CHINESE_TRAD = 8, // Traditional Chinese
XDBF_LANGUAGE_PORTUGUESE = 9,
XDBF_LANGUAGE_CHINESE_SIMP = 10, // Simplified Chinese
XDBF_LANGUAGE_POLISH = 11,
XDBF_LANGUAGE_RUSSIAN = 12,
XDBF_LANGUAGE_MAX
} XDBF_Language_e;
/** String tables **/
/** NOTE: String tables are encoded using UTF-8. **/
/**
* XDBF: String table header
* Namespace ID: XDBF_NAMESPACE_STRING_TABLE
* ID: See XDBF_Language_e
* All fields are in big-endian.
*/
#define XDBF_XSTR_MAGIC 'XSTR'
#define XDBF_XSTR_VERSION 1
#pragma pack(1)
typedef struct PACKED _XDBF_XSTR_Header {
uint32_t magic; // [0x000] 'XSTR'
uint32_t version; // [0x004] Version (1)
uint32_t size; // [0x008] Size
uint16_t string_count; // [0x00C] String count
} XDBF_XSTR_Header;
ASSERT_STRUCT(XDBF_XSTR_Header, 14);
#pragma pack()
/**
* XDBF: String table entry header
* All fields are in big-endian.
*/
typedef struct _XDBF_XSTR_Entry_Header {
uint16_t string_id; // [0x000] ID
uint16_t length; // [0x002] String length (NOT NULL-terminated)
} XDBF_XSTR_Entry_Header;
ASSERT_STRUCT(XDBF_XSTR_Entry_Header, 2*sizeof(uint16_t));
/**
* XDBF: Title ID
* Contains two characters and a 16-bit number.
* NOTE: Struct positioning only works with the original BE32 value.
* TODO: Combine with XEX2 version.
*/
typedef union _XDBF_Title_ID {
struct {
char c[2];
uint16_t u16;
};
uint32_t u32;
} XDBF_Title_ID;
ASSERT_STRUCT(XDBF_Title_ID, sizeof(uint32_t));
/**
 * XDBF: XACH - Achievements table
 * All fields are in big-endian.
 */
#define XDBF_XACH_MAGIC 'XACH'
#define XDBF_XACH_VERSION 1
#pragma pack(1)
typedef struct PACKED _XDBF_XACH_Header {
	uint32_t magic;		// [0x000] 'XACH'
	uint32_t version;	// [0x004] Version (1)
	uint32_t size;		// [0x008] Structure size, minus magic
	uint16_t xach_count;	// [0x00C] Achievement count.
				// NOTE: Should be compared to structure size
				// and XDBF table entry.

	// Following XDBF_XACH_Header are xach_count instances
	// of XDBF_XACH_Entry.
} XDBF_XACH_Header;
ASSERT_STRUCT(XDBF_XACH_Header, 14);
#pragma pack()
/**
* XDBF: XACH - Achievements table entry
* All fields are in big-endian.
*/
typedef struct _XDBF_XACH_Entry {
uint16_t achievement_id; // [0x000] Achievement ID
uint16_t name_id; // [0x002] Name ID (string table)
uint16_t unlocked_desc_id; // [0x004] Unlocked description ID (string table)
uint16_t locked_desc_id; // [0x006] Locked description ID (string table)
uint32_t image_id; // [0x008] Image ID
uint16_t gamerscore; // [0x00C] Gamerscore
uint16_t unknown1; // [0x00E]
uint32_t flags; // [0x010] Flags (??)
uint32_t unknown2[4]; // [0x014]
} XDBF_XACH_Entry;
ASSERT_STRUCT(XDBF_XACH_Entry, 0x24);
/**
* XDBF: XTHD - contains title information
* All fields are in big-endian.
*/
#define XDBF_XTHD_MAGIC 'XTHD'
#define XDBF_XTHD_VERSION 1
typedef struct _XDBF_XTHD {
uint32_t magic; // [0x000] 'XTHD'
uint32_t version; // [0x004] Version (1)
uint32_t size; // [0x008] Size (might be 0?)
XDBF_Title_ID title_id; // [0x00C] Title ID
uint32_t title_type; // [0x010] Type (See XDBF_Title_Type_e)
struct {
uint16_t major;
uint16_t minor;
uint16_t build;
uint16_t revision;
} title_version; // [0x014] Title version
uint32_t unknown[4]; // [0x01C]
} XDBF_XTHD;
ASSERT_STRUCT(XDBF_XTHD, 0x2C);
/**
* XDBF: Title type
*/
typedef enum {
XDBF_TITLE_TYPE_SYSTEM = 0, // System title
XDBF_TITLE_TYPE_FULL = 1, // Full retail game
XDBF_TITLE_TYPE_DEMO = 2, // Demo
XDBF_TITLE_TYPE_DOWNLOAD = 3, // Download game (XBLA, etc)
} XDBF_Title_Type_e;
/**
* XDBF: XGAA - Avatar awards
* All fields are in big-endian.
*/
#define XDBF_XGAA_MAGIC 'XGAA'
#define XDBF_XGAA_VERSION 1
#pragma pack(1)
typedef struct PACKED _XDBF_XGAA_Header {
uint32_t magic; // [0x000] 'XGAA'
uint32_t version; // [0x004] Version (1)
uint32_t size; // [0x008] Size (must be at least 14)
uint16_t xgaa_count; // [0x00C] Number of avatar awards
// Following XDBF_XGAA_Header are xgaa_count instances
// of XDBF_XGAA_Entry.
} XDBF_XGAA_Header;
ASSERT_STRUCT(XDBF_XGAA_Header, 14);
#pragma pack()
/**
* XDBF: XGAA - Avatar award entry
* All fields are in big-endian.
*/
typedef struct _XDBF_XGAA_Entry {
uint32_t unk_0x000; // [0x000] ???
uint16_t avatar_award_id; // [0x004] Avatar award ID
uint16_t unk_0x006; // [0x006] ???
uint8_t unk_0x008[4]; // [0x008] ???
XDBF_Title_ID title_id; // [0x00C] Title ID
uint16_t name_id; // [0x010] Name ID (string table)
uint16_t unlocked_desc_id; // [0x012] Unlocked description ID (string table)
uint16_t locked_desc_id; // [0x014] Locked description ID (string table)
uint16_t unk_0x016; // [0x016] ???
uint32_t image_id; // [0x018] Image ID
uint8_t unk_0x01C[8]; // [0x01C] ???
} XDBF_XGAA_Entry;
ASSERT_STRUCT(XDBF_XGAA_Entry, 36);
/**
* XDBF: XSRC - xlast XML data
*
* Contains a gzipped UTF-16LE translation file, which can be
* used to get things like developer, publisher, genre, and
* description.
*
* All fields are in big-endian.
*/
#define XDBF_XSRC_MAGIC 'XSRC'
#define XDBF_XSRC_VERSION 1
typedef struct _XDBF_XSRC_Header {
uint32_t magic; // [0x000] 'XSRC'
uint32_t version; // [0x004] Version (1)
uint32_t size; // [0x008] Size of entire struct, including gzipped data.
uint32_t filename_len; // [0x00C] Length of the original filename.
// Following this header is the original filename,
// then XDBF_XSRC_Header2.
} XDBF_XSRC_Header;
ASSERT_STRUCT(XDBF_XSRC_Header, 4*sizeof(uint32_t));
/**
* XDBF: XSRC - second header, stored after the filename.
* All fields are in big-endian.
*/
typedef struct _XDBF_XSRC_Header2 {
uint32_t uncompressed_size; // [0x000] Uncompressed data size
uint32_t compressed_size; // [0x004] Compressed data size
} XDBF_XSRC_Header2;
ASSERT_STRUCT(XDBF_XSRC_Header2, 2*sizeof(uint32_t));
#ifdef __cplusplus
}
#endif
#endif /* __ROMPROPERTIES_LIBROMDATA_CONSOLE_XBOX360_XDBF_STRUCTS_H__ */
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2017 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.grpc.shared;
import java.io.IOException;
import com.google.common.io.Resources;
import com.google.protobuf.Empty;
import com.linecorp.armeria.grpc.GithubApi.SearchResponse;
import com.linecorp.armeria.grpc.GithubServiceGrpc.GithubServiceImplBase;
import io.grpc.stub.StreamObserver;
/**
 * The {@link GithubApiService} mocks the GitHub API by always sending the same response.
 */
public class GithubApiService extends GithubServiceImplBase {

    /** Canned response streamed by every {@link #simple} call. */
    public static final SearchResponse SEARCH_RESPONSE;

    static {
        try {
            // Parse the canned search response once, at class-load time, from
            // the binary protobuf bundled on the classpath.
            SEARCH_RESPONSE = SearchResponse.parseFrom(
                    Resources.toByteArray(Resources.getResource("github_search_response.binarypb")));
        } catch (IOException e) {
            // Without the resource the mock is useless, so fail class loading.
            throw new Error("Could not read proto.", e);
        }
    }

    /** Streams the canned {@link #SEARCH_RESPONSE} and completes; the request is ignored. */
    @Override
    public void simple(SearchResponse request, StreamObserver<SearchResponse> responseObserver) {
        responseObserver.onNext(SEARCH_RESPONSE);
        responseObserver.onCompleted();
    }

    /** Replies with an empty message and completes; the request is ignored. */
    @Override
    public void empty(Empty request, StreamObserver<Empty> responseObserver) {
        responseObserver.onNext(Empty.getDefaultInstance());
        responseObserver.onCompleted();
    }
}
| {
"pile_set_name": "Github"
} |
var baseUnset = require('./_baseUnset');
/**
* Removes the property at `path` of `object`.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to unset.
* @returns {boolean} Returns `true` if the property is deleted, else `false`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 7 } }] };
* _.unset(object, 'a[0].b.c');
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*
* _.unset(object, ['a', '0', 'b', 'c']);
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*/
function unset(object, path) {
  // A nullish object has nothing to remove; lodash treats that as success.
  if (object == null) {
    return true;
  }
  return baseUnset(object, path);
}

module.exports = unset;
| {
"pile_set_name": "Github"
} |
#ifndef ASSEMBLY_DBG_H
#define ASSEMBLY_DBG_H 1
#include "config.h"
#include "Assembly/Options.h"
#include "Common/Log.h"
#include "Common/MemoryUtil.h"
#include "Common/Options.h"
#include "Common/StringUtil.h" // for toSI
#include "Common/Timer.h"
#include "Graph/Properties.h"
#include <boost/graph/graph_traits.hpp>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iomanip>
#include <sstream>
#include <utility>
using boost::graph_traits;
/** A hash table mapping vertices to vertex properties. */
/** A hash table mapping vertices (k-mers) to vertex properties.
 * Only one strand of each k-mer is stored; lookups transparently try the
 * reverse complement unless single-strand (opt::ss) assembly is enabled.
 */
class SequenceCollectionHash
{
public:
	typedef SequenceDataHash::key_type key_type;
	typedef SequenceDataHash::mapped_type mapped_type;
	typedef SequenceDataHash::value_type value_type;
	typedef SequenceDataHash::iterator iterator;
	typedef SequenceDataHash::const_iterator const_iterator;
	typedef mapped_type::Symbol Symbol;
	typedef mapped_type::SymbolSet SymbolSet;
	typedef mapped_type::SymbolSetPair SymbolSetPair;
	// Boost.Graph-style descriptors for the graph adaptor below.
	typedef key_type vertex_descriptor;
	typedef mapped_type vertex_bundled;
	typedef std::pair<key_type, key_type> edge_descriptor;

	/** Remove the specified sequence if it exists.
	 * Removal is lazy: the entry is only flagged; cleanup() erases it. */
	void remove(const key_type& seq)
	{
		setFlag(seq, SF_DELETE);
	}

	/** Shrink the hash table. */
	void shrink() {
		// rehash(0) requests the smallest table that fits the current size.
		m_data.rehash(0);
		printLoad();
	}

	/** Return the data associated with the specified key.
	 * If the key was found as its reverse complement, the returned
	 * properties are complemented to match the requested orientation.
	 * Note: returns by value (a const-qualified copy), not by reference. */
	const mapped_type operator[](const key_type& key) const
	{
		bool rc;
		const_iterator it = find(key, rc);
		assert(it != m_data.end());
		return rc ? ~it->second : it->second;
	}

	iterator begin() { return m_data.begin(); }
	const_iterator begin() const { return m_data.begin(); }
	iterator end() { return m_data.end(); }
	const_iterator end() const { return m_data.end(); }

	/** Return true if this collection is empty. */
	bool empty() const { return m_data.empty(); }

	/** Return the number of sequences in this collection. */
	size_t size() const { return m_data.size(); }

	// Not a network sequence collection. Nothing to do.
	size_t pumpNetwork() { return 0; }

	/** The observer callback function. */
	typedef void (*SeqObserver)(SequenceCollectionHash* c,
			const value_type& seq);

	/** Attach the specified observer. Only one observer is supported. */
	void attach(SeqObserver f)
	{
		assert(m_seqObserver == NULL);
		m_seqObserver = f;
	}

	/** Detach the specified observer. */
	void detach(SeqObserver f)
	{
		assert(m_seqObserver == f);
		(void)f;
		m_seqObserver = NULL;
	}

	bool isAdjacencyLoaded() const { return m_adjacencyLoaded; }

	SequenceCollectionHash()
		: m_seqObserver(NULL), m_adjacencyLoaded(false)
	{
#if HAVE_GOOGLE_SPARSE_HASH_MAP
		// sparse_hash_set uses 2.67 bits per element on a 64-bit
		// architecture and 2 bits per element on a 32-bit architecture.
		// The number of elements is rounded up to a power of two.
		if (opt::rank >= 0) {
			// Initialize sparsehash size to 2^30 (~1 billion) empty buckets.
			// Setting the initial sparsehash size to a large
			// number avoids the prohibitively slow step of resizing
			// the hash table and rehashing *every* element when the
			// maximum load factor is exceeded.
			//
			// The initial memory footprint per CPU core is
			// 2^30 * 2.67 / 8 ~= 0.358 GB. The value of 2^30 buckets
			// was chosen to accomodate a k=144 32-thread assembly of human
			// (NA24143) uncorrected Illumina reads with ~70X coverage and
			// 20,317,980,431 distinct 144-mers, without requiring sparsehash
			// resizing. For further details on the test dataset, see:
			// "ABySS 2.0: Resource-efficient assembly of large genomes
			// using a Bloom filter".
			m_data.rehash((size_t)pow(2, 30));
			m_data.min_load_factor(0.2);
		} else {
			// Allocate a big hash for a single processor.
			m_data.rehash(1<<29);
			m_data.max_load_factor(0.4);
		}
#endif
	}

	/** sparse_hash_set requires that set_deleted_key()
	 * is called before calling erase(). This key cannot
	 * be an existing kmer in m_data. This function sets
	 * the deleted key and should be called after all
	 * data has been loaded.
	 */
	void setDeletedKey()
	{
#if HAVE_GOOGLE_SPARSE_HASH_MAP
		// Scan for a stored k-mer whose reverse complement is NOT itself
		// stored; that absent rc can safely serve as the deleted key.
		for (SequenceDataHash::iterator it = m_data.begin();
				it != m_data.end(); it++) {
			key_type rc(reverseComplement(it->first));
			bool isrc;
			SequenceDataHash::iterator search = find(rc, isrc);
			// If this is false, we should have a palindrome or we're
			// doing a SS assembly.
			if (isrc || search == m_data.end()) {
				m_data.set_deleted_key(rc);
				return;
			}
		}
		logger(1) << "error: unable to set deleted key.\n";
		exit(EXIT_FAILURE);
#else
		return;
#endif
	}

	/** Add the specified k-mer to this collection.
	 * If already present (possibly as reverse complement), its
	 * strand-specific multiplicity is incremented instead. */
	void add(const key_type& seq, unsigned coverage = 1)
	{
		bool rc;
		iterator it = find(seq, rc);
		if (it == m_data.end()) {
			m_data.insert(std::make_pair(seq, mapped_type(SENSE, coverage)));
		} else if (coverage > 0) {
			assert(!rc || !opt::ss);
			it->second.addMultiplicity(rc ? ANTISENSE : SENSE, coverage);
		}
	}

	/** Clean up by erasing sequences flagged as deleted.
	 * @return the number of sequences erased
	 */
	size_t cleanup()
	{
		Timer(__func__);
		size_t count = 0;
		for (iterator it = m_data.begin(); it != m_data.end();) {
			if (it->second.deleted()) {
				// Post-increment keeps the iterator valid across erase().
				m_data.erase(it++);
				count++;
			} else
				++it;
		}
		shrink();
		return count;
	}

	/** Add an edge to this k-mer.
	 * @return false if the k-mer is not present. */
	bool setBaseExtension(
			const key_type& kmer, extDirection dir, Symbol base)
	{
		bool rc;
		iterator it = find(kmer, rc);
		if (it == m_data.end())
			return false;
		if (opt::ss) {
			assert(!rc);
			it->second.setBaseExtension(dir, base);
		} else {
			// A palindromic k-mer represents both strands, so the edge
			// must be recorded in both orientations.
			bool palindrome = kmer.isPalindrome();
			if (!rc || palindrome)
				it->second.setBaseExtension(dir, base);
			if (rc || palindrome)
				it->second.setBaseExtension(!dir, reverseComplement(base));
		}
		return true;
	}

	/** Remove the specified extensions from this k-mer.
	 * Notifies the attached observer (if any) after the change. */
	void removeExtension(const key_type& kmer,
			extDirection dir, SymbolSet ext)
	{
		bool rc;
		iterator it = find(kmer, rc);
		assert(it != m_data.end());
		if (opt::ss) {
			assert(!rc);
			it->second.removeExtension(dir, ext);
		} else {
			bool palindrome = kmer.isPalindrome();
			if (!rc || palindrome)
				it->second.removeExtension(dir, ext);
			if (rc || palindrome)
				it->second.removeExtension(!dir, ext.complement());
		}
		notify(*it);
	}

	/** Remove the specified edge of this vertex. */
	void removeExtension(const key_type& seq, extDirection dir, Symbol base)
	{
		removeExtension(seq, dir, SymbolSet(base));
	}

	/** Set a flag on the entry, complemented if the key was found
	 * as its reverse complement. */
	void setFlag(const key_type& key, SeqFlag flag)
	{
		bool rc;
		iterator it = find(key, rc);
		assert(it != m_data.end());
		it->second.setFlag(rc ? complement(flag) : flag);
	}

	/** Mark the specified sequence in both directions. */
	void mark(const key_type& seq)
	{
		setFlag(seq, SeqFlag(SF_MARK_SENSE | SF_MARK_ANTISENSE));
	}

	/** Mark the specified sequence. */
	void mark(const key_type& seq, extDirection sense)
	{
		setFlag(seq, sense == SENSE
				? SF_MARK_SENSE : SF_MARK_ANTISENSE);
	}

	/** Clear the specified flag for all vertices. */
	void wipeFlag(SeqFlag flag)
	{
		for (iterator it = m_data.begin();
				it != m_data.end(); ++it)
			it->second.clearFlag(flag);
	}

	/** Print the load of the hash table. */
	void printLoad() const
	{
		size_t size = m_data.size();
		size_t buckets = m_data.bucket_count();
		logger(1) << "Hash load: " << size << " / " << buckets << " = "
			<< std::setprecision(3) << (float)size / buckets
			<< " using " << toSI(getMemoryUsage()) << "B" << std::endl;
	}

private:
	const_iterator
	find(const key_type& key) const
	{
		return m_data.find(key);
	}

	iterator
	find(const key_type& key)
	{
		return m_data.find(key);
	}

	/** Return an iterator pointing to the specified k-mer or its
	 * reverse complement. Return in rc whether the sequence is reversed.
	 */
	iterator
	find(const key_type& key, bool& rc)
	{
		iterator it = find(key);
		if (opt::ss || it != m_data.end()) {
			rc = false;
			return it;
		} else {
			rc = true;
			return find(reverseComplement(key));
		}
	}

public:
	/** Return an iterator pointing to the specified k-mer or its
	 * reverse complement. Return in rc whether the sequence is reversed.
	 */
	const_iterator
	find(const key_type& key, bool& rc) const
	{
		const_iterator it = find(key);
		if (opt::ss || it != m_data.end()) {
			rc = false;
			return it;
		} else {
			rc = true;
			return find(reverseComplement(key));
		}
	}

	/** Return the sequence and data of the specified key.
	 * The key sequence may not contain data. The returned sequence will
	 * contain data.
	 */
	const value_type&
	getSeqAndData(const key_type& key) const
	{
		bool rc;
		const_iterator it = find(key, rc);
		// rc should not be ignored. This seems quite dubious.
		// The edges of this k-mer should be complemented.
		assert(it != m_data.end());
		return *it;
	}

	/** Return the data of the specified key.
	 * @return false if the key is not present. */
	bool getSeqData(const key_type& key,
			SymbolSetPair& extRecord, int& multiplicity) const
	{
		bool rc;
		const_iterator it = find(key, rc);
		assert(!rc || !opt::ss);
		if (it == m_data.end())
			return false;
		const mapped_type data = it->second;
		extRecord = rc ? data.extension().complement() : data.extension();
		multiplicity = data.getMultiplicity();
		return true;
	}

	/** Write this collection to disk.
	 * @param path does not include the extension
	 */
	void store(const char* path)
	{
		assert(path != NULL);
#if HAVE_GOOGLE_SPARSE_HASH_MAP
		std::ostringstream s;
		s << path;
		if (opt::rank >= 0)
			// Per-rank output file in an MPI assembly.
			s << '-' << std::setfill('0') << std::setw(3) << opt::rank;
		s << ".kmer";
		FILE* f = fopen(s.str().c_str(), "w");
		if (f == NULL) {
			perror(s.str().c_str());
			exit(EXIT_FAILURE);
		}
		shrink();
		m_data.write_metadata(f);
		m_data.write_nopointer_data(f);
		fclose(f);
#else
		// Not supported.
		assert(false);
		exit(EXIT_FAILURE);
#endif
	}

	/** Load this collection from disk. */
	void load(const char* path)
	{
#if HAVE_GOOGLE_SPARSE_HASH_MAP
		FILE* f = fopen(path, "r");
		if (f == NULL) {
			perror(path);
			exit(EXIT_FAILURE);
		}
		m_data.read_metadata(f);
		m_data.read_nopointer_data(f);
		fclose(f);
		// Stored collections include adjacency information.
		m_adjacencyLoaded = true;
#else
		(void)path;
		// Not supported.
		assert(false);
		exit(EXIT_FAILURE);
#endif
	}

	/** Indicate that this is a colour-space collection. */
	void setColourSpace(bool flag)
	{
		if (!m_data.empty())
			assert(opt::colourSpace == flag);
		opt::colourSpace = flag;
	}

private:
	/** Call the observers of the specified sequence. */
	void notify(const value_type& seq)
	{
		if (m_seqObserver != NULL)
			m_seqObserver(this, seq);
	}

	/** The underlying collection. */
	SequenceDataHash m_data;

	/** The observers. Only a single observer is implemented.*/
	SeqObserver m_seqObserver;

	/** Whether adjacency information has been loaded. */
	bool m_adjacencyLoaded;
};
// Forward declaration
class DBGEdgeIterator;
// Graph
namespace boost {

/** Boost.Graph traits adaptor exposing the k-mer hash table as a graph.
 * Vertices are k-mers; an edge (u,v) exists when v follows u by one base. */
template <>
struct graph_traits<SequenceCollectionHash> {
	// Graph
	typedef SequenceCollectionHash Graph;
	typedef Graph::key_type vertex_descriptor;
	typedef boost::directed_tag directed_category;
	struct traversal_category
		: boost::adjacency_graph_tag, boost::vertex_list_graph_tag
		{ };
	typedef boost::disallow_parallel_edge_tag edge_parallel_category;

	// IncidenceGraph
	typedef std::pair<vertex_descriptor, vertex_descriptor>
		edge_descriptor;
	typedef unsigned degree_size_type;

	// VertexListGraph
	typedef size_t vertices_size_type;

	// EdgeListGraph
	typedef size_t edges_size_type;
	typedef DBGEdgeIterator edge_iterator;

	// Other
	typedef Graph::Symbol Symbol;
	typedef Graph::SymbolSet SymbolSet;
	static const unsigned NUM_SYMBOLS = SymbolSet::NUM;

// AdjacencyGraph
/** Iterate through the adjacent vertices of a vertex.
 * Enumerates set bits of the SymbolSet; m_v is the source k-mer shifted
 * one base, with the last base rewritten for each present symbol.
 * m_i == NUM_SYMBOLS marks the end iterator.
 * NOTE(review): std::iterator is deprecated since C++17. */
struct adjacency_iterator
	: public std::iterator<std::input_iterator_tag, vertex_descriptor>
{
	/** Skip to the next edge that is present. */
	void next()
	{
		for (; m_i < NUM_SYMBOLS && !m_adj.checkBase(Symbol(m_i)); ++m_i) {
		}
		if (m_i < NUM_SYMBOLS)
			m_v.setLastBase(SENSE, Symbol(m_i));
	}

  public:
	adjacency_iterator() : m_i(NUM_SYMBOLS) { }

	adjacency_iterator(
			vertex_descriptor u, SymbolSet adj)
		: m_v(u), m_adj(adj), m_i(0)
	{
		m_v.shift(SENSE);
		next();
	}

	const vertex_descriptor& operator*() const
	{
		assert(m_i < NUM_SYMBOLS);
		return m_v;
	}

	bool operator==(const adjacency_iterator& it) const
	{
		// Equality compares only the symbol index; sufficient because
		// iterators are only compared against the end iterator of the
		// same vertex.
		return m_i == it.m_i;
	}

	bool operator!=(const adjacency_iterator& it) const
	{
		return !(*this == it);
	}

	adjacency_iterator& operator++()
	{
		assert(m_i < NUM_SYMBOLS);
		++m_i;
		next();
		return *this;
	}

  private:
	vertex_descriptor m_v;
	SymbolSet m_adj;
	short unsigned m_i;
}; // adjacency_iterator

// IncidenceGraph
/** Iterate through the out edges of a vertex.
 * Same enumeration scheme as adjacency_iterator, but yields (u, v) pairs. */
struct out_edge_iterator
	: public std::iterator<std::input_iterator_tag, edge_descriptor>
{
	/** Skip to the next edge that is present. */
	void next()
	{
		for (; m_i < NUM_SYMBOLS && !m_adj.checkBase(Symbol(m_i)); ++m_i) {
		}
		if (m_i < NUM_SYMBOLS)
			m_e.second.setLastBase(SENSE, Symbol(m_i));
	}

  public:
	out_edge_iterator() : m_i(NUM_SYMBOLS) { }

	out_edge_iterator(
			vertex_descriptor u, SymbolSet adj)
		: m_e(u, u), m_adj(adj), m_i(0)
	{
		m_e.second.shift(SENSE);
		next();
	}

	const edge_descriptor& operator*() const
	{
		assert(m_i < NUM_SYMBOLS);
		return m_e;
	}

	bool operator==(const out_edge_iterator& it) const
	{
		return m_i == it.m_i;
	}

	bool operator!=(const out_edge_iterator& it) const
	{
		return !(*this == it);
	}

	out_edge_iterator& operator++()
	{
		assert(m_i < NUM_SYMBOLS);
		++m_i;
		next();
		return *this;
	}

  private:
	edge_descriptor m_e;
	SymbolSet m_adj;
	short unsigned m_i;
}; // out_edge_iterator

// BidirectionalGraph
/** Iterate through the in-edges of a vertex.
 * Mirrors out_edge_iterator but shifts/rewrites in the ANTISENSE direction
 * and varies the first member of the edge pair. */
struct in_edge_iterator
	: public std::iterator<std::input_iterator_tag, edge_descriptor>
{
	/** Skip to the next edge that is present. */
	void next()
	{
		for (; m_i < NUM_SYMBOLS && !m_adj.checkBase(Symbol(m_i)); ++m_i) {
		}
		if (m_i < NUM_SYMBOLS)
			m_e.first.setLastBase(ANTISENSE, Symbol(m_i));
	}

  public:
	in_edge_iterator() : m_i(NUM_SYMBOLS) { }

	in_edge_iterator(
			vertex_descriptor u, SymbolSet adj)
		: m_e(u, u), m_adj(adj), m_i(0)
	{
		m_e.first.shift(ANTISENSE);
		next();
	}

	const edge_descriptor& operator*() const
	{
		assert(m_i < NUM_SYMBOLS);
		return m_e;
	}

	bool operator==(const in_edge_iterator& it) const
	{
		return m_i == it.m_i;
	}

	bool operator!=(const in_edge_iterator& it) const
	{
		return !(*this == it);
	}

	in_edge_iterator& operator++()
	{
		assert(m_i < NUM_SYMBOLS);
		++m_i;
		next();
		return *this;
	}

  private:
	edge_descriptor m_e;
	SymbolSet m_adj;
	short unsigned m_i;
}; // in_edge_iterator

// VertexListGraph
/** Iterate through the vertices of this graph.
 * Each stored table entry is visited twice: once as-is (m_sense false) and
 * once as its reverse complement (m_sense true), since only one strand of
 * each k-mer is stored. */
struct vertex_iterator
	: public std::iterator<std::input_iterator_tag, vertex_descriptor>
{
	typedef Graph::const_iterator It;

  public:
	vertex_iterator(const It& it) : m_it(it), m_sense(false) { }

	const vertex_descriptor operator*() const
	{
		return m_sense ? reverseComplement(m_it->first) : m_it->first;
	}

	bool operator==(const vertex_iterator& it) const
	{
		return m_it == it.m_it && m_sense == it.m_sense;
	}

	bool operator!=(const vertex_iterator& it) const
	{
		return !(*this == it);
	}

	vertex_iterator& operator++()
	{
		if (m_sense) {
			++m_it;
			m_sense = false;
		} else
			m_sense = true;
		return *this;
	}

  private:
	It m_it;
	bool m_sense;
}; // vertex_iterator

}; // graph_traits<SequenceCollectionHash>

} // namespace boost
// IncidenceGraph

/** Return the range of out-edges of vertex u. */
static inline
std::pair<
	graph_traits<SequenceCollectionHash>::out_edge_iterator,
	graph_traits<SequenceCollectionHash>::out_edge_iterator>
out_edges(
		graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		const SequenceCollectionHash& g)
{
	typedef graph_traits<SequenceCollectionHash> GTraits;
	typedef GTraits::out_edge_iterator out_edge_iterator;
	typedef GTraits::SymbolSet SymbolSet;
	SymbolSet adj = g[u].getExtension(SENSE);
	return std::make_pair(
			out_edge_iterator(u, adj),
			out_edge_iterator());
}

/** Return the number of out-edges of vertex u. */
static inline
graph_traits<SequenceCollectionHash>::degree_size_type
out_degree(
		graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		const SequenceCollectionHash& g)
{
	return g[u].getExtension(SENSE).outDegree();
}

// BidirectionalGraph

/** Return the range of in-edges of vertex u. */
static inline
std::pair<
	graph_traits<SequenceCollectionHash>::in_edge_iterator,
	graph_traits<SequenceCollectionHash>::in_edge_iterator>
in_edges(
		graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		const SequenceCollectionHash& g)
{
	typedef graph_traits<SequenceCollectionHash> GTraits;
	typedef GTraits::in_edge_iterator in_edge_iterator;
	typedef GTraits::SymbolSet SymbolSet;
	SymbolSet adj = g[u].getExtension(ANTISENSE);
	return std::make_pair(
			in_edge_iterator(u, adj),
			in_edge_iterator());
}

/** Return the number of in-edges of vertex u. */
static inline
graph_traits<SequenceCollectionHash>::degree_size_type
in_degree(graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		const SequenceCollectionHash& g)
{
	// outDegree() of the ANTISENSE extension set counts incoming edges.
	return g[u].getExtension(ANTISENSE).outDegree();
}

// AdjacencyGraph

/** Return the range of vertices adjacent to u (successors). */
static inline
std::pair<graph_traits<SequenceCollectionHash>::adjacency_iterator,
	graph_traits<SequenceCollectionHash>::adjacency_iterator>
adjacent_vertices(
		graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		const SequenceCollectionHash& g)
{
	typedef graph_traits<SequenceCollectionHash>::adjacency_iterator
		adjacency_iterator;
	typedef graph_traits<SequenceCollectionHash>::SymbolSet SymbolSet;
	SymbolSet adj = g[u].getExtension(SENSE);
	return std::make_pair(adjacency_iterator(u, adj),
			adjacency_iterator());
}

// VertexListGraph

/** Return the range of all vertices of g (both strands of each k-mer). */
static inline
std::pair<graph_traits<SequenceCollectionHash>::vertex_iterator,
	graph_traits<SequenceCollectionHash>::vertex_iterator>
vertices(const SequenceCollectionHash& g)
{
	return std::make_pair(g.begin(), g.end());
}
// EdgeListGraph

/** Iterate through the edges of this graph.
 * Walks the vertex range and, for each vertex, its adjacency range;
 * nextVertex() skips vertices with no out-edges. */
class DBGEdgeIterator
	: public std::iterator<std::input_iterator_tag,
		graph_traits<SequenceCollectionHash>::edge_descriptor>
{
	typedef graph_traits<SequenceCollectionHash> GTraits;
	typedef GTraits::adjacency_iterator adjacency_iterator;
	typedef GTraits::edge_descriptor edge_descriptor;
	typedef GTraits::edge_iterator edge_iterator;
	typedef GTraits::vertex_iterator vertex_iterator;

	/** Advance m_vit to the next vertex with at least one out-edge and
	 * position m_eit at its first edge; reset m_eit at the end. */
	void nextVertex()
	{
		vertex_iterator vlast = vertices(*m_g).second;
		for (; m_vit != vlast; ++m_vit) {
			std::pair<adjacency_iterator, adjacency_iterator>
				adj = adjacent_vertices(*m_vit, *m_g);
			if (adj.first != adj.second) {
				m_eit = adj.first;
				return;
			}
		}
		// Set m_eit to a known value.
		static const adjacency_iterator s_eitNULL;
		m_eit = s_eitNULL;
	}

  public:
	DBGEdgeIterator(const SequenceCollectionHash* g, const vertex_iterator& vit)
		: m_g(g), m_vit(vit)
	{
		nextVertex();
	}

	edge_descriptor operator*() const
	{
		return edge_descriptor(*m_vit, *m_eit);
	}

	bool operator==(const edge_iterator& it) const
	{
		return m_vit == it.m_vit && m_eit == it.m_eit;
	}

	bool operator!=(const edge_iterator& it) const
	{
		return !(*this == it);
	}

	edge_iterator& operator++()
	{
		if (++m_eit == adjacent_vertices(*m_vit, *m_g).second) {
			++m_vit;
			nextVertex();
		}
		return *this;
	}

	edge_iterator operator++(int)
	{
		edge_iterator it = *this;
		++*this;
		return it;
	}

  private:
	const SequenceCollectionHash* m_g;
	vertex_iterator m_vit;
	adjacency_iterator m_eit;
}; // DBGEdgeIterator
/** Iterate through the edges of this graph. */
static inline
std::pair<
	graph_traits<SequenceCollectionHash>::edge_iterator,
	graph_traits<SequenceCollectionHash>::edge_iterator>
edges(const SequenceCollectionHash& g)
{
	typedef graph_traits<SequenceCollectionHash> GTraits;
	typedef GTraits::vertex_iterator vertex_iterator;
	typedef GTraits::edge_iterator edge_iterator;
	std::pair<vertex_iterator, vertex_iterator> uit = vertices(g);
	return std::make_pair(
			edge_iterator(&g, uit.first),
			edge_iterator(&g, uit.second));
}

// EdgeMutableGraph

/** Remove the edge (u,v) from the graph.
 * The edge is identified by the last base of v, i.e. the symbol that
 * extends u in the SENSE direction. */
static inline
void
remove_edge(
		graph_traits<SequenceCollectionHash>::vertex_descriptor u,
		graph_traits<SequenceCollectionHash>::vertex_descriptor v,
		SequenceCollectionHash& g)
{
	g.removeExtension(u, SENSE, v.back());
}

/** Remove the edge e from the graph. */
static inline
void
remove_edge(
		graph_traits<SequenceCollectionHash>::edge_descriptor e,
		SequenceCollectionHash& g)
{
	remove_edge(source(e, g), target(e, g), g);
}
// PropertyGraph

/** Return the reverse complement of the specified k-mer. */
static inline
graph_traits<SequenceCollectionHash>::vertex_descriptor
get(vertex_complement_t, const SequenceCollectionHash&,
		graph_traits<SequenceCollectionHash>::vertex_descriptor u)
{
	return reverseComplement(u);
}

/** Return whether this vertex has been removed. */
static inline
bool get(vertex_removed_t, const SequenceCollectionHash& g,
		graph_traits<SequenceCollectionHash>::vertex_descriptor u)
{
	return g.getSeqAndData(u).second.deleted();
}

/** Return the name of this vertex (its sequence as a string). */
static inline
std::string
get(vertex_name_t, const SequenceCollectionHash&,
		graph_traits<SequenceCollectionHash>::vertex_descriptor u)
{
	return u.str();
}

/** Return the properties of this vertex. */
static inline
vertex_bundle_type<SequenceCollectionHash>::type
get(vertex_bundle_t, const SequenceCollectionHash& g,
		graph_traits<SequenceCollectionHash>::vertex_descriptor u)
{
	return g[u];
}

/** Return the coverage (k-mer multiplicity) of this vertex. */
static inline
unsigned
get(vertex_coverage_t, const SequenceCollectionHash& g,
		graph_traits<SequenceCollectionHash>::vertex_descriptor u)
{
	return g[u].getMultiplicity();
}

/** Return the properties of this edge. Edges carry no properties. */
static inline
no_property get(edge_bundle_t, const SequenceCollectionHash&,
		graph_traits<SequenceCollectionHash>::edge_descriptor)
{
	return no_property();
}

#endif
| {
"pile_set_name": "Github"
} |
-- Addon namespace tables: T = toolkit, C = config, L = locale.
local T, C, L, _ = unpack(select(2, ...))
-- Only active on Classic clients with the raid-buff reminder enabled.
if not T.classic or C.reminder.raid_buffs_enable ~= true then return end

----------------------------------------------------------------------------------------
--	Raid buffs on player(by Elv22)
----------------------------------------------------------------------------------------
-- Locals
-- Buff spell-ID lists, keyed by category, defined by the addon elsewhere.
local flaskBuffs = T.ReminderBuffs["Flask"]
local otherBuffs = T.ReminderBuffs["Other"]
local foodBuffs = T.ReminderBuffs["Food"]
local spell3Buffs = T.ReminderBuffs["Spell3Buff"]
local spell4Buffs = T.ReminderBuffs["Spell4Buff"]
local spell5Buffs = T.ReminderBuffs["Spell5Buff"]
local spell6Buffs = T.ReminderBuffs["Spell6Buff"]
local spell7Buffs = T.ReminderBuffs["Spell7Buff"]
local customBuffs = T.ReminderBuffs["Custom"]
-- Whether the reminder bar is currently shown (used to drive fades).
local visible
-- Per-category tracking of whether the corresponding buff is on the player.
local isPresent = {
	flask = false,
	food = false,
	spell3 = false,
	spell4 = false,
	spell5 = false,
	spell6 = false,
	spell7 = false,
	custom = false,
}
-- Aura Checks
-- Aura Checks
-- Checks whether the player satisfies the flask/elixir requirement:
-- either a flask, or a number of "other" buffs (elixirs) per the
-- role-dependent requirements. Updates FlaskFrame's icon/alpha and
-- isPresent.flask as a side effect.
local function CheckElixir()
	local requireFlask, otherBuffsRequired = T.ReminderFlaskRequirements()
	local hasFlask, otherBuffsCount, meetsRequirements = false, 0, false
	FlaskFrame.t:SetTexture("")
	if requireFlask then
		if flaskBuffs and flaskBuffs[1] then
			for i, flaskBuffs in pairs(flaskBuffs) do
				-- NOTE(review): loop variable shadows the outer flaskBuffs list.
				local name, _, icon = GetSpellInfo(flaskBuffs)
				if i == 1 then
					-- First entry's icon is shown as the reminder icon.
					FlaskFrame.t:SetTexture(icon)
				end
				if T.CheckPlayerBuff(name) then
					FlaskFrame:SetAlpha(C.reminder.raid_buffs_alpha)
					hasFlask = true
					break
				end
			end
		end
	else
		hasFlask = true
	end
	if FlaskFrame.t:GetTexture() == "" then
		-- Fallback icon when no flask icon could be resolved.
		-- NOTE(review): assumes GetTexture() returns "" after SetTexture("") — confirm on this client version.
		FlaskFrame.t:SetTexture(134821)
	end
	if not hasFlask then
		FlaskFrame:SetAlpha(1)
		isPresent.flask = false
		return
	end
	if otherBuffsRequired > 0 then
		if otherBuffs then
			-- Count how many of the alternative buffs are active.
			for k, _ in pairs(otherBuffs) do
				for _, v in pairs(otherBuffs[k]) do
					local name = GetSpellInfo(v)
					if T.CheckPlayerBuff(name) then
						otherBuffsCount = otherBuffsCount + 1
						if otherBuffsCount >= otherBuffsRequired then
							meetsRequirements = true
							break
						end
					end
				end
			end
		end
	else
		meetsRequirements = true
	end
	if hasFlask and meetsRequirements then
		FlaskFrame:SetAlpha(C.reminder.raid_buffs_alpha)
		isPresent.flask = true
		return
	else
		FlaskFrame:SetAlpha(1)
		isPresent.flask = false
		return
	end
end
-- Checks whether any spell in `list` is active on the player.
-- list: array of spell IDs; frame: the reminder button to update;
-- n: key into isPresent to record the result.
-- The first list entry's icon is used as the button texture.
local function CheckBuff(list, frame, n)
	if list and list[1] then
		for i, list in pairs(list) do
			-- NOTE(review): loop variable shadows the `list` parameter;
			-- inside the loop `list` is a single spell ID.
			local name, _, icon = GetSpellInfo(list)
			if i == 1 then
				frame.t:SetTexture(icon)
			end
			if T.CheckPlayerBuff(name) then
				-- Buff found: dim the button and stop scanning.
				frame:SetAlpha(C.reminder.raid_buffs_alpha)
				isPresent[n] = true
				break
			else
				frame:SetAlpha(1)
				isPresent[n] = false
			end
		end
	end
end
-- Main Script
local function OnAuraChange(self, event, unit)
if event == "UNIT_AURA" and unit ~= "player" then return end
-- If We're a caster we may want to see different buffs
if T.Role == "Caster" or T.Role == "Healer" then
T.ReminderCasterBuffs()
else
T.ReminderPhysicalBuffs()
end
spell4Buffs = T.ReminderBuffs["Spell4Buff"]
spell5Buffs = T.ReminderBuffs["Spell5Buff"]
spell6Buffs = T.ReminderBuffs["Spell6Buff"]
-- Start checking buffs to see if we can find a match from the list
CheckElixir()
CheckBuff(foodBuffs, FoodFrame, "food")
CheckBuff(spell3Buffs, Spell3Frame, "spell3")
CheckBuff(spell4Buffs, Spell4Frame, "spell4")
CheckBuff(spell5Buffs, Spell5Frame, "spell5")
CheckBuff(spell6Buffs, Spell6Frame, "spell6")
CheckBuff(spell7Buffs, Spell7Frame, "spell7")
if customBuffs and customBuffs[1] then
CheckBuff(customBuffs, CustomFrame, "custom")
else
CustomFrame:Hide()
isPresent.custom = true
end
local _, instanceType = IsInInstance()
if (not IsInGroup() or instanceType ~= "raid") and C.reminder.raid_buffs_always == false then
RaidBuffReminder:SetAlpha(0)
visible = false
elseif isPresent.flask == true and isPresent.food == true and isPresent.spell3 == true and isPresent.spell4 == true and isPresent.spell5 == true and isPresent.spell6 == true and isPresent.spell7 == true and isPresent.custom == true then
if not visible then
RaidBuffReminder:SetAlpha(0)
visible = false
end
if visible then
UIFrameFadeOut(RaidBuffReminder, 0.5)
visible = false
end
else
if not visible then
UIFrameFadeIn(RaidBuffReminder, 0.5)
visible = true
end
end
end
-- Create Anchor
-- Create Anchor
-- Movable anchor frame sized for six buttons plus spacing.
local RaidBuffsAnchor = CreateFrame("Frame", "RaidBuffsAnchor", UIParent)
RaidBuffsAnchor:SetWidth((C.reminder.raid_buffs_size * 6) + 15)
RaidBuffsAnchor:SetHeight(C.reminder.raid_buffs_size)
RaidBuffsAnchor:SetPoint(unpack(C.position.raid_buffs))

-- Create Main bar
-- The bar itself listens for aura/zone/talent events and re-checks buffs.
local raidbuff_reminder = CreateFrame("Frame", "RaidBuffReminder", UIParent)
raidbuff_reminder:CreatePanel("Invisible", (C.reminder.raid_buffs_size * 6) + 15, C.reminder.raid_buffs_size + 4, "TOPLEFT", RaidBuffsAnchor, "TOPLEFT", 0, 4)
raidbuff_reminder:RegisterEvent("UNIT_AURA")
raidbuff_reminder:RegisterEvent("PLAYER_ENTERING_WORLD")
raidbuff_reminder:RegisterEvent("CHARACTER_POINTS_CHANGED")
raidbuff_reminder:RegisterEvent("ZONE_CHANGED_NEW_AREA")
raidbuff_reminder:SetScript("OnEvent", OnAuraChange)
-- Function to create buttons
-- Function to create buttons
-- Creates one reminder button named `name`, anchored to `relativeTo`.
-- The first button anchors at the bar's left edge; subsequent buttons
-- attach 3px to the right of the previous one.
local function CreateButton(name, relativeTo, firstbutton)
	local button = CreateFrame("Frame", name, RaidBuffReminder)
	if firstbutton == true then
		button:CreatePanel("Default", C.reminder.raid_buffs_size, C.reminder.raid_buffs_size, "BOTTOMLEFT", relativeTo, "BOTTOMLEFT", 0, 0)
	else
		button:CreatePanel("Default", C.reminder.raid_buffs_size, C.reminder.raid_buffs_size, "LEFT", relativeTo, "RIGHT", 3, 0)
	end
	button:SetFrameLevel(RaidBuffReminder:GetFrameLevel() + 2)
	-- Icon texture, inset 2px and cropped to trim the default icon border.
	button.t = button:CreateTexture(name..".t", "OVERLAY")
	button.t:SetTexCoord(0.1, 0.9, 0.1, 0.9)
	button.t:SetPoint("TOPLEFT", 2, -2)
	button.t:SetPoint("BOTTOMRIGHT", -2, 2)
end
-- Create Buttons
-- Create Buttons
-- One button per tracked category, chained left to right.
do
	CreateButton("FlaskFrame", RaidBuffReminder, true)
	CreateButton("FoodFrame", FlaskFrame, false)
	CreateButton("Spell3Frame", FoodFrame, false)
	CreateButton("Spell4Frame", Spell3Frame, false)
	CreateButton("Spell5Frame", Spell4Frame, false)
	CreateButton("Spell6Frame", Spell5Frame, false)
	CreateButton("Spell7Frame", Spell6Frame, false)
	CreateButton("CustomFrame", Spell7Frame, false)
end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2002 Roman Zippel <[email protected]>
* Released under the terms of the GNU GPL v2.0.
*/
#ifndef LKC_H
#define LKC_H

#include "expr.h"

/* Gettext wrappers: real libintl when NLS is enabled, no-op stubs otherwise. */
#ifndef KBUILD_NO_NLS
# include <libintl.h>
#else
static inline const char *gettext(const char *txt) { return txt; }
static inline void textdomain(const char *domainname) {}
static inline void bindtextdomain(const char *name, const char *dir) {}
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* P() declares API entry points either as direct symbols (LKC_DIRECT_LINK)
 * or as function pointers resolved at runtime by kconfig_load(). */
#ifdef LKC_DIRECT_LINK
#define P(name,type,arg)	extern type name arg
#else
#include "lkc_defs.h"
#define P(name,type,arg)	extern type (*name ## _p) arg
#endif
#include "lkc_proto.h"
#undef P

#define SRCTREE "srctree"

#define PACKAGE "linux"
#define LOCALEDIR "/usr/share/locale"

#define _(text) gettext(text)
#define N_(text) (text)

/* Token classification flags for the keyword table. */
#define TF_COMMAND	0x0001
#define TF_PARAM	0x0002
#define TF_OPTION	0x0004

/* How to choose values when generating a default configuration. */
enum conf_def_mode {
	def_default,
	def_yes,
	def_mod,
	def_no,
	def_random
};

/* Values for struct kconf_id.flags when TF_OPTION is set. */
#define T_OPT_MODULES		1
#define T_OPT_DEFCONFIG_LIST	2
#define T_OPT_ENV		3

/* One entry of the gperf-generated keyword table. */
struct kconf_id {
	int name;
	int token;
	unsigned int flags;
	enum symbol_type stype;
};

int zconfparse(void);
void zconfdump(FILE *out);
extern int zconfdebug;

void zconf_starthelp(void);
FILE *zconf_fopen(const char *name);
void zconf_initscan(const char *name);
void zconf_nextfile(const char *name);
int zconf_lineno(void);
char *zconf_curname(void);

/* confdata.c */
const char *conf_get_configname(void);
const char *conf_get_autoconfig_name(void);
char *conf_get_default_confname(void);
void sym_set_change_count(int count);
void sym_add_change_count(int count);
void conf_set_all_new_symbols(enum conf_def_mode mode);

/* kconfig_load.c */
void kconfig_load(void);

/* menu.c */
void menu_init(void);
void menu_warn(struct menu *menu, const char *fmt, ...);
struct menu *menu_add_menu(void);
void menu_end_menu(void);
void menu_add_entry(struct symbol *sym);
void menu_end_entry(void);
void menu_add_dep(struct expr *dep);
struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep);
struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
void menu_add_option(int token, char *arg);
void menu_finalize(struct menu *parent);
void menu_set_type(int type);

/* util.c */
struct file *file_lookup(const char *name);
int file_write_dep(const char *name);

/* Growable string: s is heap-allocated, len is the allocated capacity. */
struct gstr {
	size_t len;
	char  *s;
};
struct gstr str_new(void);
struct gstr str_assign(const char *s);
void str_free(struct gstr *gs);
void str_append(struct gstr *gs, const char *s);
void str_printf(struct gstr *gs, const char *fmt, ...);
const char *str_get(struct gstr *gs);

/* symbol.c */
extern struct expr *sym_env_list;

void sym_init(void);
void sym_clear_all_valid(void);
void sym_set_all_changed(void);
void sym_set_changed(struct symbol *sym);
struct symbol *sym_check_deps(struct symbol *sym);
struct property *prop_alloc(enum prop_type type, struct symbol *sym);
struct symbol *prop_get_symbol(struct property *prop);
struct property *sym_get_env_prop(struct symbol *sym);

/* Return the current tristate value of sym. */
static inline tristate sym_get_tristate_value(struct symbol *sym)
{
	return sym->curr.tri;
}

/* Return the currently selected choice value of a choice symbol. */
static inline struct symbol *sym_get_choice_value(struct symbol *sym)
{
	return (struct symbol *)sym->curr.val;
}

/* Select chval within the choice ch; ch itself is unused. */
static inline bool sym_set_choice_value(struct symbol *ch, struct symbol *chval)
{
	return sym_set_tristate_value(chval, yes);
}

static inline bool sym_is_choice(struct symbol *sym)
{
	return sym->flags & SYMBOL_CHOICE ? true : false;
}

static inline bool sym_is_choice_value(struct symbol *sym)
{
	return sym->flags & SYMBOL_CHOICEVAL ? true : false;
}

static inline bool sym_is_optional(struct symbol *sym)
{
	return sym->flags & SYMBOL_OPTIONAL ? true : false;
}

/* True if the user has explicitly set a value for sym. */
static inline bool sym_has_value(struct symbol *sym)
{
	return sym->flags & SYMBOL_DEF_USER ? true : false;
}

#ifdef __cplusplus
}
#endif

#endif /* LKC_H */
| {
"pile_set_name": "Github"
} |
<?php
use Illuminate\Support\Facades\Schema;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Database\Migrations\Migration;
/**
 * Migration: adds a string 'icon' column to the brew_methods table,
 * positioned after the 'method' column.
 */
class AddBrewMethodsIcon extends Migration
{
    /**
     * Run the migrations.
     *
     * Adds the non-nullable 'icon' string column after 'method'.
     *
     * @return void
     */
    public function up()
    {
        Schema::table('brew_methods', function (Blueprint $table) {
            $table->string('icon')->after('method');
        });
    }

    /**
     * Reverse the migrations.
     *
     * Drops the 'icon' column added by up().
     *
     * @return void
     */
    public function down()
    {
        Schema::table('brew_methods', function (Blueprint $table) {
            $table->dropColumn('icon');
        });
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.java2d.pisces;
import java.util.Arrays;
/**
 * An object used to cache pre-rendered complex paths.
 *
 * Stores per-row run-length-encoded (RLE) alpha coverage for a path's
 * bounding box, plus per-tile alpha sums used to quickly classify tiles
 * as fully transparent / fully opaque / mixed.
 *
 * @see PiscesRenderer#render
 */
final class PiscesCache {

    // Inclusive-min / exclusive-max device-space bounds of the cached path.
    final int bboxX0, bboxY0, bboxX1, bboxY1;

    // rowAARLE[i] holds the encoding of the pixel row with y = bboxY0+i.
    // The format of each of the inner arrays is: rowAARLE[i][0,1] = (x0, n)
    // where x0 is the first x in row i with nonzero alpha, and n is the
    // number of RLE entries in this row. rowAARLE[i][j,j+1] for j>1 is
    // (val,runlen)
    final int[][] rowAARLE;

    // RLE encodings are added in increasing y rows and then in increasing
    // x inside those rows. Therefore, at any one time there is a well
    // defined position (x,y) where a run length is about to be added (or
    // the row terminated). x0,y0 is this (x,y)-(bboxX0,bboxY0). They
    // are used to get indices into the current tile.
    private int x0 = Integer.MIN_VALUE, y0 = Integer.MIN_VALUE;

    // touchedTile[i][j] is the sum of all the alphas in the tile with
    // y=i*TILE_SIZE+bboxY0 and x=j*TILE_SIZE+bboxX0.
    private final int[][] touchedTile;

    static final int TILE_SIZE_LG = 5;
    static final int TILE_SIZE = 1 << TILE_SIZE_LG; // 32
    private static final int INIT_ROW_SIZE = 8; // enough for 3 run lengths

    /**
     * Creates a cache for the inclusive pixel bounds
     * [minx, maxx] x [miny, maxy].
     */
    PiscesCache(int minx, int miny, int maxx, int maxy) {
        assert maxy >= miny && maxx >= minx;
        bboxX0 = minx;
        bboxY0 = miny;
        bboxX1 = maxx + 1;
        bboxY1 = maxy + 1;
        // we could just leave the inner arrays as null and allocate them
        // lazily (which would be beneficial for shapes with gaps), but we
        // assume there won't be too many of those so we allocate everything
        // up front (which is better for other cases)
        rowAARLE = new int[bboxY1 - bboxY0 + 1][INIT_ROW_SIZE];

        x0 = 0;
        y0 = -1; // -1 makes the first assert in startRow succeed

        // the ceiling of (maxy - miny + 1) / TILE_SIZE;
        int nyTiles = (maxy - miny + TILE_SIZE) >> TILE_SIZE_LG;
        int nxTiles = (maxx - minx + TILE_SIZE) >> TILE_SIZE_LG;

        touchedTile = new int[nyTiles][nxTiles];
    }

    /**
     * Appends an RLE run (val, runLen) to the current row and accumulates
     * val into every tile the run touches. Advances the current x position
     * by runLen. Runs with runLen <= 0 are ignored.
     */
    void addRLERun(int val, int runLen) {
        if (runLen > 0) {
            addTupleToRow(y0, val, runLen);
            if (val != 0) {
                // the x and y of the current row, minus bboxX0, bboxY0
                int tx = x0 >> TILE_SIZE_LG;
                int ty = y0 >> TILE_SIZE_LG;
                int tx1 = (x0 + runLen - 1) >> TILE_SIZE_LG;

                // while we forbid rows from starting before bboxx0, our users
                // can still store rows that go beyond bboxx1 (although this
                // shouldn't happen), so it's a good idea to check that i
                // is not going out of bounds in touchedTile[ty]
                if (tx1 >= touchedTile[ty].length) {
                    tx1 = touchedTile[ty].length - 1;
                }

                // First (possibly partial) tile: only the part of the run
                // inside this tile contributes.
                if (tx <= tx1) {
                    int nextTileXCoord = (tx + 1) << TILE_SIZE_LG;
                    if (nextTileXCoord > x0+runLen) {
                        touchedTile[ty][tx] += val * runLen;
                    } else {
                        touchedTile[ty][tx] += val * (nextTileXCoord - x0);
                    }
                    tx++;
                }
                // don't go all the way to tx1 - we need to handle the last
                // tile as a special case (just like we did with the first
                for (; tx < tx1; tx++) {
//                    try {
                    touchedTile[ty][tx] += (val << TILE_SIZE_LG);
//                    } catch (RuntimeException e) {
//                        System.out.println("x0, y0: " + x0 + ", " + y0);
//                        System.out.printf("tx, ty, tx1: %d, %d, %d %n", tx, ty, tx1);
//                        System.out.printf("bboxX/Y0/1: %d, %d, %d, %d %n",
//                                bboxX0, bboxY0, bboxX1, bboxY1);
//                        throw e;
//                    }
                }
                // they will be equal unless x0>>TILE_SIZE_LG == tx1
                if (tx == tx1) {
                    int lastXCoord = Math.min(x0 + runLen, (tx + 1) << TILE_SIZE_LG);
                    int txXCoord = tx << TILE_SIZE_LG;
                    touchedTile[ty][tx] += val * (lastXCoord - txXCoord);
                }
            }
            x0 += runLen;
        }
    }

    /**
     * Begins a new row at device coordinates (x, y). Rows must be started
     * in strictly increasing y order, and x must be >= bboxX0.
     */
    void startRow(int y, int x) {
        // rows are supposed to be added by increasing y.
        assert y - bboxY0 > y0;
        assert y <= bboxY1; // perhaps this should be < instead of <=
        y0 = y - bboxY0;
        // this should be a new, uninitialized row.
        assert rowAARLE[y0][1] == 0;
        x0 = x - bboxX0;
        assert x0 >= 0 : "Input must not be to the left of bbox bounds";
        // the way addTupleToRow is implemented it would work for this but it's
        // not a good idea to use it because it is meant for adding
        // RLE tuples, not the first tuple (which is special).
        rowAARLE[y0][0] = x;
        rowAARLE[y0][1] = 2;
    }

    /** Sum of all alphas in the tile containing device point (x, y). */
    int alphaSumInTile(int x, int y) {
        x -= bboxX0;
        y -= bboxY0;
        return touchedTile[y>>TILE_SIZE_LG][x>>TILE_SIZE_LG];
    }

    /** First x (device space) with nonzero alpha in the given row index. */
    int minTouched(int rowidx) {
        return rowAARLE[rowidx][0];
    }

    /** Number of ints used in the given row's RLE array (header included). */
    int rowLength(int rowidx) {
        return rowAARLE[rowidx][1];
    }

    // Appends the pair (a, b) to the row's RLE array, growing it if needed,
    // and updates the row's length header at index 1.
    private void addTupleToRow(int row, int a, int b) {
        int end = rowAARLE[row][1];
        rowAARLE[row] = Helpers.widenArray(rowAARLE[row], end, 2);
        rowAARLE[row][end++] = a;
        rowAARLE[row][end++] = b;
        rowAARLE[row][1] = end;
    }

    /** Debug dump: bounding box plus each row's RLE entries. */
    @Override
    public String toString() {
        String ret = "bbox = ["+
                     bboxX0+", "+bboxY0+" => "+
                     bboxX1+", "+bboxY1+"]\n";
        for (int[] row : rowAARLE) {
            if (row != null) {
                ret += ("minTouchedX=" + row[0] +
                        "\tRLE Entries: " + Arrays.toString(
                                Arrays.copyOfRange(row, 2, row[1])) + "\n");
            } else {
                ret += "[]\n";
            }
        }
        return ret;
    }
}
| {
"pile_set_name": "Github"
} |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import (
"errors"
"fmt"
)
// A VM is an emulated BPF virtual machine.
type VM struct {
	// filter is the validated BPF program executed by Run.
	filter []Instruction
}
// NewVM returns a new VM using the input BPF program.
//
// The program is validated up front: it must be non-empty, all jumps must
// land inside the program, constant division/modulus by zero is rejected,
// only known extensions are accepted, and the final instruction must be a
// return. The program is also assembled to verify OS-level compatibility.
func NewVM(filter []Instruction) (*VM, error) {
	if len(filter) == 0 {
		return nil, errors.New("one or more Instructions must be specified")
	}

	for i, ins := range filter {
		// Number of instructions remaining after this one; any skip must
		// be strictly smaller to stay inside the program.
		remaining := len(filter) - (i + 1)

		switch ins := ins.(type) {
		// Reject out-of-bounds jumps.
		case Jump:
			if int(ins.Skip) >= remaining {
				return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
			}
		case JumpIf:
			if int(ins.SkipTrue) >= remaining {
				return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
			}
			if int(ins.SkipFalse) >= remaining {
				return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
			}
		case JumpIfX:
			if int(ins.SkipTrue) >= remaining {
				return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
			}
			if int(ins.SkipFalse) >= remaining {
				return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
			}
		// Reject constant division or modulus by zero.
		case ALUOpConstant:
			if ins.Val == 0 {
				switch ins.Op {
				case ALUOpDiv, ALUOpMod:
					return nil, errors.New("cannot divide by zero using ALUOpConstant")
				}
			}
		// Reject extensions this VM does not implement.
		case LoadExtension:
			if ins.Num != ExtLen {
				return nil, fmt.Errorf("extension %d not implemented", ins.Num)
			}
		}
	}

	// The program must terminate with an explicit return.
	switch filter[len(filter)-1].(type) {
	case RetA, RetConstant:
	default:
		return nil, errors.New("BPF program must end with RetA or RetConstant")
	}

	// Though our VM works using disassembled instructions, we
	// attempt to assemble the input filter anyway to ensure it is compatible
	// with an operating system VM.
	_, err := Assemble(filter)

	return &VM{filter: filter}, err
}
// Run runs the VM's BPF program against the input bytes.
// Run returns the number of bytes accepted by the BPF program, and any errors
// which occurred while processing the program.
func (v *VM) Run(in []byte) (int, error) {
	var (
		// Registers of the virtual machine
		regA       uint32
		regX       uint32
		regScratch [16]uint32

		// OK is true if the program should continue processing the next
		// instruction, or false if not, causing the loop to break
		ok = true
	)

	// TODO(mdlayher): implement:
	// - NegateA:
	//   - would require a change from uint32 registers to int32
	//     registers

	// TODO(mdlayher): add interop tests that check signedness of ALU
	// operations against kernel implementation, and make sure Go
	// implementation matches behavior

	// Fetch/decode/execute loop. Helpers that can fail (e.g. out-of-bounds
	// loads) clear ok, which terminates the loop and rejects the packet.
	for i := 0; i < len(v.filter) && ok; i++ {
		ins := v.filter[i]

		switch ins := ins.(type) {
		case ALUOpConstant:
			regA = aluOpConstant(ins, regA)
		case ALUOpX:
			regA, ok = aluOpX(ins, regA, regX)
		case Jump:
			// Unconditional relative jump.
			i += int(ins.Skip)
		case JumpIf:
			jump := jumpIf(ins, regA)
			i += jump
		case JumpIfX:
			jump := jumpIfX(ins, regA, regX)
			i += jump
		case LoadAbsolute:
			regA, ok = loadAbsolute(ins, in)
		case LoadConstant:
			regA, regX = loadConstant(ins, regA, regX)
		case LoadExtension:
			regA = loadExtension(ins, in)
		case LoadIndirect:
			regA, ok = loadIndirect(ins, in, regX)
		case LoadMemShift:
			regX, ok = loadMemShift(ins, in)
		case LoadScratch:
			regA, regX = loadScratch(ins, regScratch, regA, regX)
		case RetA:
			// Accept regA bytes of the packet.
			return int(regA), nil
		case RetConstant:
			return int(ins.Val), nil
		case StoreScratch:
			regScratch = storeScratch(ins, regScratch, regA, regX)
		case TAX:
			regX = regA
		case TXA:
			regA = regX
		default:
			return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
		}
	}

	// Fell off the end of the program (or a helper failed): accept 0 bytes.
	return 0, nil
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2016 LINE Corporation. All rights Reserved.
* LINE Corporation PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.linecorp.clay.view.transform
import android.graphics.Matrix
import android.graphics.PointF
import android.graphics.RectF
import com.linecorp.clay.graphic.calculateAngleForRotation
import com.linecorp.clay.graphic.centerPoint
/**
 * Applies two-finger rotation to a target Matrix.
 *
 * The rotation pivot is the center of the image rect (the view port mapped
 * through the current matrix); the rotation amount is the frame-to-frame
 * change of the angle between the first touch point and the midpoint of the
 * two touch points.
 */
internal class RotateTransformer(initPoint1: PointF,
                                 initPoint2: PointF?,
                                 val viewPort: RectF) : Transformer<Matrix> {

    // use for translate
    // NOTE(review): lastPoint1/lastPoint2 are written here and at the end of
    // transform() but never read afterwards — confirm whether they are still
    // needed before removing.
    var lastPoint1: PointF
    var lastPoint2: PointF? = null
    var imageRect: RectF
    var lastAngle: Double? = null

    init {
        lastPoint1 = PointF(initPoint1.x, initPoint1.y)
        imageRect = RectF()
        initPoint2?.let { point2 ->
            lastPoint2 = PointF(point2.x, point2.y)
        }
    }

    /**
     * Rotates [target] around the current image center by the change in
     * touch angle since the previous call. Requires at least two points;
     * otherwise does nothing.
     */
    override fun transform(target: Matrix, points: List<PointF>) {
        if (points.count() < 2) {
            return
        }
        val point1 = points[0]
        val point2 = points[1]
        val pointForAngle = point1
        val rotatePivot = centerPoint(point1, point2)
        target.mapRect(imageRect, viewPort)
        // angle is -90 to 90
        val newAngle = calculateAngleForRotation(pointForAngle, rotatePivot)
        if (lastAngle == null) {
            lastAngle = newAngle
        }
        // if the new angle is in different quadrants, flip the angleDelta
        var angleDelta = newAngle - lastAngle!!
        if (Math.abs(angleDelta) >= 90) {
            if (angleDelta < 0) {
                angleDelta += 180
            } else {
                angleDelta -= 180
            }
        }
        // Scale the raw delta by a fixed ratio to tune rotation sensitivity.
        val rotate = angleDelta * RATATE_RADIO
        if (!rotate.isNaN()) {
            target.postRotate(rotate.toFloat(), imageRect.centerX(), imageRect.centerY())
        }
        lastPoint1 = point1
        lastPoint2 = point2
        lastAngle = newAngle
    }

    companion object {
        // NOTE(review): name contains typos — presumably "ROTATE_RATIO".
        private const val RATATE_RADIO = 1.1f
    }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="KotlinCommonCompilerArguments">
<option name="languageVersion" value="1.1" />
<option name="apiVersion" value="1.1" />
</component>
</project> | {
"pile_set_name": "Github"
} |
unity-tweak-tool (0.0.7ubuntu4) artful; urgency=medium
* Actually remove unity-webapps-common from the deps.
-- Steve Langasek <[email protected]> Thu, 24 Aug 2017 14:15:21 -0700
unity-tweak-tool (0.0.7ubuntu3) artful; urgency=medium
* Drop unity-webapps-common, which is being removed from artful.
LP: #1712874.
-- Steve Langasek <[email protected]> Thu, 24 Aug 2017 11:29:43 -0700
unity-tweak-tool (0.0.7ubuntu2) xenial; urgency=medium
* po/fi.po: Update to latest from Launchpad as requested by
Jiri Grönroos.
-- Timo Jyrinki <[email protected]> Tue, 12 Apr 2016 15:52:54 +0300
unity-tweak-tool (0.0.7ubuntu1) xenial; urgency=low
* Release 0.0.7 to Ubuntu. (LP: #1562049)
-- Freyja Development <[email protected]> Mon, 28 Mar 2016 04:36:38 +0000
unity-tweak-tool (0.0.7) xenial; urgency=medium
[ Andrew Starr-Bochicchio ]
* debian/rules: Some minor style tweaks.
* debian/control:
- Bump Standards-Version to 3.9.5
- Build depend on python3-all to fix FTBFS (LP: #1282274).
[ J Phani Mahesh]
* New upstream release
- Update Translations from Launchpad
* UnityTweakTool/section/spaghetti/compiz.py:
- Fix crash on start (LP: #1281132).
[ Barneedhar Vigneshwar]
* UnityTweakTool/section/system.py
- Fixed missing schema- org.gnome.settings-daemon.peripherals (LP: #1490154)
[ Seth Johnson ]
* UnityTweakTool/section/unity.py
- Added support for moving the launcher to the bottom of the screen
* UnityTweakTool/section/windowmanager.py
- Add raise on click feature
* Rebuilt pot files
* setup.py
- Fix missing header icons (LP: 1467211)
* New upstream release (closes LP: #1562049)
-- Seth Johnson <[email protected]> Sun, 27 Mar 2016 21:22:06 -0700
unity-tweak-tool (0.0.6ubuntu3) wily; urgency=medium
* Fix the schema used for touchpad settings to match the current GNOME
as present in Ubuntu 15.10. LP: #1490154.
-- Steve Langasek <[email protected]> Thu, 17 Sep 2015 14:55:21 -0700
unity-tweak-tool (0.0.6ubuntu2) utopic; urgency=medium
* Backport upstream commit adding support for minimizing
single window applications when clicking their icon on
the launcher (LP: #1298487).
-- Andrew Starr-Bochicchio <[email protected]> Thu, 27 Mar 2014 12:39:22 -0400
unity-tweak-tool (0.0.6ubuntu1) trusty; urgency=medium
* debian/patches/lp1281132.patch: Backport upstream commit
that fixes crash (LP: #1281132). This patch is applied
directly since this is a native package, but a copy was
kept in debian/ to be clear about what has been applied.
* debian/rules: Some minor style tweaks.
* debian/control:
- Bump Standards-Version to 3.9.5
- Build depend on python3-all to fix FTBFS (LP: #1282274).
-- Andrew Starr-Bochicchio <[email protected]> Thu, 20 Feb 2014 23:41:09 -0500
unity-tweak-tool (0.0.6) saucy; urgency=high
[ Barneedhar Vigneshwar]
* New upstream bug-fix only release (LP: #1235752)
- Trigger new build of pot files
* UnityTweakTool/section/spaghetti/unity.py
- unity-tweak-tool crashed with signal 5 in g_settings_get_value() (LP: #1235432)
[ J Phani Mahesh]
* UnityTweakTool/__init__.py
- Fix NameError: name '_formatter' is not defined (LP: #1232515)
-- Barneedhar Vigneshwar <[email protected]> Sat, 05 Oct 2013 22:45:24 +0530
unity-tweak-tool (0.0.5) saucy; urgency=low
[ J Phani Mahesh ]
* New upstream release (LP: #1226059)
- New application icon
- Show error dialog when schemas are missing instead of crashing
- Trigger new build of pot files
* UnityTweakTool/section/unity.py
- Fix Show recently used and more suggestions in dash search (LP: #1166294)
- Fix Launcher reveal sensitivity scale update issues (LP: #1168863)
* UnityTweakTool/elements/colorchooser.py
- Fix TypeError in get_rgba() (LP: #1165627)
- Fix segmentation fault on selecting custom launcher (LP: #1190398)
* UnityTweakTool/elements/option.py
- Fix "Restore defaults" button (LP: #1186634)
* UnityTweakTool/__init__.py
- Fix unity-tweak-tool crashed with dbus.exceptions.DBusException in
call_blocking() (LP: #1168738)
- Fix FileNotFoundError (LP: #1225463)
- Fix dbus.exceptions.DBusException (LP: #1170571)
* data/unity.ui
- Remove Panel transparency switch (LP: #1168836)
- Remove Launcher transparency switch (LP: #1168834)
[ Barneedhar Vigneshwar ]
* UnityTweakTool/section/unity.py
- Fix 'Can't set background blur to static' (LP: #1167343)
- Fix non-working Launcher only on primary desktop (LP: #1173977)
* UnityTweakTool/section/sphagetti/compiz.py
- Fix TypeError in color_to_hash() (LP: #1166884)
-- Barneedhar Vigneshwar <[email protected]> Mon, 16 Sep 2013 19:34:38 +0530
unity-tweak-tool (0.0.4) raring; urgency=medium
[ Barneedhar Vigneshwar ]
* New upstream release (LP: #1165141)
* data/windowmanager.ui
- Fix missing signal in the auto-raise switch (LP: #1160782)
* UnityTweakTool/section/sphagetti/theme.py
- Fix KeyError when fetching window themes (LP: #1146122)
* UnityTweakTool/section/unity.py
- Fix show-desktop switch (LP: #1156266)
- Fix 'switch between workspace' switch (LP: #1156236)
[ J Phani Mahesh ]
* debian/source_unity-tweak-tool.py
- Update Apport hook to file crash bugs against the package by default
* setup.py
- Install translated pot files
* unity-tweak-tool
- Fixed and renamed -r parameter to --reset-unity in the wrapper
* UnityTweakTool/__init__.py
- Prevent multiple instances using dbus
* UnityTweakTool/elements/radio.py
- Fix AssertionError in __init__() (LP: #1156201)
- Fix AssertionError due to missing overlay-scrollbar package (LP: #1156337)
* UnityTweakTool/section/sphagetti/compiz.py
- Fix resetting transparency values (LP: #1099067)
* UnityTweakTool/section/sphagetti/unity.py
- Fix AttributeError in refresh(): 'NoneType' object has no attribute 'get_boolean' (LP: #1155331)
[Sam Hewitt]
* debian/control
- Added dependency on python3-cairo (LP: #1156789)
* UnityTweakTool/section/sphagetti/unity.py
- Fixed unresponsive 'battery-life' switch (LP: #1129262)
-- Barneedhar Vigneshwar <[email protected]> Fri, 05 Apr 2013 23:05:49 +0530
unity-tweak-tool (0.0.3) raring; urgency=low
* New upstream release
* Closes needs-packaging bug (LP: #1126433)
-- Barneedhar Vigneshwar <[email protected]> Fri, 15 Feb 2013 20:33:41 +0530
unity-tweak-tool (0.0.2) raring; urgency=low
* New upstream release
-- Barneedhar Vigneshwar <[email protected]> Fri, 11 Jan 2013 14:30:53 +0530
unity-tweak-tool (0.0.1) raring; urgency=low
* Initial release.
-- Barneedhar Vigneshwar <[email protected]> Sun, 24 Dec 2012 16:48:06 +0530
| {
"pile_set_name": "Github"
} |
// Add some local palette classes so you can do -bg-warning -text-warning etc. Even -bg-1 if you want to paint by numbers.

// Emits `.-bg-<name>` (background + contrasting text, with readable link
// colors inside paragraphs) and `.-text-<name>` utility classes for one
// named color.
@mixin palette-variant($color-name, $color-value) {
  // Pick black/white text depending on the background's luminance.
  $text-color: color-yiq($color-value);
  // Blend the theme blue toward the text color based on background lightness
  // so links stay legible on both light and dark backgrounds.
  $link-color: mix($blue, $text-color, lightness($color-value));
  $link-hover-color: rgba($link-color, .5) !default;

  .-bg-#{$color-name} {
    color: $text-color;
    background-color: $color-value;
  }

  // Make links in paragraphs stand out more.
  @include link-variant(".-bg-#{$color-name} p > a", $link-color, $link-hover-color, false);

  .-text-#{$color-name} {
    color: $color-value;
  }
}

// Generate variants for every named color, theme color, and gray.
@each $color, $value in $colors {
  @include palette-variant($color, $value);
}

@each $color, $value in $theme-colors {
  @include palette-variant($color, $value);
}

@each $color, $value in $grays {
  @include palette-variant($color, $value);
}

// This allows "painting by numbers", i.e. picking colors by a shortcode Ordinal:
// `.-bg-0`, `.-bg-1`, ... map to the entries of $td-box-colors.
@for $i from 1 through length($td-box-colors) {
  $value: nth($td-box-colors, $i);
  $name: $i - 1;
  $text-color: color-yiq($value);
  @include palette-variant($name, $value);
}
"pile_set_name": "Github"
} |
$.validator.addMethod( "time", function( value, element ) {
return this.optional( element ) || /^([01]\d|2[0-3]|[0-9])(:[0-5]\d){1,2}$/.test( value );
}, "Please enter a valid time, between 00:00 and 23:59" );
| {
"pile_set_name": "Github"
} |
#include "z_en_rl.h"
#define ROOM 0x00
#define FLAGS 0x00000010
void EnRl_Init(EnRl* this, GlobalContext* globalCtx);
void EnRl_Destroy(EnRl* this, GlobalContext* globalCtx);
void EnRl_Update(EnRl* this, GlobalContext* globalCtx);
void EnRl_Draw(EnRl* this, GlobalContext* globalCtx);
/*
const ActorInit En_Rl_InitVars =
{
ACTOR_EN_RL,
ACTORTYPE_NPC,
ROOM,
FLAGS,
OBJECT_RL,
sizeof(EnRl),
(ActorFunc)EnRl_Init,
(ActorFunc)EnRl_Destroy,
(ActorFunc)EnRl_Update,
(ActorFunc)EnRl_Draw,
};
*/
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/EnRl_Destroy.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE72D0.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7358.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE73D8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE744C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7494.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE74B4.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE74FC.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7544.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7590.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7668.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7698.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE772C.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7798.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE77B8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE77F8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7838.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7878.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE78D4.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7954.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE79A4.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7AF8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7BF8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7C64.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7C94.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7CE8.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7D40.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7D94.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/EnRl_Update.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/EnRl_Init.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7FD0.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/func_80AE7FDC.s")
#pragma GLOBAL_ASM("asm/non_matchings/overlays/actors/ovl_En_Rl/EnRl_Draw.s")
| {
"pile_set_name": "Github"
} |
/*
This file is part of Darling.
Copyright (C) 2019 Lubos Dolezel
Darling is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Darling is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Darling. If not, see <http://www.gnu.org/licenses/>.
*/
#include <Foundation/Foundation.h>
// Opaque internal helper class; no members are declared in this header.
@interface QTMovieViewInternal : NSObject
@end
| {
"pile_set_name": "Github"
} |
/*====================================================================*
- Copyright (C) 2001 Leptonica. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials
- provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY
- CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*====================================================================*/
/*!
* \file jp2kheader.c
* <pre>
*
* Read header
* l_int32 readHeaderJp2k()
* l_int32 freadHeaderJp2k()
* l_int32 readHeaderMemJp2k()
* l_int32 fgetJp2kResolution()
*
* Note: these function read image metadata from a jp2k file, without
* using any jp2k libraries.
*
* To read and write jp2k data, using the OpenJPEG library
* (http://www.openjpeg.org), see jpegio.c.
* </pre>
*/
#include <string.h>
#include <math.h>
#include "allheaders.h"
#ifndef NO_CONSOLE_IO
#define DEBUG_IHDR 0
#endif /* ~NO_CONSOLE_IO */
/* --------------------------------------------*/
#if USE_JP2KHEADER /* defined in environ.h */
/* --------------------------------------------*/
/* a sanity check on the size read from file */
static const l_int32 MAX_JP2K_WIDTH = 100000;
static const l_int32 MAX_JP2K_HEIGHT = 100000;
/*--------------------------------------------------------------------*
* Stream interface *
*--------------------------------------------------------------------*/
/*!
 * \brief   readHeaderJp2k()
 *
 * \param[in]    filename
 * \param[out]   pw    [optional] image width, in pixels
 * \param[out]   ph    [optional] image height, in pixels
 * \param[out]   pbps  [optional] bits/sample
 * \param[out]   pspp  [optional] samples/pixel
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) Convenience wrapper: opens %filename and delegates to
 *          freadHeaderJp2k().
 * </pre>
 */
l_int32
readHeaderJp2k(const char *filename,
               l_int32    *pw,
               l_int32    *ph,
               l_int32    *pbps,
               l_int32    *pspp)
{
l_int32  ret;
FILE    *fp;

    PROCNAME("readHeaderJp2k");

        /* Zero all optional outputs so they are defined on failure */
    if (pw) *pw = 0;
    if (ph) *ph = 0;
    if (pbps) *pbps = 0;
    if (pspp) *pspp = 0;
    if (!filename)
        return ERROR_INT("filename not defined", procName, 1);

    if ((fp = fopenReadStream(filename)) == NULL)
        return ERROR_INT("image file not found", procName, 1);
    ret = freadHeaderJp2k(fp, pw, ph, pbps, pspp);
    fclose(fp);
    return ret;
}
/*!
 * \brief   freadHeaderJp2k()
 *
 * \param[in]    fp    file stream opened for read
 * \param[out]   pw    [optional] image width, in pixels
 * \param[out]   ph    [optional] image height, in pixels
 * \param[out]   pbps  [optional] bits/sample
 * \param[out]   pspp  [optional] samples/pixel
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) Reads only the first 80 bytes of the stream, which is enough
 *          to locate the Image Header box, then rewinds the stream.
 * </pre>
 */
l_int32
freadHeaderJp2k(FILE     *fp,
                l_int32  *pw,
                l_int32  *ph,
                l_int32  *pbps,
                l_int32  *pspp)
{
l_uint8  buf[80];  /* just need the first 80 bytes */
l_int32  nread;

    PROCNAME("freadHeaderJp2k");

        /* Zero all optional outputs so they are defined on failure */
    if (pw) *pw = 0;
    if (ph) *ph = 0;
    if (pbps) *pbps = 0;
    if (pspp) *pspp = 0;
    if (!fp)
        return ERROR_INT("fp not defined", procName, 1);

    rewind(fp);
    nread = fread(buf, 1, sizeof(buf), fp);
    if (nread != sizeof(buf))
        return ERROR_INT("read failure", procName, 1);

    readHeaderMemJp2k(buf, sizeof(buf), pw, ph, pbps, pspp);
    rewind(fp);
    return 0;
}
/*!
 * \brief   readHeaderMemJp2k()
 *
 * \param[in]    data
 * \param[in]    size  at least 80
 * \param[out]   pw    [optional] image width, in pixels
 * \param[out]   ph    [optional] image height, in pixels
 * \param[out]   pbps  [optional] bits/sample
 * \param[out]   pspp  [optional] samples/pixel
 * \return  0 if OK, 1 on error
 *
 * <pre>
 * Notes:
 *      (1) The ISO/IEC reference for jpeg2000 is
 *               http://www.jpeg.org/public/15444-1annexi.pdf
 *          and the file format syntax begins at page 127.
 *      (2) The Image Header Box begins with 'ihdr' = 0x69686472 in
 *          big-endian order.  This typically, but not always, starts
 *          byte 44, with the big-endian data fields beginning at byte 48:
 *               h:    4 bytes
 *               w:    4 bytes
 *               spp:  2 bytes
 *               bps:  1 byte   (contains bps - 1)
 * </pre>
 */
l_int32
readHeaderMemJp2k(const l_uint8  *data,
                  size_t          size,
                  l_int32        *pw,
                  l_int32        *ph,
                  l_int32        *pbps,
                  l_int32        *pspp)
{
l_int32  format, val, w, h, bps, spp, loc, found, windex;
l_uint8  ihdr[4] = {0x69, 0x68, 0x64, 0x72};  /* 'ihdr' */

    PROCNAME("readHeaderMemJp2k");

    if (pw) *pw = 0;
    if (ph) *ph = 0;
    if (pbps) *pbps = 0;
    if (pspp) *pspp = 0;
    if (!data)
        return ERROR_INT("data not defined", procName, 1);
    if (size < 80)
        return ERROR_INT("size < 80", procName, 1);
    findFileFormatBuffer(data, &format);
    if (format != IFF_JP2)
        return ERROR_INT("not jp2 file", procName, 1);

        /* Search for beginning of the Image Header Box: 'ihdr' */
    arrayFindSequence(data, size, ihdr, 4, &loc, &found);
    if (!found)
        return ERROR_INT("image parameters not found", procName, 1);
#if  DEBUG_IHDR
    if (loc != 44)
        L_INFO("Beginning of ihdr is at byte %d\n", procName, loc);
#endif  /* DEBUG_IHDR */

        /* Read the big-endian h, w, spp and bps fields that follow 'ihdr'.
         * windex is a 32-bit-word index; the word after 'ihdr' holds h.
         * NOTE(review): the l_uint32*/l_uint16* casts assume loc is
         * 4-byte aligned (the common loc == 44 case) — confirm for
         * nonstandard files where 'ihdr' is found at an odd offset. */
    windex = loc / 4 + 1;
    val = *((l_uint32 *)data + windex);
    h = convertOnLittleEnd32(val);
    val = *((l_uint32 *)data + windex + 1);
    w = convertOnLittleEnd32(val);
    val = *((l_uint16 *)data + 2 * (windex + 2));
    spp = convertOnLittleEnd16(val);
    bps = *(data + 4 * (windex + 2) + 2) + 1;  /* stored as bps - 1 */
    if (w > MAX_JP2K_WIDTH || h > MAX_JP2K_HEIGHT)
        return ERROR_INT("unrealistically large sizes", procName, 1);
    if (pw) *pw = w;
    if (ph) *ph = h;
    if (pbps) *pbps = bps;
    if (pspp) *pspp = spp;
    return 0;
}
/*
 *  fgetJp2kResolution()
 *
 *      Input:  fp (file stream opened for read)
 *              &xres, &yres (<return> resolution in ppi)
 *      Return: 0 if found; 1 if not found or on error
 *
 *  Notes:
 *      (1) If the capture resolution field is not set, this is not an error;
 *          the returned resolution values are 0 (designating 'unknown').
 *      (2) Side-effect: this rewinds the stream.
 *      (3) The capture resolution box is optional in the jp2 spec, and
 *          it is usually not written.
 *      (4) The big-endian data fields that follow the 4 bytes of 'resc' are:
 *               ynum:    2 bytes
 *               ydenom:  2 bytes
 *               xnum:    2 bytes
 *               xdenom:  2 bytes
 *               yexp:    1 byte
 *               xexp:    1 byte
 */
l_int32
fgetJp2kResolution(FILE     *fp,
                   l_int32  *pxres,
                   l_int32  *pyres)
{
l_uint8    xexp, yexp;
l_uint8   *data;
l_uint16   xnum, ynum, xdenom, ydenom;  /* these jp2k fields are 2-byte */
l_int32    loc, found;
l_uint8    resc[4] = {0x72, 0x65, 0x73, 0x63};  /* 'resc' */
size_t     nbytes;
l_float64  xres, yres;

    PROCNAME("fgetJp2kResolution");

    if (pxres) *pxres = 0;
    if (pyres) *pyres = 0;
    if (!pxres || !pyres)
        return ERROR_INT("&xres and &yres not both defined", procName, 1);
    if (!fp)
        return ERROR_INT("stream not opened", procName, 1);

        /* Read the entire file into memory and restore the stream */
    rewind(fp);
    data = l_binaryReadStream(fp, &nbytes);
    rewind(fp);

        /* Search for the start of the first capture resolution box: 'resc' */
    arrayFindSequence(data, nbytes, resc, 4, &loc, &found);
    if (!found) {
        L_WARNING("image resolution not found\n", procName);
        LEPT_FREE(data);
        return 1;
    }

        /* Extract the fields and calculate the resolution in pixels/meter.
         * See section 1.5.3.7.1 of JPEG 2000 ISO/IEC 15444-1 spec.
         * NOTE(review): each field is assembled LSB-first from the
         * big-endian file bytes and then passed through
         * convertOnLittleEnd16(), which swaps only on little-endian
         * hosts — verify this yields the intended value on big-endian
         * machines. */
    ynum = data[loc + 5] << 8 | data[loc + 4];
    ynum = convertOnLittleEnd16(ynum);
    ydenom = data[loc + 7] << 8 | data[loc + 6];
    ydenom = convertOnLittleEnd16(ydenom);
    xnum = data[loc + 9] << 8 | data[loc + 8];
    xnum = convertOnLittleEnd16(xnum);
    xdenom = data[loc + 11] << 8 | data[loc + 10];
    xdenom = convertOnLittleEnd16(xdenom);
    yexp = data[loc + 12];
    xexp = data[loc + 13];
    yres = ((l_float64)ynum / (l_float64)ydenom) * pow(10.0, (l_float64)yexp);
    xres = ((l_float64)xnum / (l_float64)xdenom) * pow(10.0, (l_float64)xexp);

        /* Convert from pixels/meter to ppi */
    yres *= (300.0 / 11811.0);
    xres *= (300.0 / 11811.0);
    *pyres = (l_int32)(yres + 0.5);
    *pxres = (l_int32)(xres + 0.5);

    LEPT_FREE(data);
    return 0;
}
/* --------------------------------------------*/
#endif /* USE_JP2KHEADER */
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
/* //device/apps/common/assets/res/any/strings.xml
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
-->
<resources xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
    <!-- Locale-specific translations (the string values are Polish) of the
         three "MM#n" SIM/device rejection messages.  Each msgid ties the
         entry to the corresponding source string in the default locale;
         do not edit the msgid values by hand. -->
    <string name="mmcc_imsi_unknown_in_hlr" msgid="5424518490295341205">"MM#2 – karta SIM nieobsługiwana"</string>
    <string name="mmcc_illegal_ms" msgid="3527626511418944853">"MM#3 – niedozwolona karta SIM"</string>
    <string name="mmcc_illegal_me" msgid="3948912590657398489">"MM#6 – telefon niedozwolony"</string>
</resources>
| {
"pile_set_name": "Github"
} |
<?php
final class PhabricatorPreambleTestCase
  extends PhabricatorTestCase {

  /**
   * @phutil-external-symbol function preamble_get_x_forwarded_for_address
   */
  public function testXForwardedForLayers() {
    // Each case gives: the raw "X-Forwarded-For" header value, how many
    // trusted load-balancer layers sit in front of us, and which address
    // we expect to resolve once untrusted entries are discarded.
    $cases = array(
      // Single load balancer appending the client address.
      array(
        'header' => '1.2.3.4',
        'layers' => 1,
        'expect' => '1.2.3.4',
      ),

      // The request reached the LB already carrying an "X-Forwarded-For"
      // value -- possibly a legitimate CDN, possibly a client spoofing
      // the header. Either way, only the LB-appended entry is trusted.
      array(
        'header' => '9.9.9.9, 1.2.3.4',
        'layers' => 1,
        'expect' => '1.2.3.4',
      ),

      // Two layers of load balancers in front of us.
      array(
        'header' => '9.9.9.9, 1.2.3.4',
        'layers' => 2,
        'expect' => '9.9.9.9',
      ),

      // Two LB layers plus an untrusted client-supplied entry.
      array(
        'header' => '8.8.8.8, 9.9.9.9, 1.2.3.4',
        'layers' => 2,
        'expect' => '9.9.9.9',
      ),

      // Two LB layers configured, but the request originated inside the
      // network and only traversed one hop.
      array(
        'header' => '1.2.3.4',
        'layers' => 2,
        'expect' => '1.2.3.4',
      ),

      array(
        'header' => 'A, B, C, D, E, F, G, H, I',
        'layers' => 7,
        'expect' => 'C',
      ),
    );

    foreach ($cases as $case) {
      $result = preamble_get_x_forwarded_for_address(
        $case['header'],
        $case['layers']);

      $this->assertEqual(
        $case['expect'],
        $result,
        pht(
          'Address after stripping %d layers from: %s',
          $case['layers'],
          $case['header']));
    }
  }

}
| {
"pile_set_name": "Github"
} |
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.thrift;
import junit.framework.TestCase;
import thrift.test.enumcontainers.EnumContainersTestConstants;
import thrift.test.enumcontainers.GodBean;
import thrift.test.enumcontainers.GreekGodGoddess;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
public class TestEnumContainers extends TestCase {

    /**
     * Verifies that enum-keyed containers on the generated bean survive both
     * copy construction and a serialize/deserialize round trip, and that the
     * bean uses EnumSet/EnumMap for enum-typed containers while falling back
     * to HashMap/HashSet elsewhere.
     */
    public void testEnumContainers() throws Exception {
        final GodBean original = new GodBean();
        original.addToGoddess(GreekGodGoddess.HERA);
        original.getGoddess().add(GreekGodGoddess.APHRODITE);
        original.putToPower(GreekGodGoddess.ZEUS, 1000);
        original.getPower().put(GreekGodGoddess.HERA, 333);
        original.putToByAlias("Mr. Z", GreekGodGoddess.ZEUS);
        original.addToImages("Baths of Aphrodite 01.jpeg");

        // Copy constructor must produce independent container instances.
        final GodBean copied = new GodBean(original);

        // Round-trip through serialization into a fresh bean.
        final GodBean roundTripped = new GodBean();
        final TSerializer serializer = new TSerializer();
        final TDeserializer deserializer = new TDeserializer();
        final byte[] encoded = serializer.serialize(original);
        deserializer.deserialize(roundTripped, encoded);

        assertTrue(original.getGoddess() != copied.getGoddess());
        assertTrue(original.getPower() != copied.getPower());
        assertTrue(original.getGoddess() != roundTripped.getGoddess());
        assertTrue(original.getPower() != roundTripped.getPower());

        for (final GodBean bean : new GodBean[]{original, copied, roundTripped}) {
            // Set contents and implementation type.
            assertTrue(bean.getGoddess().contains(GreekGodGoddess.HERA));
            assertFalse(bean.getGoddess().contains(GreekGodGoddess.POSEIDON));
            assertTrue(bean.getGoddess() instanceof EnumSet);

            // Map contents and implementation type.
            assertEquals(Integer.valueOf(1000), bean.getPower().get(GreekGodGoddess.ZEUS));
            assertEquals(Integer.valueOf(333), bean.getPower().get(GreekGodGoddess.HERA));
            assertTrue(bean.getPower() instanceof EnumMap);

            // Non-enum-keyed containers keep the default implementations.
            assertTrue(bean.getByAlias() instanceof HashMap);
            assertTrue(bean.getImages() instanceof HashSet);
        }
    }

    /** Checks generated constants use enum-optimized container types. */
    public void testEnumConstants() {
        assertEquals("lightning bolt", EnumContainersTestConstants.ATTRIBUTES.get(GreekGodGoddess.ZEUS));
        assertTrue(EnumContainersTestConstants.ATTRIBUTES instanceof EnumMap);

        assertTrue(EnumContainersTestConstants.BEAUTY.contains(GreekGodGoddess.APHRODITE));
        assertTrue(EnumContainersTestConstants.BEAUTY instanceof EnumSet);
    }
}
| {
"pile_set_name": "Github"
} |
" vim-lsp registration for the `pyls-all` Python language server.
" Every field below resolves through lsp_settings#get(), so user-supplied
" settings override these defaults at registration time.
augroup vim_lsp_settings_pyls_all
au!
" The lambdas delay evaluation of cmd/root_uri until the server is started.
LspRegisterServer {
\ 'name': 'pyls-all',
\ 'cmd': {server_info->lsp_settings#get('pyls-all', 'cmd', [lsp_settings#exec_path('pyls-all')])},
\ 'root_uri':{server_info->lsp_settings#get('pyls-all', 'root_uri', lsp_settings#root_uri('pyls-all'))},
\ 'initialization_options': lsp_settings#get('pyls-all', 'initialization_options', v:null),
\ 'allowlist': lsp_settings#get('pyls-all', 'allowlist', ['python']),
\ 'blocklist': lsp_settings#get('pyls-all', 'blocklist', []),
\ 'config': lsp_settings#get('pyls-all', 'config', lsp_settings#server_config('pyls-all')),
\ 'workspace_config': lsp_settings#get('pyls-all', 'workspace_config', {}),
\ 'semantic_highlight': lsp_settings#get('pyls-all', 'semantic_highlight', {}),
\ }
augroup END
| {
"pile_set_name": "Github"
} |
/**
******************************************************************************
* @file stm32f4xx_hal_dma_ex.h
* @author MCD Application Team
* @version V1.7.0
* @date 17-February-2017
* @brief Header file of DMA HAL extension module.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F4xx_HAL_DMA_EX_H
#define __STM32F4xx_HAL_DMA_EX_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "stm32f4xx_hal_def.h"
/** @addtogroup STM32F4xx_HAL_Driver
* @{
*/
/** @addtogroup DMAEx
* @{
*/
/* Exported types ------------------------------------------------------------*/
/** @defgroup DMAEx_Exported_Types DMAEx Exported Types
* @brief DMAEx Exported types
* @{
*/
/**
 * @brief  HAL DMA Memory definition.
 *         Selects which of the two memory targets of a multi-buffer DMA
 *         transfer is addressed (see HAL_DMAEx_ChangeMemory()).
 */
typedef enum
{
MEMORY0 = 0x00U, /*!< Memory 0: first memory target */
MEMORY1 = 0x01U /*!< Memory 1: second memory target */
}HAL_DMA_MemoryTypeDef;
/**
* @}
*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup DMAEx_Exported_Functions DMAEx Exported Functions
* @brief DMAEx Exported functions
* @{
*/
/** @defgroup DMAEx_Exported_Functions_Group1 Extended features functions
* @brief Extended features functions
* @{
*/
/* IO operation functions *******************************************************/
/* Start a multi-buffer DMA transfer; SecondMemAddress provides the address
   of the second memory target (polling variant). */
HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength);
/* Interrupt-driven variant of HAL_DMAEx_MultiBufferStart(). */
HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength);
/* Set a new Address for the selected memory target (MEMORY0/MEMORY1). */
HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory);
/**
* @}
*/
/**
* @}
*/
/* Private functions ---------------------------------------------------------*/
/** @defgroup DMAEx_Private_Functions DMAEx Private Functions
* @brief DMAEx Private functions
* @{
*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /*__STM32F4xx_HAL_DMA_EX_H*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| {
"pile_set_name": "Github"
} |
/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
* Created: Wed Feb 14 12:32:32 2001 by [email protected]
*/
/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <[email protected]>
*/
#ifndef __TDFX_H__
#define __TDFX_H__
/* General customization:
 */
/* Driver identification strings consumed by the DRM driver template
   (see the file header: 3dfx DRM template customization). */
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "tdfx"
#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
#define DRIVER_DATE "20010216"
/* Driver interface version, reported as MAJOR.MINOR.PATCHLEVEL. */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#endif
| {
"pile_set_name": "Github"
} |
package scripting
import (
"context"
"github.com/mongodb/jasper"
"github.com/mongodb/jasper/options"
"github.com/pkg/errors"
)
// Harness provides an interface to execute code in a
// scripting environment such as a python virtual
// environment. Implementations should make it possible to execute
// either locally or on remote systems.
type Harness interface {
// ID returns a unique ID for the underlying environment. This
// should match the ID produced by the underlying options
// implementation that produced this Harness.
ID() string
// Setup initializes the environment, and should be safe to
// call multiple times (idempotent).
Setup(context.Context) error
// Run executes a command (as arguments) with the environment's
// interpreter.
Run(context context.Context, args []string) error
// RunScript takes the body of a script and should write that
// data to a file and then runs that script directly.
RunScript(ctx context.Context, script string) error
// Build will run the environments native build system to
// generate some kind of build artifact from the scripting
// environment. Pass a directory in addition to a list of
// arguments to describe any arguments to the build system.
// The Build operation returns the path of the build artifact
// produced by the operation.
Build(ctx context.Context, dir string, args []string) (string, error)
// Test provides a way for scripting harness to run tests. The
// first argument should be a directory, and the successive
// (optional) arguments should either be arguments to the test
// runner or names of specific tests to run, depending on the
// implementation.
Test(ctx context.Context, dir string, opts ...TestOptions) ([]TestResult, error)
// Cleanup should remove the files created by the scripting environment.
Cleanup(context.Context) error
}
// NewHarness constructs a scripting harness that wraps the manager. Use this
// factory function to build new harnesses, which are not cached in the
// manager (unlike harnesses constructed directly with
// Manager.CreateScripting) but are otherwise totally functional.
func NewHarness(m jasper.Manager, env options.ScriptingHarness) (Harness, error) {
	// Reject invalid configurations before selecting an implementation.
	if err := env.Validate(); err != nil {
		return nil, errors.WithStack(err)
	}

	// Dispatch on the concrete options type to the matching environment.
	if opts, ok := env.(*options.ScriptingPython); ok {
		return &pythonEnvironment{opts: opts, manager: m}, nil
	}
	if opts, ok := env.(*options.ScriptingGolang); ok {
		return &golangEnvironment{opts: opts, manager: m}, nil
	}
	if opts, ok := env.(*options.ScriptingRoswell); ok {
		return &roswellEnvironment{opts: opts, manager: m}, nil
	}

	return nil, errors.Errorf("scripting environment %T (%s) is not supported", env, env.Type())
}
// HarnessCache provides an internal local cache for scripting
// environments.
type HarnessCache interface {
// Create creates a new Harness from the given options and adds it to the
// cache with the given ID.
// NOTE(review): the signature takes no explicit ID argument; presumably
// the ID comes from the options implementation — confirm against callers.
Create(jasper.Manager, options.ScriptingHarness) (Harness, error)
// Get returns the matching Harness by ID.
Get(id string) (Harness, error)
// Add adds a Harness to the cache with the given ID.
Add(id string, h Harness) error
// Check returns whether a Harness with the given ID exists in the cache.
Check(id string) bool
}
////////////////////////////////////////////////////////////////////////
//
// internal
// remote extends jasper.Manager with scripting-harness management methods.
// It is unexported: intended for implementations within this module only.
type remote interface {
jasper.Manager
CreateScripting(context.Context, options.ScriptingHarness) (Harness, error)
GetScripting(context.Context, string) (Harness, error)
}
| {
"pile_set_name": "Github"
} |
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/base/utils/random-number-generator.h"
namespace v8 {
namespace internal {
// Seeds covering negative, small, and large values.
static const int64_t kRandomSeeds[] = {-1, 1, 42, 100, 1234567890, 987654321};
// Each isolate's random number generator must report the seed that was set
// via FLAG_random_seed before the isolate was created.
TEST(RandomSeedFlagIsUsed) {
for (unsigned n = 0; n < arraysize(kRandomSeeds); ++n) {
FLAG_random_seed = static_cast<int>(kRandomSeeds[n]);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* i = v8::Isolate::New(create_params);
v8::base::RandomNumberGenerator& rng =
*reinterpret_cast<Isolate*>(i)->random_number_generator();
CHECK_EQ(kRandomSeeds[n], rng.initial_seed());
i->Dispose();
}
}
// Chi-squared statistic for observing m zero-bits out of n trials under the
// null hypothesis that each bit is 0 with probability 1/2 (two cells:
// zero/one, hence one degree of freedom).
double ChiSquared(int m, int n) {
  const double expected = n / 2.0;
  const double deviation_zeros = m - expected;
  const double term_zeros = deviation_zeros * deviation_zeros * 2.0 / n;
  const double deviation_ones = (n - m) - expected;
  const double term_ones = deviation_ones * deviation_ones * 2.0 / n;
  return term_zeros + term_ones;
}
// Test for correlations between recent bits from the PRNG, or bits that are
// biased.
// Strategy: try to predict bit `random_bit` of each new PRNG output from
// either a constant (0/1) or one bit of a recent output; a chi-squared test
// checks the prediction succeeds only ~50% of the time.
void RandomBitCorrelation(int random_bit) {
// Fixed seed so the statistical thresholds below are reproducible.
FLAG_random_seed = 31415926;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
v8::base::RandomNumberGenerator* rng = i_isolate->random_number_generator();
// Reduced workload for debug builds.
#ifdef DEBUG
const int kHistory = 2;
const int kRepeats = 1000;
#else
const int kHistory = 8;
const int kRepeats = 10000;
#endif
// Most recent kHistory PRNG outputs; history[0] is the newest.
uint32_t history[kHistory];
// The predictor bit is either constant 0 or 1 (encoded as -2 and -1), or one
// of the bits from the history.
for (int predictor_bit = -2; predictor_bit < 32; predictor_bit++) {
// The predicted bit is one of the bits from the PRNG.
for (int ago = 0; ago < kHistory; ago++) {
// We don't want to check whether each bit predicts itself.
if (ago == 0 && predictor_bit == random_bit) continue;
// Enter the new random value into the history.
for (int i = ago; i >= 0; i--) {
history[i] = bit_cast<uint32_t>(rng->NextInt());
}
// Find out how many of the bits are the same as the prediction bit.
int m = 0;
for (int i = 0; i < kRepeats; i++) {
v8::HandleScope scope(isolate);
uint32_t random = bit_cast<uint32_t>(rng->NextInt());
// Shift the history window and insert the newest value at index 0.
for (int j = ago - 1; j >= 0; j--) history[j + 1] = history[j];
history[0] = random;
int predicted;
if (predictor_bit >= 0) {
predicted = (history[ago] >> predictor_bit) & 1;
} else {
predicted = predictor_bit == -2 ? 0 : 1;
}
int bit = (random >> random_bit) & 1;
if (bit == predicted) m++;
}
// Chi squared analysis for k = 2 (2 states: same/not-same) and one
// degree of freedom (k - 1).
double chi_squared = ChiSquared(m, kRepeats);
// Print diagnostics before the CHECK below aborts the test.
if (chi_squared > 24) {
int percent = static_cast<int>(m * 100.0 / kRepeats);
if (predictor_bit < 0) {
PrintF("Bit %d is %d %d%% of the time\n", random_bit,
predictor_bit == -2 ? 0 : 1, percent);
} else {
PrintF("Bit %d is the same as bit %d %d ago %d%% of the time\n",
random_bit, predictor_bit, ago, percent);
}
}
// For 1 degree of freedom this corresponds to 1 in a million. We are
// running ~8000 tests, so that would be surprising.
CHECK_LE(chi_squared, 24);
// If the predictor bit is a fixed 0 or 1 then it makes no sense to
// repeat the test with a different age.
if (predictor_bit < 0) break;
}
}
isolate->Dispose();
}
// Instantiate one cctest per bit position of the 32-bit PRNG output.
#define TEST_RANDOM_BIT(BIT) \
TEST(RandomBitCorrelations##BIT) { RandomBitCorrelation(BIT); }
TEST_RANDOM_BIT(0)
TEST_RANDOM_BIT(1)
TEST_RANDOM_BIT(2)
TEST_RANDOM_BIT(3)
TEST_RANDOM_BIT(4)
TEST_RANDOM_BIT(5)
TEST_RANDOM_BIT(6)
TEST_RANDOM_BIT(7)
TEST_RANDOM_BIT(8)
TEST_RANDOM_BIT(9)
TEST_RANDOM_BIT(10)
TEST_RANDOM_BIT(11)
TEST_RANDOM_BIT(12)
TEST_RANDOM_BIT(13)
TEST_RANDOM_BIT(14)
TEST_RANDOM_BIT(15)
TEST_RANDOM_BIT(16)
TEST_RANDOM_BIT(17)
TEST_RANDOM_BIT(18)
TEST_RANDOM_BIT(19)
TEST_RANDOM_BIT(20)
TEST_RANDOM_BIT(21)
TEST_RANDOM_BIT(22)
TEST_RANDOM_BIT(23)
TEST_RANDOM_BIT(24)
TEST_RANDOM_BIT(25)
TEST_RANDOM_BIT(26)
TEST_RANDOM_BIT(27)
TEST_RANDOM_BIT(28)
TEST_RANDOM_BIT(29)
TEST_RANDOM_BIT(30)
TEST_RANDOM_BIT(31)
// Avoid leaking the helper macro into other translation-unit includes.
#undef TEST_RANDOM_BIT
} // namespace internal
} // namespace v8
| {
"pile_set_name": "Github"
} |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("SampleClient")]
[assembly: AssemblyProduct("SampleClient")]
[assembly: AssemblyCopyright("Copyright © Alan McGovern 2008")]
// Setting ComVisible to false makes the types in this assembly invisible
// to COM components.
[assembly: ComVisible(false)]
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.